diff --git a/.github/script/amd64-18.04.Dockerfile b/.github/script/amd64-18.04.Dockerfile new file mode 100644 index 00000000..e11678b0 --- /dev/null +++ b/.github/script/amd64-18.04.Dockerfile @@ -0,0 +1,18 @@ +FROM ubuntu:18.04 + +RUN apt update +RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata +RUN apt install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git curl libreadline-dev ccache libmicrohttpd-dev ninja-build + +WORKDIR / + +RUN git clone --recurse-submodules https://github.com/ton-blockchain/ton.git + +WORKDIR /ton +RUN mkdir /ton/build +WORKDIR /ton/build +ENV CC clang +ENV CXX clang++ +ENV CCACHE_DISABLE 1 +RUN cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. +RUN ninja tonlibjson blockchain-explorer fift func validator-engine validator-engine-console create-state generate-random-id create-hardfork dht-server lite-client \ No newline at end of file diff --git a/.github/script/amd64-20.04.Dockerfile b/.github/script/amd64-20.04.Dockerfile new file mode 100644 index 00000000..eb7fa890 --- /dev/null +++ b/.github/script/amd64-20.04.Dockerfile @@ -0,0 +1,18 @@ +FROM ubuntu:20.04 + +RUN apt update +RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata +RUN apt install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git curl libreadline-dev ccache libmicrohttpd-dev ninja-build + +WORKDIR / + +RUN git clone --recurse-submodules https://github.com/ton-blockchain/ton.git + +WORKDIR /ton +RUN mkdir /ton/build +WORKDIR /ton/build +ENV CC clang +ENV CXX clang++ +ENV CCACHE_DISABLE 1 +RUN cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. +RUN ninja tonlibjson blockchain-explorer fift func validator-engine validator-engine-console create-state generate-random-id create-hardfork dht-server lite-client \ No newline at end of file diff --git a/.github/script/amd64-22.04.Dockerfile b/.github/script/amd64-22.04.Dockerfile new file mode 100644 index 00000000..68d1f652 --- /dev/null +++ b/.github/script/amd64-22.04.Dockerfile @@ -0,0 +1,18 @@ +FROM ubuntu:22.04 + +RUN apt update +RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata +RUN apt install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git curl libreadline-dev ccache libmicrohttpd-dev ninja-build + +WORKDIR / + +RUN git clone --recurse-submodules https://github.com/ton-blockchain/ton.git + +WORKDIR /ton +RUN mkdir /ton/build +WORKDIR /ton/build +ENV CC clang +ENV CXX clang++ +ENV CCACHE_DISABLE 1 +RUN cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. +RUN ninja tonlibjson blockchain-explorer fift func validator-engine validator-engine-console create-state generate-random-id create-hardfork dht-server lite-client \ No newline at end of file diff --git a/.github/script/arm64-18.04.Dockerfile b/.github/script/arm64-18.04.Dockerfile new file mode 100644 index 00000000..88b6a77e --- /dev/null +++ b/.github/script/arm64-18.04.Dockerfile @@ -0,0 +1,18 @@ +FROM ubuntu:18.04 + +RUN apt update +RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata +RUN apt install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git curl libreadline-dev ccache libmicrohttpd-dev ninja-build + +WORKDIR / + +RUN git clone --recurse-submodules https://github.com/ton-blockchain/ton.git + +WORKDIR /ton +RUN mkdir /ton/build +WORKDIR /ton/build +ENV CC clang +ENV CXX clang++ +ENV CCACHE_DISABLE 1 +RUN cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DTON_ARCH= .. 
+RUN ninja tonlibjson blockchain-explorer fift func validator-engine validator-engine-console create-state generate-random-id dht-server lite-client \ No newline at end of file diff --git a/.github/script/arm64-20.04.Dockerfile b/.github/script/arm64-20.04.Dockerfile new file mode 100644 index 00000000..267f97ac --- /dev/null +++ b/.github/script/arm64-20.04.Dockerfile @@ -0,0 +1,18 @@ +FROM ubuntu:20.04 + +RUN apt update +RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata +RUN apt install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git curl libreadline-dev ccache libmicrohttpd-dev ninja-build + +WORKDIR / + +RUN git clone --recurse-submodules https://github.com/ton-blockchain/ton.git + +WORKDIR /ton +RUN mkdir /ton/build +WORKDIR /ton/build +ENV CC clang +ENV CXX clang++ +ENV CCACHE_DISABLE 1 +RUN cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DTON_ARCH= .. +RUN ninja tonlibjson blockchain-explorer fift func validator-engine validator-engine-console create-state generate-random-id dht-server lite-client \ No newline at end of file diff --git a/.github/script/arm64-22.04.Dockerfile b/.github/script/arm64-22.04.Dockerfile new file mode 100644 index 00000000..47d24821 --- /dev/null +++ b/.github/script/arm64-22.04.Dockerfile @@ -0,0 +1,18 @@ +FROM ubuntu:22.04 + +RUN apt update +RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata +RUN apt install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git curl libreadline-dev ccache libmicrohttpd-dev ninja-build + +WORKDIR / + +RUN git clone --recurse-submodules https://github.com/ton-blockchain/ton.git + +WORKDIR /ton +RUN mkdir /ton/build +WORKDIR /ton/build +ENV CC clang +ENV CXX clang++ +ENV CCACHE_DISABLE 1 +RUN cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DTON_ARCH= .. 
+RUN ninja tonlibjson blockchain-explorer fift func validator-engine validator-engine-console create-state generate-random-id dht-server lite-client \ No newline at end of file diff --git a/.github/workflows/create-release.yml b/.github/workflows/create-release.yml new file mode 100644 index 00000000..089af583 --- /dev/null +++ b/.github/workflows/create-release.yml @@ -0,0 +1,167 @@ +name: Create release + +on: [workflow_dispatch] + +permissions: write-all + +jobs: + create-release: + runs-on: ubuntu-22.04 + + steps: + - uses: actions/checkout@v3 + + - name: Show all artifacts + run: | + mkdir artifacts + ls -lart artifacts + + - name: Download Ubuntu x86-64 artifacts + uses: dawidd6/action-download-artifact@v2 + with: + workflow: ubuntu-compile.yml + path: artifacts + workflow_conclusion: success + skip_unpack: true + + - name: Download Ubuntu arm64 artifacts + uses: dawidd6/action-download-artifact@v2 + with: + workflow: docker-compile-ubuntu.yml + path: artifacts + workflow_conclusion: success + skip_unpack: true + + - name: Download MacOS 11.7 artifacts + uses: dawidd6/action-download-artifact@v2 + with: + workflow: macos-11.7-compile.yml + path: artifacts + workflow_conclusion: success + skip_unpack: true + + - name: Download MacOS 12.6 artifacts + uses: dawidd6/action-download-artifact@v2 + with: + workflow: macos-12.6-compile.yml + path: artifacts + workflow_conclusion: success + skip_unpack: true + + - name: Download Windows artifacts + uses: dawidd6/action-download-artifact@v2 + with: + workflow: win-2019-compile.yml + path: artifacts + workflow_conclusion: success + skip_unpack: true + + - name: Show all artifacts + run: | + tree artifacts + +# create release + + - name: Read Changelog.md and use it as a body of new release + id: read_release + shell: bash + run: | + r=$(cat Changelog.md) + r="${r//'%'/'%25'}" + r="${r//$'\n'/'%0A'}" + r="${r//$'\r'/'%0D'}" + echo "::set-output name=CHANGELOG_BODY::$r" + + - name: Get current date + id: date + run: echo "::set-output name=date::$(date +'%Y.%m')" + + - name: Get registration token + id: getRegToken + run: | + curl -X POST -H \"Accept: application/vnd.github+json\" -H 'Authorization: token ${{ secrets.GITHUB_TOKEN }}' https://api.github.com/repos/neodix42/HardTestDevelopment/actions/runners/registration-token + + - name: Create release + id: create_release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: v${{ steps.date.outputs.date }} + release_name: v${{ steps.date.outputs.date }} + body: | + ${{ steps.read_release.outputs.CHANGELOG_BODY }} + draft: false + prerelease: false + + - name: Upload Windows 2019 artifacts + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: artifacts/ton-win-binaries.zip + asset_name: ton-windows-2019-x86-64.zip + tag: v${{ steps.date.outputs.date }} + + - name: Upload MacOS 11.7 x86-64 artifacts + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: artifacts/ton-macos-11.7.zip + asset_name: ton-macos-11.7-x86-64.zip + tag: v${{ steps.date.outputs.date }} + + - name: Upload MacOS 12.6 x86-64 artifacts + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: artifacts/ton-macos-12.6.zip + asset_name: ton-macos-12.6-x86-64.zip + tag: v${{ steps.date.outputs.date }} + + - name: Upload Ubuntu 18.04 x86-64 artifacts + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: 
artifacts/ton-binaries-ubuntu-18.04.zip + asset_name: ton-ubuntu-18.04-x86-64.zip + tag: v${{ steps.date.outputs.date }} + + - name: Upload Ubuntu 20.04 x86-64 artifacts + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: artifacts/ton-binaries-ubuntu-20.04.zip + asset_name: ton-ubuntu-20.04-x86-64.zip + tag: v${{ steps.date.outputs.date }} + + - name: Upload Ubuntu 22.04 x86-64 artifacts + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: artifacts/ton-binaries-ubuntu-22.04.zip + asset_name: ton-ubuntu-22.04-x86-64.zip + tag: v${{ steps.date.outputs.date }} + + - name: Upload Ubuntu 18.04 arm64 artifacts + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: artifacts/ton-ubuntu-18.04-arm64.zip + asset_name: ton-ubuntu-18.04-arm64.zip + tag: v${{ steps.date.outputs.date }} + + - name: Upload Ubuntu 20.04 arm64 artifacts + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: artifacts/ton-ubuntu-20.04-arm64.zip + asset_name: ton-ubuntu-20.04-arm64.zip + tag: v${{ steps.date.outputs.date }} + + - name: Upload Ubuntu 22.04 arm64 artifacts + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: artifacts/ton-ubuntu-22.04-arm64.zip + asset_name: ton-ubuntu-22.04-arm64.zip + tag: v${{ steps.date.outputs.date }} \ No newline at end of file diff --git a/.github/workflows/docker-compile-ubuntu.yml b/.github/workflows/docker-compile-ubuntu.yml new file mode 100644 index 00000000..9c02625e --- /dev/null +++ b/.github/workflows/docker-compile-ubuntu.yml @@ -0,0 +1,47 @@ +name: Docker Ubuntu Compile arm64 + +on: [push,workflow_dispatch,workflow_call] + +jobs: + build: + strategy: + fail-fast: false + max-parallel: 3 + matrix: + arch: [arm64] + ver: [22.04, 18.04, 20.04 ] + + runs-on: ubuntu-22.04 + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build with docker buildx + run: | + mkdir build-${{matrix.ver}}-${{matrix.arch}} + docker buildx build --platform=linux/${{matrix.arch}} --progress=plain --load . 
-t build-${{matrix.ver}}-${{matrix.arch}} -f .github/script/${{matrix.arch}}-${{matrix.ver}}.Dockerfile + container_id=$(docker create --platform=linux/${{matrix.arch}} build-${{matrix.ver}}-${{matrix.arch}}) + docker cp $container_id:/ton/build/dht-server/dht-server build-${{matrix.ver}}-${{matrix.arch}}/ + docker cp -a $container_id:/ton/build/validator-engine/validator-engine build-${{matrix.ver}}-${{matrix.arch}}/ + docker cp -a $container_id:/ton/build/validator-engine-console/validator-engine-console build-${{matrix.ver}}-${{matrix.arch}}/ + docker cp -a $container_id:/ton/build/crypto/fift build-${{matrix.ver}}-${{matrix.arch}}/ + docker cp -a $container_id:/ton/build/crypto/func build-${{matrix.ver}}-${{matrix.arch}}/ + docker cp -a $container_id:/ton/build/crypto/create-state build-${{matrix.ver}}-${{matrix.arch}}/ + docker cp -a $container_id:/ton/build/blockchain-explorer/blockchain-explorer build-${{matrix.ver}}-${{matrix.arch}}/ + docker cp -a $container_id:/ton/build/lite-client/lite-client build-${{matrix.ver}}-${{matrix.arch}}/ + docker cp -a $container_id:/ton/build/utils/generate-random-id build-${{matrix.ver}}-${{matrix.arch}}/ + docker cp -a $container_id:/ton/build/tonlib/libtonlibjson.so.0.5 build-${{matrix.ver}}-${{matrix.arch}}/tonlibjson.so + docker cp -a $container_id:/ton/crypto/smartcont build-${{matrix.ver}}-${{matrix.arch}}/ + docker cp -a $container_id:/ton/crypto/fift/lib build-${{matrix.ver}}-${{matrix.arch}}/ + + - name: Upload artifacts + uses: actions/upload-artifact@v1 + with: + name: ton-ubuntu-${{matrix.ver}}-${{matrix.arch}} + path: build-${{matrix.ver}}-${{matrix.arch}} diff --git a/.github/workflows/docker-ubuntu-image.yml b/.github/workflows/docker-ubuntu-image.yml index ad83c099..b28cb381 100644 --- a/.github/workflows/docker-ubuntu-image.yml +++ b/.github/workflows/docker-ubuntu-image.yml @@ -1,4 +1,4 @@ -name: Docker Ubuntu 18.04 image +name: Docker Ubuntu 20.04 image on: workflow_dispatch: @@ -10,10 +10,10 @@ env: jobs: build-and-push: - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up QEMU uses: docker/setup-qemu-action@v1 diff --git a/.github/workflows/macos-10.15-compile.yml b/.github/workflows/macos-10.15-compile.yml deleted file mode 100644 index 5f2d94b0..00000000 --- a/.github/workflows/macos-10.15-compile.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: C/C++ CI macOS-10.15 Compile - -on: - workflow_dispatch: - push: - -jobs: - build: - - runs-on: macos-10.15 - - steps: - - name: Check out repository - uses: actions/checkout@v2 - with: - submodules: 'recursive' - - name: Compile OpenSSL - run: | - git clone https://github.com/openssl/openssl openssl_1_1_1 - cd openssl_1_1_1 - git checkout OpenSSL_1_1_1-stable - ./Configure --prefix=/usr/local/macos darwin64-x86_64-cc -static -mmacosx-version-min=10.15 - make build_libs -j4 - - - name: Build all - run: | - rootPath=`pwd` - mkdir build - cd build - cmake -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=10.15 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release .. 
- make -j4 - - - name: Find & copy binaries - run: | - rsync -r --exclude 'CMakeFiles' --exclude 'Makefile' --exclude '*.a' --exclude '*.cmake' --exclude 'third-party' --exclude 'test-*' --exclude '*.cc' --exclude '*.json' --exclude '*.txt' build/* artifacts/ - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-macos-binaries - path: artifacts diff --git a/.github/workflows/macos-10.15-tonlib-java.yml b/.github/workflows/macos-10.15-tonlib-java.yml deleted file mode 100644 index e5199f58..00000000 --- a/.github/workflows/macos-10.15-tonlib-java.yml +++ /dev/null @@ -1,60 +0,0 @@ -name: macOS-10.15 tonlib-java - - -on: - workflow_dispatch: - push: - branches: - - 'wallets' -jobs: - build: - - runs-on: macos-10.15 - - steps: - - name: Check out repository - uses: actions/checkout@v2 - with: - submodules: 'recursive' - - - name: Compile OpenSSL - run: | - git clone https://github.com/openssl/openssl openssl_1_1_1 - cd openssl_1_1_1 - git checkout OpenSSL_1_1_1-stable - ./Configure --prefix=/usr/local/macos darwin64-x86_64-cc -static -mmacosx-version-min=10.15 - make build_libs -j4 - - - name: Configure & Build - run: | - rootPath=`pwd` - - export CC=$(which clang) - export CXX=$(which clang++) - export CCACHE_DISABLE=1 - - export JAVA_AWT_LIBRARY=NotNeeded - export JAVA_JVM_LIBRARY=NotNeeded - export JAVA_INCLUDE_PATH=${JAVA_HOME}/include - export JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include - export JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/darwin - - cd example/android/ - mkdir build - cd build - - cmake -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=10.15 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DTON_ONLY_TONLIB=ON .. - cmake --build . --target prepare_cross_compiling - cmake --build . --target native-lib --config Release - - - name: find & copy binaries - run: | - mkdir -p artifacts/tonlib-java - cp example/android/src/drinkless/org/ton/TonApi.java artifacts/tonlib-java/ - cp example/android/build/libnative-lib.dylib artifacts/tonlib-java/ - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: tonlib-macos-java - path: artifacts diff --git a/.github/workflows/macos-11.7-compile.yml b/.github/workflows/macos-11.7-compile.yml new file mode 100644 index 00000000..59c50539 --- /dev/null +++ b/.github/workflows/macos-11.7-compile.yml @@ -0,0 +1,59 @@ +name: MacOS 11.7 Big Sur x86-64 Compile + +on: [push,workflow_dispatch,workflow_call] + +jobs: + build: + + runs-on: macos-11 + + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + - name: Compile OpenSSL + run: | + git clone https://github.com/openssl/openssl openssl_1_1_1 + cd openssl_1_1_1 + git checkout OpenSSL_1_1_1-stable + ./Configure --prefix=/usr/local/macos darwin64-x86_64-cc -static -mmacosx-version-min=11.7 + make build_libs -j4 + + - name: Build all + run: | + export NONINTERACTIVE=1 + brew install ninja + rootPath=`pwd` + mkdir build + cd build + cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=11.7 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release .. 
+ ninja fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc + + - name: Find & copy binaries + run: | + mkdir artifacts + cp build/crypto/fift artifacts/ + cp build/crypto/func artifacts/ + cp build/crypto/create-state artifacts/ + cp build/crypto/tlbc artifacts/ + cp build/validator-engine-console/validator-engine-console artifacts/ + cp build/tonlib/tonlib-cli artifacts/ + cp build/tonlib/libtonlibjson.0.5.dylib artifacts/ + cp build/http/http-proxy artifacts/ + cp build/rldp-http-proxy/rldp-http-proxy artifacts/ + cp build/dht-server/dht-server artifacts/ + cp build/lite-client/lite-client artifacts/ + cp build/validator-engine/validator-engine artifacts/ + cp build/utils/generate-random-id artifacts/ + cp build/utils/json2tlo artifacts/ + cp build/adnl/adnl-proxy artifacts/ + rsync -r crypto/smartcont artifacts/ + rsync -r crypto/fift/lib artifacts/ + ls -laRt artifacts + + - name: Upload artifacts + uses: actions/upload-artifact@master + with: + name: ton-macos-11.7 + path: artifacts diff --git a/.github/workflows/macos-12.6-compile.yml b/.github/workflows/macos-12.6-compile.yml new file mode 100644 index 00000000..f9b3ed1f --- /dev/null +++ b/.github/workflows/macos-12.6-compile.yml @@ -0,0 +1,59 @@ +name: MacOS 12.6 Monterey x86-64 Compile + +on: [push,workflow_dispatch,workflow_call] + +jobs: + build: + + runs-on: macos-12 + + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + - name: Compile OpenSSL + run: | + git clone https://github.com/openssl/openssl openssl_1_1_1 + cd openssl_1_1_1 + git checkout OpenSSL_1_1_1-stable + ./Configure --prefix=/usr/local/macos darwin64-x86_64-cc -static -mmacosx-version-min=12.6 + make build_libs -j4 + + - name: Build all + run: | + export NONINTERACTIVE=1 + brew install ninja + rootPath=`pwd` + mkdir build + cd build + cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=12.6 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release .. 
+ ninja fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc + + - name: Find & copy binaries + run: | + mkdir artifacts + cp build/crypto/fift artifacts/ + cp build/crypto/func artifacts/ + cp build/crypto/create-state artifacts/ + cp build/crypto/tlbc artifacts/ + cp build/validator-engine-console/validator-engine-console artifacts/ + cp build/tonlib/tonlib-cli artifacts/ + cp build/tonlib/libtonlibjson.0.5.dylib artifacts/ + cp build/http/http-proxy artifacts/ + cp build/rldp-http-proxy/rldp-http-proxy artifacts/ + cp build/dht-server/dht-server artifacts/ + cp build/lite-client/lite-client artifacts/ + cp build/validator-engine/validator-engine artifacts/ + cp build/utils/generate-random-id artifacts/ + cp build/utils/json2tlo artifacts/ + cp build/adnl/adnl-proxy artifacts/ + rsync -r crypto/smartcont artifacts/ + rsync -r crypto/fift/lib artifacts/ + ls -laRt artifacts + + - name: Upload artifacts + uses: actions/upload-artifact@master + with: + name: ton-macos-12.6 + path: artifacts diff --git a/.github/workflows/tonlib-android-jni.yml b/.github/workflows/tonlib-android-jni.yml new file mode 100644 index 00000000..cdc16841 --- /dev/null +++ b/.github/workflows/tonlib-android-jni.yml @@ -0,0 +1,56 @@ +name: Tonlib Android JNI + +on: [push,workflow_dispatch,workflow_call] + +jobs: + build: + + runs-on: ubuntu-22.04 + + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + + - name: Install libraries + run: | + sudo apt update + sudo apt install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev ninja-build + + - name: Configure & Build + run: | + wget https://dl.google.com/android/repository/android-ndk-r25b-linux.zip + unzip android-ndk-r25b-linux.zip + export JAVA_AWT_LIBRARY=NotNeeded + export JAVA_JVM_LIBRARY=NotNeeded + export JAVA_INCLUDE_PATH=${JAVA_HOME}/include + export JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include + export JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/linux + + export ANDROID_NDK_ROOT=$(pwd)/android-ndk-r25b + export OPENSSL_DIR=$(pwd)/example/android/third_party/crypto + + rm -rf example/android/src/drinkless/org/ton/TonApi.java + cd example/android/ + cmake -GNinja -DTON_ONLY_TONLIB=ON . + ninja prepare_cross_compiling + rm CMakeCache.txt + ./build-all.sh + ../../android-ndk-r25b/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip libs/x86/libnative-lib.so + ../../android-ndk-r25b/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip libs/x86_64/libnative-lib.so + ../../android-ndk-r25b/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip libs/armeabi-v7a/libnative-lib.so + ../../android-ndk-r25b/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip libs/arm64-v8a/libnative-lib.so + find . 
-name "*.debug" -type f -delete + + - name: Find & copy binaries + run: | + mkdir -p artifacts/tonlib-android-jni + cp example/android/src/drinkless/org/ton/TonApi.java artifacts/tonlib-android-jni/ + cp -R example/android/libs/* artifacts/tonlib-android-jni/ + + - name: Upload artifacts + uses: actions/upload-artifact@master + with: + name: Tonlib JNI libraries for Android + path: artifacts \ No newline at end of file diff --git a/.github/workflows/ubuntu-18.04-compile.yml b/.github/workflows/ubuntu-18.04-compile.yml index e215c7bd..6709bf83 100644 --- a/.github/workflows/ubuntu-18.04-compile.yml +++ b/.github/workflows/ubuntu-18.04-compile.yml @@ -1,6 +1,6 @@ -name: C/C++ CI Ubuntu 18.04 Compile +name: Ubuntu 18.04 Compile -on: [push,workflow_dispatch] +on: [push,workflow_dispatch,workflow_call] jobs: build: @@ -9,7 +9,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: 'recursive' @@ -32,9 +32,11 @@ jobs: run: | mkdir artifacts cp build/crypto/fift build/crypto/tlbc build/crypto/func build/crypto/create-state build/validator-engine-console/validator-engine-console build/tonlib/tonlib-cli build/tonlib/libtonlibjson.so.0.5 build/http/http-proxy build/rldp-http-proxy/rldp-http-proxy build/dht-server/dht-server build/lite-client/lite-client build/validator-engine/validator-engine build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy artifacts + cp -R crypto/smartcont artifacts/ + cp -R crypto/fift/lib artifacts/ - name: Upload artifacts uses: actions/upload-artifact@master with: - name: ton-binaries + name: ton-ubuntu-binaries path: artifacts diff --git a/.github/workflows/ubuntu-18.04-ton-ccpcheck.yml b/.github/workflows/ubuntu-18.04-ton-ccpcheck.yml index e8d4548e..f440d7a5 100644 --- a/.github/workflows/ubuntu-18.04-ton-ccpcheck.yml +++ b/.github/workflows/ubuntu-18.04-ton-ccpcheck.yml @@ -1,8 +1,6 @@ -name: Ubuntu 18.04 TON ccpcheck +name: TON ccpcheck -on: - workflow_dispatch: - push: +on: [push,workflow_dispatch,workflow_call] jobs: build: @@ -11,7 +9,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: 'recursive' diff --git a/.github/workflows/ubuntu-18.04-tonlib-java.yml b/.github/workflows/ubuntu-18.04-tonlib-java.yml deleted file mode 100644 index 8c746b26..00000000 --- a/.github/workflows/ubuntu-18.04-tonlib-java.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Ubuntu 18.04 tonlib-java - -on: - workflow_dispatch: - push: - branches: - - 'wallets' - -jobs: - build: - - runs-on: ubuntu-18.04 - - steps: - - name: Check out repository - uses: actions/checkout@v2 - with: - submodules: 'recursive' - - - name: Install libraries - run: | - sudo apt update - sudo apt install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev ninja-build - - - name: Configure & Build - run: | - export JAVA_AWT_LIBRARY=NotNeeded - export JAVA_JVM_LIBRARY=NotNeeded - export JAVA_INCLUDE_PATH=${JAVA_HOME}/include - export JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include - export JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/linux - - cd example/android/ - mkdir build - cd build - cmake -DTON_ONLY_TONLIB=ON .. - cmake --build . --target prepare_cross_compiling - cmake --build . 
--target native-lib - - - name: find & copy binaries - run: | - mkdir -p artifacts/tonlib-java - cp example/android/src/drinkless/org/ton/TonApi.java artifacts/tonlib-java/ - cp example/android/build/libnative-lib.so artifacts/tonlib-java/ - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: tonlib-ubuntu-java - path: artifacts \ No newline at end of file diff --git a/.github/workflows/ubuntu-compile.yml b/.github/workflows/ubuntu-compile.yml index 2862a471..653d7c0a 100644 --- a/.github/workflows/ubuntu-compile.yml +++ b/.github/workflows/ubuntu-compile.yml @@ -1,6 +1,6 @@ -name: Ubuntu Compile +name: Ubuntu Compile x86-64 -on: [push,workflow_dispatch] +on: [push,workflow_dispatch,workflow_call] jobs: build: @@ -12,7 +12,7 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: 'recursive' @@ -29,10 +29,12 @@ jobs: cd build-${{ matrix.os }} cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. ninja fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork - - name: find & copy binaries + - name: Find & copy binaries run: | mkdir artifacts-${{ matrix.os }} cp build-${{ matrix.os }}/crypto/fift build-${{ matrix.os }}/crypto/tlbc build-${{ matrix.os }}/crypto/func build-${{ matrix.os }}/crypto/create-state build-${{ matrix.os }}/validator-engine-console/validator-engine-console build-${{ matrix.os }}/tonlib/tonlib-cli build-${{ matrix.os }}/tonlib/libtonlibjson.so.0.5 build-${{ matrix.os }}/http/http-proxy build-${{ matrix.os }}/rldp-http-proxy/rldp-http-proxy build-${{ matrix.os }}/dht-server/dht-server build-${{ matrix.os }}/lite-client/lite-client build-${{ matrix.os }}/validator-engine/validator-engine build-${{ matrix.os }}/utils/generate-random-id build-${{ matrix.os }}/utils/json2tlo build-${{ matrix.os }}/adnl/adnl-proxy artifacts-${{ matrix.os }} + cp -R crypto/smartcont artifacts-${{ matrix.os }} + cp -R crypto/fift/lib artifacts-${{ matrix.os }} - name: Upload artifacts uses: actions/upload-artifact@master with: diff --git a/.github/workflows/win-2019-compile.yml b/.github/workflows/win-2019-compile.yml new file mode 100644 index 00000000..a2ccab66 --- /dev/null +++ b/.github/workflows/win-2019-compile.yml @@ -0,0 +1,88 @@ +name: Windows Server 2019 x64 Compile + +on: [push,workflow_dispatch,workflow_call] + +defaults: + run: + shell: cmd + +jobs: + build: + + runs-on: windows-2019 + + steps: + - name: Get Current OS version + run: | + systeminfo | findstr /B /C:"OS Name" /C:"OS Version" + + - name: Check out current repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + + - name: Check out zlib repository + uses: actions/checkout@v3 + with: + repository: desktop-app/zlib + path: zlib + + - name: Setup msbuild.exe + uses: microsoft/setup-msbuild@v1.0.2 + + - name: Compile zlib Win64 + run: | + cd zlib\contrib\vstudio\vc14 + msbuild zlibstat.vcxproj /p:Configuration=ReleaseWithoutAsm /p:platform=x64 -p:PlatformToolset=v142 + + - name: Install pre-compiled OpenSSL Win64 + run: | + curl -Lo openssl-1.1.1o.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-1.1.1o.zip + jar xf openssl-1.1.1o.zip + + - name: Install pre-compiled libmicrohttpd Win64 + run: | + curl -Lo libmicrohttpd-latest-w32-bin.zip https://ftpmirror.gnu.org/libmicrohttpd/libmicrohttpd-latest-w32-bin.zip + unzip libmicrohttpd-latest-w32-bin.zip 
+ + - name: Install pre-compiled Readline Win64 + run: | + curl -Lo readline-5.0-1-lib.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/readline-5.0-1-lib.zip + unzip readline-5.0-1-lib.zip + + - name: Compile + run: | + set root=%cd% + echo %root% + mkdir build + cd build + cmake -DREADLINE_INCLUDE_DIR=%root%\readline-5.0-1-lib\include\readline -DREADLINE_LIBRARY=%root%\readline-5.0-1-lib\lib\readline.lib -DZLIB_FOUND=1 -DMHD_FOUND=1 -DMHD_LIBRARY=%root%\libmicrohttpd-0.9.75-w32-bin\x86_64\VS2019\Release-static\libmicrohttpd.lib -DMHD_INCLUDE_DIR=%root%\libmicrohttpd-0.9.75-w32-bin\x86_64\VS2019\Release-static -DZLIB_INCLUDE_DIR=%root%\zlib -DZLIB_LIBRARY=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=%root%/openssl-1.1/x64/include -DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-1.1/x64/lib/libcrypto.lib -DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj /W0" .. + cmake --build . --target fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork --config Release + + - name: Show executables + run: | + cd build + del Release\test-* + dir *.exe /a-D /S /B + dir *.dll /a-D /S /B + + - name: Check if validator-engine.exe exists + run: | + set root=%cd% + copy %root%\build\validator-engine\Release\validator-engine.exe test + + - name: Find & copy binaries + run: | + mkdir artifacts + mkdir artifacts\smartcont + mkdir artifacts\lib + + for %%I in (build\crypto\Release\fift.exe build\crypto\Release\tlbc.exe build\crypto\Release\func.exe build\crypto\Release\create-state.exe build\validator-engine-console\Release\validator-engine-console.exe build\tonlib\Release\tonlib-cli.exe build\tonlib\Release\tonlibjson.dll build\http\Release\http-proxy.exe build\rldp-http-proxy\Release\rldp-http-proxy.exe build\dht-server\Release\dht-server.exe build\lite-client\Release\lite-client.exe build\validator-engine\Release\validator-engine.exe build\utils\Release\generate-random-id.exe build\utils\Release\json2tlo.exe build\adnl\Release\adnl-proxy.exe) do copy %%I artifacts\ + xcopy /e /k /h /i crypto\smartcont artifacts\smartcont + xcopy /e /k /h /i crypto\fift\lib artifacts\lib + + - name: Upload artifacts + uses: actions/upload-artifact@master + with: + name: ton-win-binaries + path: artifacts diff --git a/.github/workflows/windows2019x64-compile.yml b/.github/workflows/windows2019x64-compile.yml deleted file mode 100644 index f1dce51c..00000000 --- a/.github/workflows/windows2019x64-compile.yml +++ /dev/null @@ -1,80 +0,0 @@ -name: Windows Server 2019 x64 Compile - -on: - workflow_dispatch: - push: - -defaults: - run: - shell: cmd - -jobs: - build: - - runs-on: windows-2019 - - steps: - - name: Get Current OS version - run: | - systeminfo | findstr /B /C:"OS Name" /C:"OS Version" - - - name: Check out current repository - uses: actions/checkout@v2 - with: - submodules: 'recursive' - - - name: Check out zlib repository - uses: actions/checkout@v2 - with: - repository: desktop-app/zlib - path: zlib - - - name: Setup msbuild.exe - uses: microsoft/setup-msbuild@v1.0.2 - - - name: Compile zlib Win64 - run: | - cd zlib\contrib\vstudio\vc14 - msbuild zlibstat.vcxproj /p:Configuration=ReleaseWithoutAsm /p:platform=x64 -p:PlatformToolset=v142 - - - name: Install pre-compiled OpenSSL Win64 - run: | - curl -Lo openssl-1.1.1o.zip 
https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-1.1.1o.zip - jar xf openssl-1.1.1o.zip - - - name: Configure - run: | - set root=%cd% - echo %root% - mkdir build - cd build - cmake -DZLIB_FOUND=1 -DZLIB_INCLUDE_DIR=%root%\zlib -DZLIB_LIBRARY=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=%root%/openssl-1.1/x64/include -DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-1.1/x64/lib/libcrypto.lib -DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj /W0" .. - - - name: Build - run: | - cd build - cmake --build . --target fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork --config Release - - - name: Show executables - run: | - cd build - del Release\test-* - dir *.exe /a-D /S /B - dir *.dll /a-D /S /B - - - name: Check if validator-engine.exe exists - run: | - set root=%cd% - copy %root%\build\validator-engine\Release\validator-engine.exe test - - - name: Find & copy binaries - run: | - mkdir artifacts - for /f %%a in ('dir *.exe /b /a /s') do copy /Y %%a artifacts - copy build\tonlib\Release\tonlibjson.dll artifacts - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-win64-binaries - path: artifacts diff --git a/.github/workflows/windows2019x64-tonlib-java.yml b/.github/workflows/windows2019x64-tonlib-java.yml deleted file mode 100644 index f03212b8..00000000 --- a/.github/workflows/windows2019x64-tonlib-java.yml +++ /dev/null @@ -1,71 +0,0 @@ -name: Windows 2019 tonlib-java - -on: - workflow_dispatch: - push: - branches: - - 'wallets' - -defaults: - run: - shell: cmd - -jobs: - build: - - runs-on: windows-2019 - - steps: - - name: Get Current OS version - run: | - systeminfo | findstr /B /C:"OS Name" /C:"OS Version" - - name: Check out current repository - uses: actions/checkout@v2 - with: - submodules: 'recursive' - - - name: Check out zlib repository - uses: actions/checkout@v2 - with: - repository: desktop-app/zlib - path: zlib - - - name: Setup msbuild.exe - uses: microsoft/setup-msbuild@v1.0.2 - - - name: Compile zlib Win64 - run: | - cd zlib\contrib\vstudio\vc14 - msbuild zlibstat.vcxproj /p:Configuration=ReleaseWithoutAsm /p:platform=x64 -p:PlatformToolset=v142 - - - name: Install precompiled OpenSSL Win64 - run: | - curl -Lo openssl-1.1.1o.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-1.1.1o.zip - jar xf openssl-1.1.1o.zip - - - name: Configure & Build - run: | - set JAVA_AWT_LIBRARY=NotNeeded - set JAVA_JVM_LIBRARY=NotNeeded - set JAVA_INCLUDE_PATH=${JAVA_HOME}/include - set JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include - set JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/win32 - - set root=%cd% - echo %root% - cd example/android - mkdir build - cd build - cmake -DZLIB_FOUND=1 -DZLIB_INCLUDE_DIR=%root%\zlib -DZLIB_LIBRARY=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=%root%/openssl-1.1/x64/include -DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-1.1/x64/lib/libcrypto.lib -DTON_ONLY_TONLIB=ON -DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj /W0" .. - cmake --build . 
--target native-lib --config Release - - - name: Find & copy binaries - run: | - mkdir tonlib-java - cp example/android/build/Release/native-lib.dll tonlib-java/ - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: tonlib-win64-java - path: tonlib-java diff --git a/CMakeLists.txt b/CMakeLists.txt index 75a89c41..1e52f969 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -290,8 +290,9 @@ if (MSVC) endif() if (NOT MSVC) add_cxx_compiler_flag("-Wall") + add_cxx_compiler_flag("-Wextra") endif() -add_cxx_compiler_flag("-Wextra") + add_cxx_compiler_flag("-Wimplicit-fallthrough=2") add_cxx_compiler_flag("-Wpointer-arith") add_cxx_compiler_flag("-Wcast-qual") diff --git a/Changelog.md b/Changelog.md index b4749780..a5dc5402 100644 --- a/Changelog.md +++ b/Changelog.md @@ -26,3 +26,15 @@ Besides the work of the core team, this update is based on the efforts of @tvoro Update coincided with persistent state serialization event which lead to block production speed deterioration (issue substantially mitigated in update itself). This phenomena was aggravated by the fact that after update some validators lost ability to participate in block creation. The last was caused by threshold based hardcoded protocol version bump, where threshold was set in such manner (based on block height with value higher than 9m), that it eluded detection in private net tests. The update was temporarily paused and resumed after persistent state serialization ended and issues with block creation were resolved. Besides the work of the core team, this update is based on the efforts of @awesome-doge (help with abseil-cpp upgrade), @rec00rsiff (noted issues for exotic endianess and implemented network stats) and third-party security auditors. + +## 10.2022 Update +* Added extended block creation and general perfomance stats gathering +* Forbidden report data on blocks not committed to the master chain for LS +* Improved debug in TVM +* FunC 0.3.0: multi-line asms, bitwise operations for constants, duplication of identical definition for constants and asms now allowed +* New tonlib methods: sendMessageReturnHash, getTransactionsV2, getMasterchainBlockSignatures, getShardBlockProof, getLibraries. +* Fixed bugs related to invalid TVM output (c4, c5, libaries) and non-validated network data; avoided too deep recursion in libraries loading +* Fixed multiple undefined behavior issues +* Added build of FunC and Fift to WASM + +Besides the work of the core team, this update is based on the efforts of @tvorogme (debug improvements), @AlexeyFSL (WASM builds) and third-party security auditors. diff --git a/README.md b/README.md index 177bc3ee..893717ba 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,12 @@ +
+<div align="center">
+  <img alt="TON logo">
+</div>
+
+ +[![TON Overflow Group][ton-overflow-badge]][ton-overflow-url] [![Stack Overflow Group][stack-overflow-badge]][stack-overflow-url] [![Telegram Foundation Group][telegram-foundation-badge]][telegram-foundation-url] [![Telegram Community Group][telegram-community-badge]][telegram-community-url] @@ -11,11 +20,11 @@ [twitter-url]: https://twitter.com/ton_blockchain [stack-overflow-badge]: https://img.shields.io/badge/-Stack%20Overflow-FE7A16?style=flat&logo=stack-overflow&logoColor=white [stack-overflow-url]: https://stackoverflow.com/questions/tagged/ton +[ton-overflow-badge]: https://img.shields.io/badge/-TON%20Overflow-FE7A16?style=flat&logo=stack-overflow&logoColor=white +[ton-overflow-url]: https://answers.ton.org -# TON - Main TON monorepo, which includes the code of the node/validator, lite-client, tonlib, FunC compiler, etc. ## Updates flow: diff --git a/adnl/adnl-address-list.cpp b/adnl/adnl-address-list.cpp index 7bd36374..912943a8 100644 --- a/adnl/adnl-address-list.cpp +++ b/adnl/adnl-address-list.cpp @@ -185,7 +185,8 @@ td::Ref AdnlAddressImpl::create(const tl_object_ptr(addr.get()), td::overloaded([&](const ton_api::adnl_address_udp &obj) { res = td::make_ref(obj); }, [&](const ton_api::adnl_address_udp6 &obj) { res = td::make_ref(obj); }, - [&](const ton_api::adnl_address_tunnel &obj) { res = td::make_ref(obj); })); + [&](const ton_api::adnl_address_tunnel &obj) { res = td::make_ref(obj); }, + [&](const ton_api::adnl_address_reverse &obj) { res = td::make_ref(); })); return res; } @@ -202,7 +203,12 @@ AdnlAddressList::AdnlAddressList(const tl_object_ptr version_ = static_cast(addrs->version_); std::vector> vec; for (auto &addr : addrs->addrs_) { - vec.push_back(AdnlAddressImpl::create(addr)); + auto obj = AdnlAddressImpl::create(addr); + if (obj->is_reverse()) { + has_reverse_ = true; + } else { + vec.push_back(std::move(obj)); + } } addrs_ = std::move(vec); reinit_date_ = addrs->reinit_date_; @@ -215,6 +221,9 @@ tl_object_ptr AdnlAddressList::tl() const { for (auto &v : addrs_) { addrs.emplace_back(v->tl()); } + if (has_reverse_) { + addrs.push_back(create_tl_object()); + } return create_tl_object(std::move(addrs), version_, reinit_date_, priority_, expire_at_); } diff --git a/adnl/adnl-address-list.h b/adnl/adnl-address-list.h index 80d7f96b..ebc7473a 100644 --- a/adnl/adnl-address-list.h +++ b/adnl/adnl-address-list.h @@ -39,6 +39,9 @@ class AdnlAddressImpl : public td::CntObject { virtual td::actor::ActorOwn create_connection( td::actor::ActorId network_manager, td::actor::ActorId adnl, std::unique_ptr callback) const = 0; + virtual bool is_reverse() const { + return false; + } static td::Ref create(const tl_object_ptr &addr); }; @@ -54,6 +57,7 @@ class AdnlAddressList { td::int32 priority_; td::int32 expire_at_; std::vector addrs_; + bool has_reverse_{false}; public: static constexpr td::uint32 max_serialized_size() { @@ -102,6 +106,13 @@ class AdnlAddressList { static td::Result create(const tl_object_ptr &addr_list); td::Status add_udp_address(td::IPAddress addr); + + void set_reverse(bool x = true) { + has_reverse_ = x; + } + bool has_reverse() const { + return has_reverse_; + } }; } // namespace adnl diff --git a/adnl/adnl-address-list.hpp b/adnl/adnl-address-list.hpp index efc226d2..0c869378 100644 --- a/adnl/adnl-address-list.hpp +++ b/adnl/adnl-address-list.hpp @@ -116,6 +116,31 @@ class AdnlAddressTunnel : public AdnlAddressImpl { std::unique_ptr callback) const override; }; +class AdnlAddressReverse : public AdnlAddressImpl { + public: + AdnlAddressReverse 
*make_copy() const override { + return new AdnlAddressReverse(); + } + bool is_public() const override { + return true; + } + td::uint32 serialized_size() const override { + return 4; + } + tl_object_ptr tl() const override { + return create_tl_object(); + } + td::actor::ActorOwn create_connection( + td::actor::ActorId network_manager, td::actor::ActorId adnl, + std::unique_ptr callback) const override { + LOG(ERROR) << "Cannot create connection for AdnlAddressReverse"; + return {}; + } + bool is_reverse() const override { + return true; + } +}; + } // namespace adnl } // namespace ton diff --git a/adnl/adnl-local-id.cpp b/adnl/adnl-local-id.cpp index 3461efe1..b4818276 100644 --- a/adnl/adnl-local-id.cpp +++ b/adnl/adnl-local-id.cpp @@ -121,7 +121,7 @@ void AdnlLocalId::update_address_list(AdnlAddressList addr_list) { } void AdnlLocalId::publish_address_list() { - if (dht_node_.empty() || addr_list_.empty() || addr_list_.size() == 0) { + if (dht_node_.empty() || addr_list_.empty() || (addr_list_.size() == 0 && !addr_list_.has_reverse())) { VLOG(ADNL_NOTICE) << this << ": skipping public addr list, because localid (or dht node) not fully initialized"; return; } @@ -175,6 +175,17 @@ void AdnlLocalId::publish_address_list() { td::actor::send_closure(keyring_, &keyring::Keyring::sign_message, short_id_.pubkey_hash(), std::move(B), std::move(P)); + + if (addr_list_.has_reverse()) { + td::actor::send_closure( + dht_node_, &dht::Dht::register_reverse_connection, id_, [print_id = print_id()](td::Result R) { + if (R.is_error()) { + VLOG(ADNL_NOTICE) << print_id << ": failed to register reverse connection in DHT: " << R.move_as_error(); + } else { + VLOG(ADNL_INFO) << print_id << ": registered reverse connection"; + } + }); + } } AdnlLocalId::AdnlLocalId(AdnlNodeIdFull id, AdnlAddressList addr_list, td::uint32 mode, diff --git a/adnl/adnl-peer.cpp b/adnl/adnl-peer.cpp index 46a097d9..528dd78f 100644 --- a/adnl/adnl-peer.cpp +++ b/adnl/adnl-peer.cpp @@ -113,6 +113,7 @@ void AdnlPeerPairImpl::discover() { } void AdnlPeerPairImpl::receive_packet_checked(AdnlPacket packet) { + request_reverse_ping_after_ = td::Timestamp::in(15.0); auto d = Adnl::adnl_start_time(); if (packet.dst_reinit_date() > d) { VLOG(ADNL_WARNING) << this << ": dropping IN message: too new our reinit date " << packet.dst_reinit_date(); @@ -669,10 +670,15 @@ td::Result, bool>> AdnlPeerP } if (conns_.size() == 0 && priority_conns_.size() == 0) { - return td::Status::Error(ErrorCode::notready, PSTRING() - << "empty network information: version=" << addr_list_.version() - << " reinit_date=" << addr_list_.reinit_date() - << " real_reinit_date=" << reinit_date_); + if (has_reverse_addr_) { + request_reverse_ping(); + return td::Status::Error(ErrorCode::notready, "waiting for reverse ping"); + } else { + return td::Status::Error(ErrorCode::notready, PSTRING() + << "empty network information: version=" << addr_list_.version() + << " reinit_date=" << addr_list_.reinit_date() + << " real_reinit_date=" << reinit_date_); + } } for (auto &conn : priority_conns_) { @@ -720,11 +726,18 @@ void AdnlPeerPairImpl::update_addr_list(AdnlAddressList addr_list) { VLOG(ADNL_INFO) << this << ": updating addr list to version " << addr_list.version() << " size=" << addr_list.size(); const auto addrs = addr_list.addrs(); + has_reverse_addr_ = addr_list.has_reverse(); + if (has_reverse_addr_ && addrs.empty()) { + return; + } std::vector conns; auto &old_conns = priority ? 
priority_conns_ : conns_; size_t idx = 0; for (const auto &addr : addrs) { + if (addr->is_reverse()) { + continue; + } if ((mode_ & static_cast(AdnlLocalIdMode::direct_only)) && !addr->is_public()) { continue; } @@ -746,7 +759,7 @@ void AdnlPeerPairImpl::get_conn_ip_str(td::Promise promise) { promise.set_value("undefined"); return; } - + for (auto &conn : priority_conns_) { if (conn.ready()) { td::actor::send_closure(conn.conn, &AdnlNetworkConnection::get_ip_str, std::move(promise)); @@ -759,7 +772,7 @@ void AdnlPeerPairImpl::get_conn_ip_str(td::Promise promise) { return; } } - + promise.set_value("undefined"); } @@ -827,7 +840,7 @@ void AdnlPeerImpl::receive_packet(AdnlNodeIdShort dst, td::uint32 dst_mode, td:: } } - td::actor::send_closure(it->second.get(), &AdnlPeerPair::receive_packet_checked, std::move(packet)); + td::actor::send_closure(it->second.get(), &AdnlPeerPair::receive_packet, std::move(packet)); } void AdnlPeerImpl::send_messages(AdnlNodeIdShort src, td::uint32 src_mode, td::actor::ActorId src_actor, @@ -884,7 +897,7 @@ void AdnlPeerImpl::get_conn_ip_str(AdnlNodeIdShort l_id, td::Promise if (it == peer_pairs_.end()) { promise.set_value("undefined"); return; - } + } td::actor::send_closure(it->second, &AdnlPeerPair::get_conn_ip_str, std::move(promise)); } @@ -960,6 +973,36 @@ void AdnlPeerPairImpl::update_peer_id(AdnlNodeIdFull id) { CHECK(!peer_id_.empty()); } +void AdnlPeerPairImpl::request_reverse_ping() { + if (request_reverse_ping_active_ || !request_reverse_ping_after_.is_in_past()) { + return; + } + VLOG(ADNL_INFO) << this << ": requesting reverse ping"; + request_reverse_ping_after_ = td::Timestamp::in(15.0); + request_reverse_ping_active_ = true; + td::actor::send_closure( + local_actor_, &AdnlLocalId::get_self_node, + [SelfId = actor_id(this), peer = peer_id_short_, dht = dht_node_](td::Result R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &AdnlPeerPairImpl::request_reverse_ping_result, R.move_as_error()); + return; + } + td::actor::send_closure( + dht, &dht::Dht::request_reverse_ping, R.move_as_ok(), peer, [SelfId](td::Result R) { + td::actor::send_closure(SelfId, &AdnlPeerPairImpl::request_reverse_ping_result, std::move(R)); + }); + }); +} + +void AdnlPeerPairImpl::request_reverse_ping_result(td::Result R) { + request_reverse_ping_active_ = false; + if (R.is_ok()) { + VLOG(ADNL_INFO) << this << ": reverse ping requested"; + } else { + VLOG(ADNL_INFO) << this << ": failed to request reverse ping: " << R.move_as_error(); + } +} + } // namespace adnl } // namespace ton diff --git a/adnl/adnl-peer.hpp b/adnl/adnl-peer.hpp index ec7166ed..4e1d8db7 100644 --- a/adnl/adnl-peer.hpp +++ b/adnl/adnl-peer.hpp @@ -154,6 +154,9 @@ class AdnlPeerPairImpl : public AdnlPeerPair { } } + void request_reverse_ping(); + void request_reverse_ping_result(td::Result R); + struct Conn { class ConnCallback : public AdnlNetworkConnection::Callback { public: @@ -251,6 +254,10 @@ class AdnlPeerPairImpl : public AdnlPeerPair { td::Timestamp next_dht_query_at_ = td::Timestamp::never(); td::Timestamp next_db_update_at_ = td::Timestamp::never(); td::Timestamp retry_send_at_ = td::Timestamp::never(); + + bool has_reverse_addr_ = false; + td::Timestamp request_reverse_ping_after_ = td::Timestamp::now(); + bool request_reverse_ping_active_ = false; }; class AdnlPeerImpl : public AdnlPeer { diff --git a/crypto/CMakeLists.txt b/crypto/CMakeLists.txt index c8c85370..fab75bfc 100644 --- a/crypto/CMakeLists.txt +++ b/crypto/CMakeLists.txt @@ -275,7 +275,12 @@ 
target_include_directories(ton_crypto PUBLIC $) target_link_libraries(ton_crypto PUBLIC ${OPENSSL_CRYPTO_LIBRARY} tdutils tddb_utils) if (NOT WIN32) - target_link_libraries(ton_crypto PUBLIC dl z) + find_library(DL dl) + if (DL) + target_link_libraries(ton_crypto PUBLIC dl z) + else() + target_link_libraries(ton_crypto PUBLIC z) + endif() endif() target_include_directories(ton_crypto SYSTEM PUBLIC $) diff --git a/crypto/block/block.tlb b/crypto/block/block.tlb index c1bd9c66..40ab4dee 100644 --- a/crypto/block/block.tlb +++ b/crypto/block/block.tlb @@ -113,7 +113,10 @@ var_uint$_ {n:#} len:(#< n) value:(uint (len * 8)) = VarUInteger n; var_int$_ {n:#} len:(#< n) value:(int (len * 8)) = VarInteger n; -nanograms$_ amount:(VarUInteger 16) = Grams; +nanograms$_ amount:(VarUInteger 16) = Grams; + +_ grams:Grams = Coins; + // extra_currencies$_ dict:(HashmapE 32 (VarUInteger 32)) = ExtraCurrencyCollection; @@ -611,15 +614,29 @@ wfmt_ext#0 min_addr_len:(## 12) max_addr_len:(## 12) addr_len_step:(## 12) workchain_type_id:(## 32) { workchain_type_id >= 1 } = WorkchainFormat 0; -workchain#a6 enabled_since:uint32 actual_min_split:(## 8) - min_split:(## 8) max_split:(## 8) { actual_min_split <= min_split } +wc_split_merge_timings#0 + split_merge_delay:uint32 split_merge_interval:uint32 + min_split_merge_interval:uint32 max_split_merge_delay:uint32 + = WcSplitMergeTimings; + //workchain#a5 enabled_since:uint32 min_split:(## 8) max_split:(## 8) // { min_split <= max_split } { max_split <= 60 } + +workchain#a6 enabled_since:uint32 actual_min_split:(## 8) + min_split:(## 8) max_split:(## 8) { actual_min_split <= min_split } basic:(## 1) active:Bool accept_msgs:Bool flags:(## 13) { flags = 0 } zerostate_root_hash:bits256 zerostate_file_hash:bits256 version:uint32 format:(WorkchainFormat basic) = WorkchainDescr; +workchain_v2#a7 enabled_since:uint32 actual_min_split:(## 8) + min_split:(## 8) max_split:(## 8) { actual_min_split <= min_split } + basic:(## 1) active:Bool accept_msgs:Bool flags:(## 13) { flags = 0 } + zerostate_root_hash:bits256 zerostate_file_hash:bits256 + version:uint32 format:(WorkchainFormat basic) + split_merge_timings:WcSplitMergeTimings + = WorkchainDescr; + _ workchains:(HashmapE 32 WorkchainDescr) = ConfigParam 12; complaint_prices#1a deposit:Grams bit_price:Grams cell_price:Grams = ComplaintPricing; @@ -739,11 +756,32 @@ collator_info#0 full_node_id:(Maybe uint256) = CollatorInfo; colator_config#a0 full_collated_data:Bool collator_nodes:(HashmapE 352 CollatorInfo) = CollatorConfig; _ CollatorConfig = ConfigParam 41; +size_limits_config#01 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells:uint32 max_vm_data_depth:uint16 + max_ext_msg_size:uint32 max_ext_msg_depth:uint16 = SizeLimitsConfig; +size_limits_config_v2#02 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells:uint32 max_vm_data_depth:uint16 + max_ext_msg_size:uint32 max_ext_msg_depth:uint16 max_acc_state_cells:uint32 max_acc_state_bits:uint32 = SizeLimitsConfig; +_ SizeLimitsConfig = ConfigParam 43; + oracle_bridge_params#_ bridge_address:bits256 oracle_mutlisig_address:bits256 oracles:(HashmapE 256 uint256) external_chain_address:bits256 = OracleBridgeParams; _ OracleBridgeParams = ConfigParam 71; // Ethereum bridge _ OracleBridgeParams = ConfigParam 72; // Binance Smart Chain bridge _ OracleBridgeParams = ConfigParam 73; // Polygon bridge +// Note that chains in which bridge, minter and jetton-wallet operate are fixated +jetton_bridge_prices#_ bridge_burn_fee:Coins bridge_mint_fee:Coins + 
wallet_min_tons_for_storage:Coins + wallet_gas_consumption:Coins + minter_min_tons_for_storage:Coins + discover_gas_consumption:Coins = JettonBridgePrices; + +jetton_bridge_params_v0#00 bridge_address:bits256 oracles_address:bits256 oracles:(HashmapE 256 uint256) state_flags:uint8 burn_bridge_fee:Coins = JettonBridgeParams; +jetton_bridge_params_v1#01 bridge_address:bits256 oracles_address:bits256 oracles:(HashmapE 256 uint256) state_flags:uint8 prices:^JettonBridgePrices external_chain_address:bits256 = JettonBridgeParams; + +_ JettonBridgeParams = ConfigParam 79; // ETH->TON token bridge +_ JettonBridgeParams = ConfigParam 80; // BNB->TON token bridge +_ JettonBridgeParams = ConfigParam 81; // Polygon->TON token bridge + + // // PROOFS // @@ -870,4 +908,3 @@ chan_op_cmd#912838d1 msg:ChanSignedMsg = ChanOp; chan_data$_ config:^ChanConfig state:^ChanState = ChanData; - diff --git a/crypto/block/mc-config.cpp b/crypto/block/mc-config.cpp index 6b3e51ff..c8b9a470 100644 --- a/crypto/block/mc-config.cpp +++ b/crypto/block/mc-config.cpp @@ -1913,6 +1913,38 @@ std::vector Config::compute_total_validator_set(int next) c return res.move_as_ok()->export_validator_set(); } +td::Result Config::get_size_limits_config() const { + SizeLimitsConfig limits; + td::Ref param = get_config_param(43); + if (param.is_null()) { + return limits; + } + auto unpack_v1 = [&](auto& rec) { + limits.max_msg_bits = rec.max_msg_bits; + limits.max_msg_cells = rec.max_msg_cells; + limits.max_library_cells = rec.max_library_cells; + limits.max_vm_data_depth = static_cast(rec.max_vm_data_depth); + limits.ext_msg_limits.max_size = rec.max_ext_msg_size; + limits.ext_msg_limits.max_depth = static_cast(rec.max_ext_msg_depth); + }; + + auto unpack_v2 = [&](auto& rec) { + unpack_v1(rec); + limits.max_acc_state_bits = rec.max_acc_state_bits; + limits.max_acc_state_cells = rec.max_acc_state_cells; + }; + gen::SizeLimitsConfig::Record_size_limits_config rec_v1; + gen::SizeLimitsConfig::Record_size_limits_config_v2 rec_v2; + if (tlb::unpack_cell(param, rec_v1)) { + unpack_v1(rec_v1); + } else if (tlb::unpack_cell(param, rec_v2)) { + unpack_v2(rec_v2); + } else { + return td::Status::Error("configuration parameter 43 is invalid"); + } + return limits; +} + td::Result> Config::unpack_validator_set_start_stop(Ref vset_root) { if (vset_root.is_null()) { return td::Status::Error("validator set absent"); @@ -1942,31 +1974,58 @@ bool WorkchainInfo::unpack(ton::WorkchainId wc, vm::CellSlice& cs) { if (wc == ton::workchainInvalid) { return false; } - block::gen::WorkchainDescr::Record info; - if (!tlb::unpack(cs, info)) { - return false; - } - enabled_since = info.enabled_since; - actual_min_split = info.actual_min_split; - min_split = info.min_split; - max_split = info.max_split; - basic = info.basic; - active = info.active; - accept_msgs = info.accept_msgs; - flags = info.flags; - zerostate_root_hash = info.zerostate_root_hash; - zerostate_file_hash = info.zerostate_file_hash; - version = info.version; - if (basic) { - min_addr_len = max_addr_len = addr_len_step = 256; - } else { - block::gen::WorkchainFormat::Record_wfmt_ext ext; - if (!tlb::type_unpack(cs, block::gen::WorkchainFormat{basic}, ext)) { + auto unpack_v1 = [this](auto& info) { + enabled_since = info.enabled_since; + actual_min_split = info.actual_min_split; + min_split = info.min_split; + max_split = info.max_split; + basic = info.basic; + active = info.active; + accept_msgs = info.accept_msgs; + flags = info.flags; + zerostate_root_hash = info.zerostate_root_hash; + 
zerostate_file_hash = info.zerostate_file_hash; + version = info.version; + if (basic) { + min_addr_len = max_addr_len = addr_len_step = 256; + } else { + block::gen::WorkchainFormat::Record_wfmt_ext ext; + if (!tlb::csr_type_unpack(info.format, block::gen::WorkchainFormat{basic}, ext)) { + return false; + } + min_addr_len = ext.min_addr_len; + max_addr_len = ext.max_addr_len; + addr_len_step = ext.addr_len_step; + } + return true; + }; + auto unpack_v2 = [&, this](auto& info) { + if (!unpack_v1(info)) { return false; } - min_addr_len = ext.min_addr_len; - max_addr_len = ext.max_addr_len; - addr_len_step = ext.addr_len_step; + block::gen::WcSplitMergeTimings::Record rec; + if (!tlb::csr_unpack(info.split_merge_timings, rec)) { + return false; + } + split_merge_delay = rec.split_merge_delay; + split_merge_interval = rec.split_merge_interval; + min_split_merge_interval = rec.min_split_merge_interval; + max_split_merge_delay = rec.max_split_merge_delay; + return true; + }; + block::gen::WorkchainDescr::Record_workchain info_v1; + block::gen::WorkchainDescr::Record_workchain_v2 info_v2; + vm::CellSlice cs0 = cs; + if (tlb::unpack(cs, info_v1)) { + if (!unpack_v1(info_v1)) { + return false; + } + } else if (tlb::unpack(cs = cs0, info_v2)) { + if (!unpack_v2(info_v2)) { + return false; + } + } else { + return false; } workchain = wc; LOG(DEBUG) << "unpacked info for workchain " << wc << ": basic=" << basic << ", active=" << active diff --git a/crypto/block/mc-config.h b/crypto/block/mc-config.h index e00a045e..29467ae8 100644 --- a/crypto/block/mc-config.h +++ b/crypto/block/mc-config.h @@ -376,6 +376,21 @@ struct MsgPrices { td::RefInt256 get_next_part(td::RefInt256 total) const; }; +struct SizeLimitsConfig { + // Default values are used when not present in global config + struct ExtMsgLimits { + td::uint32 max_size = 65535; + td::uint16 max_depth = 512; + }; + td::uint32 max_msg_bits = 1 << 21; + td::uint32 max_msg_cells = 1 << 13; + td::uint32 max_library_cells = 1000; + td::uint16 max_vm_data_depth = 512; + ExtMsgLimits ext_msg_limits; + td::uint32 max_acc_state_cells = 1 << 16; + td::uint32 max_acc_state_bits = (1 << 16) * 1023; +}; + struct CatchainValidatorsConfig { td::uint32 mc_cc_lifetime, shard_cc_lifetime, shard_val_lifetime, shard_val_num; bool shuffle_mc_val; @@ -402,6 +417,13 @@ struct WorkchainInfo : public td::CntObject { ton::RootHash zerostate_root_hash; ton::FileHash zerostate_file_hash; int min_addr_len, max_addr_len, addr_len_step; + + // Default values are used when split_merge_timings is not set in config + unsigned split_merge_delay = 100; // prepare (delay) split/merge for 100 seconds + unsigned split_merge_interval = 100; // split/merge is enabled during 60 second interval + unsigned min_split_merge_interval = 30; // split/merge interval must be at least 30 seconds + unsigned max_split_merge_delay = 1000; // end of split/merge interval must be at most 1000 seconds in the future + bool is_valid() const { return workchain != ton::workchainInvalid; } @@ -605,6 +627,7 @@ class Config { ton::CatchainSeqno cc_seqno) const; std::vector compute_total_validator_set(int next) const; CollatorConfig get_collator_config(bool need_collator_nodes) const; + td::Result get_size_limits_config() const; static std::vector do_compute_validator_set(const block::CatchainValidatorsConfig& ccv_conf, ton::ShardIdFull shard, const block::ValidatorSet& vset, ton::UnixTime time, diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index 3b2334d6..293677ca 100644 --- 
a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -24,6 +24,7 @@ #include "td/utils/uint128.h" #include "ton/ton-shard.h" #include "vm/vm.h" +#include "td/utils/Timer.h" namespace { class StringLoggerTail : public td::LogInterface { @@ -345,7 +346,7 @@ bool Account::unpack(Ref shard_account, Ref extra, block::gen::AccountStorage::Record storage; if (!(tlb::unpack_exact(acc_cs, acc) && (my_addr = acc.addr).not_null() && unpack_address(acc.addr.write()) && compute_my_addr() && unpack_storage_info(acc.storage_stat.write()) && - tlb::csr_unpack(std::move(acc.storage), storage) && + tlb::csr_unpack(this->storage = std::move(acc.storage), storage) && std::max(storage.last_trans_lt, 1ULL) > acc_info.last_trans_lt && balance.unpack(std::move(storage.balance)))) { return false; } @@ -459,7 +460,6 @@ bool Account::deactivate() { return true; } - bool Account::belongs_to_shard(ton::ShardIdFull shard) const { return workchain == shard.workchain && ton::shard_is_ancestor(shard.shard, addr); } @@ -593,7 +593,7 @@ bool Transaction::unpack_input_msg(bool ihr_delivered, const ActionPhaseConfig* sstat.bits -= cs.size(); // bits in the root cells are free sstat.cells--; // the root cell itself is not counted as a cell LOG(DEBUG) << "storage paid for a message: " << sstat.cells << " cells, " << sstat.bits << " bits"; - if (sstat.bits > max_msg_bits || sstat.cells > max_msg_cells) { + if (sstat.bits > cfg->size_limits.max_msg_bits || sstat.cells > cfg->size_limits.max_msg_cells) { LOG(DEBUG) << "inbound external message too large, invalid"; return false; } @@ -1043,12 +1043,15 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { vm_log.log_options = td::LogOptions(VERBOSITY_NAME(DEBUG), true, false); } vm::VmState vm{new_code, std::move(stack), gas, 1, new_data, vm_log, compute_vm_libraries(cfg)}; + vm.set_max_data_depth(cfg.max_vm_data_depth); vm.set_c7(prepare_vm_c7(cfg)); // tuple with SmartContractInfo // vm.incr_stack_trace(1); // enable stack dump after each step LOG(DEBUG) << "starting VM"; cp.vm_init_state_hash = vm.get_state_hash(); + td::Timer timer; cp.exit_code = ~vm.run(); + double elapsed = timer.elapsed(); LOG(DEBUG) << "VM terminated with exit code " << cp.exit_code; cp.out_of_gas = (cp.exit_code == ~(int)vm::Excno::out_of_gas); cp.vm_final_state_hash = vm.get_final_state_hash(cp.exit_code); @@ -1064,7 +1067,8 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { } LOG(INFO) << "steps: " << vm.get_steps_count() << " gas: used=" << gas.gas_consumed() << ", max=" << gas.gas_max << ", limit=" << gas.gas_limit << ", credit=" << gas.gas_credit; - LOG(INFO) << "out_of_gas=" << cp.out_of_gas << ", accepted=" << cp.accepted << ", success=" << cp.success; + LOG(INFO) << "out_of_gas=" << cp.out_of_gas << ", accepted=" << cp.accepted << ", success=" << cp.success + << ", time=" << elapsed << "s"; if (logger != nullptr) { cp.vm_log = logger->get_log(); } @@ -1121,6 +1125,25 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) { ap.total_action_fees = td::zero_refint(); ap.reserved_balance.set_zero(); + td::Ref old_code = new_code, old_data = new_data, old_library = new_library; + auto enforce_state_size_limits = [&]() { + if (account.is_special) { + return true; + } + if (!check_state_size_limit(cfg)) { + // Rollback changes to state, fail action phase + LOG(INFO) << "Account state size exceeded limits"; + new_storage_stat.clear(); + new_code = old_code; + new_data = old_data; + new_library = old_library; + ap.result_code = 
50; + ap.state_size_too_big = true; + return false; + } + return true; + }; + int n = 0; while (true) { ap.action_list.push_back(list); @@ -1196,9 +1219,21 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) { ap.no_funds = true; } LOG(DEBUG) << "invalid action " << ap.result_arg << " in action list: error code " << ap.result_code; + // This is required here because changes to libraries are applied even if the action phase fails + enforce_state_size_limits(); return true; } } + + end_lt = ap.end_lt; + if (ap.new_code.not_null()) { + new_code = ap.new_code; + } + new_data = compute_phase->new_data; // tentative persistent data update applied + if (!enforce_state_size_limits()) { + return true; + } + ap.result_arg = 0; ap.result_code = 0; CHECK(ap.remaining_balance.grams->sgn() >= 0); @@ -1212,12 +1247,7 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) { was_deleted = true; } ap.success = true; - end_lt = ap.end_lt; out_msgs = std::move(ap.out_msgs); - if (ap.new_code.not_null()) { - new_code = ap.new_code; - } - new_data = compute_phase->new_data; // tentative persistent data update applied total_fees += ap.total_action_fees; // NB: forwarding fees are not accounted here (they are not collected by the validators in this transaction) balance = ap.remaining_balance; @@ -1272,6 +1302,11 @@ int Transaction::try_action_change_library(vm::CellSlice& cs, ActionPhase& ap, c // library code not found return 41; } + vm::CellStorageStat sstat; + sstat.compute_used_storage(lib_ref); + if (sstat.cells > cfg.size_limits.max_library_cells) { + return 43; + } vm::CellBuilder cb; CHECK(cb.store_bool_bool(rec.mode >> 1) && cb.store_ref_bool(std::move(lib_ref))); CHECK(dict.set_builder(hash, cb)); @@ -1546,7 +1581,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, sstat.add_used_storage(info.value->prefetch_ref()); } LOG(DEBUG) << "storage paid for a message: " << sstat.cells << " cells, " << sstat.bits << " bits"; - if (sstat.bits > max_msg_bits || sstat.cells > max_msg_cells) { + if (sstat.bits > cfg.size_limits.max_msg_bits || sstat.cells > cfg.size_limits.max_msg_cells) { LOG(DEBUG) << "message too large, invalid"; return skip_invalid ?
0 : 40; } @@ -1801,6 +1836,35 @@ int Transaction::try_action_reserve_currency(vm::CellSlice& cs, ActionPhase& ap, return 0; } +bool Transaction::check_state_size_limit(const ActionPhaseConfig& cfg) { + auto cell_equal = [](const td::Ref& a, const td::Ref& b) -> bool { + if (a.is_null()) { + return b.is_null(); + } + if (b.is_null()) { + return false; + } + return a->get_hash() == b->get_hash(); + }; + if (cell_equal(account.code, new_code) && cell_equal(account.data, new_data) && + cell_equal(account.library, new_library)) { + return true; + } + // new_storage_stat is used here beause these stats will be reused in compute_state() + new_storage_stat.limit_cells = cfg.size_limits.max_acc_state_cells; + new_storage_stat.limit_bits = cfg.size_limits.max_acc_state_bits; + new_storage_stat.add_used_storage(new_code); + new_storage_stat.add_used_storage(new_data); + new_storage_stat.add_used_storage(new_library); + if (acc_status == Account::acc_active) { + new_storage_stat.clear_limit(); + } else { + new_storage_stat.clear(); + } + return new_storage_stat.cells <= cfg.size_limits.max_acc_state_cells && + new_storage_stat.bits <= cfg.size_limits.max_acc_state_bits; +} + bool Transaction::prepare_bounce_phase(const ActionPhaseConfig& cfg) { if (in_msg.is_null() || !bounce_enabled) { return false; @@ -1924,6 +1988,32 @@ bool Account::store_acc_status(vm::CellBuilder& cb, int acc_status) const { return cb.store_long_bool(v, 2); } +static td::optional try_update_storage_stat(const vm::CellStorageStat& old_stat, + td::Ref old_cs, + td::Ref new_cell) { + if (old_stat.cells == 0 || old_cs.is_null()) { + return {}; + } + vm::CellSlice new_cs = vm::CellSlice(vm::NoVm(), new_cell); + if (old_cs->size_refs() != new_cs.size_refs()) { + return {}; + } + for (unsigned i = 0; i < old_cs->size_refs(); ++i) { + if (old_cs->prefetch_ref(i)->get_hash() != new_cs.prefetch_ref(i)->get_hash()) { + return {}; + } + } + if (old_stat.bits < old_cs->size()) { + return {}; + } + + vm::CellStorageStat new_stat; + new_stat.cells = old_stat.cells; + new_stat.bits = old_stat.bits - old_cs->size() + new_cs.size(); + new_stat.public_cells = old_stat.public_cells; + return new_stat; +} + bool Transaction::compute_state() { if (new_total_state.not_null()) { return true; @@ -1985,6 +2075,7 @@ bool Transaction::compute_state() { // code:(Maybe ^Cell) data:(Maybe ^Cell) library:(HashmapE 256 SimpleLib) } auto storage = cb.finalize(); + new_storage = td::Ref(true, vm::NoVm(), storage); if (si_pos) { auto cs_ref = load_cell_slice_ref(storage); CHECK(cs_ref.unique_write().skip_ext(si_pos)); @@ -1993,7 +2084,16 @@ bool Transaction::compute_state() { new_inner_state.clear(); } vm::CellStorageStat& stats = new_storage_stat; - CHECK(stats.compute_used_storage(Ref(storage))); + auto new_stats = try_update_storage_stat(account.storage_stat, account.storage, storage); + if (new_stats) { + stats = new_stats.unwrap(); + } else { + td::Timer timer; + CHECK(stats.add_used_storage(Ref(storage))); + if (timer.elapsed() > 0.1) { + LOG(INFO) << "Compute used storage took " << timer.elapsed() << "s"; + } + } CHECK(cb.store_long_bool(1, 1) // account$1 && cb.append_cellslice_bool(account.my_addr) // addr:MsgAddressInt && block::store_UInt7(cb, stats.cells) // storage_used$_ cells:(VarUInteger 7) @@ -2265,9 +2365,15 @@ bool Transaction::would_fit(unsigned cls, const block::BlockLimitStatus& blimst) return blimst.would_fit(cls, end_lt, gas_used(), &extra); } -bool Transaction::update_limits(block::BlockLimitStatus& blimst) const { - return 
blimst.update_lt(end_lt) && blimst.update_gas(gas_used()) && blimst.add_proof(new_total_state) && - blimst.add_cell(root) && blimst.add_transaction() && blimst.add_account(is_first); +bool Transaction::update_limits(block::BlockLimitStatus& blimst, bool with_size) const { + if (!(blimst.update_lt(end_lt) && blimst.update_gas(gas_used()))) { + return false; + } + if (with_size) { + return blimst.add_proof(new_total_state) && blimst.add_cell(root) && blimst.add_transaction() && + blimst.add_account(is_first); + } + return true; } /* @@ -2296,6 +2402,7 @@ Ref Transaction::commit(Account& acc) { acc.last_trans_hash_ = root->get_hash().bits(); acc.last_paid = last_paid; acc.storage_stat = new_storage_stat; + acc.storage = new_storage; acc.balance = std::move(balance); acc.due_payment = std::move(due_payment); acc.total_state = std::move(new_total_state); diff --git a/crypto/block/transaction.h b/crypto/block/transaction.h index 75762260..6a171418 100644 --- a/crypto/block/transaction.h +++ b/crypto/block/transaction.h @@ -107,6 +107,7 @@ struct ComputePhaseConfig { Ref global_config; td::BitArray<256> block_rand_seed; bool with_vm_log{false}; + td::uint16 max_vm_data_depth = 512; ComputePhaseConfig(td::uint64 _gas_price = 0, td::uint64 _gas_limit = 0, td::uint64 _gas_credit = 0) : gas_price(_gas_price), gas_limit(_gas_limit), special_gas_limit(_gas_limit), gas_credit(_gas_credit) { compute_threshold(); @@ -143,6 +144,7 @@ struct ActionPhaseConfig { int bounce_msg_body{0}; // usually 0 or 256 bits MsgPrices fwd_std; MsgPrices fwd_mc; // from/to masterchain + SizeLimitsConfig size_limits; const WorkchainSet* workchains{nullptr}; const MsgPrices& fetch_msg_prices(bool is_masterchain) const { return is_masterchain ? fwd_mc : fwd_std; @@ -182,6 +184,7 @@ struct ActionPhase { bool code_changed{false}; bool action_list_invalid{false}; bool acc_delete_req{false}; + bool state_size_too_big{false}; enum { acst_unchanged = 0, acst_frozen = 2, acst_deleted = 3 }; int acc_status_change{acst_unchanged}; td::RefInt256 total_fwd_fees; // all fees debited from the account @@ -235,6 +238,7 @@ struct Account { td::RefInt256 due_payment; Ref orig_total_state; // ^Account Ref total_state; // ^Account + Ref storage; // AccountStorage Ref inner_state; // StateInit ton::Bits256 state_hash; // hash of StateInit for frozen accounts Ref code, data, library, orig_library; @@ -283,7 +287,6 @@ struct Account { }; struct Transaction { - static constexpr unsigned max_msg_bits = (1 << 21), max_msg_cells = (1 << 13); enum { tr_none, tr_ord, @@ -323,6 +326,7 @@ struct Transaction { ton::UnixTime last_paid; Ref root; Ref new_total_state; + Ref new_storage; Ref new_inner_state; Ref new_code, new_data, new_library; Ref in_msg, in_msg_state; @@ -348,6 +352,7 @@ struct Transaction { std::vector> compute_vm_libraries(const ComputePhaseConfig& cfg); bool prepare_compute_phase(const ComputePhaseConfig& cfg); bool prepare_action_phase(const ActionPhaseConfig& cfg); + bool check_state_size_limit(const ActionPhaseConfig& cfg); bool prepare_bounce_phase(const ActionPhaseConfig& cfg); bool compute_state(); bool serialize(); @@ -359,7 +364,7 @@ struct Transaction { const vm::NewCellStorageStat& store_stat, const vm::CellUsageTree* usage_tree) const; bool update_block_storage_profile(vm::NewCellStorageStat& store_stat, const vm::CellUsageTree* usage_tree) const; bool would_fit(unsigned cls, const block::BlockLimitStatus& blk_lim_st) const; - bool update_limits(block::BlockLimitStatus& blk_lim_st) const; + bool 
update_limits(block::BlockLimitStatus& blk_lim_st, bool with_size = true) const; Ref commit(Account& _account); // _account should point to the same account LtCellRef extract_out_msg(unsigned i); diff --git a/crypto/func/analyzer.cpp b/crypto/func/analyzer.cpp index 47cb141a..006d119e 100644 --- a/crypto/func/analyzer.cpp +++ b/crypto/func/analyzer.cpp @@ -520,6 +520,14 @@ bool Op::compute_used_vars(const CodeBlob& code, bool edit) { } while (changes <= edit); return set_var_info(std::move(new_var_info)); } + case _TryCatch: { + code.compute_used_code_vars(block0, next_var_info, edit); + code.compute_used_code_vars(block1, next_var_info, edit); + VarDescrList merge_info = block0->var_info + block1->var_info + next_var_info; + merge_info -= left; + merge_info.clear_last(); + return set_var_info(std::move(merge_info)); + } default: std::cerr << "fatal: unknown operation in compute_used_vars()\n"; throw src::ParseError{where, "unknown operation"}; @@ -645,6 +653,10 @@ bool prune_unreachable(std::unique_ptr& ops) { reach = true; break; } + case Op::_TryCatch: { + reach = prune_unreachable(op.block0) | prune_unreachable(op.block1); + break; + } default: std::cerr << "fatal: unknown operation \n"; throw src::ParseError{op.where, "unknown operation in prune_unreachable()"}; @@ -825,6 +837,12 @@ VarDescrList Op::fwd_analyze(VarDescrList values) { values = block0->fwd_analyze(values); break; } + case _TryCatch: { + VarDescrList val1 = block0->fwd_analyze(values); + VarDescrList val2 = block1->fwd_analyze(std::move(values)); + values = val1 | val2; + break; + } default: std::cerr << "fatal: unknown operation \n"; throw src::ParseError{where, "unknown operation in fwd_analyze()"}; @@ -866,6 +884,7 @@ bool Op::mark_noreturn() { case _Return: return set_noreturn(true); case _If: + case _TryCatch: return set_noreturn((block0->mark_noreturn() & (block1 && block1->mark_noreturn())) | next->mark_noreturn()); case _Again: block0->mark_noreturn(); diff --git a/crypto/func/auto-tests/stress_tester.py b/crypto/func/auto-tests/stress_tester.py index 8f3ec624..c7b63934 100644 --- a/crypto/func/auto-tests/stress_tester.py +++ b/crypto/func/auto-tests/stress_tester.py @@ -13,7 +13,7 @@ def getenv(name, default=None): print("Environemnt variable", name, "is not set", file=sys.stderr) exit(1) -VAR_CNT = 5 +VAR_CNT = 10 TMP_DIR = tempfile.mkdtemp() FUNC_EXECUTABLE = getenv("FUNC_EXECUTABLE", "func") FIFT_EXECUTABLE = getenv("FIFT_EXECUTABLE", "fift") @@ -31,6 +31,15 @@ class State: self.x = x self.vs = [0] * VAR_CNT + def copy(self): + s = State(self.x) + s.vs = self.vs.copy() + return s + + def copy_from(self, s): + self.x = s.x + self.vs = s.vs.copy() + class Code: pass @@ -136,6 +145,37 @@ class CodeRepeat(Code): print(" " * (indent + 1) + "%s += 1;" % var, file=f) print(" " * indent + "} until (%s >= %d);" % (var, self.n), file=f) +class CodeThrow(Code): + def __init__(self): + pass + + def execute(self, state): + return "EXCEPTION" + + def write(self, f, indent=0): + print(" " * indent + "throw(42);", file=f) + +class CodeTryCatch(Code): + def __init__(self, c1, c2): + self.c1 = c1 + self.c2 = c2 + + def execute(self, state): + state0 = state.copy() + res = self.c1.execute(state) + if res == "EXCEPTION": + state.copy_from(state0) + return self.c2.execute(state) + else: + return res + + def write(self, f, indent=0): + print(" " * indent + "try {", file=f) + self.c1.write(f, indent + 1) + print(" " * indent + "} catch (_, _) {", file=f) + self.c2.write(f, indent + 1) + print(" " * indent + "}", file=f) + def 
write_function(f, name, body, inline=False, inline_ref=False, method_id=None): print("_ %s(int x)" % name, file=f, end="") if inline: @@ -147,31 +187,37 @@ def write_function(f, name, body, inline=False, inline_ref=False, method_id=None print(" {", file=f) for i in range(VAR_CNT): print(" int v%d = 0;" % i, file=f) - body.write(f, 1); + body.write(f, 1) print("}", file=f) -def gen_code(xl, xr, with_return, loop_depth=0): +def gen_code(xl, xr, with_return, loop_depth=0, try_catch_depth=0, can_throw=False): + if try_catch_depth < 3 and random.randint(0, 5) == 0: + c1 = gen_code(xl, xr, with_return, loop_depth, try_catch_depth + 1, random.randint(0, 1) == 0) + c2 = gen_code(xl, xr, with_return, loop_depth, try_catch_depth + 1, can_throw) + return CodeTryCatch(c1, c2) code = [] for _ in range(random.randint(0, 2)): if random.randint(0, 3) == 0 and loop_depth < 3: - c = gen_code(xl, xr, False, loop_depth + 1) + c = gen_code(xl, xr, False, loop_depth + 1, try_catch_depth, can_throw) code.append(CodeRepeat(random.randint(0, 3), c, random.randint(0, 2))) elif xr - xl > 1: xmid = random.randrange(xl + 1, xr) ret = random.choice((0, 0, 0, 0, 0, 1, 2)) - c1 = gen_code(xl, xmid, ret == 1, loop_depth) + c1 = gen_code(xl, xmid, ret == 1, loop_depth, try_catch_depth, can_throw) if random.randrange(5) == 0: c2 = CodeEmpty() else: - c2 = gen_code(xmid, xr, ret == 2, loop_depth) + c2 = gen_code(xmid, xr, ret == 2, loop_depth, try_catch_depth, can_throw) code.append(CodeIfRange(xl, xmid, c1, c2)) + if xr - xl == 1 and can_throw and random.randint(0, 5) == 0: + code.append(CodeThrow()) if with_return: if xr - xl == 1: code.append(CodeReturn(random.randrange(10**9))) else: xmid = random.randrange(xl + 1, xr) - c1 = gen_code(xl, xmid, True, loop_depth) - c2 = gen_code(xmid, xr, True, loop_depth) + c1 = gen_code(xl, xmid, True, loop_depth, try_catch_depth, can_throw) + c2 = gen_code(xmid, xr, True, loop_depth, try_catch_depth, can_throw) code.append(CodeIfRange(xl, xmid, c1, c2)) for _ in range(random.randint(0, 3)): pos = random.randint(0, len(code)) @@ -203,6 +249,7 @@ def runvm(compiled_fif, xl, xr): output.append(list(map(int, s.split()))) return output + cnt_ok = 0 cnt_fail = 0 for test_id in range(0, 1000000): diff --git a/crypto/func/builtins.cpp b/crypto/func/builtins.cpp index 22f4282c..2c103d46 100644 --- a/crypto/func/builtins.cpp +++ b/crypto/func/builtins.cpp @@ -229,16 +229,25 @@ int emulate_xor(int a, int b) { } int emulate_not(int a) { + if ((a & VarDescr::ConstZero) == VarDescr::ConstZero) { + return VarDescr::ConstTrue; + } + if ((a & VarDescr::ConstTrue) == VarDescr::ConstTrue) { + return VarDescr::ConstZero; + } + int a2 = a; int f = VarDescr::_Even | VarDescr::_Odd; - if ((a & f) && (~a & f)) { - a ^= f; + if ((a2 & f) && (~a2 & f)) { + a2 ^= f; } - f = VarDescr::_Pos | VarDescr::_Neg; - if ((a & f) && (~a & f)) { - a ^= f; + a2 &= ~(VarDescr::_Zero | VarDescr::_NonZero | VarDescr::_Bit | VarDescr::_Pos | VarDescr::_Neg); + if ((a & VarDescr::_Neg) && (a & VarDescr::_NonZero)) { + a2 |= VarDescr::_Pos; } - a &= ~(VarDescr::_Zero | VarDescr::_NonZero | VarDescr::_Bit); - return a; + if (a & VarDescr::_Pos) { + a2 |= VarDescr::_Neg; + } + return a2; } int emulate_lshift(int a, int b) { @@ -977,6 +986,38 @@ AsmOp compile_cond_throw(std::vector& res, std::vector& args } } +AsmOp compile_throw_arg(std::vector& res, std::vector& args) { + assert(res.empty() && args.size() == 2); + VarDescr &x = args[1]; + if (x.is_int_const() && x.int_const->unsigned_fits_bits(11)) { + x.unused(); + return 
exec_arg_op("THROWARG", x.int_const, 1, 0); + } else { + return exec_op("THROWARGANY", 2, 0); + } +} + +AsmOp compile_cond_throw_arg(std::vector& res, std::vector& args, bool mode) { + assert(res.empty() && args.size() == 3); + VarDescr &x = args[1], &y = args[2]; + std::string suff = (mode ? "IF" : "IFNOT"); + bool skip_cond = false; + if (y.always_true() || y.always_false()) { + y.unused(); + skip_cond = true; + if (y.always_true() != mode) { + x.unused(); + return AsmOp::Nop(); + } + } + if (x.is_int_const() && x.int_const->unsigned_fits_bits(11)) { + x.unused(); + return skip_cond ? exec_arg_op("THROWARG", x.int_const, 1, 0) : exec_arg_op("THROWARG"s + suff, x.int_const, 2, 0); + } else { + return skip_cond ? exec_op("THROWARGANY", 2, 0) : exec_op("THROWARGANY"s + suff, 3, 0); + } +} + AsmOp compile_bool_const(std::vector& res, std::vector& args, bool val) { assert(res.size() == 1 && args.empty()); VarDescr& r = res[0]; @@ -1102,6 +1143,8 @@ void define_builtins() { auto fetch_slice_op = TypeExpr::new_map(SliceInt, TypeExpr::new_tensor({Slice, Slice})); auto prefetch_slice_op = TypeExpr::new_map(SliceInt, Slice); //auto arith_null_op = TypeExpr::new_map(TypeExpr::new_unit(), Int); + auto throw_arg_op = TypeExpr::new_forall({X}, TypeExpr::new_map(TypeExpr::new_tensor({X, Int}), Unit)); + auto cond_throw_arg_op = TypeExpr::new_forall({X}, TypeExpr::new_map(TypeExpr::new_tensor({X, Int, Int}), Unit)); define_builtin_func("_+_", arith_bin_op, compile_add); define_builtin_func("_-_", arith_bin_op, compile_sub); define_builtin_func("-_", arith_un_op, compile_negate); @@ -1161,6 +1204,9 @@ void define_builtins() { define_builtin_func("throw", impure_un_op, compile_throw, true); define_builtin_func("throw_if", impure_bin_op, std::bind(compile_cond_throw, _1, _2, true), true); define_builtin_func("throw_unless", impure_bin_op, std::bind(compile_cond_throw, _1, _2, false), true); + define_builtin_func("throw_arg", throw_arg_op, compile_throw_arg, true); + define_builtin_func("throw_arg_if", cond_throw_arg_op, std::bind(compile_cond_throw_arg, _1, _2, true), true); + define_builtin_func("throw_arg_unless", cond_throw_arg_op, std::bind(compile_cond_throw_arg, _1, _2, false), true); define_builtin_func("load_int", fetch_int_op, std::bind(compile_fetch_int, _1, _2, true, true), {}, {1, 0}); define_builtin_func("load_uint", fetch_int_op, std::bind(compile_fetch_int, _1, _2, true, false), {}, {1, 0}); define_builtin_func("preload_int", prefetch_int_op, std::bind(compile_fetch_int, _1, _2, false, true)); diff --git a/crypto/func/codegen.cpp b/crypto/func/codegen.cpp index 1f504bcd..13b808ea 100644 --- a/crypto/func/codegen.cpp +++ b/crypto/func/codegen.cpp @@ -782,6 +782,77 @@ bool Op::generate_code_step(Stack& stack) { return false; } } + case _TryCatch: { + if (block0->is_empty() && block1->is_empty()) { + return true; + } + if (block0->noreturn() || block1->noreturn()) { + stack.o.retalt_ = true; + } + Stack catch_stack{stack.o}; + std::vector catch_vars; + std::vector catch_last; + for (const VarDescr& var : block1->var_info.list) { + if (stack.find(var.idx) >= 0) { + catch_vars.push_back(var.idx); + catch_last.push_back(!block0->var_info[var.idx]); + } + } + const size_t block_size = 255; + for (size_t begin = catch_vars.size(), end = begin; end > 0; end = begin) { + begin = end >= block_size ? 
end - block_size : 0; + for (size_t i = begin; i < end; ++i) { + catch_stack.push_new_var(catch_vars[i]); + } + } + catch_stack.push_new_var(left[0]); + catch_stack.push_new_var(left[1]); + stack.rearrange_top(catch_vars, catch_last); + stack.opt_show(); + stack.o << "c4 PUSH"; + stack.o << "c5 PUSH"; + stack.o << "c7 PUSH"; + stack.o << "<{"; + stack.o.indent(); + if (block1->noreturn()) { + catch_stack.mode |= Stack::_NeedRetAlt; + } + block1->generate_code_all(catch_stack); + catch_stack.drop_vars_except(next->var_info); + catch_stack.opt_show(); + stack.o.undent(); + stack.o << "}>CONT"; + stack.o << "c7 SETCONT"; + stack.o << "c5 SETCONT"; + stack.o << "c4 SETCONT"; + for (size_t begin = catch_vars.size(), end = begin; end > 0; end = begin) { + begin = end >= block_size ? end - block_size : 0; + stack.o << std::to_string(end - begin) + " PUSHINT"; + stack.o << "-1 PUSHINT"; + stack.o << "SETCONTVARARGS"; + } + stack.s.erase(stack.s.end() - catch_vars.size(), stack.s.end()); + stack.modified(); + stack.o << "<{"; + stack.o.indent(); + if (block0->noreturn()) { + stack.mode |= Stack::_NeedRetAlt; + } + block0->generate_code_all(stack); + if (block0->noreturn()) { + stack.s = std::move(catch_stack.s); + } else if (!block1->noreturn()) { + stack.merge_state(catch_stack); + } + stack.opt_show(); + stack.o.undent(); + stack.o << "}>CONT"; + stack.o << "c1 PUSH"; + stack.o << "COMPOSALT"; + stack.o << "SWAP"; + stack.o << "TRY"; + return true; + } default: std::cerr << "fatal: unknown operation \n"; throw src::ParseError{where, "unknown operation in generate_code()"}; diff --git a/crypto/func/func.h b/crypto/func/func.h index 5db99725..0f4182bf 100644 --- a/crypto/func/func.h +++ b/crypto/func/func.h @@ -53,6 +53,8 @@ enum Keyword { _Do, _While, _Until, + _Try, + _Catch, _If, _Ifnot, _Then, @@ -537,6 +539,7 @@ struct Op { _Until, _Repeat, _Again, + _TryCatch, _SliceConst }; int cl; @@ -1559,6 +1562,9 @@ struct Stack { int find_outside(var_idx_t var, int from, int to) const; void forget_const(); void validate(int i) const { + if (i > 255) { + throw src::Fatal{"Too deep stack"}; + } assert(i >= 0 && i < depth() && "invalid stack reference"); } void modified() { @@ -1593,6 +1599,7 @@ struct Stack { void apply_wrappers() { if (o.retalt_) { o.insert(0, "SAMEALTSAVE"); + o.insert(0, "c2 SAVE"); if (mode & _InlineFunc) { o.indent_all(); o.insert(0, "CONT:<{"); diff --git a/crypto/func/keywords.cpp b/crypto/func/keywords.cpp index 3f0fa230..fedce9db 100644 --- a/crypto/func/keywords.cpp +++ b/crypto/func/keywords.cpp @@ -97,6 +97,8 @@ void define_keywords() { .add_keyword("do", Kw::_Do) .add_keyword("while", Kw::_While) .add_keyword("until", Kw::_Until) + .add_keyword("try", Kw::_Try) + .add_keyword("catch", Kw::_Catch) .add_keyword("if", Kw::_If) .add_keyword("ifnot", Kw::_Ifnot) .add_keyword("then", Kw::_Then) diff --git a/crypto/func/parse-func.cpp b/crypto/func/parse-func.cpp index 92dfe47b..fe86bc1e 100644 --- a/crypto/func/parse-func.cpp +++ b/crypto/func/parse-func.cpp @@ -1102,6 +1102,36 @@ blk_fl::val parse_do_stmt(Lexer& lex, CodeBlob& code) { return res & ~blk_fl::empty; } +blk_fl::val parse_try_catch_stmt(Lexer& lex, CodeBlob& code) { + lex.expect(_Try); + Op& try_catch_op = code.emplace_back(lex.cur().loc, Op::_TryCatch); + code.push_set_cur(try_catch_op.block0); + blk_fl::val res0 = parse_block_stmt(lex, code); + code.close_pop_cur(lex.cur().loc); + lex.expect(_Catch); + code.push_set_cur(try_catch_op.block1); + sym::open_scope(lex); + Expr* expr = parse_expr(lex, code, true); + 
expr->chk_lvalue(lex.cur()); + TypeExpr* tvm_error_type = TypeExpr::new_tensor(TypeExpr::new_var(), TypeExpr::new_atomic(_Int)); + try { + unify(expr->e_type, tvm_error_type); + } catch (UnifyError& ue) { + std::ostringstream os; + os << "`catch` arguments have incorrect type " << expr->e_type << ": " << ue; + lex.cur().error(os.str()); + } + expr->predefine_vars(); + expr->define_new_vars(code); + try_catch_op.left = expr->pre_compile(code); + assert(try_catch_op.left.size() == 2); + blk_fl::val res1 = parse_block_stmt(lex, code); + sym::close_scope(lex); + code.close_pop_cur(lex.cur().loc); + blk_fl::combine_parallel(res0, res1); + return res0; +} + blk_fl::val parse_if_stmt(Lexer& lex, CodeBlob& code, int first_lex = _If) { SrcLocation loc{lex.cur().loc}; lex.expect(first_lex); @@ -1165,6 +1195,8 @@ blk_fl::val parse_stmt(Lexer& lex, CodeBlob& code) { return parse_do_stmt(lex, code); case _While: return parse_while_stmt(lex, code); + case _Try: + return parse_try_catch_stmt(lex, code); default: { auto expr = parse_expr(lex, code); expr->chk_rvalue(lex.cur()); diff --git a/crypto/func/test/tc1.fc b/crypto/func/test/tc1.fc new file mode 100644 index 00000000..245fc521 --- /dev/null +++ b/crypto/func/test/tc1.fc @@ -0,0 +1,113 @@ +() test1() impure { + int i = 3; + repeat (3) { + try { + int j = i; + i *= 2; + throw_unless(500, j <= 10); + } catch (x, e) { + i -= 2; + } + i += i + 1; + } + throw_unless(501, i == 43); +} + +int divide_by_ten(int num) { + try { + throw_unless(500, num < 10); + } catch (x, e) { + return divide_by_ten(num - 10) + 1; + } + return 0; +} + +() test2() impure { + int n = divide_by_ten(37); + throw_unless(502, n == 3); +} + +(int, int) swap_int(int a, int b) { + try { + a = a * b; + b = a / b; + a = a / b; + return (a, b); + } catch (x, e) { + throw_unless(500, b == 0); + } + return (0, a); +} + +() test3() impure { + int a = 0; + int b = 57; + try { + (a, b) = swap_int(a, b); + } catch (x, e) { + throw_unless(500, a == 0); + a = b; + b = 0; + } + throw_unless(503, (a == 57) & (b == 0)); +} + +int get_x(int x, int y) { + try { + } catch (x, e) { + return -1; + } + return x; +} + +int get_y(int x, int y) { + try { + return -1; + } catch (x, e) { + } + return y; +} + +() test4() impure { + throw_unless(504, get_x(3, 4) == 3); + throw_unless(504, get_y(3, 4) == -1); +} + +(int, int, int, int, int) foo(int a, int b, int c, int d, int e) { + try { + throw(11); + } catch (x, y) { + a += 1; + b += 2; + c += 3; + d += 4; + e += 5; + } + return (a, b, c, d, e); +} + +() test5() impure { + var (a, b, c, d, e) = foo(10, 20, 30, 40, 50); + throw_unless(505, (a == 11) & (b == 22) & (c == 33) & (d == 44) & (e == 55)); +} + +() test6() impure { + int a = 0; + int b = 0; + int c = 0; + try { + b = 3; + } catch (x, y) { + b = 12; + } + throw_unless(506, (a == 0) & (b == 3) & (c == 0)); +} + +() main() { + test1(); + test2(); + test3(); + test4(); + test5(); + test6(); +} diff --git a/crypto/func/test/tc2.fc b/crypto/func/test/tc2.fc new file mode 100644 index 00000000..2bde6813 --- /dev/null +++ b/crypto/func/test/tc2.fc @@ -0,0 +1,84 @@ +forall X -> int cast_to_int(X x) asm "NOP"; +forall X -> builder cast_to_builder(X x) asm "NOP"; + +_ test1_body() { + int a = 3; + builder b = begin_cell(); + int c = 1; + try { + c = 3; + throw_arg(b, 100); + } catch (x, y) { + return (a + c + y, cast_to_builder(x)); + } + return (0, null()); +} + +() test1() impure { + var (x, y) = test1_body(); + throw_unless(101, x == 104); + throw_unless(102, y.builder_refs() == y.builder_bits()); +} + +_ 
test2_body(int a, int b, int c) { + try { + try { + try { + try { + throw_arg_if(1, 201, a + b + c == 3); + throw_arg_if(2, 201, a == 3); + throw_arg_unless(1, 202, b == 4); + return 1; + } catch (y, x) { + int y = y.cast_to_int(); + throw_arg_unless(y, x, x == 202); + throw_arg(y + 1, 200); + } + } catch (y, x) { + int y = y.cast_to_int(); + throw_arg_if(y, x, x == 200); + throw_arg_if(y + 2, x, y < 2); + throw_arg_if(y + 3, 203, a + b + c == 4); + throw_arg_unless(y + 4, 204, b == 4); + return 3; + } + } catch (y, x) { + int y = y.cast_to_int(); + try { + throw_arg_if(y, x, x == 200); + throw_arg_if(y + 1, 200, x == 201); + throw_arg_if(x - 203, 200, x == 202); + throw_arg_if(y, 200, x == 203); + throw_arg_if(a + 4, 205, a + b + c == 5); + throw_arg(7, 200); + } catch (v, u) { + int v = v.cast_to_int(); + throw_arg_unless(v, u, u == 205); + if (c == 0) { + return b + 4; + } + throw_arg(v + 1, 200); + } + } + } catch (y, x) { + throw_unless(x, x == 200); + return y.cast_to_int(); + } + return null(); +} + +() test2() impure { + throw_unless(201, test2_body(0, 4, 0) == 1); + throw_unless(202, test2_body(0, 5, 0) == 2); + throw_unless(203, test2_body(3, 4, 0) == 3); + throw_unless(204, test2_body(3, 0, 0) == 4); + throw_unless(205, test2_body(3, 1, 0) == 5); + throw_unless(206, test2_body(3, 2, 0) == 6); + throw_unless(207, test2_body(3, 1, 2) == 7); + throw_unless(208, test2_body(3, 1, 1) == 8); +} + +() main() { + test1(); + test2(); +} diff --git a/crypto/smartcont/stdlib.fc b/crypto/smartcont/stdlib.fc index 0b98eeb4..3531608a 100644 --- a/crypto/smartcont/stdlib.fc +++ b/crypto/smartcont/stdlib.fc @@ -1,132 +1,525 @@ ;; Standard library for funC ;; +{- + # Tuple manipulation primitives + The names and the types are mostly self-explaining. + See [polymorhism with forall](https://ton.org/docs/#/func/functions?id=polymorphism-with-forall) + for more info on the polymorphic functions. + + Note that currently values of atomic type `tuple` can't be cast to composite tuple type (e.g. `[int, cell]`) + and vise versa. +-} + +{- + # Lisp-style lists + + Lists can be represented as nested 2-elements tuples. + Empty list is conventionally represented as TVM `null` value (it can be obtained by calling [null()]). + For example, tuple `(1, (2, (3, null)))` represents list `[1, 2, 3]`. Elements of a list can be of different types. +-} + +;;; Adds an element to the beginning of lisp-style list. forall X -> tuple cons(X head, tuple tail) asm "CONS"; + +;;; Extracts the head and the tail of lisp-style list. forall X -> (X, tuple) uncons(tuple list) asm "UNCONS"; + +;;; Extracts the tail and the head of lisp-style list. forall X -> (tuple, X) list_next(tuple list) asm( -> 1 0) "UNCONS"; + +;;; Returns the head of lisp-style list. forall X -> X car(tuple list) asm "CAR"; + +;;; Returns the tail of lisp-style list. tuple cdr(tuple list) asm "CDR"; + +;;; Creates tuple with zero elements. tuple empty_tuple() asm "NIL"; + +;;; Appends a value `x` to a `Tuple t = (x1, ..., xn)`, but only if the resulting `Tuple t' = (x1, ..., xn, x)` +;;; is of length at most 255. Otherwise throws a type check exception. forall X -> tuple tpush(tuple t, X value) asm "TPUSH"; forall X -> (tuple, ()) ~tpush(tuple t, X value) asm "TPUSH"; + +;;; Creates a tuple of length one with given argument as element. forall X -> [X] single(X x) asm "SINGLE"; + +;;; Unpacks a tuple of length one forall X -> X unsingle([X] t) asm "UNSINGLE"; + +;;; Creates a tuple of length two with given arguments as elements. 
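;; A minimal, illustrative sketch (not part of the diff above; `demo_list` is a hypothetical name)
;; showing how a lisp-style list is built and taken apart with the primitives documented above.
() demo_list() impure {
  tuple lst = cons(1, cons(2, cons(3, null())));   ;; the list [1, 2, 3] as nested pairs ending in null
  (int head, tuple tail) = uncons(lst);            ;; head = 1, tail = [2, 3]
  (int second_elem, tuple rest) = uncons(tail);    ;; second_elem = 2, rest = [3]
  throw_unless(600, (head == 1) & (second_elem == 2) & (~ null?(rest)));
}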
forall X, Y -> [X, Y] pair(X x, Y y) asm "PAIR"; + +;;; Unpacks a tuple of length two forall X, Y -> (X, Y) unpair([X, Y] t) asm "UNPAIR"; + +;;; Creates a tuple of length three with given arguments as elements. forall X, Y, Z -> [X, Y, Z] triple(X x, Y y, Z z) asm "TRIPLE"; + +;;; Unpacks a tuple of length three forall X, Y, Z -> (X, Y, Z) untriple([X, Y, Z] t) asm "UNTRIPLE"; + +;;; Creates a tuple of length four with given arguments as elements. forall X, Y, Z, W -> [X, Y, Z, W] tuple4(X x, Y y, Z z, W w) asm "4 TUPLE"; + +;;; Unpacks a tuple of length four forall X, Y, Z, W -> (X, Y, Z, W) untuple4([X, Y, Z, W] t) asm "4 UNTUPLE"; + +;;; Returns the first element of a tuple (with unknown element types). forall X -> X first(tuple t) asm "FIRST"; + +;;; Returns the second element of a tuple (with unknown element types). forall X -> X second(tuple t) asm "SECOND"; + +;;; Returns the third element of a tuple (with unknown element types). forall X -> X third(tuple t) asm "THIRD"; + +;;; Returns the fourth element of a tuple (with unknown element types). forall X -> X fourth(tuple t) asm "3 INDEX"; + +;;; Returns the first element of a pair tuple. forall X, Y -> X pair_first([X, Y] p) asm "FIRST"; + +;;; Returns the second element of a pair tuple. forall X, Y -> Y pair_second([X, Y] p) asm "SECOND"; + +;;; Returns the first element of a triple tuple. forall X, Y, Z -> X triple_first([X, Y, Z] p) asm "FIRST"; + +;;; Returns the second element of a triple tuple. forall X, Y, Z -> Y triple_second([X, Y, Z] p) asm "SECOND"; + +;;; Returns the third element of a triple tuple. forall X, Y, Z -> Z triple_third([X, Y, Z] p) asm "THIRD"; + + +;;; Push null element (casted to given type) +;;; By the TVM type `Null` FunC represents absence of a value of some atomic type. +;;; So `null` can actually have any atomic type. forall X -> X null() asm "PUSHNULL"; + +;;; Moves a variable [x] to the top of the stack forall X -> (X, ()) ~impure_touch(X x) impure asm "NOP"; + + +;;; Returns the current Unix time as an Integer int now() asm "NOW"; + +;;; Returns the internal address of the current smart contract as a Slice with a `MsgAddressInt`. +;;; If necessary, it can be parsed further using primitives such as [parse_std_addr]. slice my_address() asm "MYADDR"; + +;;; Returns the balance of the smart contract as a tuple consisting of an int +;;; (balance in nanotoncoins) and a `cell` +;;; (a dictionary with 32-bit keys representing the balance of "extra currencies") +;;; at the start of Computation Phase. +;;; Note that RAW primitives such as [send_raw_message] do not update this field. [int, cell] get_balance() asm "BALANCE"; + +;;; Returns the logical time of the current transaction. int cur_lt() asm "LTIME"; + +;;; Returns the starting logical time of the current block. int block_lt() asm "BLOCKLT"; +;;; Computes the representation hash of a `cell` [c] and returns it as a 256-bit unsigned integer `x`. +;;; Useful for signing and checking signatures of arbitrary entities represented by a tree of cells. int cell_hash(cell c) asm "HASHCU"; + +;;; Computes the hash of a `slice s` and returns it as a 256-bit unsigned integer `x`. +;;; The result is the same as if an ordinary cell containing only data and references from `s` had been created +;;; and its hash computed by [cell_hash]. int slice_hash(slice s) asm "HASHSU"; + +;;; Computes sha256 of the data bits of `slice` [s]. If the bit length of `s` is not divisible by eight, +;;; throws a cell underflow exception. 
The hash value is returned as a 256-bit unsigned integer `x`. int string_hash(slice s) asm "SHA256U"; +{- + # Signature checks +-} + +;;; Checks the Ed25519-`signature` of a `hash` (a 256-bit unsigned integer, usually computed as the hash of some data) +;;; using [public_key] (also represented by a 256-bit unsigned integer). +;;; The signature must contain at least 512 data bits; only the first 512 bits are used. +;;; The result is `−1` if the signature is valid, `0` otherwise. +;;; Note that `CHKSIGNU` creates a 256-bit slice with the hash and calls `CHKSIGNS`. +;;; That is, if [hash] is computed as the hash of some data, these data are hashed twice, +;;; the second hashing occurring inside `CHKSIGNS`. int check_signature(int hash, slice signature, int public_key) asm "CHKSIGNU"; + +;;; Checks whether [signature] is a valid Ed25519-signature of the data portion of `slice data` using `public_key`, +;;; similarly to [check_signature]. +;;; If the bit length of [data] is not divisible by eight, throws a cell underflow exception. +;;; The verification of Ed25519 signatures is the standard one, +;;; with sha256 used to reduce [data] to the 256-bit number that is actually signed. int check_data_signature(slice data, slice signature, int public_key) asm "CHKSIGNS"; +{--- + # Computation of boc size + The primitives below may be useful for computing storage fees of user-provided data. +-} + +;;; Returns `(x, y, z, -1)` or `(null, null, null, 0)`. +;;; Recursively computes the count of distinct cells `x`, data bits `y`, and cell references `z` +;;; in the DAG rooted at `cell` [c], effectively returning the total storage used by this DAG taking into account +;;; the identification of equal cells. +;;; The values of `x`, `y`, and `z` are computed by a depth-first traversal of this DAG, +;;; with a hash table of visited cell hashes used to prevent visits of already-visited cells. +;;; The total count of visited cells `x` cannot exceed non-negative [max_cells]; +;;; otherwise the computation is aborted before visiting the `(max_cells + 1)`-st cell and +;;; a zero flag is returned to indicate failure. If [c] is `null`, returns `x = y = z = 0`. (int, int, int) compute_data_size(cell c, int max_cells) impure asm "CDATASIZE"; + +;;; Similar to [compute_data_size?], but accepting a `slice` [s] instead of a `cell`. +;;; The returned value of `x` does not take into account the cell that contains the `slice` [s] itself; +;;; however, the data bits and the cell references of [s] are accounted for in `y` and `z`. (int, int, int) slice_compute_data_size(slice s, int max_cells) impure asm "SDATASIZE"; + +;;; A non-quiet version of [compute_data_size?] that throws a cell overflow exception (`8`) on failure. (int, int, int, int) compute_data_size?(cell c, int max_cells) asm "CDATASIZEQ NULLSWAPIFNOT2 NULLSWAPIFNOT"; + +;;; A non-quiet version of [slice_compute_data_size?] that throws a cell overflow exception (8) on failure. (int, int, int, int) slice_compute_data_size?(cell c, int max_cells) asm "SDATASIZEQ NULLSWAPIFNOT2 NULLSWAPIFNOT"; +;;; Throws an exception with exit_code excno if cond is not 0 (commented since implemented in compilator) ;; () throw_if(int excno, int cond) impure asm "THROWARGIF"; +{-- + # Debug primitives + Only works for local TVM execution with debug level verbosity +-} +;;; Dumps the stack (at most the top 255 values) and shows the total stack depth. () dump_stack() impure asm "DUMPSTK"; +{- + # Persistent storage save and load +-} + +;;; Returns the persistent contract storage cell. 
It can be parsed or modified with slice and builder primitives later. cell get_data() asm "c4 PUSH"; + +;;; Sets `cell` [c] as persistent contract data. You can update persistent contract storage with this primitive. () set_data(cell c) impure asm "c4 POP"; + +{- + # Continuation primitives +-} +;;; Usually `c3` has a continuation initialized by the whole code of the contract. It is used for function calls. +;;; The primitive returns the current value of `c3`. cont get_c3() impure asm "c3 PUSH"; + +;;; Updates the current value of `c3`. Usually, it is used for updating smart contract code in run-time. +;;; Note that after execution of this primitive the current code +;;; (and the stack of recursive function calls) won't change, +;;; but any other function call will use a function from the new code. () set_c3(cont c) impure asm "c3 POP"; + +;;; Transforms a `slice` [s] into a simple ordinary continuation `c`, with `c.code = s` and an empty stack and savelist. cont bless(slice s) impure asm "BLESS"; -() accept_message() impure asm "ACCEPT"; -() set_gas_limit(int limit) impure asm "SETGASLIMIT"; -() commit() impure asm "COMMIT"; -() buy_gas(int gram) impure asm "BUYGAS"; +{--- + # Gas related primitives +-} +;;; Sets current gas limit `gl` to its maximal allowed value `gm`, and resets the gas credit `gc` to zero, +;;; decreasing the value of `gr` by `gc` in the process. +;;; In other words, the current smart contract agrees to buy some gas to finish the current transaction. +;;; This action is required to process external messages, which bring no value (hence no gas) with themselves. +;;; +;;; For more details check [accept_message effects](https://ton.org/docs/#/smart-contracts/accept). +() accept_message() impure asm "ACCEPT"; + +;;; Sets current gas limit `gl` to the minimum of limit and `gm`, and resets the gas credit `gc` to zero. +;;; If the gas consumed so far (including the present instruction) exceeds the resulting value of `gl`, +;;; an (unhandled) out of gas exception is thrown before setting new gas limits. +;;; Notice that [set_gas_limit] with an argument `limit ≥ 2^63 − 1` is equivalent to [accept_message]. +() set_gas_limit(int limit) impure asm "SETGASLIMIT"; + +;;; Commits the current state of registers `c4` (“persistent data”) and `c5` (“actions”) +;;; so that the current execution is considered “successful” with the saved values even if an exception +;;; in Computation Phase is thrown later. +() commit() impure asm "COMMIT"; + +;;; Not implemented +;;() buy_gas(int gram) impure asm "BUYGAS"; + +;;; Computes the amount of gas that can be bought for `amount` nanoTONs, +;;; and sets `gl` accordingly in the same way as [set_gas_limit]. +() buy_gas(int amount) impure asm "BUYGAS"; + +;;; Computes the minimum of two integers [x] and [y]. int min(int x, int y) asm "MIN"; + +;;; Computes the maximum of two integers [x] and [y]. int max(int x, int y) asm "MAX"; + +;;; Sorts two integers. (int, int) minmax(int x, int y) asm "MINMAX"; + +;;; Computes the absolute value of an integer [x]. int abs(int x) asm "ABS"; +{- + # Slice primitives + + It is said that a primitive _loads_ some data, + if it returns the data and the remainder of the slice + (so it can also be used as [modifying method](https://ton.org/docs/#/func/statements?id=modifying-methods)). + + It is said that a primitive _preloads_ some data, if it returns only the data + (it can be used as [non-modifying method](https://ton.org/docs/#/func/statements?id=non-modifying-methods)). 
+ + Unless otherwise stated, loading and preloading primitives read the data from a prefix of the slice. +-} + + +;;; Converts a `cell` [c] into a `slice`. Notice that [c] must be either an ordinary cell, +;;; or an exotic cell (see [TVM.pdf](https://ton-blockchain.github.io/docs/tvm.pdf), 3.1.2) +;;; which is automatically loaded to yield an ordinary cell `c'`, converted into a `slice` afterwards. slice begin_parse(cell c) asm "CTOS"; + +;;; Checks if [s] is empty. If not, throws an exception. () end_parse(slice s) impure asm "ENDS"; + +;;; Loads the first reference from the slice. (slice, cell) load_ref(slice s) asm( -> 1 0) "LDREF"; + +;;; Preloads the first reference from the slice. cell preload_ref(slice s) asm "PLDREF"; + + {- Functions below are commented because are implemented on compilator level for optimisation -} + +;;; Loads a signed [len]-bit integer from a slice [s]. ;; (slice, int) ~load_int(slice s, int len) asm(s len -> 1 0) "LDIX"; + +;;; Loads an unsigned [len]-bit integer from a slice [s]. ;; (slice, int) ~load_uint(slice s, int len) asm( -> 1 0) "LDUX"; + +;;; Preloads a signed [len]-bit integer from a slice [s]. ;; int preload_int(slice s, int len) asm "PLDIX"; + +;;; Preloads an unsigned [len]-bit integer from a slice [s]. ;; int preload_uint(slice s, int len) asm "PLDUX"; + +;;; Loads the first `0 ≤ len ≤ 1023` bits from slice [s] into a separate `slice s''`. ;; (slice, slice) load_bits(slice s, int len) asm(s len -> 1 0) "LDSLICEX"; + +;;; Preloads the first `0 ≤ len ≤ 1023` bits from slice [s] into a separate `slice s''`. ;; slice preload_bits(slice s, int len) asm "PLDSLICEX"; + +;;; Loads serialized amount of TonCoins (any unsigned integer up to `2^128 - 1`). (slice, int) load_grams(slice s) asm( -> 1 0) "LDGRAMS"; +(slice, int) load_coins(slice s) asm( -> 1 0) "LDGRAMS"; + +;;; Returns all but the first `0 ≤ len ≤ 1023` bits of `slice` [s]. slice skip_bits(slice s, int len) asm "SDSKIPFIRST"; (slice, ()) ~skip_bits(slice s, int len) asm "SDSKIPFIRST"; + +;;; Returns the first `0 ≤ len ≤ 1023` bits of `slice` [s]. slice first_bits(slice s, int len) asm "SDCUTFIRST"; + +;;; Returns all but the last `0 ≤ len ≤ 1023` bits of `slice` [s]. slice skip_last_bits(slice s, int len) asm "SDSKIPLAST"; (slice, ()) ~skip_last_bits(slice s, int len) asm "SDSKIPLAST"; + +;;; Returns the last `0 ≤ len ≤ 1023` bits of `slice` [s]. slice slice_last(slice s, int len) asm "SDCUTLAST"; + +;;; Loads a dictionary `D` (HashMapE) from `slice` [s]. +;;; (returns `null` if `nothing` constructor is used). (slice, cell) load_dict(slice s) asm( -> 1 0) "LDDICT"; + +;;; Preloads a dictionary `D` from `slice` [s]. cell preload_dict(slice s) asm "PLDDICT"; + +;;; Loads a dictionary as [load_dict], but returns only the remainder of the slice. slice skip_dict(slice s) asm "SKIPDICT"; +;;; Loads (Maybe ^Cell) from `slice` [s]. +;;; In other words loads 1 bit and if it is true +;;; loads first ref and return it with slice remainder +;;; otherwise returns `null` and slice remainder (slice, cell) load_maybe_ref(slice s) asm( -> 1 0) "LDOPTREF"; -cell preload_maybe_ref(slice s) asm "PLDOPTREF"; -builder store_maybe_ref(builder b, cell c) asm(c b) "STOPTREF"; +;;; Preloads (Maybe ^Cell) from `slice` [s]. +cell preload_maybe_ref(slice s) asm "PLDOPTREF"; + + +;;; Returns the depth of `cell` [c]. +;;; If [c] has no references, then return `0`; +;;; otherwise the returned value is one plus the maximum of depths of cells referred to from [c]. +;;; If [c] is a `null` instead of a cell, returns zero. 
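;; An illustrative sketch (not part of the diff above; `load_storage_sketch` and the cell layout
;; are hypothetical) showing the loading primitives above used as modifying methods to parse a
;; storage cell laid out as `balance:Coins owners:(HashmapE 256 ...)`.
(int, cell) load_storage_sketch(cell storage) {
  slice cs = storage.begin_parse();
  int balance = cs~load_coins();   ;; same instruction as load_grams, see the alias above
  cell owners = cs~load_dict();    ;; `null` when the nothing$0 constructor is used
  cs.end_parse();                  ;; throws unless the slice has been fully consumed
  return (balance, owners);
}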
int cell_depth(cell c) asm "CDEPTH"; + +{- + # Slice size primitives +-} + +;;; Returns the number of references in `slice` [s]. int slice_refs(slice s) asm "SREFS"; + +;;; Returns the number of data bits in `slice` [s]. int slice_bits(slice s) asm "SBITS"; + +;;; Returns both the number of data bits and the number of references in `slice` [s]. (int, int) slice_bits_refs(slice s) asm "SBITREFS"; + +;;; Checks whether a `slice` [s] is empty (i.e., contains no bits of data and no cell references). int slice_empty?(slice s) asm "SEMPTY"; + +;;; Checks whether `slice` [s] has no bits of data. int slice_data_empty?(slice s) asm "SDEMPTY"; + +;;; Checks whether `slice` [s] has no references. int slice_refs_empty?(slice s) asm "SREMPTY"; + +;;; Returns the depth of `slice` [s]. +;;; If [s] has no references, then returns `0`; +;;; otherwise the returned value is one plus the maximum of depths of cells referred to from [s]. int slice_depth(slice s) asm "SDEPTH"; +{- + # Builder size primitives +-} + +;;; Returns the number of cell references already stored in `builder` [b] int builder_refs(builder b) asm "BREFS"; + +;;; Returns the number of data bits already stored in `builder` [b]. int builder_bits(builder b) asm "BBITS"; + +;;; Returns the depth of `builder` [b]. +;;; If no cell references are stored in [b], then returns 0; +;;; otherwise the returned value is one plus the maximum of depths of cells referred to from [b]. int builder_depth(builder b) asm "BDEPTH"; +{- + # Builder primitives + It is said that a primitive _stores_ a value `x` into a builder `b` + if it returns a modified version of the builder `b'` with the value `x` stored at the end of it. + It can be used as [non-modifying method](https://ton.org/docs/#/func/statements?id=non-modifying-methods). + + All the primitives below first check whether there is enough space in the `builder`, + and only then check the range of the value being serialized. +-} + +;;; Creates a new empty `builder`. builder begin_cell() asm "NEWC"; + +;;; Converts a `builder` into an ordinary `cell`. cell end_cell(builder b) asm "ENDC"; + +;;; Stores a reference to `cell` [c] into `builder` [b]. builder store_ref(builder b, cell c) asm(c b) "STREF"; + +;;; Stores an unsigned [len]-bit integer `x` into `b` for `0 ≤ len ≤ 256`. ;; builder store_uint(builder b, int x, int len) asm(x b len) "STUX"; + +;;; Stores a signed [len]-bit integer `x` into `b` for` 0 ≤ len ≤ 257`. ;; builder store_int(builder b, int x, int len) asm(x b len) "STIX"; + + +;;; Stores `slice` [s] into `builder` [b] builder store_slice(builder b, slice s) asm "STSLICER"; + +;;; Stores (serializes) an integer [x] in the range `0..2^128 − 1` into `builder` [b]. +;;; The serialization of [x] consists of a 4-bit unsigned big-endian integer `l`, +;;; which is the smallest integer `l ≥ 0`, such that `x < 2^8l`, +;;; followed by an `8l`-bit unsigned big-endian representation of [x]. +;;; If [x] does not belong to the supported range, a range check exception is thrown. +;;; +;;; Store amounts of TonCoins to the builder as VarUInteger 16 builder store_grams(builder b, int x) asm "STGRAMS"; +builder store_coins(builder b, int x) asm "STGRAMS"; + +;;; Stores dictionary `D` represented by `cell` [c] or `null` into `builder` [b]. +;;; In other words, stores a `1`-bit and a reference to [c] if [c] is not `null` and `0`-bit otherwise. 
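;; The builder-side counterpart of the parsing sketch above (illustrative only; the function
;; name and layout are hypothetical): serializing `balance:Coins owners:(HashmapE 256 ...)`
;; with chained non-modifying methods.
cell pack_storage_sketch(int balance, cell owners) {
  return begin_cell()
           .store_coins(balance)   ;; alias of store_grams, VarUInteger 16
           .store_dict(owners)     ;; a `1`-bit plus a reference, or a single `0`-bit for null
         .end_cell();
}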
builder store_dict(builder b, cell c) asm(c b) "STDICT"; +;;; Stores (Maybe ^Cell) to builder: +;;; if cell is null store 1 zero bit +;;; otherwise store 1 true bit and ref to cell +builder store_maybe_ref(builder b, cell c) asm(c b) "STOPTREF"; + + +{- + # Address manipulation primitives + The address manipulation primitives listed below serialize and deserialize values according to the following TL-B scheme: + ```TL-B + addr_none$00 = MsgAddressExt; + addr_extern$01 len:(## 8) external_address:(bits len) + = MsgAddressExt; + anycast_info$_ depth:(#<= 30) { depth >= 1 } + rewrite_pfx:(bits depth) = Anycast; + addr_std$10 anycast:(Maybe Anycast) + workchain_id:int8 address:bits256 = MsgAddressInt; + addr_var$11 anycast:(Maybe Anycast) addr_len:(## 9) + workchain_id:int32 address:(bits addr_len) = MsgAddressInt; + _ _:MsgAddressInt = MsgAddress; + _ _:MsgAddressExt = MsgAddress; + + int_msg_info$0 ihr_disabled:Bool bounce:Bool bounced:Bool + src:MsgAddress dest:MsgAddressInt + value:CurrencyCollection ihr_fee:Grams fwd_fee:Grams + created_lt:uint64 created_at:uint32 = CommonMsgInfoRelaxed; + ext_out_msg_info$11 src:MsgAddress dest:MsgAddressExt + created_lt:uint64 created_at:uint32 = CommonMsgInfoRelaxed; + ``` + A deserialized `MsgAddress` is represented by a tuple `t` as follows: + + - `addr_none` is represented by `t = (0)`, + i.e., a tuple containing exactly one integer equal to zero. + - `addr_extern` is represented by `t = (1, s)`, + where slice `s` contains the field `external_address`. In other words, ` + t` is a pair (a tuple consisting of two entries), containing an integer equal to one and slice `s`. + - `addr_std` is represented by `t = (2, u, x, s)`, + where `u` is either a `null` (if `anycast` is absent) or a slice `s'` containing `rewrite_pfx` (if anycast is present). + Next, integer `x` is the `workchain_id`, and slice `s` contains the address. + - `addr_var` is represented by `t = (3, u, x, s)`, + where `u`, `x`, and `s` have the same meaning as for `addr_std`. +-} + +;;; Loads from slice [s] the only prefix that is a valid `MsgAddress`, +;;; and returns both this prefix `s'` and the remainder `s''` of [s] as slices. (slice, slice) load_msg_addr(slice s) asm( -> 1 0) "LDMSGADDR"; + +;;; Decomposes slice [s] containing a valid `MsgAddress` into a `tuple t` with separate fields of this `MsgAddress`. +;;; If [s] is not a valid `MsgAddress`, a cell deserialization exception is thrown. tuple parse_addr(slice s) asm "PARSEMSGADDR"; + +;;; Parses slice [s] containing a valid `MsgAddressInt` (usually a `msg_addr_std`), +;;; applies rewriting from the anycast (if present) to the same-length prefix of the address, +;;; and returns both the workchain and the 256-bit address as integers. +;;; If the address is not 256-bit, or if [s] is not a valid serialization of `MsgAddressInt`, +;;; throws a cell deserialization exception. (int, int) parse_std_addr(slice s) asm "REWRITESTDADDR"; + +;;; A variant of [parse_std_addr] that returns the (rewritten) address as a slice [s], +;;; even if it is not exactly 256 bit long (represented by a `msg_addr_var`). (int, slice) parse_var_addr(slice s) asm "REWRITEVARADDR"; +{- + # Dictionary primitives +-} + + +;;; Sets the value associated with [key_len]-bit key signed index in dictionary [dict] to [value] (cell), +;;; and returns the resulting dictionary. 
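;; An illustrative sketch (not part of the diff above; `sender_of_sketch` is a hypothetical name):
;; extracting the sender's workchain and 256-bit address from the full cell of an inbound
;; internal message with load_msg_addr and parse_std_addr documented above.
(int, int) sender_of_sketch(cell in_msg_full) {
  slice cs = in_msg_full.begin_parse();
  cs~skip_bits(4);                    ;; int_msg_info$0 tag plus ihr_disabled, bounce, bounced flags
  slice sender = cs~load_msg_addr();  ;; the MsgAddress prefix of the remaining slice
  return parse_std_addr(sender);      ;; (workchain_id, address), rewritten if anycast is present
}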
cell idict_set_ref(cell dict, int key_len, int index, cell value) asm(value index dict key_len) "DICTISETREF"; (cell, ()) ~idict_set_ref(cell dict, int key_len, int index, cell value) asm(value index dict key_len) "DICTISETREF"; + +;;; Sets the value associated with [key_len]-bit key unsigned index in dictionary [dict] to [value] (cell), +;;; and returns the resulting dictionary. cell udict_set_ref(cell dict, int key_len, int index, cell value) asm(value index dict key_len) "DICTUSETREF"; (cell, ()) ~udict_set_ref(cell dict, int key_len, int index, cell value) asm(value index dict key_len) "DICTUSETREF"; + cell idict_get_ref(cell dict, int key_len, int index) asm(index dict key_len) "DICTIGETOPTREF"; -(cell, int) idict_get_ref?(cell dict, int key_len, int index) asm(index dict key_len) "DICTIGETREF"; -(cell, int) udict_get_ref?(cell dict, int key_len, int index) asm(index dict key_len) "DICTUGETREF"; +(cell, int) idict_get_ref?(cell dict, int key_len, int index) asm(index dict key_len) "DICTIGETREF" "NULLSWAPIFNOT"; +(cell, int) udict_get_ref?(cell dict, int key_len, int index) asm(index dict key_len) "DICTUGETREF" "NULLSWAPIFNOT"; (cell, cell) idict_set_get_ref(cell dict, int key_len, int index, cell value) asm(value index dict key_len) "DICTISETGETOPTREF"; (cell, cell) udict_set_get_ref(cell dict, int key_len, int index, cell value) asm(value index dict key_len) "DICTUSETGETOPTREF"; (cell, int) idict_delete?(cell dict, int key_len, int index) asm(index dict key_len) "DICTIDEL"; @@ -185,24 +578,47 @@ cell dict_set_builder(cell dict, int key_len, slice index, builder value) asm(va (int, slice, int) idict_get_nexteq?(cell dict, int key_len, int pivot) asm(pivot dict key_len -> 1 0 2) "DICTIGETNEXTEQ" "NULLSWAPIFNOT2"; (int, slice, int) idict_get_prev?(cell dict, int key_len, int pivot) asm(pivot dict key_len -> 1 0 2) "DICTIGETPREV" "NULLSWAPIFNOT2"; (int, slice, int) idict_get_preveq?(cell dict, int key_len, int pivot) asm(pivot dict key_len -> 1 0 2) "DICTIGETPREVEQ" "NULLSWAPIFNOT2"; + +;;; Creates an empty dictionary, which is actually a null value. Equivalent to `PUSHNULL`. cell new_dict() asm "NEWDICT"; +;;; Checks whether a dictionary is empty. Equivalent to `cell_null?`. int dict_empty?(cell c) asm "DICTEMPTY"; + +{- Prefix dictionary primitives -} (slice, slice, slice, int) pfxdict_get?(cell dict, int key_len, slice key) asm(key dict key_len) "PFXDICTGETQ" "NULLSWAPIFNOT2"; (cell, int) pfxdict_set?(cell dict, int key_len, slice key, slice value) asm(value key dict key_len) "PFXDICTSET"; (cell, int) pfxdict_delete?(cell dict, int key_len, slice key) asm(key dict key_len) "PFXDICTDEL"; +;;; Returns the value of the global configuration parameter with integer index `i` as a `cell` or `null` value. cell config_param(int x) asm "CONFIGOPTPARAM"; +;;; Checks whether `c` is `null`. Note that FunC also has a polymorphic `null?` built-in. int cell_null?(cell c) asm "ISNULL"; +;;; Creates an output action which would reserve exactly amount nanotoncoins (if mode = 0), at most amount nanotoncoins (if mode = 2), or all but amount nanotoncoins (if mode = 1 or mode = 3), from the remaining balance of the account. It is roughly equivalent to creating an outbound message carrying amount nanotoncoins (or b − amount nanotoncoins, where b is the remaining balance) to oneself, so that the subsequent output actions would not be able to spend more money than the remainder.
Bit +2 in mode means that the external action does not fail if the specified amount cannot be reserved; instead, all remaining balance is reserved. Bit +8 in mode means `amount <- -amount` before performing any further actions. Bit +4 in mode means that amount is increased by the original balance of the current account (before the compute phase), including all extra currencies, before performing any other checks and actions. Currently, amount must be a non-negative integer, and mode must be in the range 0..15. () raw_reserve(int amount, int mode) impure asm "RAWRESERVE"; +;;; Similar to raw_reserve, but also accepts a dictionary extra_amount (represented by a cell or null) with extra currencies. In this way currencies other than TonCoin can be reserved. () raw_reserve_extra(int amount, cell extra_amount, int mode) impure asm "RAWRESERVEX"; +;;; Sends a raw message contained in msg, which should contain a correctly serialized object Message X, with the only exception that the source address is allowed to have dummy value addr_none (to be automatically replaced with the current smart contract address), and ihr_fee, fwd_fee, created_lt and created_at fields can have arbitrary values (to be rewritten with correct values during the action phase of the current transaction). Integer parameter mode contains the flags. Currently mode = 0 is used for ordinary messages; mode = 128 is used for messages that are to carry all the remaining balance of the current smart contract (instead of the value originally indicated in the message); mode = 64 is used for messages that carry all the remaining value of the inbound message in addition to the value initially indicated in the new message (if bit 0 is not set, the gas fees are deducted from this amount); mode' = mode + 1 means that the sender wants to pay transfer fees separately; mode' = mode + 2 means that any errors arising while processing this message during the action phase should be ignored. Finally, mode' = mode + 32 means that the current account must be destroyed if its resulting balance is zero. This flag is usually employed together with +128. () send_raw_message(cell msg, int mode) impure asm "SENDRAWMSG"; +;;; Creates an output action that would change this smart contract code to that given by cell new_code. Notice that this change will take effect only after the successful termination of the current run of the smart contract () set_code(cell new_code) impure asm "SETCODE"; +;;; Generates a new pseudo-random unsigned 256-bit integer x. The algorithm is as follows: if r is the old value of the random seed, considered as a 32-byte array (by constructing the big-endian representation of an unsigned 256-bit integer), then its sha512(r) is computed; the first 32 bytes of this hash are stored as the new value r' of the random seed, and the remaining 32 bytes are returned as the next random value x. int random() impure asm "RANDU256"; +;;; Generates a new pseudo-random integer z in the range 0..range−1 (or range..−1, if range < 0). More precisely, an unsigned random value x is generated as in random; then z := x * range / 2^256 is computed. int rand(int range) impure asm "RAND"; +;;; Returns the current random seed as an unsigned 256-bit Integer. int get_seed() impure asm "RANDSEED"; -int set_seed() impure asm "SETRAND"; +;;; Sets the random seed to unsigned 256-bit seed. 
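{-
  A minimal usage sketch (editorial illustration, not part of this patch): building an
  internal message with the builder primitives and handing it to send_raw_message with
  mode 1 (transfer fees are paid separately from the message value). The destination,
  value and body are placeholders; the 0x18 prefix packs int_msg_info$0, ihr_disabled:1,
  bounce:1, bounced:0 and src:addr_none, and the 107-bit tail covers the empty extra
  currencies, zero fees, created_lt, created_at, absent init and body-as-reference flags.

  () example_send(slice to_addr, int amount, cell body) impure {
    cell msg = begin_cell()
      .store_uint(0x18, 6)                          ;; message header flags
      .store_slice(to_addr)                         ;; dest:MsgAddressInt
      .store_coins(amount)                          ;; value.grams
      .store_uint(1, 1 + 4 + 4 + 64 + 32 + 1 + 1)   ;; zeros except the last bit: body is stored as a ref
      .store_ref(body)
      .end_cell();
    send_raw_message(msg, 1);
  }
-}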
+() set_seed(int) impure asm "SETRAND"; +;;; Mixes unsigned 256-bit integer x into the random seed r by setting the random seed to sha256 of the concatenation of two 32-byte strings: the first with the big-endian representation of the old seed r, and the second with the big-endian representation of x. () randomize(int x) impure asm "ADDRAND"; +;;; Equivalent to randomize(cur_lt());. () randomize_lt() impure asm "LTIME" "ADDRAND"; + +;;; Checks whether the data parts of two slices coincide. +int equal_slice_bits (slice a, slice b) asm "SDEQ"; + +;;; Concatenates two builders. +builder store_builder(builder to, builder from) asm "STBR"; diff --git a/crypto/vm/boc.cpp b/crypto/vm/boc.cpp index d35291d3..f438d480 100644 --- a/crypto/vm/boc.cpp +++ b/crypto/vm/boc.cpp @@ -1039,9 +1039,15 @@ bool CellStorageStat::add_used_storage(Ref cs_ref, bool kill_dup, bool CellStorageStat::add_used_storage(const CellSlice& cs, bool kill_dup, unsigned skip_count_root) { if (!(skip_count_root & 1)) { ++cells; + if (cells > limit_cells) { + return false; + } } if (!(skip_count_root & 2)) { bits += cs.size(); + if (bits > limit_bits) { + return false; + } } for (unsigned i = 0; i < cs.size_refs(); i++) { if (!add_used_storage(cs.prefetch_ref(i), kill_dup)) { @@ -1054,9 +1060,15 @@ bool CellStorageStat::add_used_storage(CellSlice&& cs, bool kill_dup, unsig bool CellStorageStat::add_used_storage(CellSlice&& cs, bool kill_dup, unsigned skip_count_root) { if (!(skip_count_root & 1)) { ++cells; + if (cells > limit_cells) { + return false; + } } if (!(skip_count_root & 2)) { bits += cs.size(); + if (bits > limit_bits) { + return false; + } } while (cs.size_refs()) { if (!add_used_storage(cs.fetch_ref(), kill_dup)) { diff --git a/crypto/vm/boc.h b/crypto/vm/boc.h index 2fae1846..dd74a6d1 100644 --- a/crypto/vm/boc.h +++ b/crypto/vm/boc.h @@ -117,8 +117,13 @@ struct CellStorageStat { } void clear() { cells = bits = public_cells = 0; + clear_limit(); clear_seen(); } + void clear_limit() { + limit_cells = std::numeric_limits::max(); + limit_bits = std::numeric_limits::max(); + } bool compute_used_storage(Ref cs_ref, bool kill_dup = true, unsigned skip_count_root = 0); bool compute_used_storage(const CellSlice& cs, bool kill_dup = true, unsigned skip_count_root = 0); bool compute_used_storage(CellSlice&& cs, bool kill_dup = true, unsigned skip_count_root = 0); @@ -128,6 +133,9 @@ struct CellStorageStat { bool add_used_storage(const CellSlice& cs, bool kill_dup = true, unsigned skip_count_root = 0); bool add_used_storage(CellSlice&& cs, bool kill_dup = true, unsigned skip_count_root = 0); bool add_used_storage(Ref cell, bool kill_dup = true, unsigned skip_count_root = 0); + + unsigned long long limit_cells = std::numeric_limits::max(); + unsigned long long limit_bits = std::numeric_limits::max(); }; struct VmStorageStat { diff --git a/crypto/vm/db/DynamicBagOfCellsDb.cpp b/crypto/vm/db/DynamicBagOfCellsDb.cpp index 4ff4ec30..5441feea 100644 --- a/crypto/vm/db/DynamicBagOfCellsDb.cpp +++ b/crypto/vm/db/DynamicBagOfCellsDb.cpp @@ -293,7 +293,9 @@ class DynamicBagOfCellsDbImpl : public DynamicBagOfCellsDb, private ExtCellCreat return db_->load_cell(hash); } TRY_RESULT(load_result, cell_loader_->load(hash, true, *this)); - CHECK(load_result.status == CellLoader::LoadResult::Ok); + if (load_result.status != CellLoader::LoadResult::Ok) { + return td::Status::Error("cell not found"); + } return std::move(load_result.cell()); } diff --git a/crypto/vm/vm.h b/crypto/vm/vm.h index 44aa7f23..17ce1aa3 100644 --- a/crypto/vm/vm.h +++
b/crypto/vm/vm.h @@ -97,6 +97,7 @@ class VmState final : public VmStateInterface { int stack_trace{0}, debug_off{0}; bool chksig_always_succeed{false}; td::ConstBitPtr missing_library{0}; + td::uint16 max_data_depth = 512; // Default value public: enum { @@ -108,8 +109,7 @@ class VmState final : public VmStateInterface { implicit_jmpref_gas_price = 10, implicit_ret_gas_price = 5, free_stack_depth = 32, - stack_entry_gas_price = 1, - max_data_depth = 512 + stack_entry_gas_price = 1 }; VmState(); VmState(Ref _code); @@ -325,6 +325,9 @@ class VmState final : public VmStateInterface { td::ConstBitPtr get_missing_library() const { return missing_library; } + void set_max_data_depth(td::uint16 depth) { + max_data_depth = depth; + } private: void init_cregs(bool same_c3 = false, bool push_0 = true); diff --git a/dht/CMakeLists.txt b/dht/CMakeLists.txt index 36cfdd82..e50a7497 100644 --- a/dht/CMakeLists.txt +++ b/dht/CMakeLists.txt @@ -30,3 +30,8 @@ target_include_directories(dht PUBLIC ) target_link_libraries(dht PRIVATE tdutils tdactor adnl tl_api) +add_executable(dht-ping-servers utils/dht-ping-servers.cpp) +target_link_libraries(dht-ping-servers PRIVATE tdutils tdactor adnl dht terminal) + +add_executable(dht-resolve utils/dht-resolve.cpp) +target_link_libraries(dht-resolve PRIVATE tdutils tdactor adnl dht terminal) diff --git a/dht/dht-bucket.cpp b/dht/dht-bucket.cpp index 4f9d75eb..f60f9961 100644 --- a/dht/dht-bucket.cpp +++ b/dht/dht-bucket.cpp @@ -66,38 +66,66 @@ td::uint32 DhtBucket::active_cnt() { } td::Status DhtBucket::add_full_node(DhtKeyId id, DhtNode newnode, td::actor::ActorId adnl, - adnl::AdnlNodeIdShort self_id) { + adnl::AdnlNodeIdShort self_id, td::int32 our_network_id, bool set_active) { for (auto &node : active_nodes_) { if (node && node->get_key() == id) { - return node->update_value(std::move(newnode), adnl, self_id); + if (set_active) { + return node->receive_ping(std::move(newnode), adnl, self_id); + } else { + return node->update_value(std::move(newnode), adnl, self_id); + } } } - for (auto &node : backup_nodes_) { + for (size_t i = 0; i < backup_nodes_.size(); ++i) { + auto &node = backup_nodes_[i]; if (node && node->get_key() == id) { - return node->update_value(std::move(newnode), adnl, self_id); + if (set_active) { + TRY_STATUS(node->receive_ping(std::move(newnode), adnl, self_id)); + if (node->is_ready()) { + promote_node(i); + } + return td::Status::OK(); + } else { + return node->update_value(std::move(newnode), adnl, self_id); + } } } - TRY_RESULT_PREFIX(N, DhtRemoteNode::create(std::move(newnode), max_missed_pings_), "failed to add new node: "); - - for (auto &node : backup_nodes_) { - if (node == nullptr) { - node = std::move(N); - return td::Status::OK(); + TRY_RESULT_PREFIX(N, DhtRemoteNode::create(std::move(newnode), max_missed_pings_, our_network_id), + "failed to add new node: "); + if (set_active) { + for (auto &node : active_nodes_) { + if (node == nullptr) { + node = std::move(N); + node->receive_ping(); + return td::Status::OK(); + } } } - for (auto &node : backup_nodes_) { - CHECK(node); - if (node->ready_from() == 0 && node->failed_from() + 60 < td::Time::now_cached()) { - node = std::move(N); - return td::Status::OK(); - } + size_t idx = select_backup_node_to_drop(); + if (idx < backup_nodes_.size()) { + backup_nodes_[idx] = std::move(N); } - return td::Status::OK(); } +size_t DhtBucket::select_backup_node_to_drop() const { + size_t result = backup_nodes_.size(); + for (size_t idx = 0; idx < backup_nodes_.size(); ++idx) { + const auto &node = 
backup_nodes_[idx]; + if (node == nullptr) { + return idx; + } + if (node->ready_from() == 0 && node->failed_from() + 60 < td::Time::now_cached()) { + if (result == backup_nodes_.size() || node->failed_from() < backup_nodes_[result]->failed_from()) { + result = idx; + } + } + } + return result; +} + void DhtBucket::receive_ping(DhtKeyId id, DhtNode result, td::actor::ActorId adnl, adnl::AdnlNodeIdShort self_id) { for (auto &node : active_nodes_) { @@ -119,17 +147,9 @@ void DhtBucket::receive_ping(DhtKeyId id, DhtNode result, td::actor::ActorIdready_from() == 0 && node->failed_from() + 60 < td::Time::now_cached()) { - node = std::move(active_nodes_[idx]); - return; - } + size_t new_idx = select_backup_node_to_drop(); + if (new_idx < backup_nodes_.size()) { + backup_nodes_[new_idx] = std::move(active_nodes_[idx]); } active_nodes_[idx] = nullptr; } @@ -150,7 +170,7 @@ void DhtBucket::check(bool client_only, td::actor::ActorId adnl, td: size_t have_space = 0; for (size_t i = 0; i < active_nodes_.size(); i++) { auto &node = active_nodes_[i]; - if (node && td::Time::now_cached() - node->last_ping_at() > ping_timeout_) { + if (node && td::Time::now_cached() - node->last_ping_at() > node->ping_interval()) { node->send_ping(client_only, adnl, dht, src); if (node->ready_from() == 0) { demote_node(i); @@ -162,7 +182,7 @@ void DhtBucket::check(bool client_only, td::actor::ActorId adnl, td: } for (size_t i = 0; i < backup_nodes_.size(); i++) { auto &node = backup_nodes_[i]; - if (node && td::Time::now_cached() - node->last_ping_at() > ping_timeout_) { + if (node && td::Time::now_cached() - node->last_ping_at() > node->ping_interval()) { node->send_ping(client_only, adnl, dht, src); } if (node && have_space > 0 && node->is_ready()) { @@ -200,6 +220,9 @@ DhtNodesList DhtBucket::export_nodes() const { list.push_back(node->get_node()); } } + if (list.size() > k_) { + list.list().resize(k_); + } return list; } diff --git a/dht/dht-bucket.hpp b/dht/dht-bucket.hpp index 812f670d..e12fe6a4 100644 --- a/dht/dht-bucket.hpp +++ b/dht/dht-bucket.hpp @@ -31,7 +31,6 @@ class DhtMember; class DhtBucket { private: - double ping_timeout_ = 60; td::uint32 max_missed_pings_ = 3; std::vector> active_nodes_; @@ -43,6 +42,7 @@ class DhtBucket { // const DhtMember::PrintId &print_id); void demote_node(size_t idx); void promote_node(size_t idx); + size_t select_backup_node_to_drop() const; public: DhtBucket(td::uint32 k) : k_(k) { @@ -51,7 +51,7 @@ class DhtBucket { } td::uint32 active_cnt(); td::Status add_full_node(DhtKeyId id, DhtNode node, td::actor::ActorId adnl, - adnl::AdnlNodeIdShort self_id); + adnl::AdnlNodeIdShort self_id, td::int32 our_network_id, bool set_active = false); void check(bool client_only, td::actor::ActorId adnl, td::actor::ActorId node, adnl::AdnlNodeIdShort src); void receive_ping(DhtKeyId id, DhtNode result, td::actor::ActorId adnl, adnl::AdnlNodeIdShort self_id); diff --git a/dht/dht-in.hpp b/dht/dht-in.hpp index c4f67819..59ce2184 100644 --- a/dht/dht-in.hpp +++ b/dht/dht-in.hpp @@ -46,6 +46,7 @@ class DhtMemberImpl : public DhtMember { DhtKeyId key_; td::uint32 k_; td::uint32 a_; + td::int32 network_id_{-1}; td::uint32 max_cache_time_ = 60; td::uint32 max_cache_size_ = 100; @@ -66,6 +67,15 @@ class DhtMemberImpl : public DhtMember { DhtKeyId last_republish_key_ = DhtKeyId::zero(); DhtKeyId last_check_key_ = DhtKeyId::zero(); + adnl::AdnlNodeIdShort last_check_reverse_conn_ = adnl::AdnlNodeIdShort::zero(); + + struct ReverseConnection { + adnl::AdnlNodeIdShort dht_node_; + DhtKeyId key_id_; + 
td::Timestamp ttl_; + }; + std::map reverse_connections_; + std::set our_reverse_connections_; class Callback : public adnl::Adnl::Callback { public: @@ -122,17 +132,33 @@ class DhtMemberImpl : public DhtMember { void process_query(adnl::AdnlNodeIdShort src, ton_api::dht_store &query, td::Promise promise); void process_query(adnl::AdnlNodeIdShort src, ton_api::dht_getSignedAddressList &query, td::Promise promise); + void process_query(adnl::AdnlNodeIdShort src, ton_api::dht_registerReverseConnection &query, + td::Promise promise); + void process_query(adnl::AdnlNodeIdShort src, ton_api::dht_requestReversePing &query, + td::Promise promise); public: DhtMemberImpl(adnl::AdnlNodeIdShort id, std::string db_root, td::actor::ActorId keyring, - td::actor::ActorId adnl, td::uint32 k, td::uint32 a = 3, bool client_only = false) - : id_(id), key_{id_}, k_(k), a_(a), db_root_(db_root), keyring_(keyring), adnl_(adnl), client_only_(client_only) { + td::actor::ActorId adnl, td::int32 network_id, td::uint32 k, td::uint32 a = 3, + bool client_only = false) + : id_(id) + , key_{id_} + , k_(k) + , a_(a) + , network_id_(network_id) + , db_root_(db_root) + , keyring_(keyring) + , adnl_(adnl) + , client_only_(client_only) { for (size_t i = 0; i < 256; i++) { buckets_.emplace_back(k_); } } - void add_full_node(DhtKeyId id, DhtNode node) override; + void add_full_node(DhtKeyId id, DhtNode node) override { + add_full_node_impl(id, std::move(node)); + } + void add_full_node_impl(DhtKeyId id, DhtNode node, bool set_active = false); adnl::AdnlNodeIdShort get_id() const override { return id_; @@ -143,6 +169,12 @@ class DhtMemberImpl : public DhtMember { void set_value(DhtValue key_value, td::Promise result) override; td::uint32 distance(DhtKeyId key_id, td::uint32 max_value); + void register_reverse_connection(adnl::AdnlNodeIdFull client, td::Promise promise) override; + void request_reverse_ping(adnl::AdnlNode target, adnl::AdnlNodeIdShort client, + td::Promise promise) override; + void request_reverse_ping_cont(adnl::AdnlNode target, td::BufferSlice signature, adnl::AdnlNodeIdShort client, + td::Promise promise); + td::Status store_in(DhtValue value) override; void send_store(DhtValue value, td::Promise promise); diff --git a/dht/dht-node.cpp b/dht/dht-node.cpp index 2b366ab3..409e3f68 100644 --- a/dht/dht-node.cpp +++ b/dht/dht-node.cpp @@ -23,31 +23,46 @@ namespace ton { namespace dht { -td::Status DhtNode::update(tl_object_ptr obj) { +td::Status DhtNode::update(tl_object_ptr obj, td::int32 our_network_id) { if (version_ && obj->version_ <= version_) { return td::Status::Error(ErrorCode::notready, "too old version"); } - auto signature = std::move(obj->signature_); - auto B = serialize_tl_object(obj, true); - + td::BufferSlice signature; + td::int32 network_id = -1; + if (obj->signature_.size() == 64) { + signature = std::move(obj->signature_); + } else if (obj->signature_.size() == 64 + 4) { + signature = td::BufferSlice{obj->signature_.as_slice().remove_prefix(4)}; + network_id = *(td::uint32 *)obj->signature_.as_slice().remove_suffix(64).data(); + } else { + return td::Status::Error(ErrorCode::notready, "invalid length of signature"); + } + if (network_id != our_network_id && network_id != -1 && our_network_id != -1) { + // Remove (network_id != -1 && our_network_id != -1) after network update + return td::Status::Error(ErrorCode::notready, PSTRING() << "wrong network id (expected " << our_network_id + << ", found " << network_id << ")"); + } TRY_RESULT(pub, adnl::AdnlNodeIdFull::create(obj->id_)); 
TRY_RESULT(addr_list, adnl::AdnlAddressList::create(std::move(obj->addr_list_))); - if (!addr_list.public_only()) { return td::Status::Error(ErrorCode::notready, "dht node must have only public addresses"); } if (!addr_list.size()) { return td::Status::Error(ErrorCode::notready, "dht node must have >0 addresses"); } + DhtNode new_node{std::move(pub), std::move(addr_list), obj->version_, network_id, std::move(signature)}; + TRY_STATUS(new_node.check_signature()); - TRY_RESULT(E, pub.pubkey().create_encryptor()); - TRY_STATUS(E->check_signature(B.as_slice(), signature.as_slice())); - - id_ = pub; - addr_list_ = addr_list; - version_ = obj->version_; - signature_ = td::SharedSlice(signature.as_slice()); + *this = std::move(new_node); + return td::Status::OK(); +} +td::Status DhtNode::check_signature() const { + TRY_RESULT(enc, id_.pubkey().create_encryptor()); + auto node2 = clone(); + node2.signature_ = {}; + TRY_STATUS_PREFIX(enc->check_signature(serialize_tl_object(node2.tl(), true).as_slice(), signature_.as_slice()), + "bad node signature: "); return td::Status::OK(); } diff --git a/dht/dht-node.hpp b/dht/dht-node.hpp index 03f8d134..d5860f91 100644 --- a/dht/dht-node.hpp +++ b/dht/dht-node.hpp @@ -22,6 +22,8 @@ #include "adnl/adnl-address-list.hpp" #include "dht-types.h" +#include "auto/tl/ton_api.hpp" +#include "td/utils/overloaded.h" namespace ton { @@ -32,26 +34,26 @@ class DhtNode { adnl::AdnlNodeIdFull id_; adnl::AdnlAddressList addr_list_; td::int32 version_{0}; + td::int32 network_id_{-1}; td::SharedSlice signature_; public: - DhtNode() { + DhtNode() = default; + DhtNode(adnl::AdnlNodeIdFull id, adnl::AdnlAddressList addr_list, td::int32 version, td::int32 network_id, td::BufferSlice signature) + : id_(id), addr_list_(addr_list), version_(version), network_id_(network_id), signature_(signature.as_slice()) { } - DhtNode(adnl::AdnlNodeIdFull id, adnl::AdnlAddressList addr_list, td::int32 version, td::BufferSlice signature) - : id_(id), addr_list_(addr_list), version_(version), signature_(signature.as_slice()) { + DhtNode(adnl::AdnlNodeIdFull id, adnl::AdnlAddressList addr_list, td::int32 version, td::int32 network_id, td::SharedSlice signature) + : id_(id), addr_list_(addr_list), version_(version), network_id_(network_id), signature_(std::move(signature)) { } - DhtNode(adnl::AdnlNodeIdFull id, adnl::AdnlAddressList addr_list, td::int32 version, td::SharedSlice signature) - : id_(id), addr_list_(addr_list), version_(version), signature_(std::move(signature)) { - } - static td::Result create(tl_object_ptr obj) { + static td::Result create(tl_object_ptr obj, td::int32 our_network_id) { if (obj->version_ == 0) { return td::Status::Error(ErrorCode::protoviolation, "zero version"); } DhtNode n; - TRY_STATUS(n.update(std::move(obj))); + TRY_STATUS(n.update(std::move(obj), our_network_id)); return std::move(n); } - td::Status update(tl_object_ptr obj); + td::Status update(tl_object_ptr obj, td::int32 our_network_id); DhtKeyId get_key() const { CHECK(!id_.empty()); return DhtKeyId{id_.compute_short_id()}; @@ -68,20 +70,30 @@ class DhtNode { } tl_object_ptr tl() const { - return create_tl_object(id_.tl(), addr_list_.tl(), version_, signature_.clone_as_buffer_slice()); + td::BufferSlice signature_ext; + if (network_id_ == -1) { + signature_ext = signature_.clone_as_buffer_slice(); + } else { + signature_ext = td::BufferSlice{4 + signature_.size()}; + td::MutableSlice s = signature_ext.as_slice(); + s.copy_from(td::Slice(reinterpret_cast(&network_id_), 4)); + s.remove_prefix(4); + 
s.copy_from(signature_.as_slice()); + } + return create_tl_object(id_.tl(), addr_list_.tl(), version_, std::move(signature_ext)); } DhtNode clone() const { - return DhtNode{id_, addr_list_, version_, signature_.clone()}; + return DhtNode{id_, addr_list_, version_, network_id_, signature_.clone()}; } + td::Status check_signature() const; }; class DhtNodesList { public: - DhtNodesList() { - } - DhtNodesList(tl_object_ptr R) { + DhtNodesList() = default; + DhtNodesList(tl_object_ptr R, td::int32 our_network_id) { for (auto &n : R->nodes_) { - auto N = DhtNode::create(std::move(n)); + auto N = DhtNode::create(std::move(n), our_network_id); if (N.is_ok()) { list_.emplace_back(N.move_as_ok()); } else { diff --git a/dht/dht-query.cpp b/dht/dht-query.cpp index 7e1f1b92..bc61242d 100644 --- a/dht/dht-query.cpp +++ b/dht/dht-query.cpp @@ -20,14 +20,11 @@ #include "td/utils/tl_storers.h" #include "td/utils/crypto.h" -#include "td/utils/tl_parsers.h" #include "td/utils/Random.h" #include "td/utils/overloaded.h" #include "td/utils/format.h" -#include "keys/encryptor.h" - #include "auto/tl/ton_api.hpp" #include "dht-query.hpp" @@ -123,7 +120,7 @@ void DhtQueryFindNodes::on_result(td::Result R, adnl::AdnlNodeI VLOG(DHT_WARNING) << this << ": incorrect result on dht.findNodes query from " << dst << ": " << Res.move_as_error(); } else { - add_nodes(DhtNodesList{Res.move_as_ok()}); + add_nodes(DhtNodesList{Res.move_as_ok(), our_network_id()}); } finish_query(); } @@ -149,6 +146,23 @@ void DhtQueryFindValue::send_one_query(adnl::AdnlNodeIdShort id) { td::Timestamp::in(2.0 + td::Random::fast(0, 20) * 0.1), std::move(B)); } +void DhtQueryFindValue::send_one_query_nodes(adnl::AdnlNodeIdShort id) { + auto P = create_serialize_tl_object(get_key().tl(), get_k()); + td::BufferSlice B; + if (client_only_) { + B = std::move(P); + } else { + B = create_serialize_tl_object_suffix(P.as_slice(), self_.tl()); + } + + auto Pr = td::PromiseCreator::lambda([SelfId = actor_id(this), dst = id](td::Result R) { + td::actor::send_closure(SelfId, &DhtQueryFindValue::on_result_nodes, std::move(R), dst); + }); + + td::actor::send_closure(adnl_, &adnl::Adnl::send_query, get_src(), id, "dht findValue", std::move(Pr), + td::Timestamp::in(2.0 + td::Random::fast(0, 20) * 0.1), std::move(B)); +} + void DhtQueryFindValue::on_result(td::Result R, adnl::AdnlNodeIdShort dst) { if (R.is_error()) { VLOG(DHT_INFO) << this << ": failed find value query " << get_src() << "->" << dst << ": " << R.move_as_error(); @@ -164,44 +178,72 @@ void DhtQueryFindValue::on_result(td::Result R, adnl::AdnlNodeI } bool need_stop = false; + bool send_get_nodes = false; auto A = Res.move_as_ok(); ton_api::downcast_call( - *A.get(), td::overloaded( - [&](ton_api::dht_valueFound &v) { - auto valueR = DhtValue::create(std::move(v.value_), true); - if (valueR.is_error()) { - VLOG(DHT_WARNING) << this << ": received incorrect dht answer on find value query from " << dst - << ": " << valueR.move_as_error(); - return; - } - auto value = valueR.move_as_ok(); - if (value.key_id() != key_) { - VLOG(DHT_WARNING) << this << ": received value for bad key on find value query from " << dst; - return; - } - promise_.set_value(std::move(value)); - need_stop = true; - }, - [&](ton_api::dht_valueNotFound &v) { add_nodes(DhtNodesList{std::move(v.nodes_)}); })); + *A, td::overloaded( + [&](ton_api::dht_valueFound &v) { + auto valueR = DhtValue::create(std::move(v.value_), true); + if (valueR.is_error()) { + VLOG(DHT_WARNING) << this << ": received incorrect dht answer on find value 
query from " << dst + << ": " << valueR.move_as_error(); + return; + } + auto value = valueR.move_as_ok(); + if (value.key_id() != key_) { + VLOG(DHT_WARNING) << this << ": received value for bad key on find value query from " << dst; + return; + } + if (!value.check_is_acceptable()) { + send_get_nodes = true; + return; + } + promise_.set_value(std::move(value)); + need_stop = true; + }, + [&](ton_api::dht_valueNotFound &v) { + add_nodes(DhtNodesList{std::move(v.nodes_), our_network_id()}); + })); if (need_stop) { stop(); + } else if (send_get_nodes) { + send_one_query_nodes(dst); } else { finish_query(); } } +void DhtQueryFindValue::on_result_nodes(td::Result R, adnl::AdnlNodeIdShort dst) { + if (R.is_error()) { + VLOG(DHT_INFO) << this << ": failed find nodes query " << get_src() << "->" << dst << ": " << R.move_as_error(); + finish_query(); + return; + } + auto Res = fetch_tl_object(R.move_as_ok(), true); + if (Res.is_error()) { + VLOG(DHT_WARNING) << this << ": dropping incorrect answer on dht.findNodes query from " << dst << ": " + << Res.move_as_error(); + finish_query(); + return; + } + auto r = Res.move_as_ok(); + add_nodes(DhtNodesList{create_tl_object(std::move(r->nodes_)), our_network_id()}); + finish_query(); +} + void DhtQueryFindValue::finish(DhtNodesList list) { promise_.set_error(td::Status::Error(ErrorCode::notready, "dht key not found")); } DhtQueryStore::DhtQueryStore(DhtValue key_value, DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src, - DhtNodesList list, td::uint32 k, td::uint32 a, DhtNode self, bool client_only, - td::actor::ActorId node, td::actor::ActorId adnl, + DhtNodesList list, td::uint32 k, td::uint32 a, td::int32 our_network_id, DhtNode self, + bool client_only, td::actor::ActorId node, td::actor::ActorId adnl, td::Promise promise) : print_id_(print_id) , k_(k) , a_(a) + , our_network_id_(our_network_id) , promise_(std::move(promise)) , value_(std::move(key_value)) , list_(std::move(list)) @@ -219,7 +261,8 @@ void DhtQueryStore::start_up() { auto key = value_.key_id(); auto A = td::actor::create_actor("FindNodesQuery", key, print_id_, src_, std::move(list_), k_, a_, - self_.clone(), client_only_, node_, adnl_, std::move(P)); + our_network_id_, self_.clone(), client_only_, node_, adnl_, + std::move(P)); A.release(); } @@ -279,6 +322,133 @@ void DhtQueryStore::store_ready(td::Result R) { } } +DhtQueryRegisterReverseConnection::DhtQueryRegisterReverseConnection( + DhtKeyId key_id, adnl::AdnlNodeIdFull client, td::uint32 ttl, td::BufferSlice signature, + DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src, DhtNodesList list, td::uint32 k, td::uint32 a, + td::int32 our_network_id, DhtNode self, bool client_only, td::actor::ActorId node, + td::actor::ActorId adnl, td::Promise promise) + : print_id_(print_id) + , k_(k) + , a_(a) + , our_network_id_(our_network_id) + , promise_(std::move(promise)) + , key_id_(key_id) + , list_(std::move(list)) + , self_(std::move(self)) + , client_only_(client_only) { + node_ = node; + adnl_ = adnl; + src_ = src; + query_ = create_serialize_tl_object(client.tl(), ttl, std::move(signature)); +} + +void DhtQueryRegisterReverseConnection::start_up() { + auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](td::Result res) { + td::actor::send_closure(SelfId, &DhtQueryRegisterReverseConnection::send_queries, std::move(res)); + }); + + auto A = td::actor::create_actor("FindNodesQuery", key_id_, print_id_, src_, std::move(list_), k_, + a_, our_network_id_, self_.clone(), client_only_, node_, adnl_, + std::move(P)); + 
A.release(); +} + +void DhtQueryRegisterReverseConnection::send_queries(td::Result R) { + if (R.is_error()) { + auto S = R.move_as_error(); + VLOG(DHT_NOTICE) << this << ": failed to get nearest nodes to " << key_id_ << ": " << S; + promise_.set_error(std::move(S)); + stop(); + return; + } + auto list = R.move_as_ok(); + + remaining_ = static_cast(list.size()); + if (remaining_ == 0) { + VLOG(DHT_NOTICE) << this << ": failed to get nearest nodes to " << key_id_ << ": no nodes"; + promise_.set_error(td::Status::Error("no dht nodes")); + stop(); + return; + } + + for (auto &node : list.list()) { + auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](td::Result R) { + td::actor::send_closure(SelfId, &DhtQueryRegisterReverseConnection::ready, std::move(R)); + }); + td::actor::send_closure(adnl_, &adnl::Adnl::send_query, src_, node.adnl_id().compute_short_id(), "dht regrevcon", + std::move(P), td::Timestamp::in(2.0 + td::Random::fast(0, 20) * 0.1), query_.clone()); + } +} + +void DhtQueryRegisterReverseConnection::ready(td::Result R) { + if (R.is_error()) { + fail_++; + VLOG(DHT_INFO) << this << ": failed register reverse connection query: " << R.move_as_error(); + } else { + auto R2 = fetch_tl_object(R.move_as_ok(), true); + if (R2.is_error()) { + fail_++; + VLOG(DHT_WARNING) << this << ": can not parse answer (expected dht.stored): " << R2.move_as_error(); + } else { + success_++; + } + } + CHECK(remaining_ > 0); + remaining_--; + if (remaining_ == 0) { + if (success_ > 0) { + promise_.set_value(td::Unit()); + } else { + promise_.set_result(td::Status::Error("failed to make actual query")); + } + stop(); + } +} + +void DhtQueryRequestReversePing::send_one_query(adnl::AdnlNodeIdShort id) { + td::BufferSlice B; + if (client_only_) { + B = query_.clone(); + } else { + B = create_serialize_tl_object_suffix(query_.as_slice(), self_.tl()); + } + auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), dst = id](td::Result R) { + td::actor::send_closure(SelfId, &DhtQueryRequestReversePing::on_result, std::move(R), dst); + }); + td::actor::send_closure(adnl_, &adnl::Adnl::send_query, get_src(), id, "dht requestReversePing", std::move(P), + td::Timestamp::in(2.0 + td::Random::fast(0, 20) * 0.1), std::move(B)); +} + +void DhtQueryRequestReversePing::on_result(td::Result R, adnl::AdnlNodeIdShort dst) { + if (R.is_error()) { + VLOG(DHT_INFO) << this << ": failed reverse ping query " << get_src() << "->" << dst << ": " << R.move_as_error(); + finish_query(); + return; + } + auto Res = fetch_tl_object(R.move_as_ok(), true); + if (Res.is_error()) { + VLOG(DHT_WARNING) << this << ": dropping incorrect answer on dht.requestReversePing query from " << dst << ": " + << Res.move_as_error(); + finish_query(); + return; + } + + auto A = Res.move_as_ok(); + ton_api::downcast_call(*A, td::overloaded( + [&](ton_api::dht_reversePingOk &v) { + promise_.set_value(td::Unit()); + stop(); + }, + [&](ton_api::dht_clientNotFound &v) { + add_nodes(DhtNodesList{std::move(v.nodes_), our_network_id()}); + finish_query(); + })); +} + +void DhtQueryRequestReversePing::finish(DhtNodesList list) { + promise_.set_error(td::Status::Error(ErrorCode::notready, "dht key not found")); +} + } // namespace dht } // namespace ton diff --git a/dht/dht-query.hpp b/dht/dht-query.hpp index aa607f56..cf085e25 100644 --- a/dht/dht-query.hpp +++ b/dht/dht-query.hpp @@ -45,7 +45,7 @@ class DhtQuery : public td::actor::Actor { public: DhtQuery(DhtKeyId key, DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src, DhtNodesList list, 
td::uint32 k, - td::uint32 a, DhtNode self, bool client_only, td::actor::ActorId node, + td::uint32 a, td::int32 our_network_id, DhtNode self, bool client_only, td::actor::ActorId node, td::actor::ActorId adnl) : key_(key) , self_(std::move(self)) @@ -54,6 +54,7 @@ class DhtQuery : public td::actor::Actor { , src_(src) , k_(k) , a_(a) + , our_network_id_(our_network_id) , node_(node) , adnl_(adnl) { add_nodes(std::move(list)); @@ -77,6 +78,9 @@ class DhtQuery : public td::actor::Actor { td::uint32 get_k() const { return k_; } + td::int32 our_network_id() const { + return our_network_id_; + } void start_up() override { send_queries(); } @@ -91,6 +95,7 @@ class DhtQuery : public td::actor::Actor { std::set pending_ids_; td::uint32 k_; td::uint32 a_; + td::int32 our_network_id_; td::actor::ActorId node_; td::uint32 active_queries_ = 0; @@ -104,9 +109,10 @@ class DhtQueryFindNodes : public DhtQuery { public: DhtQueryFindNodes(DhtKeyId key, DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src, DhtNodesList list, - td::uint32 k, td::uint32 a, DhtNode self, bool client_only, td::actor::ActorId node, - td::actor::ActorId adnl, td::Promise promise) - : DhtQuery(key, print_id, src, std::move(list), k, a, std::move(self), client_only, node, adnl) + td::uint32 k, td::uint32 a, td::int32 our_network_id, DhtNode self, bool client_only, + td::actor::ActorId node, td::actor::ActorId adnl, + td::Promise promise) + : DhtQuery(key, print_id, src, std::move(list), k, a, our_network_id, std::move(self), client_only, node, adnl) , promise_(std::move(promise)) { } void send_one_query(adnl::AdnlNodeIdShort id) override; @@ -123,13 +129,16 @@ class DhtQueryFindValue : public DhtQuery { public: DhtQueryFindValue(DhtKeyId key, DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src, DhtNodesList list, - td::uint32 k, td::uint32 a, DhtNode self, bool client_only, td::actor::ActorId node, - td::actor::ActorId adnl, td::Promise promise) - : DhtQuery(key, print_id, src, std::move(list), k, a, std::move(self), client_only, node, adnl) + td::uint32 k, td::uint32 a, td::int32 our_network_id, DhtNode self, bool client_only, + td::actor::ActorId node, td::actor::ActorId adnl, + td::Promise promise) + : DhtQuery(key, print_id, src, std::move(list), k, a, our_network_id, std::move(self), client_only, node, adnl) , promise_(std::move(promise)) { } void send_one_query(adnl::AdnlNodeIdShort id) override; + void send_one_query_nodes(adnl::AdnlNodeIdShort id); void on_result(td::Result R, adnl::AdnlNodeIdShort dst); + void on_result_nodes(td::Result R, adnl::AdnlNodeIdShort dst); void finish(DhtNodesList list) override; std::string get_name() const override { return "find value"; @@ -141,6 +150,7 @@ class DhtQueryStore : public td::actor::Actor { DhtMember::PrintId print_id_; td::uint32 k_; td::uint32 a_; + td::int32 our_network_id_; td::Promise promise_; td::actor::ActorId node_; td::actor::ActorId adnl_; @@ -155,7 +165,7 @@ class DhtQueryStore : public td::actor::Actor { public: DhtQueryStore(DhtValue key_value, DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src, DhtNodesList list, - td::uint32 k, td::uint32 a, DhtNode self, bool client_only, td::actor::ActorId node, + td::uint32 k, td::uint32 a, td::int32 our_network_id, DhtNode self, bool client_only, td::actor::ActorId node, td::actor::ActorId adnl, td::Promise promise); void send_stores(td::Result res); void store_ready(td::Result res); @@ -165,6 +175,64 @@ class DhtQueryStore : public td::actor::Actor { } }; +class DhtQueryRegisterReverseConnection : public 
td::actor::Actor { + private: + DhtMember::PrintId print_id_; + td::uint32 k_; + td::uint32 a_; + td::int32 our_network_id_; + td::Promise promise_; + td::actor::ActorId node_; + td::actor::ActorId adnl_; + adnl::AdnlNodeIdShort src_; + DhtKeyId key_id_; + td::BufferSlice query_; + td::uint32 success_ = 0; + td::uint32 fail_ = 0; + td::uint32 remaining_; + DhtNodesList list_; + DhtNode self_; + bool client_only_; + + public: + DhtQueryRegisterReverseConnection(DhtKeyId key_id, adnl::AdnlNodeIdFull client, td::uint32 ttl, + td::BufferSlice signature, DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src, + DhtNodesList list, td::uint32 k, td::uint32 a, td::int32 our_network_id, + DhtNode self, bool client_only, td::actor::ActorId node, + td::actor::ActorId adnl, td::Promise promise); + void send_queries(td::Result R); + void ready(td::Result R); + void start_up() override; + DhtMember::PrintId print_id() const { + return print_id_; + } +}; + +class DhtQueryRequestReversePing : public DhtQuery { + private: + td::Promise promise_; + td::BufferSlice query_; + + public: + DhtQueryRequestReversePing(adnl::AdnlNodeIdShort client, adnl::AdnlNode target, td::BufferSlice signature, + DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src, DhtNodesList list, td::uint32 k, + td::uint32 a, td::int32 our_network_id, DhtNode self, bool client_only, + td::actor::ActorId node, td::actor::ActorId adnl, + td::Promise promise) + : DhtQuery(DhtMember::get_reverse_connection_key(client).compute_key_id(), print_id, src, std::move(list), k, a, + our_network_id, std::move(self), client_only, node, adnl) + , promise_(std::move(promise)) + , query_(create_serialize_tl_object(target.tl(), std::move(signature), + client.bits256_value(), k)) { + } + void send_one_query(adnl::AdnlNodeIdShort id) override; + void on_result(td::Result R, adnl::AdnlNodeIdShort dst); + void finish(DhtNodesList list) override; + std::string get_name() const override { + return "request remote ping"; + } +}; + inline td::StringBuilder &operator<<(td::StringBuilder &sb, const DhtQuery &dht) { sb << dht.print_id(); return sb; diff --git a/dht/dht-remote-node.cpp b/dht/dht-remote-node.cpp index f1ea2197..653de256 100644 --- a/dht/dht-remote-node.cpp +++ b/dht/dht-remote-node.cpp @@ -20,13 +20,10 @@ #include "td/utils/tl_storers.h" #include "td/utils/crypto.h" -#include "td/utils/tl_parsers.h" #include "td/utils/Random.h" #include "td/utils/format.h" -#include "keys/encryptor.h" - #include "auto/tl/ton_api.hpp" #include "dht-remote-node.hpp" @@ -35,28 +32,43 @@ namespace ton { namespace dht { +static const double PING_INTERVAL_DEFAULT = 60.0; +static const double PING_INTERVAL_MULTIPLIER = 1.1; +static const double PING_INTERVAL_MAX = 3600.0 * 4; + +DhtRemoteNode::DhtRemoteNode(DhtNode node, td::uint32 max_missed_pings, td::int32 our_network_id) + : node_(std::move(node)) + , max_missed_pings_(max_missed_pings) + , our_network_id_(our_network_id) + , ping_interval_(PING_INTERVAL_DEFAULT) { + failed_from_ = td::Time::now_cached(); + id_ = node_.get_key(); +} + td::Status DhtRemoteNode::receive_ping(DhtNode node, td::actor::ActorId adnl, adnl::AdnlNodeIdShort self_id) { TRY_STATUS(update_value(std::move(node), adnl, self_id)); + receive_ping(); + return td::Status::OK(); +} + +void DhtRemoteNode::receive_ping() { missed_pings_ = 0; + ping_interval_ = PING_INTERVAL_DEFAULT; if (ready_from_ == 0) { ready_from_ = td::Time::now_cached(); } - return td::Status::OK(); } td::Status DhtRemoteNode::update_value(DhtNode node, td::actor::ActorId adnl, 
adnl::AdnlNodeIdShort self_id) { - CHECK(node.adnl_id() == node_.adnl_id()); + if (node.adnl_id() != node_.adnl_id()) { + return td::Status::Error("Wrong adnl id"); + } if (node.version() <= node_.version()) { return td::Status::OK(); } - - TRY_RESULT(enc, node.adnl_id().pubkey().create_encryptor()); - auto tl = node.tl(); - auto sig = std::move(tl->signature_); - TRY_STATUS_PREFIX(enc->check_signature(serialize_tl_object(tl, true).as_slice(), sig.as_slice()), - "bad node signature: "); + TRY_STATUS(node.check_signature()); node_ = std::move(node); td::actor::send_closure(adnl, &adnl::Adnl::add_peer, self_id, node_.adnl_id(), node_.addr_list()); @@ -66,22 +78,25 @@ td::Status DhtRemoteNode::update_value(DhtNode node, td::actor::ActorId adnl, td::actor::ActorId node, adnl::AdnlNodeIdShort src) { missed_pings_++; - if (missed_pings_ > max_missed_pings_ && ready_from_ > 0) { - ready_from_ = 0; - failed_from_ = td::Time::now_cached(); + if (missed_pings_ > max_missed_pings_) { + ping_interval_ = std::min(ping_interval_ * PING_INTERVAL_MULTIPLIER, PING_INTERVAL_MAX); + if (ready_from_ > 0) { + ready_from_ = 0; + failed_from_ = td::Time::now_cached(); + } } last_ping_at_ = td::Time::now_cached(); td::actor::send_closure(adnl, &adnl::Adnl::add_peer, src, node_.adnl_id(), node_.addr_list()); - auto P = td::PromiseCreator::lambda([key = id_, id = node_.adnl_id().compute_short_id(), client_only, node, src, - adnl](td::Result R) mutable { + auto P = td::PromiseCreator::lambda([key = id_, id = node_.adnl_id().compute_short_id(), client_only, node, src, adnl, + our_network_id = our_network_id_](td::Result R) mutable { if (R.is_error()) { LOG(ERROR) << "[dht]: failed to get self node"; return; } - auto P = td::PromiseCreator::lambda([key, node, adnl](td::Result R) { + auto P = td::PromiseCreator::lambda([key, node, adnl, our_network_id](td::Result R) { if (R.is_error()) { VLOG(DHT_INFO) << "[dht]: received error for query to " << key << ": " << R.move_as_error(); return; @@ -89,7 +104,7 @@ void DhtRemoteNode::send_ping(bool client_only, td::actor::ActorId a auto F = fetch_tl_object(R.move_as_ok(), true); if (F.is_ok()) { - auto N = DhtNode::create(F.move_as_ok()); + auto N = DhtNode::create(F.move_as_ok(), our_network_id); if (N.is_ok()) { td::actor::send_closure(node, &DhtMember::receive_ping, key, N.move_as_ok()); } else { @@ -123,7 +138,8 @@ adnl::AdnlNodeIdFull DhtRemoteNode::get_full_id() const { return node_.adnl_id(); } -td::Result> DhtRemoteNode::create(DhtNode node, td::uint32 max_missed_pings) { +td::Result> DhtRemoteNode::create(DhtNode node, td::uint32 max_missed_pings, + td::int32 our_network_id) { TRY_RESULT(enc, node.adnl_id().pubkey().create_encryptor()); auto tl = node.tl(); auto sig = std::move(tl->signature_); @@ -131,7 +147,7 @@ td::Result> DhtRemoteNode::create(DhtNode node, t TRY_STATUS_PREFIX(enc->check_signature(serialize_tl_object(tl, true).as_slice(), sig.as_slice()), "bad node signature: "); - return std::make_unique(std::move(node), max_missed_pings); + return std::make_unique(std::move(node), max_missed_pings, our_network_id); } } // namespace dht diff --git a/dht/dht-remote-node.hpp b/dht/dht-remote-node.hpp index e65c0429..8e7db489 100644 --- a/dht/dht-remote-node.hpp +++ b/dht/dht-remote-node.hpp @@ -40,19 +40,18 @@ class DhtRemoteNode { DhtNode node_; td::uint32 max_missed_pings_; + td::int32 our_network_id_; td::uint32 missed_pings_ = 0; double last_ping_at_ = 0; double ready_from_ = 0; double failed_from_ = 0; + double ping_interval_; td::int32 version_; public: - 
DhtRemoteNode(DhtNode node, td::uint32 max_missed_pings) - : node_(std::move(node)), max_missed_pings_(max_missed_pings) { - failed_from_ = td::Time::now_cached(); - id_ = node_.get_key(); - } - static td::Result> create(DhtNode node, td::uint32 max_missed_pings); + DhtRemoteNode(DhtNode node, td::uint32 max_missed_pings, td::int32 our_network_id); + static td::Result> create(DhtNode node, td::uint32 max_missed_pings, + td::int32 our_network_id); DhtNode get_node() const { return node_.clone(); } @@ -76,9 +75,13 @@ class DhtRemoteNode { double last_ping_at() const { return last_ping_at_; } + double ping_interval() const { + return ping_interval_; + } void send_ping(bool client_only, td::actor::ActorId adnl, td::actor::ActorId node, adnl::AdnlNodeIdShort src); td::Status receive_ping(DhtNode node, td::actor::ActorId adnl, adnl::AdnlNodeIdShort self_id); + void receive_ping(); td::Status update_value(DhtNode node, td::actor::ActorId adnl, adnl::AdnlNodeIdShort self_id); }; diff --git a/dht/dht-types.cpp b/dht/dht-types.cpp index 89949bc1..118df2a8 100644 --- a/dht/dht-types.cpp +++ b/dht/dht-types.cpp @@ -209,6 +209,10 @@ td::Status DhtValue::check() const { return key_.update_rule()->check_value(*this); } +bool DhtValue::check_is_acceptable() const { + return key_.update_rule()->check_is_acceptable(*this); +} + DhtKeyId DhtValue::key_id() const { return key_.key().compute_key_id(); } @@ -360,6 +364,21 @@ td::Status DhtUpdateRuleOverlayNodes::update_value(DhtValue &value, DhtValue &&n return td::Status::OK(); } +bool DhtUpdateRuleOverlayNodes::check_is_acceptable(const ton::dht::DhtValue &value) { + auto F = fetch_tl_object(value.value().clone_as_buffer_slice(), true); + if (F.is_error()) { + return false; + } + auto L = F.move_as_ok(); + auto now = td::Clocks::system(); + for (auto &node : L->nodes_) { + if (node->version_ + 600 > now) { + return true; + } + } + return false; +} + tl_object_ptr DhtUpdateRuleOverlayNodes::tl() const { return create_tl_object(); } diff --git a/dht/dht-types.h b/dht/dht-types.h index 75efee55..45657d45 100644 --- a/dht/dht-types.h +++ b/dht/dht-types.h @@ -119,6 +119,9 @@ class DhtUpdateRule { virtual td::Status check_value(const DhtValue &value) = 0; virtual td::Status update_value(DhtValue &value, DhtValue &&new_value) = 0; virtual bool need_republish() const = 0; + virtual bool check_is_acceptable(const DhtValue &value) { + return true; + } virtual tl_object_ptr tl() const = 0; static td::Result> create(tl_object_ptr obj); }; @@ -210,6 +213,7 @@ class DhtValue { void update_signature(td::BufferSlice signature); void update_signature(td::SharedSlice signature); td::Status check() const; + bool check_is_acceptable() const; DhtKeyId key_id() const; @@ -249,6 +253,7 @@ class DhtUpdateRuleOverlayNodes : public DhtUpdateRule { bool need_republish() const override { return false; } + bool check_is_acceptable(const DhtValue &value) override; tl_object_ptr tl() const override; static td::Result> create(); }; diff --git a/dht/dht.cpp b/dht/dht.cpp index 0d441427..e1e20d45 100644 --- a/dht/dht.cpp +++ b/dht/dht.cpp @@ -20,7 +20,6 @@ #include "td/utils/tl_storers.h" #include "td/utils/crypto.h" -#include "td/utils/tl_parsers.h" #include "td/utils/Random.h" #include "td/utils/base64.h" @@ -28,9 +27,6 @@ #include "td/db/RocksDb.h" -#include "keys/encryptor.h" -#include "adnl/utils.hpp" - #include "auto/tl/ton_api.hpp" #include "dht.h" @@ -44,10 +40,9 @@ namespace dht { td::actor::ActorOwn DhtMember::create(adnl::AdnlNodeIdShort id, std::string db_root, td::actor::ActorId 
keyring, - td::actor::ActorId adnl, td::uint32 k, td::uint32 a, - bool client_only) { - return td::actor::ActorOwn( - td::actor::create_actor("dht", id, db_root, keyring, adnl, k, a, client_only)); + td::actor::ActorId adnl, td::int32 network_id, + td::uint32 k, td::uint32 a, bool client_only) { + return td::actor::create_actor("dht", id, db_root, keyring, adnl, network_id, k, a, client_only); } td::Result> Dht::create(adnl::AdnlNodeIdShort id, std::string db_root, @@ -57,7 +52,7 @@ td::Result> Dht::create(adnl::AdnlNodeIdShort id, std:: CHECK(conf->get_k() > 0); CHECK(conf->get_a() > 0); - auto D = DhtMember::create(id, db_root, keyring, adnl, conf->get_k(), conf->get_a()); + auto D = DhtMember::create(id, db_root, keyring, adnl, conf->get_network_id(), conf->get_k(), conf->get_a()); auto &nodes = conf->nodes(); for (auto &node : nodes.list()) { @@ -74,7 +69,7 @@ td::Result> Dht::create_client(adnl::AdnlNodeIdShort id CHECK(conf->get_k() > 0); CHECK(conf->get_a() > 0); - auto D = DhtMember::create(id, db_root, keyring, adnl, conf->get_k(), conf->get_a(), true); + auto D = DhtMember::create(id, db_root, keyring, adnl, conf->get_network_id(), conf->get_k(), conf->get_a(), true); auto &nodes = conf->nodes(); for (auto &node : nodes.list()) { @@ -90,8 +85,11 @@ void DhtMemberImpl::start_up() { ton_api::dht_findValue::ID, ton_api::dht_store::ID, ton_api::dht_ping::ID, + ton_api::dht_registerReverseConnection::ID, + ton_api::dht_requestReversePing::ID, ton_api::dht_query::ID, - ton_api::dht_message::ID}; + ton_api::dht_message::ID, + ton_api::dht_requestReversePingCont::ID}; for (auto it : methods) { td::actor::send_closure(adnl_, &adnl::Adnl::subscribe, id_, adnl::Adnl::int_to_bytestring(it), @@ -112,12 +110,12 @@ void DhtMemberImpl::start_up() { V.ensure(); auto nodes = std::move(V.move_as_ok()->nodes_); auto s = nodes->nodes_.size(); - DhtNodesList list{std::move(nodes)}; - CHECK(list.size() == s); + DhtNodesList list{std::move(nodes), network_id_}; + CHECK(list.size() <= s); // Some nodes can be dropped due to a wrong network id auto &B = buckets_[bit]; for (auto &node : list.list()) { auto key = node.get_key(); - B.add_full_node(key, std::move(node), adnl_, id_); + B.add_full_node(key, std::move(node), adnl_, id_, network_id_); } } } @@ -131,8 +129,11 @@ void DhtMemberImpl::tear_down() { ton_api::dht_findValue::ID, ton_api::dht_store::ID, ton_api::dht_ping::ID, + ton_api::dht_registerReverseConnection::ID, + ton_api::dht_requestReversePing::ID, ton_api::dht_query::ID, - ton_api::dht_message::ID}; + ton_api::dht_message::ID, + ton_api::dht_requestReversePingCont::ID}; for (auto it : methods) { td::actor::send_closure(adnl_, &adnl::Adnl::unsubscribe, id_, adnl::Adnl::int_to_bytestring(it)); @@ -299,6 +300,61 @@ void DhtMemberImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::dht_getSig get_self_node(std::move(P)); } +static td::BufferSlice register_reverse_connection_to_sign(adnl::AdnlNodeIdShort client, adnl::AdnlNodeIdShort dht_id, + td::uint32 ttl) { + td::BufferSlice result(32 + 32 + 4); + td::MutableSlice s = result.as_slice(); + s.copy_from(client.as_slice()); + s.remove_prefix(32); + s.copy_from(dht_id.as_slice()); + s.remove_prefix(32); + s.copy_from(std::string(reinterpret_cast(&ttl), 4)); + return result; +} + +void DhtMemberImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::dht_registerReverseConnection &query, + td::Promise promise) { + td::uint32 ttl = query.ttl_, now = (td::uint32)td::Clocks::system(); + if (ttl <= now) { + return; + } + PublicKey pub{query.node_}; + 
adnl::AdnlNodeIdShort client_id{pub.compute_short_id()}; + td::BufferSlice to_sign = register_reverse_connection_to_sign(client_id, src, ttl); + TRY_RESULT_PROMISE(promise, encryptor, pub.create_encryptor()); + TRY_STATUS_PROMISE(promise, encryptor->check_signature(to_sign, query.signature_)); + DhtKeyId key_id = get_reverse_connection_key(client_id).compute_key_id(); + reverse_connections_[client_id] = ReverseConnection{src, key_id, td::Timestamp::at_unix(std::min(ttl, now + 300))}; + promise.set_value(create_serialize_tl_object()); +} + +void DhtMemberImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::dht_requestReversePing &query, + td::Promise promise) { + adnl::AdnlNodeIdShort client{query.client_}; + auto it = reverse_connections_.find(client); + if (it != reverse_connections_.end()) { + if (it->second.ttl_.is_in_past()) { + reverse_connections_.erase(it); + } else { + PublicKey pub{query.target_->id_}; + TRY_RESULT_PROMISE(promise, encryptor, pub.create_encryptor()); + TRY_STATUS_PROMISE(promise, + encryptor->check_signature(serialize_tl_object(query.target_, true), query.signature_)); + td::actor::send_closure(adnl_, &adnl::Adnl::send_message, id_, it->second.dht_node_, + create_serialize_tl_object( + std::move(query.target_), std::move(query.signature_), query.client_)); + promise.set_result(create_serialize_tl_object()); + return; + } + } + auto k = static_cast(query.k_); + if (k > max_k()) { + k = max_k(); + } + auto R = get_nearest_nodes(get_reverse_connection_key(client).compute_key_id(), k); + promise.set_value(create_serialize_tl_object(R.tl())); +} + void DhtMemberImpl::receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice data, td::Promise promise) { if (client_only_) { @@ -307,11 +363,15 @@ void DhtMemberImpl::receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice dat { auto R = fetch_tl_prefix(data, true); if (R.is_ok()) { - auto N = DhtNode::create(std::move(R.move_as_ok()->node_)); + auto N = DhtNode::create(std::move(R.move_as_ok()->node_), network_id_); if (N.is_ok()) { auto node = N.move_as_ok(); - auto key = node.get_key(); - add_full_node(key, std::move(node)); + if (node.adnl_id().compute_short_id() == src) { + auto key = node.get_key(); + add_full_node_impl(key, std::move(node), true); + } else { + VLOG(DHT_WARNING) << this << ": dropping bad node: unexpected adnl id"; + } } else { VLOG(DHT_WARNING) << this << ": dropping bad node " << N.move_as_error(); } @@ -335,10 +395,10 @@ void DhtMemberImpl::receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice dat VLOG(DHT_EXTRA_DEBUG) << this << ": query to DHT from " << src << ": " << ton_api::to_string(Q); - ton_api::downcast_call(*Q.get(), [&](auto &object) { this->process_query(src, object, std::move(promise)); }); + ton_api::downcast_call(*Q, [&](auto &object) { this->process_query(src, object, std::move(promise)); }); } -void DhtMemberImpl::add_full_node(DhtKeyId key, DhtNode node) { +void DhtMemberImpl::add_full_node_impl(DhtKeyId key, DhtNode node, bool set_active) { VLOG(DHT_EXTRA_DEBUG) << this << ": adding full node " << key; auto eid = key ^ key_; @@ -350,7 +410,7 @@ void DhtMemberImpl::add_full_node(DhtKeyId key, DhtNode node) { #endif if (bit < 256) { CHECK(key.get_bit(bit) != key_.get_bit(bit)); - buckets_[bit].add_full_node(key, std::move(node), adnl_, id_); + buckets_[bit].add_full_node(key, std::move(node), adnl_, id_, network_id_, set_active); } else { CHECK(key == key_); } @@ -369,6 +429,27 @@ void DhtMemberImpl::receive_ping(DhtKeyId key, DhtNode result) { } void 
DhtMemberImpl::receive_message(adnl::AdnlNodeIdShort src, td::BufferSlice data) { + auto F = fetch_tl_object(data, true); + if (F.is_ok()) { + auto S = [&]() -> td::Status { + auto f = F.move_as_ok(); + adnl::AdnlNodeIdShort client{f->client_}; + if (!our_reverse_connections_.count(client)) { + return td::Status::Error(PSTRING() << ": unknown id for reverse ping: " << client); + } + TRY_RESULT_PREFIX(node, adnl::AdnlNode::create(f->target_), "failed to parse node: "); + TRY_RESULT_PREFIX(encryptor, node.pub_id().pubkey().create_encryptor(), "failed to create encryptor: "); + TRY_STATUS_PREFIX(encryptor->check_signature(serialize_tl_object(f->target_, true), f->signature_), + "invalid signature: "); + VLOG(DHT_INFO) << this << ": sending reverse ping to " << node.compute_short_id(); + td::actor::send_closure(adnl_, &adnl::Adnl::add_peer, client, node.pub_id(), node.addr_list()); + td::actor::send_closure(adnl_, &adnl::Adnl::send_message, client, node.compute_short_id(), td::BufferSlice()); + return td::Status::OK(); + }(); + if (S.is_error()) { + VLOG(DHT_INFO) << this << ": " << S; + } + } } void DhtMemberImpl::set_value(DhtValue value, td::Promise promise) { @@ -385,10 +466,11 @@ void DhtMemberImpl::set_value(DhtValue value, td::Promise promise) { void DhtMemberImpl::get_value_in(DhtKeyId key, td::Promise result) { auto P = td::PromiseCreator::lambda([key, promise = std::move(result), SelfId = actor_id(this), print_id = print_id(), - adnl = adnl_, list = get_nearest_nodes(key, k_), k = k_, a = a_, id = id_, + adnl = adnl_, list = get_nearest_nodes(key, k_), k = k_, a = a_, + network_id = network_id_, id = id_, client_only = client_only_](td::Result R) mutable { R.ensure(); - td::actor::create_actor("FindValueQuery", key, print_id, id, std::move(list), k, a, + td::actor::create_actor("FindValueQuery", key, print_id, id, std::move(list), k, a, network_id, R.move_as_ok(), client_only, SelfId, adnl, std::move(promise)) .release(); }); @@ -396,6 +478,70 @@ void DhtMemberImpl::get_value_in(DhtKeyId key, td::Promise result) { get_self_node(std::move(P)); } +void DhtMemberImpl::register_reverse_connection(adnl::AdnlNodeIdFull client, td::Promise promise) { + auto client_short = client.compute_short_id(); + td::uint32 ttl = (td::uint32)td::Clocks::system() + 300; + our_reverse_connections_.insert(client_short); + auto key_id = get_reverse_connection_key(client_short).compute_key_id(); + td::actor::send_closure(keyring_, &keyring::Keyring::sign_message, client_short.pubkey_hash(), + register_reverse_connection_to_sign(client_short, id_, ttl), + [=, print_id = print_id(), list = get_nearest_nodes(key_id, k_), SelfId = actor_id(this), + promise = std::move(promise)](td::Result R) mutable { + TRY_RESULT_PROMISE_PREFIX(promise, signature, std::move(R), "Failed to sign: "); + td::actor::send_closure(SelfId, &DhtMemberImpl::get_self_node, + [=, list = std::move(list), signature = std::move(signature), + promise = std::move(promise)](td::Result R) mutable { + R.ensure(); + td::actor::create_actor( + "RegisterReverseQuery", key_id, std::move(client), ttl, + std::move(signature), print_id, id_, std::move(list), k_, a_, + network_id_, R.move_as_ok(), client_only_, SelfId, adnl_, + std::move(promise)) + .release(); + }); + }); +} + +void DhtMemberImpl::request_reverse_ping(adnl::AdnlNode target, adnl::AdnlNodeIdShort client, + td::Promise promise) { + auto pubkey_hash = target.compute_short_id().pubkey_hash(); + td::BufferSlice to_sign = serialize_tl_object(target.tl(), true); + 
td::actor::send_closure(keyring_, &keyring::Keyring::sign_message, pubkey_hash, std::move(to_sign), + [SelfId = actor_id(this), promise = std::move(promise), target = std::move(target), + client](td::Result R) mutable { + TRY_RESULT_PROMISE(promise, signature, std::move(R)); + td::actor::send_closure(SelfId, &DhtMemberImpl::request_reverse_ping_cont, + std::move(target), std::move(signature), client, + std::move(promise)); + }); +} + +void DhtMemberImpl::request_reverse_ping_cont(adnl::AdnlNode target, td::BufferSlice signature, + adnl::AdnlNodeIdShort client, td::Promise promise) { + auto it = reverse_connections_.find(client); + if (it != reverse_connections_.end()) { + if (it->second.ttl_.is_in_past()) { + reverse_connections_.erase(it); + } else { + td::actor::send_closure(adnl_, &adnl::Adnl::send_message, id_, it->second.dht_node_, + create_serialize_tl_object( + target.tl(), std::move(signature), client.bits256_value())); + promise.set_result(td::Unit()); + return; + } + } + auto key_id = get_reverse_connection_key(client).compute_key_id(); + get_self_node([=, target = std::move(target), signature = std::move(signature), promise = std::move(promise), + SelfId = actor_id(this), print_id = print_id(), list = get_nearest_nodes(key_id, k_), + client_only = client_only_](td::Result R) mutable { + R.ensure(); + td::actor::create_actor( + "RequestReversePing", client, std::move(target), std::move(signature), print_id, id_, std::move(list), k_, a_, + network_id_, R.move_as_ok(), client_only, SelfId, adnl_, std::move(promise)) + .release(); + }); +} + void DhtMemberImpl::check() { VLOG(DHT_INFO) << this << ": ping=" << ping_queries_ << " fnode=" << find_node_queries_ << " fvalue=" << find_value_queries_ << " store=" << store_queries_ @@ -454,6 +600,16 @@ void DhtMemberImpl::check() { } } } + if (reverse_connections_.size() > 0) { + auto it = reverse_connections_.upper_bound(last_check_reverse_conn_); + if (it == reverse_connections_.end()) { + it = reverse_connections_.begin(); + } + last_check_reverse_conn_ = it->first; + if (it->second.ttl_.is_in_past()) { + reverse_connections_.erase(it); + } + } if (republish_att_.is_in_past()) { auto it = our_values_.lower_bound(last_republish_key_); @@ -496,9 +652,10 @@ void DhtMemberImpl::check() { DhtKeyId key{x}; auto P = td::PromiseCreator::lambda([key, promise = std::move(promise), SelfId = actor_id(this), print_id = print_id(), adnl = adnl_, list = get_nearest_nodes(key, k_), k = k_, - a = a_, id = id_, client_only = client_only_](td::Result R) mutable { + a = a_, network_id = network_id_, id = id_, + client_only = client_only_](td::Result R) mutable { R.ensure(); - td::actor::create_actor("FindNodesQuery", key, print_id, id, std::move(list), k, a, + td::actor::create_actor("FindNodesQuery", key, print_id, id, std::move(list), k, a, network_id, R.move_as_ok(), client_only, SelfId, adnl, std::move(promise)) .release(); }); @@ -519,63 +676,71 @@ void DhtMemberImpl::send_store(DhtValue value, td::Promise promise) { value.check().ensure(); auto key_id = value.key_id(); - auto P = - td::PromiseCreator::lambda([value = std::move(value), print_id = print_id(), id = id_, client_only = client_only_, - list = get_nearest_nodes(key_id, k_), k = k_, a = a_, SelfId = actor_id(this), - adnl = adnl_, promise = std::move(promise)](td::Result R) mutable { - R.ensure(); - td::actor::create_actor("StoreQuery", std::move(value), print_id, id, std::move(list), k, a, - R.move_as_ok(), client_only, SelfId, adnl, std::move(promise)) - .release(); - }); + auto P = 
td::PromiseCreator::lambda([value = std::move(value), print_id = print_id(), id = id_, + client_only = client_only_, list = get_nearest_nodes(key_id, k_), k = k_, a = a_, + network_id = network_id_, SelfId = actor_id(this), adnl = adnl_, + promise = std::move(promise)](td::Result R) mutable { + R.ensure(); + td::actor::create_actor("StoreQuery", std::move(value), print_id, id, std::move(list), k, a, + network_id, R.move_as_ok(), client_only, SelfId, adnl, std::move(promise)) + .release(); + }); get_self_node(std::move(P)); } void DhtMemberImpl::get_self_node(td::Promise promise) { - auto P = - td::PromiseCreator::lambda([promise = std::move(promise), print_id = print_id(), id = id_, keyring = keyring_, - client_only = client_only_](td::Result R) mutable { - R.ensure(); - auto node = R.move_as_ok(); - auto version = static_cast(td::Clocks::system()); - auto B = create_serialize_tl_object(node.pub_id().tl(), node.addr_list().tl(), version, - td::BufferSlice()); - if (!client_only) { - CHECK(node.addr_list().size() > 0); - } - auto P = td::PromiseCreator::lambda( - [promise = std::move(promise), node = std::move(node), version](td::Result R) mutable { - R.ensure(); - DhtNode n{node.pub_id(), node.addr_list(), version, R.move_as_ok()}; - promise.set_result(std::move(n)); - }); - td::actor::send_closure(keyring, &keyring::Keyring::sign_message, id.pubkey_hash(), std::move(B), std::move(P)); - }); + auto P = td::PromiseCreator::lambda([promise = std::move(promise), print_id = print_id(), id = id_, + keyring = keyring_, client_only = client_only_, + network_id = network_id_](td::Result R) mutable { + R.ensure(); + auto node = R.move_as_ok(); + auto version = static_cast(td::Clocks::system()); + td::BufferSlice B = serialize_tl_object( + DhtNode{node.pub_id(), node.addr_list(), version, network_id, td::BufferSlice{}}.tl(), true); + if (!client_only) { + CHECK(node.addr_list().size() > 0); + } + auto P = td::PromiseCreator::lambda([promise = std::move(promise), node = std::move(node), version, + network_id](td::Result R) mutable { + R.ensure(); + DhtNode n{node.pub_id(), node.addr_list(), version, network_id, R.move_as_ok()}; + promise.set_result(std::move(n)); + }); + td::actor::send_closure(keyring, &keyring::Keyring::sign_message, id.pubkey_hash(), std::move(B), std::move(P)); + }); td::actor::send_closure(adnl_, &adnl::Adnl::get_self_node, id_, std::move(P)); } -td::Result> Dht::create_global_config(tl_object_ptr conf) { - td::uint32 k; - if (conf->k_ == 0) { +td::Result> Dht::create_global_config(tl_object_ptr conf) { + td::uint32 k = 0, a = 0; + td::int32 network_id = -1; + tl_object_ptr static_nodes; + ton_api::downcast_call(*conf, td::overloaded( + [&](ton_api::dht_config_global &f) { + k = f.k_; + a = f.a_; + network_id = -1; + static_nodes = std::move(f.static_nodes_); + }, + [&](ton_api::dht_config_global_v2 &f) { + k = f.k_; + a = f.a_; + network_id = f.network_id_; + static_nodes = std::move(f.static_nodes_); + })); + if (k == 0) { k = DhtMember::default_k(); - } else if (conf->k_ > 0 && static_cast(conf->k_) <= DhtMember::max_k()) { - k = conf->k_; - } else { - return td::Status::Error(ErrorCode::protoviolation, PSTRING() << "bad value k=" << conf->k_); + } else if (k > DhtMember::max_k()) { + return td::Status::Error(ErrorCode::protoviolation, PSTRING() << "bad value k=" << k); } - td::uint32 a; - if (conf->a_ == 0) { + if (a == 0) { a = DhtMember::default_a(); - } else if (conf->a_ > 0 && static_cast(conf->a_) <= DhtMember::max_a()) { - a = conf->a_; - } else { - return 
td::Status::Error(ErrorCode::protoviolation, PSTRING() << "bad value a=" << conf->a_); + } else if (a > DhtMember::max_a()) { + return td::Status::Error(ErrorCode::protoviolation, PSTRING() << "bad value a=" << a); } - - DhtNodesList l{std::move(conf->static_nodes_)}; - - return std::make_shared(k, a, std::move(l)); + DhtNodesList l{std::move(static_nodes), network_id}; + return std::make_shared(k, a, network_id, std::move(l)); } } // namespace dht diff --git a/dht/dht.h b/dht/dht.h index eacb2e4b..b9c65c8a 100644 --- a/dht/dht.h +++ b/dht/dht.h @@ -47,13 +47,17 @@ class Dht : public td::actor::Actor { td::actor::ActorId keyring, td::actor::ActorId adnl); static td::Result> create_global_config( - tl_object_ptr conf); + tl_object_ptr conf); virtual adnl::AdnlNodeIdShort get_id() const = 0; virtual void set_value(DhtValue key_value, td::Promise result) = 0; virtual void get_value(DhtKey key, td::Promise result) = 0; + virtual void register_reverse_connection(adnl::AdnlNodeIdFull client, td::Promise promise) = 0; + virtual void request_reverse_ping(adnl::AdnlNode target, adnl::AdnlNodeIdShort client, + td::Promise promise) = 0; + virtual void dump(td::StringBuilder &sb) const = 0; virtual ~Dht() = default; diff --git a/dht/dht.hpp b/dht/dht.hpp index b8d73c8e..0b46d635 100644 --- a/dht/dht.hpp +++ b/dht/dht.hpp @@ -52,15 +52,20 @@ class DhtGlobalConfig { auto get_a() const { return a_; } + auto get_network_id() const { + return network_id_; + } const auto &nodes() const { return static_nodes_; } - DhtGlobalConfig(td::uint32 k, td::uint32 a, DhtNodesList nodes) : k_(k), a_(a), static_nodes_(std::move(nodes)) { + DhtGlobalConfig(td::uint32 k, td::uint32 a, td::int32 network_id, DhtNodesList nodes) + : k_(k), a_(a), network_id_(network_id), static_nodes_(std::move(nodes)) { } private: td::uint32 k_; td::uint32 a_; + td::int32 network_id_; DhtNodesList static_nodes_; }; @@ -85,8 +90,8 @@ class DhtMember : public Dht { static td::actor::ActorOwn create(adnl::AdnlNodeIdShort id, std::string db_root, td::actor::ActorId keyring, - td::actor::ActorId adnl, td::uint32 k = 10, td::uint32 a = 3, - bool client_only = false); + td::actor::ActorId adnl, td::int32 network_id, + td::uint32 k = 10, td::uint32 a = 3, bool client_only = false); //virtual void update_addr_list(tl_object_ptr addr_list) = 0; //virtual void add_node(adnl::AdnlNodeIdShort id) = 0; @@ -101,6 +106,10 @@ class DhtMember : public Dht { virtual void get_self_node(td::Promise promise) = 0; virtual PrintId print_id() const = 0; + + static DhtKey get_reverse_connection_key(adnl::AdnlNodeIdShort node) { + return DhtKey{node.pubkey_hash(), "address", 0}; + } }; inline td::StringBuilder &operator<<(td::StringBuilder &sb, const DhtMember::PrintId &id) { diff --git a/dht/test/dht-test-ping.cpp b/dht/test/dht-test-ping.cpp deleted file mode 100644 index f2756b47..00000000 --- a/dht/test/dht-test-ping.cpp +++ /dev/null @@ -1,303 +0,0 @@ -/* - This file is part of TON Blockchain source code. - - TON Blockchain is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; either version 2 - of the License, or (at your option) any later version. - - TON Blockchain is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with TON Blockchain. If not, see . - - In addition, as a special exception, the copyright holders give permission - to link the code of portions of this program with the OpenSSL library. - You must obey the GNU General Public License in all respects for all - of the code used other than OpenSSL. If you modify file(s) with this - exception, you may extend this exception to your version of the file(s), - but you are not obligated to do so. If you do not wish to do so, delete this - exception statement from your version. If you delete this exception statement - from all source files in the program, then also delete it here. - - Copyright 2017-2020 Telegram Systems LLP -*/ -#include "adnl/adnl-network-manager.h" -#include "adnl/adnl-peer-table.h" -#include "adnl/utils.hpp" -#include "keys/encryptor.h" -#include "td/utils/Time.h" -#include "td/utils/format.h" -#include "td/utils/OptionParser.h" -#include "td/utils/filesystem.h" -#include "dht/dht.h" -#include "auto/tl/ton_api_json.h" - -#include -#include - -template -std::ostream &operator<<(std::ostream &stream, const td::UInt &x) { - for (size_t i = 0; i < size / 8; i++) { - stream << td::format::hex_digit((x.raw[i] >> 4) & 15) << td::format::hex_digit(x.raw[i] & 15); - } - - return stream; -} - -class adnl::AdnlNode : public td::actor::Actor { - private: - std::vector ping_ids_; - - td::actor::ActorOwn network_manager_; - td::actor::ActorOwn peer_table_; - td::actor::ActorOwn dht_node_; - - td::UInt256 local_id_; - bool local_id_set_ = false; - - std::string host_ = "127.0.0.1"; - td::uint32 ip_ = 0x7f000001; - td::uint16 port_ = 2380; - - std::string local_config_ = "ton-local.config"; - std::string global_config_ = "ton-global.config"; - - void receive_message(td::UInt256 src, td::UInt256 dst, td::BufferSlice data) { - std::cout << "MESSAGE FROM " << src << " to " << dst << " of size " << std::to_string(data.size()) << "\n"; - } - - void receive_query(td::UInt256 src, td::UInt256 dst, td::uint64 query_id, td::BufferSlice data) { - std::cout << "QUERY " << std::to_string(query_id) << " FROM " << src << " to " << dst << " of size " - << std::to_string(data.size()) << "\n"; - td::actor::send_closure(peer_table_, &ton::adnl::AdnlPeerTable::answer_query, dst, src, query_id, - ton::create_tl_object()); - } - - std::unique_ptr make_callback() { - class Callback : public ton::adnl::AdnlPeerTable::Callback { - public: - void receive_message(td::UInt256 src, td::UInt256 dst, td::BufferSlice data) override { - td::actor::send_closure(id_, &adnl::AdnlNode::receive_message, src, dst, std::move(data)); - } - void receive_query(td::UInt256 src, td::UInt256 dst, td::uint64 query_id, td::BufferSlice data) override { - td::actor::send_closure(id_, &adnl::AdnlNode::receive_query, src, dst, query_id, std::move(data)); - } - Callback(td::actor::ActorId id) : id_(std::move(id)) { - } - - private: - td::actor::ActorId id_; - }; - - return std::make_unique(td::actor::actor_id(this)); - } - - public: - void set_local_config(std::string str) { - local_config_ = str; - } - void set_global_config(std::string str) { - global_config_ = str; - } - void start_up() override { - alarm_timestamp() = td::Timestamp::in(1); - } - adnl::AdnlNode() { - network_manager_ = ton::adnl::AdnlNetworkManager::create(); - peer_table_ = ton::adnl::AdnlPeerTable::create(); - td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::register_peer_table, peer_table_.get()); - 
td::actor::send_closure(peer_table_, &ton::adnl::AdnlPeerTable::register_network_manager, network_manager_.get()); - } - void listen_udp(td::uint16 port) { - td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::add_listening_udp_port, "0.0.0.0", port); - port_ = port; - } - void run() { - auto L = td::read_file(local_config_); - if (L.is_error()) { - LOG(FATAL) << "can not read local config: " << L.move_as_error(); - } - auto L2 = td::json_decode(L.move_as_ok().as_slice()); - if (L2.is_error()) { - LOG(FATAL) << "can not parse local config: " << L2.move_as_error(); - } - auto lc_j = L2.move_as_ok(); - if (lc_j.type() != td::JsonValue::Type::Object) { - LOG(FATAL) << "can not parse local config: expected json object"; - } - - ton::ton_api::config_local lc; - auto rl = ton::ton_api::from_json(lc, lc_j.get_object()); - if (rl.is_error()) { - LOG(FATAL) << "can not interpret local config: " << rl.move_as_error(); - } - - auto G = td::read_file(global_config_); - if (G.is_error()) { - LOG(FATAL) << "can not read global config: " << G.move_as_error(); - } - auto G2 = td::json_decode(G.move_as_ok().as_slice()); - if (G2.is_error()) { - LOG(FATAL) << "can not parse global config: " << G2.move_as_error(); - } - auto gc_j = G2.move_as_ok(); - if (gc_j.type() != td::JsonValue::Type::Object) { - LOG(FATAL) << "can not parse global config: expected json object"; - } - - ton::ton_api::config_global gc; - auto rg = ton::ton_api::from_json(gc, gc_j.get_object()); - if (rg.is_error()) { - LOG(FATAL) << "can not interpret local config: " << rg.move_as_error(); - } - - if (gc.adnl_) { - auto it = gc.adnl_->static_nodes_.begin(); - while (it != gc.adnl_->static_nodes_.end()) { - auto R = ton::adnl_validate_full_id(std::move((*it)->id_)); - if (R.is_error()) { - LOG(FATAL) << "can not apply global config: " << R.move_as_error(); - } - auto R2 = ton::adnl_validate_addr_list(std::move((*it)->addr_list_)); - if (R2.is_error()) { - LOG(FATAL) << "can not apply global config: " << R2.move_as_error(); - } - td::actor::send_closure(peer_table_, &ton::adnl::AdnlPeerTable::add_peer, R.move_as_ok(), R2.move_as_ok()); - it++; - } - } - - if (!gc.dht_) { - LOG(FATAL) << "global config does not contain dht section"; - } - if (lc.dht_.size() != 1) { - LOG(FATAL) << "local config must contain exactly one dht section"; - } - - auto R = ton::DhtNode::create_from_json(std::move(gc.dht_), std::move(lc.dht_[0]), peer_table_.get()); - if (R.is_error()) { - LOG(FATAL) << "fail creating dht node: " << R.move_as_error(); - } - - dht_node_ = R.move_as_ok(); - } - /* - void set_host(td::IPAddress ip, std::string host) { - ip_ = ip.get_ipv4(); - host_ = host; - } - void send_pings_to(td::UInt256 id) { - std::cout << "send pings to " << id << "\n"; - ping_ids_.push_back(id); - } - void add_local_id(ton::tl_object_ptr pk_) { - auto pub_ = ton::get_public_key(pk_); - local_id_ = ton::adnl_short_id(pub_); - std::cout << "local_id = '" << local_id_ << "'\n"; - auto x = ton::create_tl_object(ip_, port_); - auto v = std::vector>(); - v.push_back(ton::move_tl_object_as(x)); - auto y = - ton::create_tl_object(std::move(v), static_cast(td::Time::now())); - - LOG(INFO) << "local_addr_list: " << ton::ton_api::to_string(y); - td::actor::send_closure(peer_table_, &ton::adnl::AdnlPeerTable::add_id, ton::clone_tl_object(pk_), - ton::clone_tl_object(y)); - td::actor::send_closure(peer_table_, &ton::adnl::AdnlPeerTable::subscribe_custom, local_id_, "TEST", make_callback()); - local_id_set_ = true; - - dht_node_ = 
ton::DhtNode::create(std::move(pk_), peer_table_.get()); - td::actor::send_closure(dht_node_, &ton::DhtNode::update_addr_list, std::move(y)); - } - - void add_static_dht_node(ton::tl_object_ptr id, - ton::tl_object_ptr addr_list, - td::BufferSlice signature) { - auto Id = ton::adnl_short_id(id); - td::actor::send_closure( - dht_node_, &ton::DhtNode::add_full_node, Id, - ton::create_tl_object(std::move(id), std::move(addr_list), signature.as_slice().str())); - } - - void add_foreign(ton::tl_object_ptr id, - ton::tl_object_ptr addr_list) { - std::cout << ton::adnl_short_id(id) << "\n"; - td::actor::send_closure(peer_table_, &ton::adnl::AdnlPeerTable::add_peer, std::move(id), std::move(addr_list)); - } - - void alarm() override { - std::cout << "alarm\n"; - if (local_id_set_) { - for (auto it = ping_ids_.begin(); it != ping_ids_.end(); it++) { - auto P = td::PromiseCreator::lambda([](td::Result> result) { - if (result.is_error()) { - std::cout << "received error " << result.move_as_error().to_string() << "\n"; - } else { - auto message = result.move_as_ok(); - std::cout << "received answer to query\n"; - } - }); - td::actor::send_closure(peer_table_, &ton::adnl::AdnlPeerTable::send_query, local_id_, *it, std::move(P), - td::Timestamp::in(5), - ton::move_tl_object_as( - ton::create_tl_object("TEST"))); - } - } - } - */ -}; - -td::Result get_uint256(std::string str) { - if (str.size() != 64) { - return td::Status::Error("uint256 must have 64 bytes"); - } - td::UInt256 res; - for (size_t i = 0; i < 32; i++) { - res.raw[i] = static_cast(td::hex_to_int(str[2 * i]) * 16 + td::hex_to_int(str[2 * i + 1])); - } - return res; -} - -int main(int argc, char *argv[]) { - td::actor::ActorOwn x; - - td::OptionParser p; - p.set_description("test basic adnl functionality"); - p.add_option('h', "help", "prints_help", [&]() { - char b[10240]; - td::StringBuilder sb({b, 10000}); - sb << p; - std::cout << sb.as_cslice().c_str(); - std::exit(2); - return td::Status::OK(); - }); - p.add_option('p', "port", "sets udp port", [&](td::Slice port) { - td::actor::send_closure(x, &adnl::AdnlNode::listen_udp, static_cast(std::stoi(port.str()))); - return td::Status::OK(); - }); - p.add_option('C', "global-config", "file to read global config", [&](td::Slice fname) { - td::actor::send_closure(x, &adnl::AdnlNode::set_global_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('c', "local-config", "file to read local config", [&](td::Slice fname) { - td::actor::send_closure(x, &adnl::AdnlNode::set_local_config, fname.str()); - return td::Status::OK(); - }); - - td::actor::Scheduler scheduler({2}); - - scheduler.run_in_context([&] { - x = td::actor::create_actor(td::actor::ActorInfoCreator::Options().with_name("A").with_poll()); - }); - - scheduler.run_in_context([&] { p.run(argc, argv).ensure(); }); - scheduler.run_in_context([&] { td::actor::send_closure(x, &adnl::AdnlNode::run); }); - - scheduler.run(); - - return 0; -} diff --git a/dht/utils/dht-ping-servers.cpp b/dht/utils/dht-ping-servers.cpp new file mode 100644 index 00000000..0feafb9a --- /dev/null +++ b/dht/utils/dht-ping-servers.cpp @@ -0,0 +1,214 @@ +/* + This file is part of TON Blockchain source code. + + TON Blockchain is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either version 2 + of the License, or (at your option) any later version. 
+ + TON Blockchain is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with TON Blockchain. If not, see . + + In addition, as a special exception, the copyright holders give permission + to link the code of portions of this program with the OpenSSL library. + You must obey the GNU General Public License in all respects for all + of the code used other than OpenSSL. If you modify file(s) with this + exception, you may extend this exception to your version of the file(s), + but you are not obligated to do so. If you do not wish to do so, delete this + exception statement from your version. If you delete this exception statement + from all source files in the program, then also delete it here. + + Copyright 2017-2020 Telegram Systems LLP +*/ +#include "adnl/adnl-network-manager.h" +#include "adnl/adnl.h" +#include "adnl/utils.hpp" +#include "keys/encryptor.h" +#include "td/utils/Time.h" +#include "td/utils/format.h" +#include "td/utils/OptionParser.h" +#include "td/utils/filesystem.h" +#include "dht/dht.hpp" +#include "auto/tl/ton_api_json.h" +#include "common/delay.h" +#include "td/utils/Random.h" +#include "terminal/terminal.h" + +#include + +class AdnlNode : public td::actor::Actor { + private: + td::actor::ActorOwn network_manager_; + td::actor::ActorOwn adnl_; + td::actor::ActorOwn keyring_; + ton::adnl::AdnlNodeIdShort local_id_; + + std::string host_ = "127.0.0.1"; + td::uint16 port_ = 2380; + + std::string global_config_ = "ton-global.config"; + + struct NodeInfo { + ton::adnl::AdnlNodeIdShort id; + td::uint32 sent = 0, received = 0; + double sum_time = 0.0; + explicit NodeInfo(ton::adnl::AdnlNodeIdShort id) : id(id) { + } + }; + std::vector nodes_; + + td::uint32 pings_remaining_ = 4; + td::uint32 pending_ = 1; + + public: + void set_global_config(std::string str) { + global_config_ = str; + } + void listen_udp(td::uint16 port) { + port_ = port; + } + + AdnlNode() { + } + + void run() { + network_manager_ = ton::adnl::AdnlNetworkManager::create(port_); + keyring_ = ton::keyring::Keyring::create(""); + adnl_ = ton::adnl::Adnl::create("", keyring_.get()); + td::actor::send_closure(adnl_, &ton::adnl::Adnl::register_network_manager, network_manager_.get()); + + td::IPAddress addr; + addr.init_host_port(host_, port_).ensure(); + ton::adnl::AdnlCategoryMask mask; + mask[0] = true; + td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::add_self_addr, addr, mask, 0); + auto pk = ton::privkeys::Ed25519::random(); + td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, pk, true, [](td::Result) {}); + ton::adnl::AdnlNodeIdFull local_id_full(pk.pub()); + ton::adnl::AdnlAddressList addr_list; + addr_list.set_version(static_cast(td::Clocks::system())); + addr_list.set_reinit_date(ton::adnl::Adnl::adnl_start_time()); + td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, local_id_full, std::move(addr_list), (td::uint8)0); + local_id_ = local_id_full.compute_short_id(); + + auto r_dht = get_dht_config(); + if (r_dht.is_error()) { + LOG(FATAL) << "Cannot get dht config: " << r_dht.move_as_error(); + } + auto dht = r_dht.move_as_ok(); + ton::adnl::AdnlNodesList static_nodes; + for (const auto &node : dht->nodes().list()) { + LOG(INFO) << "Node #" << nodes_.size() << " : " << node.adnl_id().compute_short_id(); + 
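+      // Remember each static DHT node so per-node ping statistics can be collected, and add it to ADNL's static node list so queries can reach it.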
nodes_.emplace_back(node.adnl_id().compute_short_id()); + static_nodes.push(ton::adnl::AdnlNode(node.adnl_id(), node.addr_list())); + } + td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_static_nodes_from_config, std::move(static_nodes)); + + ton::delay_action([SelfId = actor_id(this)]() { td::actor::send_closure(SelfId, &AdnlNode::send_pings); }, + td::Timestamp::in(1.0)); + } + + td::Result> get_dht_config() { + TRY_RESULT_PREFIX(conf_data, td::read_file(global_config_), "failed to read: "); + TRY_RESULT_PREFIX(conf_json, td::json_decode(conf_data.as_slice()), "failed to parse json: "); + ton::ton_api::config_global conf; + TRY_STATUS_PREFIX(ton::ton_api::from_json(conf, conf_json.get_object()), "json does not fit TL scheme: "); + if (!conf.dht_) { + return td::Status::Error(ton::ErrorCode::error, "does not contain [dht] section"); + } + TRY_RESULT_PREFIX(dht, ton::dht::Dht::create_global_config(std::move(conf.dht_)), "bad [dht] section: "); + return std::move(dht); + } + + void send_pings() { + CHECK(pings_remaining_); + --pings_remaining_; + for (size_t i = 0; i < nodes_.size(); ++i) { + auto id = nodes_[i].id; + LOG(INFO) << "Sending ping to " << id; + ++pending_; + td::actor::send_closure( + adnl_, &ton::adnl::Adnl::send_query, local_id_, id, "ping", + [SelfId = actor_id(this), i, timer = td::Timer()](td::Result R) { + td::actor::send_closure(SelfId, &AdnlNode::on_pong, i, timer.elapsed(), R.is_ok()); + }, td::Timestamp::in(5.0), + ton::create_serialize_tl_object(td::Random::fast_uint64())); + } + + if (pings_remaining_ == 0) { + --pending_; + try_finish(); + } else { + ton::delay_action([SelfId = actor_id(this)]() { td::actor::send_closure(SelfId, &AdnlNode::send_pings); }, + td::Timestamp::in(1.0)); + } + } + + void on_pong(size_t i, double time, bool success) { + auto &node = nodes_[i]; + ++node.sent; + if (success) { + ++node.received; + node.sum_time += time; + LOG(INFO) << "Pong from " << node.id << " in " << time << "s"; + } else { + LOG(INFO) << "Pong from " << node.id << " : timeout"; + } + --pending_; + try_finish(); + } + + void try_finish() { + if (pending_) { + return; + } + td::TerminalIO::out() << "Pinged " << nodes_.size() << " nodes:\n"; + for (const auto& node : nodes_) { + td::TerminalIO::out() << node.id << " : " << node.received << "/" << node.sent; + if (node.received > 0) { + td::TerminalIO::out() << " (avg. 
time = " << node.sum_time / node.received << ")"; + } + td::TerminalIO::out() << "\n"; + } + std::exit(0); + } +}; + +int main(int argc, char *argv[]) { + td::actor::ActorOwn x; + + td::OptionParser p; + p.set_description("ping dht servers from config"); + p.add_option('h', "help", "print help", [&]() { + char b[10240]; + td::StringBuilder sb(td::MutableSlice{b, 10000}); + sb << p; + std::cout << sb.as_cslice().c_str(); + std::exit(2); + }); + p.add_option('p', "port", "set udp port", [&](td::Slice port) { + td::actor::send_closure(x, &AdnlNode::listen_udp, static_cast(std::stoi(port.str()))); + }); + p.add_option('C', "global-config", "file to read global config from", + [&](td::Slice fname) { td::actor::send_closure(x, &AdnlNode::set_global_config, fname.str()); }); + p.add_option('v', "verbosity", "set verbosity", [&](td::Slice arg) { + int v = VERBOSITY_NAME(FATAL) + (td::to_integer(arg)); + SET_VERBOSITY_LEVEL(v); + }); + + td::actor::Scheduler scheduler({2}); + + scheduler.run_in_context([&] { x = td::actor::create_actor("AdnlNode"); }); + + scheduler.run_in_context([&] { p.run(argc, argv).ensure(); }); + scheduler.run_in_context([&] { td::actor::send_closure(x, &AdnlNode::run); }); + + scheduler.run(); + + return 0; +} diff --git a/dht/utils/dht-resolve.cpp b/dht/utils/dht-resolve.cpp new file mode 100644 index 00000000..5ab98d01 --- /dev/null +++ b/dht/utils/dht-resolve.cpp @@ -0,0 +1,219 @@ +/* + This file is part of TON Blockchain source code. + + TON Blockchain is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either version 2 + of the License, or (at your option) any later version. + + TON Blockchain is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with TON Blockchain. If not, see . + + In addition, as a special exception, the copyright holders give permission + to link the code of portions of this program with the OpenSSL library. + You must obey the GNU General Public License in all respects for all + of the code used other than OpenSSL. If you modify file(s) with this + exception, you may extend this exception to your version of the file(s), + but you are not obligated to do so. If you do not wish to do so, delete this + exception statement from your version. If you delete this exception statement + from all source files in the program, then also delete it here. 
+ + Copyright 2017-2020 Telegram Systems LLP +*/ +#include "adnl/adnl-network-manager.h" +#include "adnl/adnl.h" +#include "adnl/utils.hpp" +#include "keys/encryptor.h" +#include "td/utils/Time.h" +#include "td/utils/format.h" +#include "td/utils/OptionParser.h" +#include "td/utils/filesystem.h" +#include "dht/dht.hpp" +#include "auto/tl/ton_api_json.h" +#include "common/delay.h" +#include "td/utils/Random.h" +#include "terminal/terminal.h" +#include "common/util.h" + +#include + +class Resolver : public td::actor::Actor { + private: + td::actor::ActorOwn network_manager_; + td::actor::ActorOwn adnl_; + td::actor::ActorOwn keyring_; + ton::adnl::AdnlNodeIdShort local_id_; + td::actor::ActorOwn dht_; + + std::string global_config_; + int server_idx_; + + std::string host_ = "127.0.0.1"; + td::uint16 port_; + ton::dht::DhtKey key_; + double timeout_; + + public: + Resolver(std::string global_config, int server_idx, td::uint16 port, ton::dht::DhtKey key, double timeout) + : global_config_(global_config), server_idx_(server_idx), port_(port), key_(std::move(key)), timeout_(timeout) { + } + + void run() { + network_manager_ = ton::adnl::AdnlNetworkManager::create(port_); + keyring_ = ton::keyring::Keyring::create(""); + adnl_ = ton::adnl::Adnl::create("", keyring_.get()); + td::actor::send_closure(adnl_, &ton::adnl::Adnl::register_network_manager, network_manager_.get()); + + td::IPAddress addr; + addr.init_host_port(host_, port_).ensure(); + ton::adnl::AdnlCategoryMask mask; + mask[0] = true; + td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::add_self_addr, addr, mask, 0); + auto pk = ton::privkeys::Ed25519::random(); + td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, pk, true, [](td::Result) {}); + ton::adnl::AdnlNodeIdFull local_id_full(pk.pub()); + ton::adnl::AdnlAddressList addr_list; + addr_list.set_version(static_cast(td::Clocks::system())); + addr_list.set_reinit_date(ton::adnl::Adnl::adnl_start_time()); + td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, local_id_full, std::move(addr_list), (td::uint8)0); + local_id_ = local_id_full.compute_short_id(); + + auto dht_config = get_dht_config(); + if (dht_config.is_error()) { + LOG(FATAL) << "Failed to load dht config: " << dht_config.move_as_error(); + } + auto D = ton::dht::Dht::create_client(local_id_, "", dht_config.move_as_ok(), keyring_.get(), adnl_.get()); + if (D.is_error()) { + LOG(FATAL) << "Failed to init dht client: " << D.move_as_error(); + } + dht_ = D.move_as_ok(); + LOG(INFO) << "Get value " << key_.public_key_hash() << " " << key_.name() << " " << key_.idx(); + + send_query(); + alarm_timestamp() = td::Timestamp::in(timeout_); + } + + void send_query() { + td::actor::send_closure(dht_, &ton::dht::Dht::get_value, key_, + [SelfId = actor_id(this)](td::Result R) { + td::actor::send_closure(SelfId, &Resolver::got_result, std::move(R)); + }); + } + + void got_result(td::Result R) { + if (R.is_error()) { + LOG(WARNING) << "Failed to get value, retrying: " << R.move_as_error(); + ton::delay_action([SelfId = actor_id(this)]() { td::actor::send_closure(SelfId, &Resolver::send_query); }, + td::Timestamp::in(0.25)); + return; + } + auto r = R.move_as_ok(); + LOG(INFO) << "Got result"; + td::TerminalIO::out() << "KEY: " << td::base64_encode(ton::serialize_tl_object(r.key().public_key().tl(), true)) + << "\n"; + td::TerminalIO::out() << "VALUE: " << td::base64_encode(r.value().as_slice()) << "\n"; + std::exit(0); + } + + void alarm() override { + LOG(FATAL) << "Failed to get value: 
timeout"; + } + + td::Result> get_dht_config() { + TRY_RESULT_PREFIX(conf_data, td::read_file(global_config_), "failed to read: "); + TRY_RESULT_PREFIX(conf_json, td::json_decode(conf_data.as_slice()), "failed to parse json: "); + ton::ton_api::config_global conf; + TRY_STATUS_PREFIX(ton::ton_api::from_json(conf, conf_json.get_object()), "json does not fit TL scheme: "); + if (!conf.dht_) { + return td::Status::Error(ton::ErrorCode::error, "does not contain [dht] section"); + } + ton::ton_api::dht_nodes* static_nodes = nullptr; + ton::ton_api::downcast_call(*conf.dht_, [&](auto &f) { static_nodes = f.static_nodes_.get(); }); + auto &nodes = static_nodes->nodes_; + if (server_idx_ >= 0) { + CHECK(server_idx_ < (int)nodes.size()); + LOG(INFO) << "Using server #" << server_idx_; + std::swap(nodes[0], nodes[server_idx_]); + nodes.resize(1); + } else { + LOG(INFO) << "Using all " << nodes.size() << " servers"; + } + TRY_RESULT_PREFIX(dht, ton::dht::Dht::create_global_config(std::move(conf.dht_)), "bad [dht] section: "); + return std::move(dht); + } +}; + +td::Result parse_bits256(td::Slice s) { + td::BufferSlice str = td::base64_decode(s, true); + if (str.size() != 32) { + return td::Status::Error("Invalid bits256"); + } + return td::Bits256(td::BitPtr((unsigned char *)str.data())); +} + +int main(int argc, char *argv[]) { + td::actor::ActorOwn x; + + td::optional global_config; + int server_idx = -1; + td::uint16 port = 2380; + td::optional key_id; + td::optional key_name; + td::uint32 key_idx = 0; + double timeout = 5.0; + + td::OptionParser p; + p.set_description("find value in dht by the given key (key-id, key-name, ket-idx)"); + p.add_option('h', "help", "print help", [&]() { + char b[10240]; + td::StringBuilder sb(td::MutableSlice{b, 10000}); + sb << p; + std::cout << sb.as_cslice().c_str(); + std::exit(2); + }); + p.add_option('C', "global-config", "global config", [&](td::Slice arg) { global_config = arg.str(); }); + p.add_checked_option('s', "server-idx", "index of dht server from global config (default: all)", [&](td::Slice arg) { + TRY_RESULT_ASSIGN(server_idx, td::to_integer_safe(arg)); + return td::Status::OK(); + }); + p.add_checked_option('p', "port", "set udp port", [&](td::Slice arg) { + TRY_RESULT_ASSIGN(port, td::to_integer_safe(arg)); + return td::Status::OK(); + }); + p.add_option('v', "verbosity", "set verbosity", [&](td::Slice arg) { + int v = VERBOSITY_NAME(FATAL) + (td::to_integer(arg)); + SET_VERBOSITY_LEVEL(v); + }); + p.add_checked_option('k', "key-id", "set key id (256-bit, base64)", [&](td::Slice arg) { + TRY_RESULT_ASSIGN(key_id, parse_bits256(arg)); + return td::Status::OK(); + }); + p.add_option('n', "key-name", "set key name", [&](td::Slice arg) { key_name = arg.str(); }); + p.add_checked_option('i', "key-idx", "set key idx (default: 0)", [&](td::Slice arg) { + TRY_RESULT_ASSIGN(key_idx, td::to_integer_safe(arg)); + return td::Status::OK(); + }); + p.add_option('t', "timeout", "set timeout (default: 5s)", [&](td::Slice arg) { timeout = td::to_double(arg); }); + + td::actor::Scheduler scheduler({2}); + + scheduler.run_in_context([&] { p.run(argc, argv).ensure(); }); + scheduler.run_in_context([&] { + LOG_IF(FATAL, !global_config) << "global config is not set"; + LOG_IF(FATAL, !key_id) << "key-id is not set"; + LOG_IF(FATAL, !key_name) << "key-name is not set"; + x = td::actor::create_actor( + "Resolver", global_config.value(), server_idx, port, + ton::dht::DhtKey{ton::PublicKeyHash(key_id.value()), key_name.value(), key_idx}, timeout); + }); + 
scheduler.run_in_context([&] { td::actor::send_closure(x, &Resolver::run); }); + + scheduler.run(); + + return 0; +} diff --git a/docker/Dockerfile b/docker/Dockerfile index 9d6b6b27..95395bb9 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,21 +1,22 @@ FROM ubuntu:20.04 as builder RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake clang-6.0 openssl libssl-dev zlib1g-dev gperf wget git && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake clang-6.0 openssl libssl-dev zlib1g-dev gperf wget git ninja-build && \ rm -rf /var/lib/apt/lists/* ENV CC clang-6.0 ENV CXX clang++-6.0 +ENV CCACHE_DISABLE 1 WORKDIR / RUN git clone --recursive https://github.com/ton-blockchain/ton WORKDIR /ton RUN mkdir build && \ cd build && \ - cmake .. -DCMAKE_BUILD_TYPE=Release && \ - make -j 4 + cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. && \ + ninja tonlibjson fift func validator-engine validator-engine-console generate-random-id dht-server lite-client FROM ubuntu:20.04 RUN apt-get update && \ - apt-get install -y openssl wget&& \ + apt-get install -y openssl wget libatomic1 && \ rm -rf /var/lib/apt/lists/* RUN mkdir -p /var/ton-work/db && \ mkdir -p /var/ton-work/db/static diff --git a/docker/README.md b/docker/README.md index 955fd3ef..fd98374b 100644 --- a/docker/README.md +++ b/docker/README.md @@ -2,11 +2,11 @@ Dockerfile for The Open Network Node #### Install -```docker pull ghcr.io/newton-blockchain/ton``` +```docker pull ghcr.io/ton-blockchain/ton:latest``` #### Create volume ```docker volume create ton-db``` #### Run -```docker run -d --name ton-node --mount source=ton-db,target=/var/ton-work/db --network host -e "PUBLIC_IP=" -e "CONSOLE_PORT=" -e "LITESERVER=true" -e "LITE_PORT=" -it ghcr.io/newton-blockchain/ton``` +```docker run -d --name ton-node --mount source=ton-db,target=/var/ton-work/db --network host -e "PUBLIC_IP=" -e "CONSOLE_PORT=" -e "LITESERVER=true" -e "LITE_PORT=" -it ghcr.io/ton-blockchain/ton``` If you don't need Liteserver, then remove -e "LITESERVER=true". diff --git a/example/android/CMakeLists.txt b/example/android/CMakeLists.txt index 06454852..55eda220 100644 --- a/example/android/CMakeLists.txt +++ b/example/android/CMakeLists.txt @@ -7,8 +7,14 @@ cmake_minimum_required(VERSION 3.4.1 FATAL_ERROR) option(TONLIB_ENABLE_JNI "Enable JNI-compatible TonLib API" ON) -string(APPEND CMAKE_CXX_FLAGS " -std=c++14 -Wall -Wextra -Wno-unused-parameter -Wno-deprecated-declarations -Wconversion -Wno-sign-conversion -fno-omit-frame-pointer -ffunction-sections -fdata-sections") -string(APPEND CMAKE_SHARED_LINKER_FLAGS " -Wl,--gc-sections -Wl,--exclude-libs,ALL") +if (${CMAKE_SYSTEM_NAME} MATCHES "Windows") + string(APPEND CMAKE_CXX_FLAGS " -std=c++14 -fno-omit-frame-pointer -ffunction-sections -fdata-sections") +else() + string(APPEND CMAKE_CXX_FLAGS " -std=c++14 -Wall -Wextra -Wno-unused-parameter -Wno-deprecated-declarations -Wconversion -Wno-sign-conversion -fno-omit-frame-pointer -ffunction-sections -fdata-sections") +endif() +if (NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + string(APPEND CMAKE_SHARED_LINKER_FLAGS " -Wl,--gc-sections -Wl,--exclude-libs,ALL") +endif() # Creates and names a library, sets it as either STATIC # or SHARED, and provides the relative paths to its source code. 
@@ -37,9 +43,15 @@ target_link_libraries(native-lib tonlib) set(TONLIB_API_JAVA_PACKAGE "drinkless/org/ton") target_compile_definitions(native-lib PRIVATE PACKAGE_NAME="${TONLIB_API_JAVA_PACKAGE}") -add_custom_command(TARGET native-lib POST_BUILD - COMMAND ${CMAKE_COMMAND} -E rename $ $.debug - COMMAND ${CMAKE_STRIP} --strip-debug --strip-unneeded $.debug -o $) +if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + add_custom_command(TARGET native-lib POST_BUILD + COMMAND ${CMAKE_COMMAND} -E rename $ $.debug + COMMAND ${CMAKE_STRIP} -S $.debug -o $) +elseif (${CMAKE_SYSTEM_NAME} MATCHES "Linux") + add_custom_command(TARGET native-lib POST_BUILD + COMMAND ${CMAKE_COMMAND} -E rename $ $.debug + COMMAND ${CMAKE_STRIP} --strip-debug --strip-unneeded $.debug -o $) +endif() if (NOT CMAKE_CROSSCOMPILING) set(TONLIB_API_JAVA_PATH ${CMAKE_CURRENT_SOURCE_DIR}/src/) diff --git a/example/android/README.md b/example/android/README.md new file mode 100644 index 00000000..0d10ff1e --- /dev/null +++ b/example/android/README.md @@ -0,0 +1,48 @@ +# Generation of Tonlib libraries for Android OS +**Tl;dr** Download the latest version of the Tonlib libraries for Android from the TON release page or check the artifacts from the [Android JNI GitHub action](https://github.com/ton-blockchain/ton/actions/workflows/tonlib-android-jni.yml). + +## Compile Tonlib for Android manually +Prerequisites: Java is installed and the JAVA_HOME environment variable is set. +```bash +git clone --recursive https://github.com/ton-blockchain/ton.git +cd ton +wget https://dl.google.com/android/repository/android-ndk-r25b-linux.zip +unzip android-ndk-r25b-linux.zip +export JAVA_AWT_LIBRARY=NotNeeded +export JAVA_JVM_LIBRARY=NotNeeded +export JAVA_INCLUDE_PATH=${JAVA_HOME}/include +export JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include +export JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/linux + +export ANDROID_NDK_ROOT=$(pwd)/android-ndk-r25b +export OPENSSL_DIR=$(pwd)/example/android/third_party/crypto + +rm -rf example/android/src/drinkless/org/ton/TonApi.java +cd example/android/ +cmake -GNinja -DTON_ONLY_TONLIB=ON . +ninja prepare_cross_compiling +rm CMakeCache.txt +./build-all.sh +``` +# Generation of Tonlib libraries for iOS in Xcode + +1. Clone the repository https://github.com/labraburn/tonlib-xcframework +2. Open the repository directory in Terminal +3. Run the command: +```bash +swift run builder --output ./build --clean +``` +4. Run the command: +```bash +echo ./build/TON.xcframework/* | xargs -n 1 cp -R ./Resources/Headers +``` +5. Import **OpenSSL.xcframework** and **TON.xcframework** in Xcode in the section _"Frameworks, Libraries, and Embedded Content"_ +6. Now you can start using the Tonlib client by importing it in C or Objective-C source files: +```objective-c +#import +``` + +# Generation of Tonlib libraries for Desktop applications +You can use Tonlib built in the ordinary way for desktop applications. If you use Java, you can load the library using JNA. + +The latest Tonlib library can be found among other TON artifacts either on the TON release page or inside the [appropriate GitHub action](https://github.com/ton-blockchain/ton/actions/). \ No newline at end of file diff --git a/example/android/build.sh b/example/android/build.sh index 91a5c9f4..b16a9df9 100755 --- a/example/android/build.sh +++ b/example/android/build.sh @@ -33,8 +33,7 @@ mkdir -p build-$ARCH cd build-$ARCH -cmake .. 
\ - -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -GNinja -DANDROID_ABI=${ABI} -DOPENSSL_ROOT_DIR=/Users/arseny30/Code/td_android/libtd/src/main/jni/third_party/crypto/${ARCH} -DTON_ARCH="" -DTON_ONLY_TONLIB=ON || exit 1 +cmake .. -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -GNinja -DANDROID_ABI=${ABI} -DOPENSSL_ROOT_DIR=${OPENSSL_DIR}/${ARCH} -DTON_ARCH="" -DTON_ONLY_TONLIB=ON || exit 1 ninja native-lib || exit 1 popd diff --git a/lite-client/lite-client.cpp b/lite-client/lite-client.cpp index 1a08841c..d7d38037 100644 --- a/lite-client/lite-client.cpp +++ b/lite-client/lite-client.cpp @@ -41,6 +41,8 @@ #include "td/utils/crypto.h" #include "td/utils/overloaded.h" #include "td/utils/port/signals.h" +#include "td/utils/port/stacktrace.h" +#include "td/utils/port/StdStreams.h" #include "td/utils/port/FileFd.h" #include "terminal/terminal.h" #include "ton/lite-tl.hpp" @@ -57,14 +59,17 @@ #include "vm/cp0.h" #include "vm/memo.h" #include "ton/ton-shard.h" +#include "openssl/rand.hpp" #include "crypto/vm/utils.h" #include "crypto/common/util.h" #include "common/checksum.h" #if TD_DARWIN || TD_LINUX #include +#include #endif #include +#include #include "git.h" using namespace std::literals::string_literals; @@ -72,30 +77,22 @@ using td::Ref; int verbosity; -int TestNode::LiteServer::max_common_prefix(ton::ShardIdFull shard) const { - if (shard.is_masterchain()) { - return 0; - } - if (is_full) { - return shard.pfx_len(); - } - int res = -1; - for (const ton::ShardIdFull &our_shard : shards) { - if (ton::shard_is_ancestor(our_shard, shard)) { - return shard.pfx_len(); +std::unique_ptr TestNode::make_callback() { + class Callback : public ton::adnl::AdnlExtClient::Callback { + public: + void on_ready() override { + td::actor::send_closure(id_, &TestNode::conn_ready); } - if (shard.workchain == our_shard.workchain) { - int x = std::min({shard.pfx_len(), our_shard.pfx_len(), ton::count_matching_bits(shard.shard, our_shard.shard)}); - res = std::max(res, x); + void on_stop_ready() override { + td::actor::send_closure(id_, &TestNode::conn_closed); + } + Callback(td::actor::ActorId id) : id_(std::move(id)) { } - } - return res; -} -bool TestNode::LiteServer::supports_shard(ton::ShardIdFull shard) const { - return is_full || shard.is_masterchain() || - std::any_of(shards.begin(), shards.end(), - [&](const ton::ShardIdFull& our_shard) { return ton::shard_intersects(shard, our_shard); }); + private: + td::actor::ActorId id_; + }; + return std::make_unique(actor_id(this)); } void TestNode::run() { @@ -113,56 +110,31 @@ void TestNode::run() { io_ = td::TerminalIO::create("> ", readline_enabled_, ex_mode_, std::make_unique(actor_id(this))); td::actor::send_closure(io_, &td::TerminalIO::set_log_interface); - if (!single_remote_public_key_.empty()) { // Use single provided liteserver - LiteServer s; - s.addr = single_remote_addr_; - s.public_key = single_remote_public_key_; - single_liteserver_idx_ = 0; - servers_.push_back(std::move(s)); - run_init_queries(); - return; - } - - auto G = td::read_file(global_config_).move_as_ok(); - auto gc_j = td::json_decode(G.as_slice()).move_as_ok(); - ton::ton_api::liteclient_config_global gc; - ton::ton_api::from_json(gc, gc_j.get_object()).ensure(); - CHECK(gc.liteservers_.size() + gc.liteservers_v2_.size() > 0); - - if (gc.validator_ && gc.validator_->zero_state_) { - zstate_id_.workchain = gc.validator_->zero_state_->workchain_; - if 
(zstate_id_.workchain != ton::workchainInvalid) { - zstate_id_.root_hash = gc.validator_->zero_state_->root_hash_; - zstate_id_.file_hash = gc.validator_->zero_state_->file_hash_; - td::TerminalIO::out() << "zerostate set to " << zstate_id_.to_str() << "\n"; + if (remote_public_key_.empty()) { + auto G = td::read_file(global_config_).move_as_ok(); + auto gc_j = td::json_decode(G.as_slice()).move_as_ok(); + ton::ton_api::liteclient_config_global gc; + ton::ton_api::from_json(gc, gc_j.get_object()).ensure(); + CHECK(gc.liteservers_.size() > 0); + auto idx = liteserver_idx_ >= 0 ? liteserver_idx_ + : td::Random::fast(0, static_cast(gc.liteservers_.size() - 1)); + CHECK(idx >= 0 && static_cast(idx) <= gc.liteservers_.size()); + auto& cli = gc.liteservers_[idx]; + remote_addr_.init_host_port(td::IPAddress::ipv4_to_str(cli->ip_), cli->port_).ensure(); + remote_public_key_ = ton::PublicKey{cli->id_}; + td::TerminalIO::out() << "using liteserver " << idx << " with addr " << remote_addr_ << "\n"; + if (gc.validator_ && gc.validator_->zero_state_) { + zstate_id_.workchain = gc.validator_->zero_state_->workchain_; + if (zstate_id_.workchain != ton::workchainInvalid) { + zstate_id_.root_hash = gc.validator_->zero_state_->root_hash_; + zstate_id_.file_hash = gc.validator_->zero_state_->file_hash_; + td::TerminalIO::out() << "zerostate set to " << zstate_id_.to_str() << "\n"; + } } } - for (auto& server : gc.liteservers_) { - LiteServer s; - s.addr.init_host_port(td::IPAddress::ipv4_to_str(server->ip_), server->port_).ensure(); - s.public_key = ton::PublicKey{server->id_}; - servers_.push_back(std::move(s)); - } - for (auto& server : gc.liteservers_v2_) { - LiteServer s; - s.addr.init_host_port(td::IPAddress::ipv4_to_str(server->ip_), server->port_).ensure(); - s.public_key = ton::PublicKey{server->id_}; - s.is_full = false; - for (const auto& shard : server->shards_) { - s.shards.emplace_back(shard->workchain_, shard->shard_); - CHECK(s.shards.back().is_valid_ext()); - } - servers_.push_back(std::move(s)); - } - - if (single_liteserver_idx_ != -1) { // Use single liteserver from config - CHECK(single_liteserver_idx_ >= 0 && (size_t)single_liteserver_idx_ < gc.liteservers_.size()); - td::TerminalIO::out() << "using liteserver " << single_liteserver_idx_ << " with addr " - << servers_[single_liteserver_idx_].addr << "\n"; - } - - run_init_queries(); + client_ = + ton::adnl::AdnlExtClient::create(ton::adnl::AdnlNodeIdFull{remote_public_key_}, remote_addr_, make_callback()); } void TestNode::got_result(td::Result R, td::Promise promise) { @@ -207,156 +179,23 @@ void TestNode::after_got_result(bool ok) { } } -bool TestNode::envelope_send_query_to_any(td::BufferSlice query, td::Promise promise) { - return envelope_send_query_to_shard(ton::ShardIdFull(ton::masterchainId), std::move(query), std::move(promise)); -} - -bool TestNode::envelope_send_query_to_account(ton::AccountIdPrefixFull prefix, td::BufferSlice query, - td::Promise promise) { - if (single_liteserver_idx_ >= 0) { - return envelope_send_query_to_server(single_liteserver_idx_, std::move(query), std::move(promise)); - } - // TODO: maybe use current shard configuration? 
- int max_prefix_len = -1; - for (const LiteServer &server : servers_) { - max_prefix_len = std::max(max_prefix_len, server.max_common_prefix(prefix.as_leaf_shard())); - } - max_prefix_len = std::min(max_prefix_len, ton::max_shard_pfx_len); - if (max_prefix_len == -1) { - running_queries_++; - got_result(td::Status::Error("failed to select a suitable server"), std::move(promise)); - return false; - } - ton::ShardIdFull shard = shard_prefix(prefix.as_leaf_shard(), max_prefix_len); - return envelope_send_query_to_shard(shard, std::move(query), std::move(promise)); -} - -bool TestNode::envelope_send_query_to_shard(ton::ShardIdFull shard, td::BufferSlice query, - td::Promise promise) { - if (single_liteserver_idx_ >= 0) { - return envelope_send_query_to_server(single_liteserver_idx_, std::move(query), std::move(promise)); - } - if (shard.is_masterchain() && mc_server_idx_ != -1) { - return envelope_send_query_to_server(mc_server_idx_, std::move(query), std::move(promise)); - } - auto it = shard_server_idx_cached_.find(shard); - if (it != shard_server_idx_cached_.end()) { - return envelope_send_query_to_server(it->second, std::move(query), std::move(promise)); - } - int server_idx = -1; - int random_idx = -1; - int cnt = 0; - bool selected_full = false; - for (int i = 0; i < (int)servers_.size(); ++i) { - const LiteServer &server = servers_[i]; - if (!server.supports_shard(shard)) { - continue; - } - if (server.is_full && !selected_full) { - selected_full = true; - server_idx = -1; - cnt = 0; - } - if (!server.is_full && selected_full) { - continue; - } - if (!server.client.empty()) { - server_idx = i; - } - if (td::Random::fast(0, cnt) == 0) { - random_idx = i; - } - ++cnt; - } - if (server_idx == -1) { - server_idx = random_idx; - } - if (server_idx == -1) { - running_queries_++; - got_result(td::Status::Error("failed to select a suitable server"), std::move(promise)); - return false; - } - shard_server_idx_cached_[shard] = server_idx; - if (shard.is_masterchain()) { - mc_server_idx_ = server_idx; - } - return envelope_send_query_to_server(server_idx, std::move(query), std::move(promise)); -} - -bool TestNode::envelope_send_query_to_server(td::int32 server_idx, td::BufferSlice query, - td::Promise promise) { +bool TestNode::envelope_send_query(td::BufferSlice query, td::Promise promise) { running_queries_++; - LiteServer &server = servers_.at(server_idx); - if (server.client.empty()) { - start_client(server_idx); + if (!ready_ || client_.empty()) { + got_result(td::Status::Error("failed to send query to server: not ready"), std::move(promise)); + return false; } - CHECK(!server.client.empty()); - auto P = td::PromiseCreator::lambda( [SelfId = actor_id(this), promise = std::move(promise)](td::Result R) mutable { td::actor::send_closure(SelfId, &TestNode::got_result, std::move(R), std::move(promise)); }); td::BufferSlice b = ton::serialize_tl_object(ton::create_tl_object(std::move(query)), true); - if (server.client_ready) { - td::actor::send_closure(server.client, &ton::adnl::AdnlExtClient::send_query, "query", std::move(b), - td::Timestamp::in(10.0), std::move(P)); - } else { - server.wait_client_ready.push_back( - [client = server.client.get(), b = std::move(b), P = std::move(P)](td::Result R) mutable { - if (R.is_ok()) { - td::actor::send_closure(client, &ton::adnl::AdnlExtClient::send_query, "query", std::move(b), - td::Timestamp::in(10.0), std::move(P)); - } else { - P.set_error(R.move_as_error_prefix("failed to connect: ")); - } - }); - } + td::actor::send_closure(client_, 
&ton::adnl::AdnlExtClient::send_query, "query", std::move(b), + td::Timestamp::in(10.0), std::move(P)); return true; } -void TestNode::start_client(int server_idx) { - LiteServer &server = servers_[server_idx]; - CHECK(server.client.empty()); - class Callback : public ton::adnl::AdnlExtClient::Callback { - public: - void on_ready() override { - td::actor::send_closure(id_, &TestNode::conn_ready, server_idx_); - } - void on_stop_ready() override { - td::actor::send_closure(id_, &TestNode::conn_closed, server_idx_); - } - Callback(td::actor::ActorId id, int server_idx) : id_(std::move(id)), server_idx_(server_idx) { - } - - private: - td::actor::ActorId id_; - int server_idx_; - }; - server.client_ready = false; - server.wait_client_ready.clear(); - LOG(INFO) << "Connecting to " << server.addr << " (liteserver #" << server_idx << ")"; - server.client = ton::adnl::AdnlExtClient::create(ton::adnl::AdnlNodeIdFull{server.public_key}, server.addr, - std::make_unique(actor_id(this), server_idx)); -} - -void TestNode::conn_ready(int server_idx) { - LiteServer &server = servers_[server_idx]; - LOG(INFO) << "Connection to " << server.addr << " (liteserver #" << server_idx << ") is ready"; - server.client_ready = true; - for (auto &p : server.wait_client_ready) { - p.set_result(td::Unit()); - } - server.wait_client_ready.clear(); -} - -void TestNode::conn_closed(int server_idx) { - LiteServer &server = servers_[server_idx]; - LOG(INFO) << "Connection to " << server.addr << " (liteserver #" << server_idx << ") closed"; - server.client_ready = false; - server.wait_client_ready.clear(); -} - td::Promise TestNode::trivial_promise() { return td::PromiseCreator::lambda([Self = actor_id(this)](td::Result res) { if (res.is_error()) { @@ -471,7 +310,7 @@ bool TestNode::dump_cached_cell(td::Slice hash_pfx, td::Slice type_name) { bool TestNode::get_server_time() { auto b = ton::serialize_tl_object(ton::create_tl_object(), true); - return envelope_send_query_to_any(std::move(b), [&, Self = actor_id(this)](td::Result res) -> void { + return envelope_send_query(std::move(b), [&, Self = actor_id(this)](td::Result res) -> void { if (res.is_error()) { LOG(ERROR) << "cannot get server time"; return; @@ -480,10 +319,9 @@ bool TestNode::get_server_time() { if (F.is_error()) { LOG(ERROR) << "cannot parse answer to liteServer.getTime"; } else { - mc_server_time_ = F.move_as_ok()->now_; - mc_server_time_got_at_ = now(); - LOG(INFO) << "server time is " << mc_server_time_ << " (delta " << mc_server_time_ - mc_server_time_got_at_ - << ")"; + server_time_ = F.move_as_ok()->now_; + server_time_got_at_ = now(); + LOG(INFO) << "server time is " << server_time_ << " (delta " << server_time_ - server_time_got_at_ << ")"; } } }); @@ -491,13 +329,13 @@ bool TestNode::get_server_time() { bool TestNode::get_server_version(int mode) { auto b = ton::serialize_tl_object(ton::create_tl_object(), true); - return envelope_send_query_to_any(std::move(b), [Self = actor_id(this), mode](td::Result res) { + return envelope_send_query(std::move(b), [Self = actor_id(this), mode](td::Result res) { td::actor::send_closure_later(Self, &TestNode::got_server_version, std::move(res), mode); }); }; void TestNode::got_server_version(td::Result res, int mode) { - mc_server_ok_ = false; + server_ok_ = false; if (res.is_error()) { LOG(ERROR) << "cannot get server version and time (server too old?)"; } else { @@ -506,11 +344,11 @@ void TestNode::got_server_version(td::Result res, int mode) { LOG(ERROR) << "cannot parse answer to liteServer.getVersion"; } else 
{ auto a = F.move_as_ok(); - set_mc_server_version(a->version_, a->capabilities_); - set_mc_server_time(a->now_); + set_server_version(a->version_, a->capabilities_); + set_server_time(a->now_); } } - if (!mc_server_ok_) { + if (!server_ok_) { LOG(ERROR) << "server version is too old (at least " << (min_ls_version >> 8) << "." << (min_ls_version & 0xff) << " with capabilities " << min_ls_capabilities << " required), some queries are unavailable"; } @@ -519,27 +357,27 @@ void TestNode::got_server_version(td::Result res, int mode) { } } -void TestNode::set_mc_server_version(td::int32 version, td::int64 capabilities) { - if (mc_server_version_ != version || mc_server_capabilities_ != capabilities) { - mc_server_version_ = version; - mc_server_capabilities_ = capabilities; - LOG(WARNING) << "server version is " << (mc_server_version_ >> 8) << "." << (mc_server_version_ & 0xff) - << ", capabilities " << mc_server_capabilities_; +void TestNode::set_server_version(td::int32 version, td::int64 capabilities) { + if (server_version_ != version || server_capabilities_ != capabilities) { + server_version_ = version; + server_capabilities_ = capabilities; + LOG(WARNING) << "server version is " << (server_version_ >> 8) << "." << (server_version_ & 0xff) + << ", capabilities " << server_capabilities_; } - mc_server_ok_ = (mc_server_version_ >= min_ls_version) && !(~mc_server_capabilities_ & min_ls_capabilities); + server_ok_ = (server_version_ >= min_ls_version) && !(~server_capabilities_ & min_ls_capabilities); } -void TestNode::set_mc_server_time(int server_utime) { - mc_server_time_ = server_utime; - mc_server_time_got_at_ = now(); - LOG(INFO) << "server time is " << mc_server_time_ << " (delta " << mc_server_time_ - mc_server_time_got_at_ << ")"; +void TestNode::set_server_time(int server_utime) { + server_time_ = server_utime; + server_time_got_at_ = now(); + LOG(INFO) << "server time is " << server_time_ << " (delta " << server_time_ - server_time_got_at_ << ")"; } bool TestNode::get_server_mc_block_id() { - int mode = (mc_server_capabilities_ & 2) ? 0 : -1; + int mode = (server_capabilities_ & 2) ? 
0 : -1; if (mode < 0) { auto b = ton::serialize_tl_object(ton::create_tl_object(), true); - return envelope_send_query_to_any(std::move(b), [Self = actor_id(this)](td::Result res) -> void { + return envelope_send_query(std::move(b), [Self = actor_id(this)](td::Result res) -> void { if (res.is_error()) { LOG(ERROR) << "cannot get masterchain info from server"; return; @@ -559,25 +397,24 @@ bool TestNode::get_server_mc_block_id() { } else { auto b = ton::serialize_tl_object(ton::create_tl_object(mode), true); - return envelope_send_query_to_any( - std::move(b), [Self = actor_id(this), mode](td::Result res) -> void { - if (res.is_error()) { - LOG(ERROR) << "cannot get extended masterchain info from server"; - return; - } else { - auto F = ton::fetch_tl_object(res.move_as_ok(), true); - if (F.is_error()) { - LOG(ERROR) << "cannot parse answer to liteServer.getMasterchainInfoExt"; - } else { - auto f = F.move_as_ok(); - auto blk_id = create_block_id(f->last_); - auto zstate_id = create_zero_state_id(f->init_); - LOG(INFO) << "last masterchain block is " << blk_id.to_str(); - td::actor::send_closure_later(Self, &TestNode::got_server_mc_block_id_ext, blk_id, zstate_id, mode, - f->version_, f->capabilities_, f->last_utime_, f->now_); - } - } - }); + return envelope_send_query(std::move(b), [Self = actor_id(this), mode](td::Result res) -> void { + if (res.is_error()) { + LOG(ERROR) << "cannot get extended masterchain info from server"; + return; + } else { + auto F = ton::fetch_tl_object(res.move_as_ok(), true); + if (F.is_error()) { + LOG(ERROR) << "cannot parse answer to liteServer.getMasterchainInfoExt"; + } else { + auto f = F.move_as_ok(); + auto blk_id = create_block_id(f->last_); + auto zstate_id = create_zero_state_id(f->init_); + LOG(INFO) << "last masterchain block is " << blk_id.to_str(); + td::actor::send_closure_later(Self, &TestNode::got_server_mc_block_id_ext, blk_id, zstate_id, mode, + f->version_, f->capabilities_, f->last_utime_, f->now_); + } + } + }); } } @@ -611,8 +448,8 @@ void TestNode::got_server_mc_block_id(ton::BlockIdExt blkid, ton::ZeroStateIdExt void TestNode::got_server_mc_block_id_ext(ton::BlockIdExt blkid, ton::ZeroStateIdExt zstateid, int mode, int version, long long capabilities, int last_utime, int server_now) { - set_mc_server_version(version, capabilities); - set_mc_server_time(server_now); + set_server_version(version, capabilities); + set_server_time(server_now); if (last_utime > server_now) { LOG(WARNING) << "server claims to have a masterchain block " << blkid.to_str() << " created at " << last_utime << " (" << last_utime - server_now << " seconds in the future)"; @@ -620,10 +457,10 @@ void TestNode::got_server_mc_block_id_ext(ton::BlockIdExt blkid, ton::ZeroStateI LOG(WARNING) << "server appears to be out of sync: its newest masterchain block is " << blkid.to_str() << " created at " << last_utime << " (" << server_now - last_utime << " seconds ago according to the server's clock)"; - } else if (last_utime < mc_server_time_got_at_ - 60) { + } else if (last_utime < server_time_got_at_ - 60) { LOG(WARNING) << "either the server is out of sync, or the local clock is set incorrectly: the newest masterchain " "block known to server is " - << blkid.to_str() << " created at " << last_utime << " (" << server_now - mc_server_time_got_at_ + << blkid.to_str() << " created at " << last_utime << " (" << server_now - server_time_got_at_ << " seconds ago according to the local clock)"; } got_server_mc_block_id(blkid, zstateid, last_utime); @@ -632,54 +469,52 @@ void 
TestNode::got_server_mc_block_id_ext(ton::BlockIdExt blkid, ton::ZeroStateI bool TestNode::request_block(ton::BlockIdExt blkid) { auto b = ton::serialize_tl_object( ton::create_tl_object(ton::create_tl_lite_block_id(blkid)), true); - return envelope_send_query_to_shard( - blkid.shard_full(), std::move(b), [Self = actor_id(this), blkid](td::Result res) -> void { - if (res.is_error()) { - LOG(ERROR) << "cannot obtain block " << blkid.to_str() << " from server"; - return; - } else { - auto F = ton::fetch_tl_object(res.move_as_ok(), true); - if (F.is_error()) { - LOG(ERROR) << "cannot parse answer to liteServer.getBlock"; - } else { - auto f = F.move_as_ok(); - auto blk_id = ton::create_block_id(f->id_); - LOG(INFO) << "obtained block " << blk_id.to_str() << " from server"; - if (blk_id != blkid) { - LOG(ERROR) << "block id mismatch: expected data for block " << blkid.to_str() << ", obtained for " - << blk_id.to_str(); - } - td::actor::send_closure_later(Self, &TestNode::got_mc_block, blk_id, std::move(f->data_)); - } + return envelope_send_query(std::move(b), [Self = actor_id(this), blkid](td::Result res) -> void { + if (res.is_error()) { + LOG(ERROR) << "cannot obtain block " << blkid.to_str() << " from server"; + return; + } else { + auto F = ton::fetch_tl_object(res.move_as_ok(), true); + if (F.is_error()) { + LOG(ERROR) << "cannot parse answer to liteServer.getBlock"; + } else { + auto f = F.move_as_ok(); + auto blk_id = ton::create_block_id(f->id_); + LOG(INFO) << "obtained block " << blk_id.to_str() << " from server"; + if (blk_id != blkid) { + LOG(ERROR) << "block id mismatch: expected data for block " << blkid.to_str() << ", obtained for " + << blk_id.to_str(); } - }); + td::actor::send_closure_later(Self, &TestNode::got_mc_block, blk_id, std::move(f->data_)); + } + } + }); } bool TestNode::request_state(ton::BlockIdExt blkid) { auto b = ton::serialize_tl_object( ton::create_tl_object(ton::create_tl_lite_block_id(blkid)), true); - return envelope_send_query_to_shard( - blkid.shard_full(), std::move(b), [Self = actor_id(this), blkid](td::Result res) -> void { - if (res.is_error()) { - LOG(ERROR) << "cannot obtain state " << blkid.to_str() << " from server"; - return; - } else { - auto F = ton::fetch_tl_object(res.move_as_ok(), true); - if (F.is_error()) { - LOG(ERROR) << "cannot parse answer to liteServer.getState"; - } else { - auto f = F.move_as_ok(); - auto blk_id = ton::create_block_id(f->id_); - LOG(INFO) << "obtained state " << blk_id.to_str() << " from server"; - if (blk_id != blkid) { - LOG(ERROR) << "block id mismatch: expected state for block " << blkid.to_str() << ", obtained for " - << blk_id.to_str(); - } - td::actor::send_closure_later(Self, &TestNode::got_mc_state, blk_id, f->root_hash_, f->file_hash_, - std::move(f->data_)); - } + return envelope_send_query(std::move(b), [Self = actor_id(this), blkid](td::Result res) -> void { + if (res.is_error()) { + LOG(ERROR) << "cannot obtain state " << blkid.to_str() << " from server"; + return; + } else { + auto F = ton::fetch_tl_object(res.move_as_ok(), true); + if (F.is_error()) { + LOG(ERROR) << "cannot parse answer to liteServer.getState"; + } else { + auto f = F.move_as_ok(); + auto blk_id = ton::create_block_id(f->id_); + LOG(INFO) << "obtained state " << blk_id.to_str() << " from server"; + if (blk_id != blkid) { + LOG(ERROR) << "block id mismatch: expected state for block " << blkid.to_str() << ", obtained for " + << blk_id.to_str(); } - }); + td::actor::send_closure_later(Self, &TestNode::got_mc_state, blk_id, 
f->root_hash_, f->file_hash_, + std::move(f->data_)); + } + } + }); } void TestNode::got_mc_block(ton::BlockIdExt blkid, td::BufferSlice data) { @@ -1170,11 +1005,12 @@ bool TestNode::do_parse_line() { return eoln() && get_server_mc_block_id(); } else if (word == "sendfile") { return !eoln() && set_error(send_ext_msg_from_filename(get_line_tail())); - } else if (word == "getaccount") { + } else if (word == "getaccount" || word == "getaccountprunned") { + bool prunned = word == "getaccountprunned"; return parse_account_addr_ext(workchain, addr, addr_ext) && - (seekeoln() - ? get_account_state(workchain, addr, mc_last_id_, addr_ext) - : parse_block_id_ext(blkid) && seekeoln() && get_account_state(workchain, addr, blkid, addr_ext)); + (seekeoln() ? get_account_state(workchain, addr, mc_last_id_, addr_ext, "", -1, prunned) + : parse_block_id_ext(blkid) && seekeoln() && + get_account_state(workchain, addr, blkid, addr_ext, "", -1, prunned)); } else if (word == "saveaccount" || word == "saveaccountcode" || word == "saveaccountdata") { std::string filename; int mode = ((word.c_str()[11] >> 1) & 3); @@ -1314,73 +1150,76 @@ td::Status TestNode::send_ext_msg_from_filename(std::string filename) { LOG(ERROR) << "failed to read file `" << filename << "`: " << err.to_string(); return err; } - LOG(ERROR) << "sending query from file " << filename; - - TRY_RESULT_PREFIX(root, vm::std_boc_deserialize(F.ok().as_slice()), "invalid boc: "); - block::gen::CommonMsgInfo::Record_ext_in_msg_info info; - if (!tlb::unpack_cell_inexact(root, info)) { - return td::Status::Error("failed to unpack external message header"); + if (ready_ && !client_.empty()) { + LOG(ERROR) << "sending query from file " << filename; + auto P = td::PromiseCreator::lambda([](td::Result R) { + if (R.is_error()) { + return; + } + auto F = ton::fetch_tl_object(R.move_as_ok(), true); + if (F.is_error()) { + LOG(ERROR) << "cannot parse answer to liteServer.sendMessage"; + } else { + int status = F.move_as_ok()->status_; + LOG(INFO) << "external message status is " << status; + } + }); + auto b = + ton::serialize_tl_object(ton::create_tl_object(F.move_as_ok()), true); + return envelope_send_query(std::move(b), std::move(P)) ? td::Status::OK() + : td::Status::Error("cannot send query to server"); + } else { + return td::Status::Error("server connection not ready"); } - auto dest_prefix = block::tlb::t_MsgAddressInt.get_prefix(info.dest); - if (!dest_prefix.is_valid()) { - return td::Status::Error("destination of the message is invalid"); - } - - auto P = td::PromiseCreator::lambda([](td::Result R) { - if (R.is_error()) { - return; - } - auto F = ton::fetch_tl_object(R.move_as_ok(), true); - if (F.is_error()) { - LOG(ERROR) << "cannot parse answer to liteServer.sendMessage"; - } else { - int status = F.move_as_ok()->status_; - LOG(INFO) << "external message status is " << status; - } - }); - auto b = - ton::serialize_tl_object(ton::create_tl_object(F.move_as_ok()), true); - return envelope_send_query_to_account(dest_prefix, std::move(b), std::move(P)) ? 
td::Status::OK() - : td::Status::Error("cannot send query to server"); } bool TestNode::get_account_state(ton::WorkchainId workchain, ton::StdSmcAddress addr, ton::BlockIdExt ref_blkid, - int addr_ext, std::string filename, int mode) { + int addr_ext, std::string filename, int mode, bool prunned) { if (!ref_blkid.is_valid()) { return set_error("must obtain last block information before making other queries"); } + if (!(ready_ && !client_.empty())) { + return set_error("server connection not ready"); + } if (addr_ext) { - return get_special_smc_addr(addr_ext, [this, ref_blkid, filename, mode](td::Result res) { - if (res.is_error()) { - LOG(ERROR) << "cannot resolve special smart contract address: " << res.move_as_error(); - } else { - get_account_state(ton::masterchainId, res.move_as_ok(), ref_blkid, 0, filename, mode); - } - }); + return get_special_smc_addr( + addr_ext, [this, ref_blkid, filename, mode, prunned](td::Result res) { + if (res.is_error()) { + LOG(ERROR) << "cannot resolve special smart contract address: " << res.move_as_error(); + } else { + get_account_state(ton::masterchainId, res.move_as_ok(), ref_blkid, 0, filename, mode, prunned); + } + }); } auto a = ton::create_tl_object(workchain, addr); - auto b = ton::serialize_tl_object(ton::create_tl_object( - ton::create_tl_lite_block_id(ref_blkid), std::move(a)), - true); - ton::AccountIdPrefixFull account_prefix(workchain, addr.bits().get_uint(64)); - LOG(INFO) << "requesting account state for " << workchain << ":" << addr.to_hex() << " with respect to " - << ref_blkid.to_str() << " with savefile `" << filename << "` and mode " << mode; - return envelope_send_query_to_account( - account_prefix, std::move(b), - [Self = actor_id(this), workchain, addr, ref_blkid, filename, mode](td::Result R) { - if (R.is_error()) { - return; - } - auto F = ton::fetch_tl_object(R.move_as_ok(), true); - if (F.is_error()) { - LOG(ERROR) << "cannot parse answer to liteServer.getAccountState"; - } else { - auto f = F.move_as_ok(); - td::actor::send_closure_later(Self, &TestNode::got_account_state, ref_blkid, ton::create_block_id(f->id_), - ton::create_block_id(f->shardblk_), std::move(f->shard_proof_), - std::move(f->proof_), std::move(f->state_), workchain, addr, filename, mode); - } - }); + td::BufferSlice b; + if (prunned) { + b = ton::serialize_tl_object(ton::create_tl_object( + ton::create_tl_lite_block_id(ref_blkid), std::move(a)), + true); + } else { + b = ton::serialize_tl_object(ton::create_tl_object( + ton::create_tl_lite_block_id(ref_blkid), std::move(a)), + true); + } + LOG(INFO) << "requesting " << (prunned ? 
"prunned " : "") << "account state for " << workchain << ":" << addr.to_hex() + << " with respect to " << ref_blkid.to_str() << " with savefile `" << filename << "` and mode " << mode; + return envelope_send_query(std::move(b), [Self = actor_id(this), workchain, addr, ref_blkid, filename, mode, + prunned](td::Result R) { + if (R.is_error()) { + return; + } + auto F = ton::fetch_tl_object(R.move_as_ok(), true); + if (F.is_error()) { + LOG(ERROR) << "cannot parse answer to liteServer.getAccountState"; + } else { + auto f = F.move_as_ok(); + td::actor::send_closure_later(Self, &TestNode::got_account_state, ref_blkid, ton::create_block_id(f->id_), + ton::create_block_id(f->shardblk_), std::move(f->shard_proof_), + std::move(f->proof_), std::move(f->state_), workchain, addr, filename, mode, + prunned); + } + }); } td::int64 TestNode::compute_method_id(std::string method) { @@ -1446,18 +1285,19 @@ bool TestNode::start_run_method(ton::WorkchainId workchain, ton::StdSmcAddress a if (!ref_blkid.is_valid()) { return set_error("must obtain last block information before making other queries"); } + if (!(ready_ && !client_.empty())) { + return set_error("server connection not ready"); + } auto a = ton::create_tl_object(workchain, addr); - ton::AccountIdPrefixFull account_prefix(workchain, addr.bits().get_uint(64)); if (!mode) { auto b = ton::serialize_tl_object(ton::create_tl_object( ton::create_tl_lite_block_id(ref_blkid), std::move(a)), true); LOG(INFO) << "requesting account state for " << workchain << ":" << addr.to_hex() << " with respect to " << ref_blkid.to_str() << " to run method " << method_name << " with " << params.size() << " parameters"; - return envelope_send_query_to_account( - account_prefix, std::move(b), - [Self = actor_id(this), workchain, addr, ref_blkid, method_name, params = std::move(params), - promise = std::move(promise)](td::Result R) mutable { + return envelope_send_query( + std::move(b), [Self = actor_id(this), workchain, addr, ref_blkid, method_name, params = std::move(params), + promise = std::move(promise)](td::Result R) mutable { if (R.is_error()) { promise.set_error(R.move_as_error()); return; @@ -1497,27 +1337,26 @@ bool TestNode::start_run_method(ton::WorkchainId workchain, ton::StdSmcAddress a LOG(INFO) << "requesting remote get-method execution for " << workchain << ":" << addr.to_hex() << " with respect to " << ref_blkid.to_str() << " to run method " << method_name << " with " << params.size() << " parameters"; - return envelope_send_query_to_account( - account_prefix, std::move(b), - [Self = actor_id(this), workchain, addr, ref_blkid, method_name, mode, params = std::move(params), - promise = std::move(promise)](td::Result R) mutable { - if (R.is_error()) { - promise.set_error(R.move_as_error()); - return; - } - auto F = ton::fetch_tl_object(R.move_as_ok(), true); - if (F.is_error()) { - LOG(ERROR) << "cannot parse answer to liteServer.runSmcMethod"; - promise.set_error(td::Status::Error("cannot parse answer to liteServer.runSmcMethod")); - } else { - auto f = F.move_as_ok(); - td::actor::send_closure_later( - Self, &TestNode::run_smc_method, mode, ref_blkid, ton::create_block_id(f->id_), - ton::create_block_id(f->shardblk_), std::move(f->shard_proof_), std::move(f->proof_), - std::move(f->state_proof_), workchain, addr, method_name, std::move(params), std::move(f->init_c7_), - std::move(f->lib_extras_), std::move(f->result_), f->exit_code_, std::move(promise)); - } - }); + return envelope_send_query(std::move(b), [Self = actor_id(this), workchain, addr, 
ref_blkid, method_name, mode, + params = std::move(params), + promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + return; + } + auto F = ton::fetch_tl_object(R.move_as_ok(), true); + if (F.is_error()) { + LOG(ERROR) << "cannot parse answer to liteServer.runSmcMethod"; + promise.set_error(td::Status::Error("cannot parse answer to liteServer.runSmcMethod")); + } else { + auto f = F.move_as_ok(); + td::actor::send_closure_later(Self, &TestNode::run_smc_method, mode, ref_blkid, ton::create_block_id(f->id_), + ton::create_block_id(f->shardblk_), std::move(f->shard_proof_), + std::move(f->proof_), std::move(f->state_proof_), workchain, addr, method_name, + std::move(params), std::move(f->init_c7_), std::move(f->lib_extras_), + std::move(f->result_), f->exit_code_, std::move(promise)); + } + }); } } @@ -1748,7 +1587,7 @@ void TestNode::send_compute_complaint_price_query(ton::StdSmcAddress elector_add params.emplace_back(td::make_refint(refs)); params.emplace_back(td::make_refint(expires_in)); auto P = td::PromiseCreator::lambda( - [expires_in, bits, refs, chash, filename](td::Result> R) { + [this, expires_in, bits, refs, chash, filename](td::Result> R) { if (R.is_error()) { LOG(ERROR) << R.move_as_error(); return; @@ -1810,6 +1649,10 @@ bool TestNode::dns_resolve_start(ton::WorkchainId workchain, ton::StdSmcAddress return set_error("domain name too long"); } + if (!(ready_ && !client_.empty())) { + return set_error("server connection not ready"); + } + if (workchain == ton::workchainInvalid) { if (dns_root_queried_) { workchain = ton::masterchainId; @@ -2011,16 +1854,17 @@ bool TestNode::get_one_transaction(ton::BlockIdExt blkid, ton::WorkchainId workc if (!ton::shard_contains(blkid.shard_full(), ton::extract_addr_prefix(workchain, addr))) { return set_error("the shard of this block cannot contain this account"); } + if (!(ready_ && !client_.empty())) { + return set_error("server connection not ready"); + } auto a = ton::create_tl_object(workchain, addr); auto b = ton::serialize_tl_object(ton::create_tl_object( ton::create_tl_lite_block_id(blkid), std::move(a), lt), true); - ton::AccountIdPrefixFull account_prefix(workchain, addr.bits().get_uint(64)); LOG(INFO) << "requesting transaction " << lt << " of " << workchain << ":" << addr.to_hex() << " from block " << blkid.to_str(); - return envelope_send_query_to_account( - account_prefix, std::move(b), - [Self = actor_id(this), workchain, addr, lt, blkid, dump](td::Result R) -> void { + return envelope_send_query( + std::move(b), [Self = actor_id(this), workchain, addr, lt, blkid, dump](td::Result R) -> void { if (R.is_error()) { return; } @@ -2037,15 +1881,16 @@ bool TestNode::get_one_transaction(ton::BlockIdExt blkid, ton::WorkchainId workc bool TestNode::get_last_transactions(ton::WorkchainId workchain, ton::StdSmcAddress addr, ton::LogicalTime lt, ton::Bits256 hash, unsigned count, bool dump) { + if (!(ready_ && !client_.empty())) { + return set_error("server connection not ready"); + } auto a = ton::create_tl_object(workchain, addr); auto b = ton::serialize_tl_object( ton::create_tl_object(count, std::move(a), lt, hash), true); - ton::AccountIdPrefixFull account_prefix(workchain, addr.bits().get_uint(64)); LOG(INFO) << "requesting " << count << " last transactions from " << lt << ":" << hash.to_hex() << " of " << workchain << ":" << addr.to_hex(); - return envelope_send_query_to_account( - account_prefix, std::move(b), - [Self = actor_id(this), workchain, addr, lt, hash, count, 
dump](td::Result R) { + return envelope_send_query( + std::move(b), [Self = actor_id(this), workchain, addr, lt, hash, count, dump](td::Result R) { if (R.is_error()) { return; } @@ -2066,15 +1911,18 @@ bool TestNode::get_last_transactions(ton::WorkchainId workchain, ton::StdSmcAddr void TestNode::got_account_state(ton::BlockIdExt ref_blk, ton::BlockIdExt blk, ton::BlockIdExt shard_blk, td::BufferSlice shard_proof, td::BufferSlice proof, td::BufferSlice state, - ton::WorkchainId workchain, ton::StdSmcAddress addr, std::string filename, int mode) { - LOG(INFO) << "got account state for " << workchain << ":" << addr.to_hex() << " with respect to blocks " - << blk.to_str() << (shard_blk == blk ? "" : std::string{" and "} + shard_blk.to_str()); + ton::WorkchainId workchain, ton::StdSmcAddress addr, std::string filename, int mode, + bool prunned) { + LOG(INFO) << "got " << (prunned ? "prunned " : "") << "account state for " << workchain << ":" << addr.to_hex() + << " with respect to blocks " << blk.to_str() + << (shard_blk == blk ? "" : std::string{" and "} + shard_blk.to_str()); block::AccountState account_state; account_state.blk = blk; account_state.shard_blk = shard_blk; account_state.shard_proof = std::move(shard_proof); account_state.proof = std::move(proof); account_state.state = std::move(state); + account_state.is_virtualized = prunned; auto r_info = account_state.validate(ref_blk, block::StdAddress(workchain, addr)); if (r_info.is_error()) { LOG(ERROR) << r_info.error().message(); @@ -2402,10 +2250,10 @@ void TestNode::got_one_transaction(ton::BlockIdExt req_blkid, ton::BlockIdExt bl << " but received data has " << root->get_hash().bits().to_hex(256); return; } - } catch (vm::VmError &err) { + } catch (vm::VmError err) { LOG(ERROR) << "error while traversing block transaction proof : " << err.get_msg(); return; - } catch (vm::VmVirtError &err) { + } catch (vm::VmVirtError err) { LOG(ERROR) << "virtualization error while traversing block transaction proof : " << err.get_msg(); return; } @@ -2584,30 +2432,32 @@ void TestNode::got_last_transactions(std::vector blkids, td::Bu bool TestNode::get_block_transactions(ton::BlockIdExt blkid, int mode, unsigned count, ton::Bits256 acc_addr, ton::LogicalTime lt) { + if (!(ready_ && !client_.empty())) { + return set_error("server connection not ready"); + } auto a = ton::create_tl_object(acc_addr, lt); auto b = ton::serialize_tl_object(ton::create_tl_object( ton::create_tl_lite_block_id(blkid), mode, count, std::move(a), false, false), true); LOG(INFO) << "requesting " << count << " transactions from block " << blkid.to_str() << " starting from account " << acc_addr.to_hex() << " lt " << lt; - return envelope_send_query_to_shard( - blkid.shard_full(), std::move(b), [Self = actor_id(this), mode](td::Result R) { - if (R.is_error()) { - return; - } - auto F = ton::fetch_tl_object(R.move_as_ok(), true); - if (F.is_error()) { - LOG(ERROR) << "cannot parse answer to liteServer.listBlockTransactions"; - } else { - auto f = F.move_as_ok(); - std::vector transactions; - for (auto& id : f->ids_) { - transactions.emplace_back(id->account_, id->lt_, id->hash_); - } - td::actor::send_closure_later(Self, &TestNode::got_block_transactions, ton::create_block_id(f->id_), mode, - f->req_count_, f->incomplete_, std::move(transactions), std::move(f->proof_)); - } - }); + return envelope_send_query(std::move(b), [Self = actor_id(this), mode](td::Result R) { + if (R.is_error()) { + return; + } + auto F = ton::fetch_tl_object(R.move_as_ok(), true); + if (F.is_error()) { 
+ LOG(ERROR) << "cannot parse answer to liteServer.listBlockTransactions"; + } else { + auto f = F.move_as_ok(); + std::vector transactions; + for (auto& id : f->ids_) { + transactions.emplace_back(id->account_, id->lt_, id->hash_); + } + td::actor::send_closure_later(Self, &TestNode::got_block_transactions, ton::create_block_id(f->id_), mode, + f->req_count_, f->incomplete_, std::move(transactions), std::move(f->proof_)); + } + }); } void TestNode::got_block_transactions(ton::BlockIdExt blkid, int mode, unsigned req_count, bool incomplete, @@ -2633,23 +2483,25 @@ bool TestNode::get_all_shards(std::string filename, bool use_last, ton::BlockIdE if (!blkid.is_masterchain()) { return set_error("only masterchain blocks contain shard configuration"); } + if (!(ready_ && !client_.empty())) { + return set_error("server connection not ready"); + } auto b = ton::serialize_tl_object( ton::create_tl_object(ton::create_tl_lite_block_id(blkid)), true); LOG(INFO) << "requesting recent shard configuration"; - return envelope_send_query_to_any( - std::move(b), [Self = actor_id(this), filename](td::Result R) -> void { - if (R.is_error()) { - return; - } - auto F = ton::fetch_tl_object(R.move_as_ok(), true); - if (F.is_error()) { - LOG(ERROR) << "cannot parse answer to liteServer.getAllShardsInfo"; - } else { - auto f = F.move_as_ok(); - td::actor::send_closure_later(Self, &TestNode::got_all_shards, ton::create_block_id(f->id_), - std::move(f->proof_), std::move(f->data_), filename); - } - }); + return envelope_send_query(std::move(b), [Self = actor_id(this), filename](td::Result R) -> void { + if (R.is_error()) { + return; + } + auto F = ton::fetch_tl_object(R.move_as_ok(), true); + if (F.is_error()) { + LOG(ERROR) << "cannot parse answer to liteServer.getAllShardsInfo"; + } else { + auto f = F.move_as_ok(); + td::actor::send_closure_later(Self, &TestNode::got_all_shards, ton::create_block_id(f->id_), std::move(f->proof_), + std::move(f->data_), filename); + } + }); } void TestNode::got_all_shards(ton::BlockIdExt blk, td::BufferSlice proof, td::BufferSlice data, std::string filename) { @@ -2713,6 +2565,9 @@ bool TestNode::parse_get_config_params(ton::BlockIdExt blkid, int mode, std::str params.push_back(x); } } + if (!(ready_ && !client_.empty())) { + return set_error("server connection not ready"); + } if (!blkid.is_masterchain_ext()) { return set_error("only masterchain blocks contain configuration"); } @@ -2731,6 +2586,10 @@ bool TestNode::get_config_params(ton::BlockIdExt blkid, td::Promise promise, int mode, std::string filename, std::vector params) { + if (!(ready_ && !client_.empty())) { + promise.set_error(td::Status::Error("server connection not ready")); + return false; + } if (!blkid.is_masterchain_ext()) { promise.set_error(td::Status::Error("masterchain reference block expected")); return false; @@ -2748,12 +2607,11 @@ bool TestNode::get_config_params_ext(ton::BlockIdExt blkid, td::Promise R) mutable { - td::actor::send_closure_later(Self, &TestNode::got_config_params, blkid, mode, filename, std::move(params), - std::move(R), std::move(promise)); - }); + return envelope_send_query(std::move(b), [Self = actor_id(this), mode, filename, blkid, params = std::move(params), + promise = std::move(promise)](td::Result R) mutable { + td::actor::send_closure_later(Self, &TestNode::got_config_params, blkid, mode, filename, std::move(params), + std::move(R), std::move(promise)); + }); } void TestNode::got_config_params(ton::BlockIdExt req_blkid, int mode, std::string filename, std::vector params, @@ 
-2941,8 +2799,8 @@ bool TestNode::get_block(ton::BlockIdExt blkid, bool dump) { LOG(INFO) << "got block download request for " << blkid.to_str(); auto b = ton::serialize_tl_object( ton::create_tl_object(ton::create_tl_lite_block_id(blkid)), true); - return envelope_send_query_to_shard( - blkid.shard_full(), std::move(b), [Self = actor_id(this), blkid, dump](td::Result res) -> void { + return envelope_send_query( + std::move(b), [Self = actor_id(this), blkid, dump](td::Result res) -> void { if (res.is_error()) { LOG(ERROR) << "cannot obtain block " << blkid.to_str() << " from server : " << res.move_as_error().to_string(); @@ -2970,8 +2828,8 @@ bool TestNode::get_state(ton::BlockIdExt blkid, bool dump) { LOG(INFO) << "got state download request for " << blkid.to_str(); auto b = ton::serialize_tl_object( ton::create_tl_object(ton::create_tl_lite_block_id(blkid)), true); - return envelope_send_query_to_shard( - blkid.shard_full(), std::move(b), [Self = actor_id(this), blkid, dump](td::Result res) -> void { + return envelope_send_query( + std::move(b), [Self = actor_id(this), blkid, dump](td::Result res) -> void { if (res.is_error()) { LOG(ERROR) << "cannot obtain state " << blkid.to_str() << " from server : " << res.move_as_error().to_string(); @@ -3108,7 +2966,7 @@ void TestNode::got_state(ton::BlockIdExt blkid, ton::RootHash root_hash, ton::Fi } bool TestNode::get_show_block_header(ton::BlockIdExt blkid, int mode) { - return get_block_header(blkid, mode, [this](td::Result R) { + return get_block_header(blkid, mode, [this, blkid](td::Result R) { if (R.is_error()) { LOG(ERROR) << "unable to fetch block header: " << R.move_as_error(); } else { @@ -3123,9 +2981,8 @@ bool TestNode::get_block_header(ton::BlockIdExt blkid, int mode, td::Promise(ton::create_tl_lite_block_id(blkid), mode), true); - return envelope_send_query_to_shard( - blkid.shard_full(), std::move(b), - [this, blkid, promise = std::move(promise)](td::Result R) mutable -> void { + return envelope_send_query( + std::move(b), [this, blkid, promise = std::move(promise)](td::Result R) mutable -> void { TRY_RESULT_PROMISE_PREFIX(promise, res, std::move(R), PSLICE() << "cannot obtain block header for " << blkid.to_str() << " from server :"); got_block_header_raw(std::move(res), std::move(promise), blkid); @@ -3151,8 +3008,8 @@ bool TestNode::lookup_block(ton::ShardIdFull shard, int mode, td::uint64 arg, auto b = ton::serialize_tl_object(ton::create_tl_object( mode, ton::create_tl_lite_block_id_simple(id), arg, (td::uint32)arg), true); - return envelope_send_query_to_shard( - shard, std::move(b), [this, id, mode, arg, promise = std::move(promise)](td::Result R) mutable -> void { + return envelope_send_query( + std::move(b), [this, id, mode, arg, promise = std::move(promise)](td::Result R) mutable -> void { TRY_RESULT_PROMISE_PREFIX(promise, res, std::move(R), PSLICE() << "cannot look up block header for " << id.to_str() << " with mode " << mode << " and argument " << arg << " from server :"); @@ -3266,9 +3123,9 @@ void TestNode::got_block_header(ton::BlockIdExt blkid, td::BufferSlice data, int return; } show_block_header(blkid, std::move(virt_root), mode); - } catch (vm::VmError &err) { + } catch (vm::VmError err) { LOG(ERROR) << "error processing header for " << blkid.to_str() << " : " << err.get_msg(); - } catch (vm::VmVirtError &err) { + } catch (vm::VmVirtError err) { LOG(ERROR) << "error processing header for " << blkid.to_str() << " : " << err.get_msg(); } show_new_blkids(); @@ -3297,15 +3154,14 @@ bool 
TestNode::get_block_proof(ton::BlockIdExt from, ton::BlockIdExt to, int mod ton::serialize_tl_object(ton::create_tl_object( mode & 0xfff, ton::create_tl_lite_block_id(from), ton::create_tl_lite_block_id(to)), true); - return envelope_send_query_to_any( - std::move(b), [Self = actor_id(this), from, to, mode](td::Result res) { - if (res.is_error()) { - LOG(ERROR) << "cannot obtain block proof for " << ((mode & 1) ? to.to_str() : "last masterchain block") - << " starting from " << from.to_str() << " from server : " << res.move_as_error().to_string(); - } else { - td::actor::send_closure_later(Self, &TestNode::got_block_proof, from, to, mode, res.move_as_ok()); - } - }); + return envelope_send_query(std::move(b), [Self = actor_id(this), from, to, mode](td::Result res) { + if (res.is_error()) { + LOG(ERROR) << "cannot obtain block proof for " << ((mode & 1) ? to.to_str() : "last masterchain block") + << " starting from " << from.to_str() << " from server : " << res.move_as_error().to_string(); + } else { + td::actor::send_closure_later(Self, &TestNode::got_block_proof, from, to, mode, res.move_as_ok()); + } + }); } void TestNode::got_block_proof(ton::BlockIdExt from, ton::BlockIdExt to, int mode, td::BufferSlice pchain) { @@ -3359,6 +3215,9 @@ void TestNode::got_block_proof(ton::BlockIdExt from, ton::BlockIdExt to, int mod bool TestNode::get_creator_stats(ton::BlockIdExt blkid, int mode, unsigned req_count, ton::Bits256 start_after, ton::UnixTime min_utime) { + if (!(ready_ && !client_.empty())) { + return set_error("server connection not ready"); + } if (!blkid.is_masterchain_ext()) { return set_error("only masterchain blocks contain block creator statistics"); } @@ -3369,7 +3228,7 @@ bool TestNode::get_creator_stats(ton::BlockIdExt blkid, int mode, unsigned req_c auto& os = *osp; return get_creator_stats( blkid, mode, req_count, start_after, min_utime, - [&os](const td::Bits256& key, const block::DiscountedCounter& mc_cnt, + [min_utime, &os](const td::Bits256& key, const block::DiscountedCounter& mc_cnt, const block::DiscountedCounter& shard_cnt) -> bool { os << key.to_hex() << " mc_cnt:" << mc_cnt << " shard_cnt:" << shard_cnt << std::endl; return true; @@ -3399,6 +3258,10 @@ bool TestNode::get_creator_stats(ton::BlockIdExt blkid, int mode, unsigned req_c bool TestNode::get_creator_stats(ton::BlockIdExt blkid, unsigned req_count, ton::UnixTime min_utime, TestNode::creator_stats_func_t func, std::unique_ptr state, td::Promise> promise) { + if (!(ready_ && !client_.empty())) { + promise.set_error(td::Status::Error("server connection not ready")); + return false; + } if (!state) { promise.set_error(td::Status::Error("null CreatorStatsRes")); return false; @@ -3417,7 +3280,7 @@ bool TestNode::get_creator_stats(ton::BlockIdExt blkid, unsigned req_count, ton: LOG(INFO) << "requesting up to " << req_count << " block creator stats records with respect to masterchain block " << blkid.to_str() << " starting from validator public key " << state->last_key.to_hex() << " created after " << min_utime << " (mode=" << state->mode << ")"; - return envelope_send_query_to_any( + return envelope_send_query( std::move(b), [this, blkid, req_count, state = std::move(state), min_utime, func = std::move(func), promise = std::move(promise)](td::Result R) mutable { TRY_RESULT_PROMISE(promise, res, std::move(R)); @@ -3634,7 +3497,7 @@ bool TestNode::load_creator_stats(std::unique_ptr l ton::UnixTime min_utime = info.valid_since - 1000; return get_creator_stats( info.blk_id, 1000, min_utime, - [&info](const td::Bits256& 
key, const block::DiscountedCounter& mc_cnt, + [min_utime, &info](const td::Bits256& key, const block::DiscountedCounter& mc_cnt, const block::DiscountedCounter& shard_cnt) -> bool { info.store_record(key, mc_cnt, shard_cnt); return true; @@ -4029,7 +3892,7 @@ td::Result> TestNode::ValidatorLoadInfo::build_proof(int idx, td:: block::gen::ValidatorDescr::Record_validator_addr rec2; if (tlb::csr_unpack(entry, rec1)) { pk = std::move(rec1.public_key); - } else if (tlb::csr_unpack(entry, rec2)) { + } else if (tlb::csr_unpack(std::move(entry), rec2)) { pk = std::move(rec2.public_key); } else { return td::Status::Error("cannot unpack ValidatorDescr"); diff --git a/lite-client/lite-client.h b/lite-client/lite-client.h index adfa9a04..34d02011 100644 --- a/lite-client/lite-client.h +++ b/lite-client/lite-client.h @@ -205,10 +205,11 @@ class TestNode : public td::actor::Actor { td::Status send_ext_msg_from_filename(std::string filename); td::Status save_db_file(ton::FileHash file_hash, td::BufferSlice data); bool get_account_state(ton::WorkchainId workchain, ton::StdSmcAddress addr, ton::BlockIdExt ref_blkid, - int addr_ext = 0, std::string filename = "", int mode = -1); + int addr_ext = 0, std::string filename = "", int mode = -1, bool prunned = false); void got_account_state(ton::BlockIdExt ref_blk, ton::BlockIdExt blk, ton::BlockIdExt shard_blk, td::BufferSlice shard_proof, td::BufferSlice proof, td::BufferSlice state, - ton::WorkchainId workchain, ton::StdSmcAddress addr, std::string filename, int mode); + ton::WorkchainId workchain, ton::StdSmcAddress addr, std::string filename, int mode, + bool prunned); bool parse_run_method(ton::WorkchainId workchain, ton::StdSmcAddress addr, ton::BlockIdExt ref_blkid, int addr_ext, std::string method_name, bool ext_mode); bool after_parse_run_method(ton::WorkchainId workchain, ton::StdSmcAddress addr, ton::BlockIdExt ref_blkid, diff --git a/overlay/overlay-manager.cpp b/overlay/overlay-manager.cpp index 96e9fc2a..b7ca1270 100644 --- a/overlay/overlay-manager.cpp +++ b/overlay/overlay-manager.cpp @@ -89,12 +89,20 @@ void OverlayManager::delete_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdSho } void OverlayManager::create_public_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, - std::unique_ptr callback, OverlayPrivacyRules rules, td::string scope) { + std::unique_ptr callback, OverlayPrivacyRules rules, + td::string scope) { + create_public_overlay_ex(local_id, std::move(overlay_id), std::move(callback), std::move(rules), std::move(scope), + true); +} + +void OverlayManager::create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, + std::unique_ptr callback, OverlayPrivacyRules rules, + td::string scope, bool announce_self) { CHECK(!dht_node_.empty()); auto id = overlay_id.compute_short_id(); register_overlay(local_id, id, Overlay::create(keyring_, adnl_, actor_id(this), dht_node_, local_id, std::move(overlay_id), - std::move(callback), std::move(rules), scope)); + std::move(callback), std::move(rules), scope, announce_self)); } void OverlayManager::create_public_overlay_external(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, diff --git a/overlay/overlay-manager.h b/overlay/overlay-manager.h index ed9da23b..f7ed32ba 100644 --- a/overlay/overlay-manager.h +++ b/overlay/overlay-manager.h @@ -52,9 +52,9 @@ class OverlayManager : public Overlays { void create_public_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, OverlayPrivacyRules rules, td::string scope) 
override; - void create_public_overlay_external(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, - std::unique_ptr callback, OverlayPrivacyRules rules, - td::string scope) override; + void create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, + std::unique_ptr callback, OverlayPrivacyRules rules, td::string scope, + bool announce_self) override; void create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::vector nodes, std::unique_ptr callback, OverlayPrivacyRules rules) override; diff --git a/overlay/overlay-peers.cpp b/overlay/overlay-peers.cpp index e1e4a3ce..409f0993 100644 --- a/overlay/overlay-peers.cpp +++ b/overlay/overlay-peers.cpp @@ -43,6 +43,7 @@ void OverlayImpl::del_peer(adnl::AdnlNodeIdShort id) { P->set_neighbour(false); } peers_.remove(id); + bad_peers_.erase(id); update_neighbours(0); } @@ -51,7 +52,16 @@ void OverlayImpl::del_some_peers() { return; } while (peers_.size() > max_peers()) { - auto P = get_random_peer(); + OverlayPeer *P; + if (bad_peers_.empty()) { + P = get_random_peer(); + } else { + auto it = bad_peers_.upper_bound(next_bad_peer_); + if (it == bad_peers_.end()) { + it = bad_peers_.begin(); + } + P = peers_.get(next_bad_peer_ = *it); + } if (P) { auto id = P->get_id(); del_peer(id); @@ -118,16 +128,35 @@ void OverlayImpl::add_peer(OverlayNode P) { add_peer_in(std::move(P)); } -void OverlayImpl::receive_random_peers(adnl::AdnlNodeIdShort src, td::BufferSlice data) { +void OverlayImpl::on_ping_result(adnl::AdnlNodeIdShort peer, bool success) { + if (!public_) { + return; + } + if (OverlayPeer *p = peers_.get(peer)) { + p->on_ping_result(success); + if (p->is_alive()) { + bad_peers_.erase(peer); + } else { + bad_peers_.insert(peer); + } + } +} + +void OverlayImpl::receive_random_peers(adnl::AdnlNodeIdShort src, td::Result R) { CHECK(public_); - auto R = fetch_tl_object(std::move(data), true); + on_ping_result(src, R.is_ok()); if (R.is_error()) { + VLOG(OVERLAY_NOTICE) << this << ": failed getRandomPeers query: " << R.move_as_error(); + return; + } + auto R2 = fetch_tl_object(R.move_as_ok(), true); + if (R2.is_error()) { VLOG(OVERLAY_WARNING) << this << ": dropping incorrect answer to overlay.getRandomPeers query from " << src << ": " - << R.move_as_error(); + << R2.move_as_error(); return; } - auto res = R.move_as_ok(); + auto res = R2.move_as_ok(); std::vector nodes; for (auto &n : res->nodes_) { @@ -142,12 +171,12 @@ void OverlayImpl::receive_random_peers(adnl::AdnlNodeIdShort src, td::BufferSlic void OverlayImpl::send_random_peers_cont(adnl::AdnlNodeIdShort src, OverlayNode node, td::Promise promise) { std::vector> vec; - if (!is_external_) { + if (announce_self_) { vec.emplace_back(node.tl()); } for (td::uint32 i = 0; i < nodes_to_send(); i++) { - auto P = get_random_peer(); + auto P = get_random_peer(true); if (P) { vec.emplace_back(P->get().tl()); } else { @@ -161,11 +190,7 @@ void OverlayImpl::send_random_peers_cont(adnl::AdnlNodeIdShort src, OverlayNode } else { auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), src, oid = print_id()](td::Result res) { - if (res.is_error()) { - VLOG(OVERLAY_NOTICE) << oid << ": failed getRandomPeers query: " << res.move_as_error(); - return; - } - td::actor::send_closure(SelfId, &OverlayImpl::receive_random_peers, src, res.move_as_ok()); + td::actor::send_closure(SelfId, &OverlayImpl::receive_random_peers, src, std::move(res)); }); auto Q = create_tl_object(create_tl_object(std::move(vec))); @@ -218,6 +243,7 @@ void 
OverlayImpl::update_neighbours(td::uint32 nodes_to_change) { neighbours_.pop_back(); X->set_neighbour(false); } + bad_peers_.erase(X->get_id()); peers_.remove(X->get_id()); continue; } @@ -246,15 +272,25 @@ void OverlayImpl::update_neighbours(td::uint32 nodes_to_change) { } } -OverlayPeer *OverlayImpl::get_random_peer() { - while (peers_.size() > 0) { +OverlayPeer *OverlayImpl::get_random_peer(bool only_alive) { + size_t skip_bad = 3; + while (peers_.size() > (only_alive ? bad_peers_.size() : 0)) { auto P = peers_.get_random(); if (public_ && P->get_version() + 3600 < td::Clocks::system()) { VLOG(OVERLAY_INFO) << this << ": deleting outdated peer " << P->get_id(); del_peer(P->get_id()); - } else { - return P; + continue; } + if (!P->is_alive()) { + if (only_alive) { + continue; + } + if (skip_bad > 0) { + --skip_bad; + continue; + } + } + return P; } return nullptr; } @@ -263,17 +299,17 @@ void OverlayImpl::get_overlay_random_peers(td::uint32 max_peers, td::Promise> promise) { std::vector v; auto t = td::Clocks::system(); - while (peers_.size() > v.size()) { + while (v.size() < max_peers && v.size() < peers_.size() - bad_peers_.size()) { auto P = peers_.get_random(); if (P->get_version() + 3600 < t) { VLOG(OVERLAY_INFO) << this << ": deleting outdated peer " << P->get_id(); del_peer(P->get_id()); - } else { + } else if (P->is_alive()) { bool dup = false; for (auto &n : v) { if (n == P->get_id()) { dup = true; - continue; + break; } } if (!dup) { diff --git a/overlay/overlay.cpp b/overlay/overlay.cpp index 3e10daec..f0bd3b9c 100644 --- a/overlay/overlay.cpp +++ b/overlay/overlay.cpp @@ -38,10 +38,10 @@ td::actor::ActorOwn Overlay::create(td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope, bool is_external) { + OverlayPrivacyRules rules, td::string scope, bool announce_self) { auto R = td::actor::create_actor("overlay", keyring, adnl, manager, dht_node, local_id, std::move(overlay_id), true, std::vector(), - std::move(callback), std::move(rules), scope, is_external); + std::move(callback), std::move(rules), scope, announce_self); return td::actor::ActorOwn(std::move(R)); } @@ -61,7 +61,7 @@ OverlayImpl::OverlayImpl(td::actor::ActorId keyring, td::actor td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, bool pub, std::vector nodes, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope, bool is_external) + OverlayPrivacyRules rules, td::string scope, bool announce_self) : keyring_(keyring) , adnl_(adnl) , manager_(manager) @@ -72,15 +72,10 @@ OverlayImpl::OverlayImpl(td::actor::ActorId keyring, td::actor , public_(pub) , rules_(std::move(rules)) , scope_(scope) - , is_external_(is_external) { + , announce_self_(announce_self) { overlay_id_ = id_full_.compute_short_id(); - if (is_external_) { - CHECK(public_); - VLOG(OVERLAY_INFO) << this << ": creating public external"; - } else { - VLOG(OVERLAY_INFO) << this << ": creating " << (public_ ? "public" : "private"); - } + VLOG(OVERLAY_INFO) << this << ": creating " << (public_ ? 
"public" : "private"); for (auto &node : nodes) { CHECK(!public_); @@ -139,13 +134,6 @@ void OverlayImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::overlay_getB } void OverlayImpl::receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice data, td::Promise promise) { - if (is_external_) { - LOG(OVERLAY_WARNING) << "dropping query in external overlay " << overlay_id_; - promise.set_error(td::Status::Error("overlay is external")); - td::actor::send_closure(manager_, &Overlays::send_message, src, local_id_, overlay_id_, - create_serialize_tl_object()); - return; - } if (!public_) { auto P = peers_.get(src); if (P == nullptr) { @@ -153,6 +141,8 @@ void OverlayImpl::receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice data, promise.set_error(td::Status::Error(ErrorCode::protoviolation, "overlay is private")); return; } + } else { + on_ping_result(src, true); } auto R = fetch_tl_object(data.clone(), true); @@ -231,24 +221,10 @@ void OverlayImpl::receive_message(adnl::AdnlNodeIdShort src, td::BufferSlice dat VLOG(OVERLAY_WARNING) << this << ": received query in private overlay from unknown source " << src; return; } + } else { + on_ping_result(src, true); } auto X = fetch_tl_object(data.clone(), true); - if (X.is_error()) { - auto Y = fetch_tl_object(data.clone(), true); - if (Y.is_ok() && public_) { - VLOG(OVERLAY_DEBUG) << this << ": received removePeer message from " << src; - if (peers_.exists(src)) { - del_peer(src); - } - callback_->on_remove_peer(src); - return; - } - } - - if (is_external_) { - LOG(OVERLAY_WARNING) << "dropping message in external overlay " << overlay_id_; - return; - } if (X.is_error()) { VLOG(OVERLAY_DEBUG) << this << ": received custom message"; callback_->receive_message(src, overlay_id_, std::move(data)); @@ -349,7 +325,7 @@ void OverlayImpl::receive_dht_nodes(td::Result res, bool dummy) { VLOG(OVERLAY_NOTICE) << this << ": can not get value from DHT: " << res.move_as_error(); } - if (is_external_) { + if (!announce_self_) { return; } @@ -555,9 +531,6 @@ void OverlayImpl::send_new_fec_broadcast_part(PublicKeyHash local_id, Overlay::B } void OverlayImpl::deliver_broadcast(PublicKeyHash source, td::BufferSlice data) { - if (is_external_) { - return; - } callback_->receive_broadcast(source, overlay_id_, std::move(data)); } @@ -630,10 +603,6 @@ void OverlayImpl::set_privacy_rules(OverlayPrivacyRules rules) { } void OverlayImpl::check_broadcast(PublicKeyHash src, td::BufferSlice data, td::Promise promise) { - if (is_external_) { - promise.set_result(td::Unit()); - return; - } callback_->check_broadcast(src, overlay_id_, std::move(data), std::move(promise)); } diff --git a/overlay/overlay.h b/overlay/overlay.h index fcf61ea8..4289142c 100644 --- a/overlay/overlay.h +++ b/overlay/overlay.h @@ -42,7 +42,7 @@ class Overlay : public td::actor::Actor { td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope, bool is_external = false); + OverlayPrivacyRules rules, td::string scope, bool announce_self = true); static td::actor::ActorOwn create(td::actor::ActorId keyring, td::actor::ActorId adnl, td::actor::ActorId manager, diff --git a/overlay/overlay.hpp b/overlay/overlay.hpp index f52de86d..208aba7f 100644 --- a/overlay/overlay.hpp +++ b/overlay/overlay.hpp @@ -79,7 +79,17 @@ class OverlayPeer { td::int32 get_version() const { return node_.version(); } - + void on_ping_result(bool success) { + if (success) { + missed_pings_ = 0; + } else 
{ + ++missed_pings_; + } + } + bool is_alive() const { + return missed_pings_ < 3; + } + td::uint32 throughput_out_bytes = 0; td::uint32 throughput_in_bytes = 0; @@ -105,6 +115,7 @@ class OverlayPeer { adnl::AdnlNodeIdShort id_; bool is_neighbour_ = false; + size_t missed_pings_ = 0; }; class OverlayImpl : public Overlay { @@ -113,7 +124,7 @@ class OverlayImpl : public Overlay { td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, bool pub, std::vector nodes, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope = "{ \"type\": \"undefined\" }", bool is_external = false); + OverlayPrivacyRules rules, td::string scope = "{ \"type\": \"undefined\" }", bool announce_self = true); void update_dht_node(td::actor::ActorId dht) override { dht_node_ = dht; } @@ -138,7 +149,8 @@ class OverlayImpl : public Overlay { alarm_timestamp() = td::Timestamp::in(1); } - void receive_random_peers(adnl::AdnlNodeIdShort src, td::BufferSlice data); + void on_ping_result(adnl::AdnlNodeIdShort peer, bool success); + void receive_random_peers(adnl::AdnlNodeIdShort src, td::Result R); void send_random_peers(adnl::AdnlNodeIdShort dst, td::Promise promise); void send_random_peers_cont(adnl::AdnlNodeIdShort dst, OverlayNode node, td::Promise promise); void get_overlay_random_peers(td::uint32 max_peers, td::Promise> promise) override; @@ -299,7 +311,7 @@ class OverlayImpl : public Overlay { void add_peers(std::vector nodes); void del_some_peers(); void del_peer(adnl::AdnlNodeIdShort id); - OverlayPeer *get_random_peer(); + OverlayPeer *get_random_peer(bool only_alive = false); td::actor::ActorId keyring_; td::actor::ActorId adnl_; @@ -314,6 +326,8 @@ class OverlayImpl : public Overlay { td::Timestamp update_db_at_; td::Timestamp update_throughput_at_; td::Timestamp last_throughput_update_; + std::set bad_peers_; + adnl::AdnlNodeIdShort next_bad_peer_ = adnl::AdnlNodeIdShort::zero(); std::unique_ptr callback_; @@ -371,7 +385,7 @@ class OverlayImpl : public Overlay { bool semi_public_ = false; OverlayPrivacyRules rules_; td::string scope_; - bool is_external_ = false; + bool announce_self_ = true; std::map> certs_; class CachedEncryptor : public td::ListNode { diff --git a/overlay/overlays.h b/overlay/overlays.h index a7b29faf..def80d75 100644 --- a/overlay/overlays.h +++ b/overlay/overlays.h @@ -195,10 +195,11 @@ class Overlays : public td::actor::Actor { virtual void update_dht_node(td::actor::ActorId dht) = 0; virtual void create_public_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, - std::unique_ptr callback, OverlayPrivacyRules rules, td::string scope) = 0; - virtual void create_public_overlay_external(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, - std::unique_ptr callback, OverlayPrivacyRules rules, - td::string scope) = 0; + std::unique_ptr callback, OverlayPrivacyRules rules, + td::string scope) = 0; + virtual void create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, + std::unique_ptr callback, OverlayPrivacyRules rules, + td::string scope, bool announce_self) = 0; virtual void create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::vector nodes, std::unique_ptr callback, OverlayPrivacyRules rules) = 0; diff --git a/rldp-http-proxy/CMakeLists.txt b/rldp-http-proxy/CMakeLists.txt index 7ba66ccb..eefd1dd8 100644 --- a/rldp-http-proxy/CMakeLists.txt +++ b/rldp-http-proxy/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.0.2 
FATAL_ERROR) -add_executable(rldp-http-proxy rldp-http-proxy.cpp DNSResolver.h TonlibClient.h TonlibClient.cpp DNSResolver.cpp) +add_executable(rldp-http-proxy rldp-http-proxy.cpp DNSResolver.h DNSResolver.cpp) target_include_directories(rldp-http-proxy PUBLIC $) target_link_libraries(rldp-http-proxy PRIVATE tonhttp rldp dht tonlib git) diff --git a/rldp-http-proxy/DNSResolver.cpp b/rldp-http-proxy/DNSResolver.cpp index 1fb197a5..1a5705d3 100644 --- a/rldp-http-proxy/DNSResolver.cpp +++ b/rldp-http-proxy/DNSResolver.cpp @@ -30,7 +30,8 @@ static const double CACHE_TIMEOUT_HARD = 300.0; static const double CACHE_TIMEOUT_SOFT = 270.0; -DNSResolver::DNSResolver(td::actor::ActorId tonlib_client) : tonlib_client_(std::move(tonlib_client)) { +DNSResolver::DNSResolver(td::actor::ActorId tonlib_client) + : tonlib_client_(std::move(tonlib_client)) { } void DNSResolver::start_up() { @@ -39,14 +40,15 @@ void DNSResolver::start_up() { void DNSResolver::sync() { auto obj = tonlib_api::make_object(); - auto P = td::PromiseCreator::lambda([SelfId = - actor_id(this)](td::Result> R) { + auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)]( + td::Result> R) { if (R.is_error()) { LOG(WARNING) << "Sync error: " << R.move_as_error(); ton::delay_action([SelfId]() { td::actor::send_closure(SelfId, &DNSResolver::sync); }, td::Timestamp::in(5.0)); } }); - td::actor::send_closure(tonlib_client_, &TonlibClient::send_request, std::move(obj), std::move(P)); + td::actor::send_closure(tonlib_client_, &tonlib::TonlibClientWrapper::send_request, std::move(obj), + std::move(P)); } void DNSResolver::resolve(std::string host, td::Promise promise) { @@ -66,18 +68,13 @@ void DNSResolver::resolve(std::string host, td::Promise(nullptr, host, category, 16); auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), promise = std::move(promise), host = std::move(host)]( - td::Result> R) mutable { + td::Result> R) mutable { if (R.is_error()) { if (promise) { promise.set_result(R.move_as_error()); } } else { - auto v = R.move_as_ok(); - auto obj = dynamic_cast(v.get()); - if (obj == nullptr) { - promise.set_result(td::Status::Error("invalid response from tonlib")); - return; - } + auto obj = R.move_as_ok(); ton::adnl::AdnlNodeIdShort id; td::uint32 cnt = 0; for (auto &e : obj->entries_) { @@ -106,7 +103,8 @@ void DNSResolver::resolve(std::string host, td::Promise, + std::move(obj), std::move(P)); } void DNSResolver::save_to_cache(std::string host, ton::adnl::AdnlNodeIdShort id) { diff --git a/rldp-http-proxy/DNSResolver.h b/rldp-http-proxy/DNSResolver.h index ba52a67e..6b9d31da 100644 --- a/rldp-http-proxy/DNSResolver.h +++ b/rldp-http-proxy/DNSResolver.h @@ -25,13 +25,13 @@ */ #pragma once #include "td/actor/actor.h" -#include "TonlibClient.h" +#include "tonlib/tonlib/TonlibClientWrapper.h" #include "adnl/adnl.h" #include "td/actor/PromiseFuture.h" class DNSResolver : public td::actor::Actor { public: - explicit DNSResolver(td::actor::ActorId tonlib_client); + explicit DNSResolver(td::actor::ActorId tonlib_client); void start_up() override; void resolve(std::string host, td::Promise promise); @@ -40,7 +40,7 @@ class DNSResolver : public td::actor::Actor { void sync(); void save_to_cache(std::string host, ton::adnl::AdnlNodeIdShort id); - td::actor::ActorId tonlib_client_; + td::actor::ActorId tonlib_client_; struct CacheEntry { ton::adnl::AdnlNodeIdShort id_; diff --git a/rldp-http-proxy/rldp-http-proxy.cpp b/rldp-http-proxy/rldp-http-proxy.cpp index a45e6433..9b9e958f 100644 --- a/rldp-http-proxy/rldp-http-proxy.cpp +++ 
b/rldp-http-proxy/rldp-http-proxy.cpp @@ -33,7 +33,6 @@ #include "td/utils/FileLog.h" #include "td/utils/Random.h" #include "td/utils/filesystem.h" -#include "td/utils/overloaded.h" #include "auto/tl/ton_api_json.h" #include "auto/tl/tonlib_api.hpp" @@ -55,7 +54,7 @@ #include "td/utils/BufferedFd.h" #include "common/delay.h" -#include "TonlibClient.h" +#include "tonlib/tonlib/TonlibClientWrapper.h" #include "DNSResolver.h" #if TD_DARWIN || TD_LINUX @@ -134,6 +133,15 @@ td::BufferSlice create_error_response(const std::string &proto_version, int code proto_version, code, reason, std::vector>(), true); } +const std::string PROXY_SITE_VERISON_HEADER_NAME = "Ton-Proxy-Site-Version"; +const std::string PROXY_ENTRY_VERISON_HEADER_NAME = "Ton-Proxy-Entry-Version"; +const std::string PROXY_VERSION_HEADER = PSTRING() << "Commit: " << GitMetadata::CommitSHA1() + << ", Date: " << GitMetadata::CommitDate(); + +using RegisteredPayloadSenderGuard = + std::unique_ptr, td::Bits256>, + std::function, td::Bits256> *)>>; + class HttpRldpPayloadReceiver : public td::actor::Actor { public: HttpRldpPayloadReceiver(std::shared_ptr payload, td::Bits256 transfer_id, @@ -244,10 +252,10 @@ class HttpRldpPayloadReceiver : public td::actor::Actor { private: static constexpr size_t watermark() { - return 1 << 15; + return (1 << 21) - (1 << 11); } static constexpr size_t chunk_size() { - return 1 << 17; + return (1 << 21) - (1 << 11); } std::shared_ptr payload_; @@ -268,12 +276,14 @@ class HttpRldpPayloadSender : public td::actor::Actor { public: HttpRldpPayloadSender(std::shared_ptr payload, td::Bits256 transfer_id, ton::adnl::AdnlNodeIdShort local_id, td::actor::ActorId adnl, - td::actor::ActorId rldp, bool is_tunnel = false) + td::actor::ActorId rldp, td::actor::ActorId proxy, + bool is_tunnel = false) : payload_(std::move(payload)) , id_(transfer_id) , local_id_(local_id) , adnl_(adnl) , rldp_(rldp) + , proxy_(proxy) , is_tunnel_(is_tunnel) { } @@ -289,52 +299,10 @@ class HttpRldpPayloadSender : public td::actor::Actor { return x; } - void start_up() override { - class AdnlCb : public ton::adnl::Adnl::Callback { - public: - AdnlCb(td::actor::ActorId id) : self_id_(id) { - } - void receive_message(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, - td::BufferSlice data) override { - LOG(INFO) << "http payload sender: dropping message"; - } - void receive_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data, - td::Promise promise) override { - td::actor::send_closure(self_id_, &HttpRldpPayloadSender::receive_query, std::move(data), std::move(promise)); - } + void start_up() override; - private: - td::actor::ActorId self_id_; - }; - td::actor::send_closure(adnl_, &ton::adnl::Adnl::subscribe, local_id_, generate_prefix(), - std::make_unique(actor_id(this))); - - class Cb : public ton::http::HttpPayload::Callback { - public: - Cb(td::actor::ActorId id, size_t watermark) : self_id_(id), watermark_(watermark) { - } - void run(size_t ready_bytes) override { - if (!reached_ && ready_bytes >= watermark_) { - reached_ = true; - td::actor::send_closure(self_id_, &HttpRldpPayloadSender::try_answer_query, false); - } else if (reached_ && ready_bytes < watermark_) { - reached_ = false; - } - } - void completed() override { - td::actor::send_closure(self_id_, &HttpRldpPayloadSender::try_answer_query, false); - } - - private: - bool reached_ = false; - td::actor::ActorId self_id_; - size_t watermark_; - }; - - payload_->add_callback( - std::make_unique(actor_id(this), is_tunnel_ ? 
1 : ton::http::HttpRequest::low_watermark())); - - alarm_timestamp() = td::Timestamp::in(is_tunnel_ ? 60.0 : 10.0); + void registered_sender(RegisteredPayloadSenderGuard guard) { + guard_ = std::move(guard); } void try_answer_query(bool from_timer = false) { @@ -391,13 +359,9 @@ class HttpRldpPayloadSender : public td::actor::Actor { try_answer_query(false); } - void receive_query(td::BufferSlice data, td::Promise promise) { - auto F = ton::fetch_tl_object(data, true); - if (F.is_error()) { - LOG(INFO) << "failed to parse query: " << F.move_as_error(); - return; - } - send_data(F.move_as_ok(), std::move(promise)); + void receive_query(ton::tl_object_ptr f, + td::Promise promise) { + send_data(std::move(f), std::move(promise)); } void alarm() override { @@ -429,24 +393,22 @@ class HttpRldpPayloadSender : public td::actor::Actor { stop(); } - void tear_down() override { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::unsubscribe, local_id_, generate_prefix()); - } - private: static constexpr size_t watermark() { - return 1 << 15; + return (1 << 21) - (1 << 11); } std::shared_ptr payload_; td::Bits256 id_; + RegisteredPayloadSenderGuard guard_; td::int32 seqno_ = 0; ton::adnl::AdnlNodeIdShort local_id_; td::actor::ActorId adnl_; td::actor::ActorId rldp_; + td::actor::ActorId proxy_; size_t cur_query_size_; td::Promise cur_query_promise_; @@ -462,7 +424,8 @@ class TcpToRldpRequestSender : public td::actor::Actor { std::shared_ptr request_payload, td::Promise, std::shared_ptr>> promise, td::actor::ActorId adnl, td::actor::ActorId dht, - td::actor::ActorId rldp, td::actor::ActorId dns_resolver) + td::actor::ActorId rldp, td::actor::ActorId proxy, + td::actor::ActorId dns_resolver) : local_id_(local_id) , host_(std::move(host)) , request_(std::move(request)) @@ -471,6 +434,7 @@ class TcpToRldpRequestSender : public td::actor::Actor { , adnl_(adnl) , dht_(dht) , rldp_(rldp) + , proxy_(proxy) , dns_resolver_(dns_resolver) { } void start_up() override { @@ -492,7 +456,7 @@ class TcpToRldpRequestSender : public td::actor::Actor { }); td::actor::create_actor("HttpPayloadSender", request_payload_, id_, local_id_, adnl_, rldp_, - is_tunnel()) + proxy_, is_tunnel()) .release(); auto f = ton::serialize_tl_object(request_->store_tl(id_), true); @@ -523,6 +487,7 @@ class TcpToRldpRequestSender : public td::actor::Actor { } response_->add_header(std::move(h)); } + response_->add_header({PROXY_ENTRY_VERISON_HEADER_NAME, PROXY_VERSION_HEADER}); auto S = response_->complete_parse_header(); if (S.is_error()) { abort_query(S.move_as_error()); @@ -579,6 +544,7 @@ class TcpToRldpRequestSender : public td::actor::Actor { td::actor::ActorId adnl_; td::actor::ActorId dht_; td::actor::ActorId rldp_; + td::actor::ActorId proxy_; td::actor::ActorId dns_resolver_; std::unique_ptr response_; @@ -588,47 +554,28 @@ class TcpToRldpRequestSender : public td::actor::Actor { class RldpTcpTunnel : public td::actor::Actor, private td::ObserverBase { public: RldpTcpTunnel(td::Bits256 transfer_id, ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort local_id, - td::actor::ActorId adnl, td::actor::ActorId rldp, td::SocketFd fd) + td::actor::ActorId adnl, td::actor::ActorId rldp, + td::actor::ActorId proxy, td::SocketFd fd) : id_(transfer_id) , src_(src) , local_id_(local_id) , adnl_(std::move(adnl)) , rldp_(std::move(rldp)) + , proxy_(std::move(proxy)) , fd_(std::move(fd)) { } - void start_up() override { - self_ = actor_id(this); - 
td::actor::SchedulerContext::get()->get_poll().subscribe(fd_.get_poll_info().extract_pollable_fd(this), - td::PollFlags::ReadWrite()); - - class Cb : public ton::adnl::Adnl::Callback { - public: - explicit Cb(td::actor::ActorId id) : self_id_(std::move(id)) { - } - void receive_message(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, - td::BufferSlice data) override { - LOG(INFO) << "rldp tcp tunnel: dropping message"; - } - void receive_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data, - td::Promise promise) override { - td::actor::send_closure(self_id_, &RldpTcpTunnel::receive_query, std::move(data), std::move(promise)); - } - - private: - td::actor::ActorId self_id_; - }; - td::actor::send_closure(adnl_, &ton::adnl::Adnl::subscribe, local_id_, generate_prefix(), - std::make_unique(actor_id(this))); - process(); - } + void start_up() override; void tear_down() override { LOG(INFO) << "RldpTcpTunnel: tear_down"; - td::actor::send_closure(adnl_, &ton::adnl::Adnl::unsubscribe, local_id_, generate_prefix()); td::actor::SchedulerContext::get()->get_poll().unsubscribe(fd_.get_poll_info().get_pollable_fd_ref()); } + void registered_sender(RegisteredPayloadSenderGuard guard) { + guard_ = std::move(guard); + } + void notify() override { td::actor::send_closure(self_, &RldpTcpTunnel::process); } @@ -642,19 +589,14 @@ class RldpTcpTunnel : public td::actor::Actor, private td::ObserverBase { td::actor::send_closure(SelfId, &RldpTcpTunnel::got_data_from_rldp, std::move(R)); }); - auto f = ton::create_serialize_tl_object(id_, out_seqno_++, 1 << 17); + auto f = ton::create_serialize_tl_object(id_, out_seqno_++, + (1 << 21) - (1 << 11)); td::actor::send_closure(rldp_, &ton::rldp::Rldp::send_query_ex, local_id_, src_, "payload part", std::move(P), - td::Timestamp::in(60.0), std::move(f), (1 << 18) + 1024); + td::Timestamp::in(60.0), std::move(f), (1 << 21) + 1024); } - void receive_query(td::BufferSlice data, td::Promise promise) { - auto F = ton::fetch_tl_object(data, true); - if (F.is_error()) { - LOG(INFO) << "failed to parse query: " << F.error(); - promise.set_error(F.move_as_error()); - return; - } - auto f = F.move_as_ok(); + void receive_query(ton::tl_object_ptr f, + td::Promise promise) { if (cur_promise_) { LOG(INFO) << "failed to process query: previous query is active"; promise.set_error(td::Status::Error("previous query is active")); @@ -772,11 +714,13 @@ class RldpTcpTunnel : public td::actor::Actor, private td::ObserverBase { } td::Bits256 id_; + RegisteredPayloadSenderGuard guard_; ton::adnl::AdnlNodeIdShort src_; ton::adnl::AdnlNodeIdShort local_id_; td::actor::ActorId adnl_; td::actor::ActorId rldp_; + td::actor::ActorId proxy_; td::BufferedFd fd_; @@ -795,7 +739,7 @@ class RldpToTcpRequestSender : public td::actor::Actor { std::unique_ptr request, std::shared_ptr request_payload, td::Promise promise, td::actor::ActorId adnl, td::actor::ActorId rldp, - td::actor::ActorId remote) + td::actor::ActorId proxy, td::actor::ActorId remote) : id_(id) , local_id_(local_id) , dst_(dst) @@ -805,6 +749,7 @@ class RldpToTcpRequestSender : public td::actor::Actor { , promise_(std::move(promise)) , adnl_(adnl) , rldp_(rldp) + , proxy_(proxy) , remote_(std::move(remote)) { } void start_up() override { @@ -826,8 +771,9 @@ class RldpToTcpRequestSender : public td::actor::Actor { void got_result(std::pair, std::shared_ptr> R) { td::actor::create_actor("HttpPayloadSender(R)", std::move(R.second), id_, local_id_, adnl_, - rldp_) + rldp_, proxy_) 
.release(); + R.first->add_header({PROXY_SITE_VERISON_HEADER_NAME, PROXY_VERSION_HEADER}); auto f = ton::serialize_tl_object(R.first->store_tl(), true); promise_.set_value(std::move(f)); stop(); @@ -853,6 +799,7 @@ class RldpToTcpRequestSender : public td::actor::Actor { td::actor::ActorId adnl_; td::actor::ActorId rldp_; + td::actor::ActorId proxy_; td::actor::ActorId remote_; }; @@ -972,7 +919,7 @@ class RldpHttpProxy : public td::actor::Actor { auto tonlib_options = tonlib_api::make_object( tonlib_api::make_object(conf_dataR.move_as_ok().as_slice().str(), "", false, false), tonlib_api::make_object()); - tonlib_client_ = td::actor::create_actor("tonlibclient", std::move(tonlib_options)); + tonlib_client_ = td::actor::create_actor("tonlibclient", std::move(tonlib_options)); dns_resolver_ = td::actor::create_actor("dnsresolver", tonlib_client_.get()); } @@ -1067,6 +1014,22 @@ class RldpHttpProxy : public td::actor::Actor { server_ = ton::http::HttpServer::create(port_, std::make_shared(actor_id(this))); } + class AdnlPayloadCb : public ton::adnl::Adnl::Callback { + public: + AdnlPayloadCb(td::actor::ActorId id) : self_id_(id) { + } + void receive_message(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, + td::BufferSlice data) override { + } + void receive_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data, + td::Promise promise) override { + td::actor::send_closure(self_id_, &RldpHttpProxy::receive_payload_part_request, std::move(data), + std::move(promise)); + } + + private: + td::actor::ActorId self_id_; + }; for (auto &serv_id : server_ids_) { class AdnlCb : public ton::adnl::Adnl::Callback { public: @@ -1087,7 +1050,15 @@ class RldpHttpProxy : public td::actor::Actor { td::actor::send_closure(adnl_, &ton::adnl::Adnl::subscribe, serv_id, ton::adnl::Adnl::int_to_bytestring(ton::ton_api::http_request::ID), std::make_unique(actor_id(this))); + if (local_id_ != serv_id) { + td::actor::send_closure(adnl_, &ton::adnl::Adnl::subscribe, serv_id, + ton::adnl::Adnl::int_to_bytestring(ton::ton_api::http_getNextPayloadPart::ID), + std::make_unique(actor_id(this))); + } } + td::actor::send_closure(adnl_, &ton::adnl::Adnl::subscribe, local_id_, + ton::adnl::Adnl::int_to_bytestring(ton::ton_api::http_getNextPayloadPart::ID), + std::make_unique(actor_id(this))); rldp_ = ton::rldp::Rldp::create(adnl_.get()); td::actor::send_closure(rldp_, &ton::rldp::Rldp::set_default_mtu, 16 << 10); @@ -1141,7 +1112,7 @@ class RldpHttpProxy : public td::actor::Actor { td::actor::create_actor("outboundreq", local_id_, host, std::move(request), std::move(payload), std::move(promise), adnl_.get(), dht_.get(), - rldp_.get(), dns_resolver_.get()) + rldp_.get(), actor_id(this), dns_resolver_.get()) .release(); } @@ -1237,7 +1208,7 @@ class RldpHttpProxy : public td::actor::Actor { LOG(INFO) << "starting HTTP over RLDP request"; td::actor::create_actor("inboundreq", f->id_, dst, src, std::move(request), payload.move_as_ok(), std::move(promise), adnl_.get(), rldp_.get(), - server.http_remote_.get()) + actor_id(this), server.http_remote_.get()) .release(); } @@ -1249,10 +1220,52 @@ class RldpHttpProxy : public td::actor::Actor { return; } td::actor::create_actor(td::actor::ActorOptions().with_name("tunnel").with_poll(), id, src, local_id, - adnl_.get(), rldp_.get(), fd.move_as_ok()).release(); + adnl_.get(), rldp_.get(), actor_id(this), fd.move_as_ok()) + .release(); + std::vector> headers; + headers.push_back( + ton::create_tl_object(PROXY_SITE_VERISON_HEADER_NAME, 
PROXY_VERSION_HEADER)); promise.set_result(ton::create_serialize_tl_object( - http_version, 200, "Connection Established", std::vector>(), - false)); + http_version, 200, "Connection Established", std::move(headers), false)); + } + + void receive_payload_part_request(td::BufferSlice data, td::Promise promise) { + auto F = ton::fetch_tl_object(data, true); + if (F.is_error()) { + LOG(INFO) << "failed to parse query: " << F.error(); + promise.set_error(F.move_as_error()); + return; + } + auto f = F.move_as_ok(); + auto it = payload_senders_.find(f->id_); + if (it == payload_senders_.end()) { + LOG(INFO) << "failed to answer query: unknown request id"; + promise.set_error(td::Status::Error("unknown request id")); + return; + } + it->second(std::move(f), std::move(promise)); + } + + void register_payload_sender( + td::Bits256 id, + std::function, td::Promise)> f, + td::Promise promise) { + auto &f1 = payload_senders_[id]; + if (f1) { + promise.set_error(td::Status::Error("duplicate id")); + return; + } + f1 = std::move(f); + promise.set_result(RegisteredPayloadSenderGuard( + new std::pair, td::Bits256>(actor_id(this), id), + [](std::pair, td::Bits256> *x) { + td::actor::send_closure(x->first, &RldpHttpProxy::unregister_payload_sender, x->second); + delete x; + })); + } + + void unregister_payload_sender(td::Bits256 id) { + payload_senders_.erase(id); } void add_adnl_addr(ton::adnl::AdnlNodeIdShort id) { @@ -1302,8 +1315,12 @@ class RldpHttpProxy : public td::actor::Actor { std::string db_root_ = "."; bool proxy_all_ = false; - td::actor::ActorOwn tonlib_client_; + td::actor::ActorOwn tonlib_client_; td::actor::ActorOwn dns_resolver_; + + std::map, td::Promise)>> + payload_senders_; }; void TcpToRldpRequestSender::resolve() { @@ -1329,6 +1346,67 @@ void TcpToRldpRequestSender::resolve() { td::actor::send_closure(dns_resolver_, &DNSResolver::resolve, host_, std::move(P)); } +void HttpRldpPayloadSender::start_up() { + td::actor::send_closure( + proxy_, &RldpHttpProxy::register_payload_sender, id_, + [SelfId = actor_id(this)](ton::tl_object_ptr f, + td::Promise promise) { + td::actor::send_closure(SelfId, &HttpRldpPayloadSender::receive_query, std::move(f), std::move(promise)); + }, + [SelfId = actor_id(this)](td::Result R) { + if (R.is_error()) { + LOG(INFO) << "Failed to register request sender: " << R.move_as_error(); + } + td::actor::send_closure(SelfId, &HttpRldpPayloadSender::registered_sender, R.move_as_ok()); + }); + + class Cb : public ton::http::HttpPayload::Callback { + public: + Cb(td::actor::ActorId id, size_t watermark) : self_id_(id), watermark_(watermark) { + } + void run(size_t ready_bytes) override { + if (!reached_ && ready_bytes >= watermark_) { + reached_ = true; + td::actor::send_closure(self_id_, &HttpRldpPayloadSender::try_answer_query, false); + } else if (reached_ && ready_bytes < watermark_) { + reached_ = false; + } + } + void completed() override { + td::actor::send_closure(self_id_, &HttpRldpPayloadSender::try_answer_query, false); + } + + private: + bool reached_ = false; + td::actor::ActorId self_id_; + size_t watermark_; + }; + + payload_->add_callback( + std::make_unique(actor_id(this), is_tunnel_ ? 1 : ton::http::HttpRequest::low_watermark())); + + alarm_timestamp() = td::Timestamp::in(is_tunnel_ ? 
60.0 : 10.0); +} + +void RldpTcpTunnel::start_up() { + self_ = actor_id(this); + td::actor::SchedulerContext::get()->get_poll().subscribe(fd_.get_poll_info().extract_pollable_fd(this), + td::PollFlags::ReadWrite()); + td::actor::send_closure( + proxy_, &RldpHttpProxy::register_payload_sender, id_, + [SelfId = actor_id(this)](ton::tl_object_ptr f, + td::Promise promise) { + td::actor::send_closure(SelfId, &RldpTcpTunnel::receive_query, std::move(f), std::move(promise)); + }, + [SelfId = actor_id(this)](td::Result R) { + if (R.is_error()) { + LOG(INFO) << "Failed to register request sender: " << R.move_as_error(); + } + td::actor::send_closure(SelfId, &RldpTcpTunnel::registered_sender, R.move_as_ok()); + }); + process(); +} + int main(int argc, char *argv[]) { SET_VERBOSITY_LEVEL(verbosity_WARNING); @@ -1340,7 +1418,7 @@ int main(int argc, char *argv[]) { td::log_interface = td::default_log_interface; }; - auto add_local_host = [&](const std::string& local, const std::string& remote) -> td::Status { + auto add_local_host = [&](const std::string &local, const std::string &remote) -> td::Status { std::string host; std::vector ports; auto p = local.find(':'); @@ -1357,7 +1435,7 @@ int main(int argc, char *argv[]) { } try { ports.push_back((td::uint16)std::stoul(local.substr(p, p2 - p))); - } catch (const std::logic_error& e) { + } catch (const std::logic_error &e) { return td::Status::Error(PSLICE() << "Invalid port: " << local.substr(p, p2 - p)); } p = p2 + 1; @@ -1427,9 +1505,7 @@ int main(int argc, char *argv[]) { p.add_checked_option('L', "local", ":, hostname that will be proxied to localhost\n" " is a comma-separated list of ports (may be omitted, default: 80, 443)\n", - [&](td::Slice arg) -> td::Status { - return add_local_host(arg.str(), "127.0.0.1"); - }); + [&](td::Slice arg) -> td::Status { return add_local_host(arg.str(), "127.0.0.1"); }); p.add_option('D', "db", "db root", [&](td::Slice arg) { td::actor::send_closure(x, &RldpHttpProxy::set_db_root, arg.str()); }); p.add_checked_option( diff --git a/rldp/rldp-in.hpp b/rldp/rldp-in.hpp index 8d420738..2073b59c 100644 --- a/rldp/rldp-in.hpp +++ b/rldp/rldp-in.hpp @@ -100,7 +100,7 @@ class RldpIn : public RldpImpl { void receive_message(adnl::AdnlNodeIdShort source, adnl::AdnlNodeIdShort local_id, TransferId transfer_id, td::BufferSlice data); - void in_transfer_completed(TransferId transfer_id); + void in_transfer_completed(TransferId transfer_id, bool success); void add_id(adnl::AdnlNodeIdShort local_id) override; void get_conn_ip_str(adnl::AdnlNodeIdShort l_id, adnl::AdnlNodeIdShort p_id, td::Promise promise) override; diff --git a/rldp/rldp.cpp b/rldp/rldp.cpp index 1834aac3..3c9e7b27 100644 --- a/rldp/rldp.cpp +++ b/rldp/rldp.cpp @@ -141,11 +141,11 @@ void RldpIn::process_message_part(adnl::AdnlNodeIdShort source, adnl::AdnlNodeId } auto P = td::PromiseCreator::lambda( [SelfId = actor_id(this), source, local_id, transfer_id = part.transfer_id_](td::Result R) { + td::actor::send_closure(SelfId, &RldpIn::in_transfer_completed, transfer_id, R.is_ok()); if (R.is_error()) { VLOG(RLDP_INFO) << "failed to receive: " << R.move_as_error(); return; } - td::actor::send_closure(SelfId, &RldpIn::in_transfer_completed, transfer_id); td::actor::send_closure(SelfId, &RldpIn::receive_message, source, local_id, transfer_id, R.move_as_ok()); }); @@ -250,8 +250,9 @@ void RldpIn::transfer_completed(TransferId transfer_id) { VLOG(RLDP_DEBUG) << "rldp: completed transfer " << transfer_id << "; " << senders_.size() << " out transfer pending "; } -void 
RldpIn::in_transfer_completed(TransferId transfer_id) { - if (lru_set_.count(transfer_id) == 1) { +void RldpIn::in_transfer_completed(TransferId transfer_id, bool success) { + receivers_.erase(transfer_id); + if (!success || lru_set_.count(transfer_id) == 1) { return; } while (lru_size_ >= lru_size()) { diff --git a/rldp2/BdwStats.cpp b/rldp2/BdwStats.cpp index 41221ec4..2417368f 100644 --- a/rldp2/BdwStats.cpp +++ b/rldp2/BdwStats.cpp @@ -18,6 +18,7 @@ */ #include "BdwStats.h" +#include "rldp.hpp" namespace ton { namespace rldp2 { @@ -39,7 +40,7 @@ void BdwStats::on_packet_ack(const PacketInfo &info, td::Timestamp sent_at, td:: auto ack_passed = now.at() - info.delivered_now.at(); auto passed = td::max(sent_passed, ack_passed); if (passed < 0.01) { - LOG(ERROR) << "Invalid passed " << passed; + VLOG(RLDP_WARNING) << "Invalid passed " << passed; } auto delivered = delivered_count - info.delivered_count; on_rate_sample((double)delivered / passed, now, info.is_paused); diff --git a/rldp2/RldpConnection.cpp b/rldp2/RldpConnection.cpp index e6ba2cfd..c6f96728 100644 --- a/rldp2/RldpConnection.cpp +++ b/rldp2/RldpConnection.cpp @@ -18,6 +18,7 @@ */ #include "RldpConnection.h" +#include "rldp.hpp" #include "td/utils/overloaded.h" #include "td/utils/Random.h" @@ -83,7 +84,7 @@ td::Timestamp RldpConnection::loop_limits(td::Timestamp now) { outbound_transfers_.erase(it); to_on_sent_.emplace_back(limit->transfer_id, std::move(error)); } else { - LOG(ERROR) << "Timeout on unknown transfer " << limit->transfer_id.to_hex(); + VLOG(RLDP_WARNING) << "Timeout on unknown transfer " << limit->transfer_id.to_hex(); } } limits_set_.erase(*limit); @@ -113,7 +114,7 @@ void RldpConnection::send(TransferId transfer_id, td::BufferSlice data, td::Time td::Random::secure_bytes(transfer_id.as_slice()); } else { if (outbound_transfers_.find(transfer_id) != outbound_transfers_.end()) { - LOG(WARNING) << "Skip resend of " << transfer_id.to_hex(); + VLOG(RLDP_WARNING) << "Skip resend of " << transfer_id.to_hex(); return; } } @@ -143,17 +144,6 @@ void RldpConnection::loop_bbr(td::Timestamp now) { double speed = bbr_.get_rate(); td::uint32 congestion_window = bbr_.get_window_size(); - static td::Timestamp next; - //FIXME: remove this UNSAFE debug output - if (next.is_in_past(now)) { - next = td::Timestamp::in(1, now); - if (td::actor::core::ActorExecuteContext::get()->actor().get_actor_info_ptr()->get_name() == "Alice") { - LOG(ERROR) << "speed=" << td::format::as_size((td::int64)speed * 768) << " " - << "cgw=" << td::format::as_size((td::int64)congestion_window * 768) << " " - << "loss=" << loss_stats_.loss * 100 << "%"; - } - } - pacer_.set_speed(speed); congestion_window_ = congestion_window; } @@ -301,7 +291,7 @@ void RldpConnection::receive_raw_obj(ton::ton_api::rldp2_messagePart &part) { max_size = limit_it->max_size; } if (total_size > max_size) { - LOG(INFO) << "Drop too big rldp query " << part.total_size_ << " > " << max_size; + VLOG(RLDP_INFO) << "Drop too big rldp query " << part.total_size_ << " > " << max_size; return; } @@ -324,7 +314,7 @@ void RldpConnection::receive_raw_obj(ton::ton_api::rldp2_messagePart &part) { } return {}; } - if (in_part->receiver.on_received(part.seqno_, td::Timestamp::now())) { + if (in_part->receiver.on_received(part.seqno_ + 1, td::Timestamp::now())) { TRY_STATUS_PREFIX(in_part->decoder->add_symbol({static_cast(part.seqno_), std::move(part.data_)}), td::Status::Error(ErrorCode::protoviolation, "invalid symbol")); if (in_part->decoder->may_try_decode()) { diff --git 
a/rldp2/RldpReceiver.cpp b/rldp2/RldpReceiver.cpp index 169bd551..cf0136c2 100644 --- a/rldp2/RldpReceiver.cpp +++ b/rldp2/RldpReceiver.cpp @@ -33,9 +33,8 @@ void RldpReceiver::on_ack_sent(td::Timestamp now) { //LOG(ERROR) << "RESEND ACK " << cnt_; } cnt_++; - if (cnt_ > 7) { - send_ack_at_ = {}; - } else { + send_ack_at_ = {}; + if (cnt_ <= 7) { send_ack_at_.relax(td::Timestamp::at(now.at() + config_.ack_delay * (1 << cnt_))); } } diff --git a/rldp2/RldpSender.cpp b/rldp2/RldpSender.cpp index 1f2dc26a..00c71956 100644 --- a/rldp2/RldpSender.cpp +++ b/rldp2/RldpSender.cpp @@ -49,7 +49,6 @@ SenderPackets::Update RldpSender::on_ack(const Ack &ack, double ack_delay, td::T //LOG(ERROR) << "ON ACK " << ack.max_seqno << " " << ack.received_mask << " " << ack.received_count; auto update = packets_.on_ack(ack); if (!update.was_max_updated) { - CHECK(!update.new_received); return update; } diff --git a/rldp2/RttStats.cpp b/rldp2/RttStats.cpp index 8269cfe4..2c3e750d 100644 --- a/rldp2/RttStats.cpp +++ b/rldp2/RttStats.cpp @@ -18,17 +18,18 @@ */ #include "RttStats.h" +#include "rldp.hpp" #include namespace ton { namespace rldp2 { void RttStats::on_rtt_sample(double rtt_sample, double ack_delay, td::Timestamp now) { if (rtt_sample < 0.001 || rtt_sample > 10) { - LOG(WARNING) << "Suspicious rtt sample " << rtt_sample; + VLOG(RLDP_WARNING) << "Suspicious rtt sample " << rtt_sample; return; } if (ack_delay < -1e-9 || ack_delay > 10) { - LOG(WARNING) << "Suspicious ack_delay " << ack_delay; + VLOG(RLDP_WARNING) << "Suspicious ack_delay " << ack_delay; return; } rtt_sample = td::max(0.01, rtt_sample); diff --git a/storage/Bitset.h b/storage/Bitset.h index 55380742..2c88bc6a 100644 --- a/storage/Bitset.h +++ b/storage/Bitset.h @@ -53,13 +53,28 @@ struct Bitset { } auto mask = 1 << bit_i; if ((bits_[i] & mask) == 0) { - bits_[i] |= mask; + bits_[i] |= (char)mask; count_++; return true; } return false; } + bool set_zero(size_t offset) { + auto i = offset / 8; + if (i >= bits_.size()) { + return false; + } + auto bit_i = offset % 8; + auto mask = 1 << bit_i; + if (bits_[i] & mask) { + bits_[i] &= (char)~mask; + count_--; + return true; + } + return false; + } + size_t ones_count() const { return count_; } diff --git a/storage/CMakeLists.txt b/storage/CMakeLists.txt index 0e5cf085..a53a42b8 100644 --- a/storage/CMakeLists.txt +++ b/storage/CMakeLists.txt @@ -17,6 +17,7 @@ set(STORAGE_SOURCE TorrentInfo.cpp TorrentMeta.cpp + db.h Bitset.h LoadSpeed.h MerkleTree.h @@ -24,32 +25,33 @@ set(STORAGE_SOURCE PartsHelper.h PeerActor.h PeerState.h - SharedState.h Torrent.h TorrentCreator.h TorrentHeader.h TorrentInfo.h TorrentMeta.h -) - + PeerManager.h + MicrochunkTree.h MicrochunkTree.cpp) set(STORAGE_CLI_SOURCE storage-cli.cpp ) add_library(storage ${STORAGE_SOURCE}) target_link_libraries(storage tdutils tdactor tddb ton_crypto tl_api ${JEMALLOC_LIBRARIES}) -target_include_directories(storage PUBLIC - $ +target_include_directories(storage PUBLIC + $ ) add_executable(storage-cli ${STORAGE_CLI_SOURCE}) target_link_libraries(storage-cli storage overlay tdutils tdactor adnl tl_api dht - rldp rldp2 catchain validatorsession full-node validator ton_validator validator - fift-lib memprof terminal git ${JEMALLOC_LIBRARIES}) + rldp rldp2 fift-lib memprof terminal git ${JEMALLOC_LIBRARIES}) set(STORAGE_TEST_SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/test/storage.cpp PARENT_SCOPE ) + +add_subdirectory(storage-daemon) + # Do not install it yet #install(TARGETS storage-cli RUNTIME DESTINATION bin) diff --git a/storage/LoadSpeed.cpp 
b/storage/LoadSpeed.cpp index c2fcfcd4..ddba2b85 100644 --- a/storage/LoadSpeed.cpp +++ b/storage/LoadSpeed.cpp @@ -29,7 +29,7 @@ void LoadSpeed::add(std::size_t size, td::Timestamp now) { } double LoadSpeed::speed(td::Timestamp now) const { update(now); - return total_size_ / duration(); + return (double)total_size_ / duration(now); } td::StringBuilder &operator<<(td::StringBuilder &sb, const LoadSpeed &speed) { @@ -37,15 +37,15 @@ td::StringBuilder &operator<<(td::StringBuilder &sb, const LoadSpeed &speed) { } void LoadSpeed::update(td::Timestamp now) const { - while (duration() > 60) { + while (duration(now) > 30) { total_size_ -= events_.front().size; events_.pop(); } } -double LoadSpeed::duration() const { +double LoadSpeed::duration(td::Timestamp now) const { double res = 5; - if (events_.size() > 1) { - res = std::max(res, events_.back().at.at() - events_.front().at.at()); + if (!events_.empty()) { + res = std::max(res, now.at() - events_.front().at.at()); } return res; } diff --git a/storage/LoadSpeed.h b/storage/LoadSpeed.h index d936f541..92d947f7 100644 --- a/storage/LoadSpeed.h +++ b/storage/LoadSpeed.h @@ -26,19 +26,19 @@ namespace ton { class LoadSpeed { public: - void add(std::size_t size, td::Timestamp now); + void add(td::uint64 size, td::Timestamp now = td::Timestamp::now()); double speed(td::Timestamp now = td::Timestamp::now()) const; friend td::StringBuilder &operator<<(td::StringBuilder &sb, const LoadSpeed &speed); private: struct Event { - std::size_t size; + td::uint64 size; td::Timestamp at; }; mutable td::VectorQueue events_; - mutable std::size_t total_size_{0}; + mutable td::uint64 total_size_{0}; - double duration() const; + double duration(td::Timestamp now) const; void update(td::Timestamp now) const; }; } // namespace ton diff --git a/storage/MerkleTree.cpp b/storage/MerkleTree.cpp index d35a1f39..2ba4aeb8 100644 --- a/storage/MerkleTree.cpp +++ b/storage/MerkleTree.cpp @@ -28,152 +28,51 @@ #include "vm/excno.hpp" namespace ton { -static td::Ref unpack_proof(td::Ref root) { +static td::Result> unpack_proof(td::Ref root) { vm::CellSlice cs(vm::NoVm(), root); - CHECK(cs.special_type() == vm::Cell::SpecialType::MerkleProof); + if (cs.special_type() != vm::Cell::SpecialType::MerkleProof) { + return td::Status::Error("Not a merkle proof"); + } return cs.fetch_ref(); } -td::uint32 MerkleTree::get_depth() const { - return log_n_; -} -td::Ref MerkleTree::get_root(size_t depth_limit) const { - if (depth_limit > log_n_ || root_proof_.is_null()) { - return root_proof_; +MerkleTree::MerkleTree(size_t pieces_count, td::Bits256 root_hash) + : pieces_count_(pieces_count), root_hash_(root_hash) { + depth_ = 0; + n_ = 1; + while (n_ < pieces_count_) { + ++depth_; + n_ <<= 1; } - - auto usage_tree = std::make_shared(); - auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1); - auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr()); - do_gen_proof(std::move(usage_cell), unpack_proof(root_proof_), depth_limit); - auto res = vm::MerkleProof::generate(root_raw, usage_tree.get()); - CHECK(res.not_null()); - return res; } -void MerkleTree::do_gen_proof(td::Ref node, td::Ref node_raw, size_t depth_limit) const { - if (depth_limit == 0) { - return; +static td::Ref build_tree(td::Bits256 *hashes, size_t len) { + if (len == 1) { + return vm::CellBuilder().store_bytes(hashes[0].as_slice()).finalize(); } - // check if it is possible to load node without breaking virtualization - vm::CellSlice cs_raw(vm::NoVm(), std::move(node_raw)); - if (cs_raw.is_special()) { 
- return; + td::Ref l = build_tree(hashes, len / 2); + td::Ref r = build_tree(hashes + len / 2, len / 2); + return vm::CellBuilder().store_ref(l).store_ref(r).finalize(); +}; + +MerkleTree::MerkleTree(std::vector hashes) : pieces_count_(hashes.size()) { + depth_ = 0; + n_ = 1; + while (n_ < pieces_count_) { + ++depth_; + n_ <<= 1; + } + hashes.resize(n_, td::Bits256::zero()); + td::Ref root = build_tree(hashes.data(), n_); + root_hash_ = root->get_hash().bits(); + root_proof_ = vm::CellBuilder::create_merkle_proof(std::move(root)); +} + +static td::Status do_validate_proof(td::Ref node, size_t depth) { + if (node->get_depth(0) != depth) { + return td::Status::Error("Depth mismatch"); } vm::CellSlice cs(vm::NoVm(), std::move(node)); - while (cs.have_refs()) { - do_gen_proof(cs.fetch_ref(), cs_raw.fetch_ref(), depth_limit - 1); - } -} - -td::Bits256 MerkleTree::get_root_hash() const { - CHECK(root_hash_); - return root_hash_.value(); -} - -MerkleTree::MerkleTree(size_t chunks_count, td::Bits256 root_hash) { - init_begin(chunks_count); - root_hash_ = root_hash; - init_finish(); -} - -MerkleTree::MerkleTree(size_t chunks_count, td::Ref root_proof) { - init_begin(chunks_count); - root_hash_ = unpack_proof(root_proof)->get_hash(0).as_array(); - root_proof_ = std::move(root_proof); - init_finish(); -} - -MerkleTree::MerkleTree(td::Span chunks) { - init_begin(chunks.size()); - - for (size_t i = 0; i < chunks.size(); i++) { - CHECK(chunks[i].index == i); - init_add_chunk(i, chunks[i].hash.as_slice()); - } - - init_finish(); -} - -void MerkleTree::init_begin(size_t chunks_count) { - log_n_ = 0; - while ((size_t(1) << log_n_) < chunks_count) { - log_n_++; - } - n_ = size_t(1) << log_n_; - total_blocks_ = chunks_count; - mark_.resize(n_ * 2); - proof_.resize(n_ * 2); - - td::UInt256 null{}; - auto cell = vm::CellBuilder().store_bytes(null.as_slice()).finalize(); - for (auto i = chunks_count; i < n_; i++) { - proof_[i + n_] = cell; - } -} - -void MerkleTree::init_add_chunk(size_t index, td::Slice hash) { - CHECK(index < total_blocks_); - CHECK(proof_[index + n_].is_null()); - proof_[index + n_] = vm::CellBuilder().store_bytes(hash).finalize(); -} - -void MerkleTree::init_finish() { - for (size_t i = n_ - 1; i >= 1; i--) { - auto j = i * 2; - if (proof_[j].is_null()) { - continue; - } - if (i + 1 < n_ && proof_[i + 1].not_null() && proof_[j]->get_hash() == proof_[j + 2]->get_hash() && - proof_[j + 1]->get_hash() == proof_[j + 3]->get_hash()) { - // minor optimization for same chunks - proof_[i] = proof_[i + 1]; - } else { - proof_[i] = vm::CellBuilder().store_ref(proof_[j]).store_ref(proof_[j + 1]).finalize(); - } - } - if (proof_[1].not_null()) { - init_proof(); - } - CHECK(root_hash_); -} - -void MerkleTree::remove_chunk(std::size_t index) { - CHECK(index < n_); - index += n_; - while (proof_[index].not_null()) { - proof_[index] = {}; - index /= 2; - } -} - -bool MerkleTree::has_chunk(std::size_t index) const { - CHECK(index < n_); - index += n_; - return proof_[index].not_null(); -} - -void MerkleTree::add_chunk(std::size_t index, td::Slice hash) { - CHECK(hash.size() == 32); - CHECK(index < n_); - index += n_; - auto cell = vm::CellBuilder().store_bytes(hash).finalize(); - CHECK(proof_[index].is_null()); - proof_[index] = std::move(cell); - mark_[index] = mark_id_; - for (index /= 2; index != 0; index /= 2) { - CHECK(proof_[index].is_null()); - auto &left = proof_[index * 2]; - auto &right = proof_[index * 2 + 1]; - if (left.not_null() && right.not_null()) { - proof_[index] = 
vm::CellBuilder().store_ref(left).store_ref(right).finalize(); - mark_[index] = mark_id_; - } - } -} - -static td::Status do_validate(td::Ref ref, size_t depth) { - vm::CellSlice cs(vm::NoVm(), std::move(ref)); if (cs.is_special()) { if (cs.special_type() != vm::Cell::SpecialType::PrunnedBranch) { return td::Status::Error("Unexpected special cell"); @@ -194,154 +93,65 @@ static td::Status do_validate(td::Ref ref, size_t depth) { if (cs.size_refs() != 2) { return td::Status::Error("Node in proof must have two refs"); } - TRY_STATUS(do_validate(cs.fetch_ref(), depth - 1)); - TRY_STATUS(do_validate(cs.fetch_ref(), depth - 1)); + TRY_STATUS(do_validate_proof(cs.fetch_ref(), depth - 1)); + TRY_STATUS(do_validate_proof(cs.fetch_ref(), depth - 1)); } return td::Status::OK(); } -td::Status MerkleTree::validate_proof(td::Ref new_root) { - // 1. depth <= log_n - // 2. each non special node has two refs and nothing else - // 3. each list contains only hash - // 4. all special nodes are merkle proofs - vm::CellSlice cs(vm::NoVm(), new_root); - if (cs.special_type() != vm::Cell::SpecialType::MerkleProof) { - return td::Status::Error("Proof must be a mekle proof cell"); +td::Status MerkleTree::add_proof(td::Ref proof) { + if (proof.is_null()) { + return td::Status::OK(); } - auto root = cs.fetch_ref(); - if (root_hash_ && root->get_hash(0).as_slice() != root_hash_.value().as_slice()) { - return td::Status::Error("Proof has invalid root hash"); + TRY_RESULT(proof_raw, unpack_proof(proof)); + if (root_hash_ != proof_raw->get_hash(0).bits()) { + return td::Status::Error("Root hash mismatch"); } - return do_validate(std::move(root), log_n_); -} - -td::Status MerkleTree::add_proof(td::Ref new_root) { - CHECK(root_proof_.not_null() || root_hash_); - TRY_STATUS(validate_proof(new_root)); - if (root_proof_.not_null()) { - auto combined = vm::MerkleProof::combine_fast(root_proof_, std::move(new_root)); + TRY_STATUS(do_validate_proof(proof_raw, depth_)); + if (root_proof_.is_null()) { + root_proof_ = std::move(proof); + } else { + auto combined = vm::MerkleProof::combine_fast(root_proof_, std::move(proof)); if (combined.is_null()) { return td::Status::Error("Can't combine proofs"); } root_proof_ = std::move(combined); - } else { - root_proof_ = std::move(new_root); } return td::Status::OK(); } -td::Status MerkleTree::validate_existing_chunk(const Chunk &chunk) { - vm::CellSlice cs(vm::NoVm(), proof_[chunk.index + n_]); - CHECK(cs.size() == chunk.hash.size()); - if (cs.as_bitslice().compare(chunk.hash.cbits()) != 0) { - return td::Status::Error("Hash mismatch"); +td::Result MerkleTree::get_piece_hash(size_t idx) const { + if (idx >= n_) { + return td::Status::Error("Index is too big"); } - return td::Status::OK(); -} - -td::Status MerkleTree::try_add_chunks(td::Span chunks) { - td::Bitset bitmask; - add_chunks(chunks, bitmask); - for (size_t i = 0; i < chunks.size(); i++) { - if (!bitmask.get(i)) { - return td::Status::Error(PSLICE() << "Invalid chunk #" << chunks[i].index); - } - } - return td::Status::OK(); -} - -void MerkleTree::add_chunks(td::Span chunks, td::Bitset &bitmask) { if (root_proof_.is_null()) { - return; + return td::Status::Error("Hash is not known"); } - - mark_id_++; - bitmask.reserve(chunks.size()); - for (size_t i = 0; i < chunks.size(); i++) { - const auto &chunk = chunks[i]; - if (has_chunk(chunk.index)) { - if (validate_existing_chunk(chunk).is_ok()) { - bitmask.set_one(i); - } - continue; + size_t l = 0, r = n_ - 1; + td::Ref node = unpack_proof(root_proof_).move_as_ok(); + while (true) { 
+ vm::CellSlice cs(vm::NoVm(), std::move(node)); + if (cs.is_special()) { + return td::Status::Error("Hash is not known"); } - add_chunk(chunk.index, chunk.hash.as_slice()); - } - - root_proof_ = vm::CellBuilder::create_merkle_proof(merge(unpack_proof(root_proof_), 1)); - - for (size_t i = 0; i < chunks.size(); i++) { - const auto &chunk = chunks[i]; - if (has_chunk(chunk.index) && mark_[chunk.index + n_] == mark_id_) { - bitmask.set_one(i); + if (l == r) { + td::Bits256 hash; + CHECK(cs.fetch_bits_to(hash.bits(), 256)); + return hash; } - } -} - -td::Ref MerkleTree::merge(td::Ref root, size_t index) { - const auto &down = proof_[index]; - if (down.not_null()) { - if (down->get_hash() != root->get_hash(0)) { - proof_[index] = {}; + CHECK(cs.size_refs() == 2); + size_t mid = (l + r) / 2; + if (idx <= mid) { + node = cs.prefetch_ref(0); + r = mid; } else { - return down; + node = cs.prefetch_ref(1); + l = mid + 1; } } - - if (mark_[index] != mark_id_ || index >= n_) { - return root; - } - - vm::CellSlice cs(vm::NoVm(), root); - if (cs.is_special()) { - cleanup_add(index); - return root; - } - - CHECK(cs.size_refs() == 2); - vm::CellBuilder cb; - cb.store_bits(cs.fetch_bits(cs.size())); - auto left = merge(cs.fetch_ref(), index * 2); - auto right = merge(cs.fetch_ref(), index * 2 + 1); - cb.store_ref(std::move(left)).store_ref(std::move(right)); - return cb.finalize(); } -void MerkleTree::cleanup_add(size_t index) { - if (mark_[index] != mark_id_) { - return; - } - proof_[index] = {}; - if (index >= n_) { - return; - } - cleanup_add(index * 2); - cleanup_add(index * 2 + 1); -} - -void MerkleTree::init_proof() { - CHECK(proof_[1].not_null()); - td::Bits256 new_root_hash = proof_[1]->get_hash(0).as_array(); - CHECK(!root_hash_ || root_hash_.value() == new_root_hash); - root_hash_ = new_root_hash; - root_proof_ = vm::CellBuilder::create_merkle_proof(proof_[1]); -} - -td::Result> MerkleTree::gen_proof(size_t l, size_t r) { - if (root_proof_.is_null()) { - return td::Status::Error("got no proofs yet"); - } - auto usage_tree = std::make_shared(); - auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1); - auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr()); - TRY_STATUS(TRY_VM(do_gen_proof(std::move(usage_cell), 0, n_ - 1, l, r))); - auto res = vm::MerkleProof::generate(root_raw, usage_tree.get()); - CHECK(res.not_null()); - return res; -} - -td::Status MerkleTree::do_gen_proof(td::Ref node, size_t il, size_t ir, size_t l, size_t r) const { +static td::Status do_gen_proof(td::Ref node, size_t il, size_t ir, size_t l, size_t r) { if (ir < l || il > r) { return td::Status::OK(); } @@ -358,4 +168,114 @@ td::Status MerkleTree::do_gen_proof(td::Ref node, size_t il, size_t ir TRY_STATUS(do_gen_proof(cs.fetch_ref(), ic + 1, ir, l, r)); return td::Status::OK(); } + +td::Result> MerkleTree::gen_proof(size_t l, size_t r) const { + if (root_proof_.is_null()) { + return td::Status::Error("Got no proofs yet"); + } + auto usage_tree = std::make_shared(); + auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1); + auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr()); + TRY_STATUS(TRY_VM(do_gen_proof(std::move(usage_cell), 0, n_ - 1, l, r))); + auto res = vm::MerkleProof::generate(root_raw, usage_tree.get()); + CHECK(res.not_null()); + return res; +} + +static void do_gen_proof(td::Ref node, td::Ref node_raw, size_t depth_limit) { + if (depth_limit == 0) { + return; + } + // check if it is possible to load node without breaking virtualization + vm::CellSlice 
cs_raw(vm::NoVm(), std::move(node_raw)); + if (cs_raw.is_special()) { + return; + } + vm::CellSlice cs(vm::NoVm(), std::move(node)); + while (cs.have_refs()) { + do_gen_proof(cs.fetch_ref(), cs_raw.fetch_ref(), depth_limit - 1); + } +} + +td::Ref MerkleTree::get_root(size_t depth_limit) const { + if (depth_limit > depth_ || root_proof_.is_null()) { + return root_proof_; + } + auto usage_tree = std::make_shared(); + auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1); + auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr()); + do_gen_proof(std::move(usage_cell), unpack_proof(root_proof_).move_as_ok(), depth_limit); + auto res = vm::MerkleProof::generate(root_raw, usage_tree.get()); + CHECK(res.not_null()); + return res; +} + +static td::Ref build_from_hashes(std::pair *p, std::pair *pend, + size_t len) { + if (len == 1) { + return vm::CellBuilder().store_bytes((p < pend ? p->second : td::Bits256::zero()).as_slice()).finalize(); + } + td::Ref l = build_from_hashes(p, pend, len / 2); + td::Ref r = build_from_hashes(p + len / 2, pend, len / 2); + return vm::CellBuilder().store_ref(l).store_ref(r).finalize(); +} + +td::Ref MerkleTree::do_add_pieces(td::Ref node, std::vector &ok_pieces, size_t il, + size_t ir, std::pair *pl, + std::pair *pr) { + if (pl == pr || il >= pieces_count_) { + return node; + } + vm::CellSlice cs; + if (node.is_null() || (cs = vm::CellSlice(vm::NoVm(), node)).is_special() || il + 1 == ir) { + if ((size_t)(pr - pl) != std::min(ir, pieces_count_) - il) { + return node; + } + td::Ref new_node = build_from_hashes(pl, pr, ir - il); + td::Bits256 new_hash = new_node->get_hash().bits(); + if (new_hash != (node.is_null() ? root_hash_ : node->get_hash(0).bits())) { + return node; + } + for (auto p = pl; p != pr; ++p) { + ok_pieces.push_back(p->first); + } + if (node.is_null() || cs.is_special()) { + node = std::move(new_node); + } + return node; + } + size_t imid = (il + ir) / 2; + auto pmid = pl; + while (pmid != pr && pmid->first < imid) { + ++pmid; + } + td::Ref l = do_add_pieces(cs.prefetch_ref(0), ok_pieces, il, imid, pl, pmid); + td::Ref r = do_add_pieces(cs.prefetch_ref(1), ok_pieces, imid, ir, pmid, pr); + if (l != cs.prefetch_ref(0) || r != cs.prefetch_ref(1)) { + node = vm::CellBuilder().store_ref(l).store_ref(r).finalize(); + } + return node; +} + +std::vector MerkleTree::add_pieces(std::vector> pieces) { + if (pieces.empty()) { + return {}; + } + std::sort(pieces.begin(), pieces.end()); + for (size_t i = 0; i + 1 < pieces.size(); ++i) { + CHECK(pieces[i].first != pieces[i + 1].first); + } + CHECK(pieces.back().first < pieces_count_); + std::vector ok_pieces; + td::Ref root; + if (!root_proof_.is_null()) { + root = unpack_proof(root_proof_).move_as_ok(); + } + root = do_add_pieces(root, ok_pieces, 0, n_, pieces.data(), pieces.data() + pieces.size()); + if (!root.is_null()) { + root_proof_ = vm::CellBuilder::create_merkle_proof(std::move(root)); + } + return ok_pieces; +} + } // namespace ton diff --git a/storage/MerkleTree.h b/storage/MerkleTree.h index 6f7be17d..cf080c40 100644 --- a/storage/MerkleTree.h +++ b/storage/MerkleTree.h @@ -24,6 +24,7 @@ #include "vm/cells.h" #include "Bitset.h" +#include namespace ton { // merkle_node$_ {n:#} left:^(ton::MerkleTree n) right:^(ton::MerkleTree n) = ton::MerkleTree (n + 1); @@ -31,66 +32,33 @@ namespace ton { class MerkleTree { public: - td::uint32 get_depth() const; - td::Ref get_root(size_t depth_limit = std::numeric_limits::max()) const; - td::Bits256 get_root_hash() const; - - MerkleTree(size_t 
chunks_count, td::Bits256 root_hash); - MerkleTree(size_t chunks_count, td::Ref root_proof); - - struct Chunk { - std::size_t index{0}; - td::Bits256 hash; - }; - - explicit MerkleTree(td::Span chunks); - MerkleTree() = default; - void init_begin(size_t chunks_count); - void init_add_chunk(std::size_t index, td::Slice hash); - void init_finish(); + MerkleTree(size_t pieces_count, td::Bits256 root_hash); + explicit MerkleTree(std::vector hashes); - // merge external proof with an existing proof - td::Status add_proof(td::Ref new_root); - // generate proof for all chunks from l to r inclusive - td::Result> gen_proof(size_t l, size_t r); + td::Status add_proof(td::Ref proof); + td::Result get_piece_hash(size_t idx) const; + td::Result> gen_proof(size_t l, size_t r) const; + td::Ref get_root(size_t depth_limit = std::numeric_limits::max()) const; - // Trying to add and validate list of chunks simultaniously - td::Status try_add_chunks(td::Span chunks); + std::vector add_pieces(std::vector> pieces); - // Returns bitmask of successfully added chunks - // Intended to be used during validation of a torrent. - // We got arbitrary chunks read from disk, and we got an arbirary proof. - // Now we can say about some chunks that they are correct. This ia a general way - // to do this. - // - // NB: already added chunks are simply validated. One should be careful - // not to process them twice - void add_chunks(td::Span chunks, td::Bitset &bitmask); + size_t get_depth() const { + return depth_; + } + + td::Bits256 get_root_hash() const { + return root_hash_; + } private: - td::uint64 total_blocks_; - std::size_t n_; // n = 2^log_n - td::uint32 log_n_; - std::size_t mark_id_{0}; - std::vector mark_; // n_ * 2 - std::vector> proof_; // n_ * 2 - - td::optional root_hash_; + size_t pieces_count_{0}; + td::Bits256 root_hash_ = td::Bits256::zero(); + size_t depth_{0}, n_{1}; td::Ref root_proof_; - td::Status validate_proof(td::Ref new_root); - bool has_chunk(std::size_t index) const; - void remove_chunk(std::size_t index); - - void add_chunk(std::size_t index, td::Slice hash); - void init_proof(); - - td::Ref merge(td::Ref root, size_t index); - void cleanup_add(size_t index); - td::Status do_gen_proof(td::Ref node, size_t il, size_t ir, size_t l, size_t r) const; - void do_gen_proof(td::Ref node, td::Ref node_raw, size_t depth_limit) const; - td::Status validate_existing_chunk(const Chunk &chunk); + td::Ref do_add_pieces(td::Ref node, std::vector &ok_pieces, size_t il, size_t ir, + std::pair *pl, std::pair *pr); }; } // namespace ton diff --git a/storage/MicrochunkTree.cpp b/storage/MicrochunkTree.cpp new file mode 100644 index 00000000..3249d25b --- /dev/null +++ b/storage/MicrochunkTree.cpp @@ -0,0 +1,214 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ + +#include "MicrochunkTree.h" +#include "Torrent.h" +#include "vm/cells/CellSlice.h" +#include "vm/cells/MerkleProof.h" + +namespace ton { + +static td::Ref prun(const td::Ref &node) { + vm::CellBuilder cb; + cb.store_long(static_cast(vm::Cell::SpecialType::PrunnedBranch), 8); + cb.store_long(1, 8); + cb.store_bytes(node->get_hash(0).as_slice()); + cb.store_long(node->get_depth(0), 16); + return cb.finalize(true); +} + +MicrochunkTree::Builder::Builder(td::uint64 file_size, td::uint64 prun_size) + : file_size_(file_size), prun_size_(prun_size) { + total_size_ = MICROCHUNK_SIZE; + while (total_size_ < file_size) { + total_size_ *= 2; + } +} + +void MicrochunkTree::Builder::add_data(td::Slice s) { + CHECK(cur_size_ + s.size() <= file_size_); + while (s.size() > 0) { + size_t buf_ptr = cur_size_ % MICROCHUNK_SIZE; + size_t buf_remaining = MICROCHUNK_SIZE - buf_ptr; + if (buf_remaining > s.size()) { + memcpy(cur_microchunk_ + buf_ptr, s.data(), s.size()); + cur_size_ += s.size(); + return; + } + memcpy(cur_microchunk_ + buf_ptr, s.data(), buf_remaining); + cur_size_ += buf_remaining; + s.remove_prefix(buf_remaining); + add_microchunk(td::Slice(cur_microchunk_, MICROCHUNK_SIZE)); + } +} + +MicrochunkTree MicrochunkTree::Builder::finalize() { + CHECK(cur_size_ == file_size_); + if (cur_size_ % MICROCHUNK_SIZE != 0) { + size_t buf_ptr = cur_size_ % MICROCHUNK_SIZE; + size_t buf_remaining = MICROCHUNK_SIZE - buf_ptr; + memset(cur_microchunk_ + buf_ptr, 0, buf_remaining); + cur_size_ += buf_remaining; + add_microchunk(td::Slice(cur_microchunk_, MICROCHUNK_SIZE)); + } + memset(cur_microchunk_, 0, MICROCHUNK_SIZE); + while (cur_size_ < total_size_) { + add_microchunk(td::Slice(cur_microchunk_, MICROCHUNK_SIZE)); + cur_size_ += MICROCHUNK_SIZE; + } + CHECK(proof_.size() == 1); + MicrochunkTree tree(vm::CellBuilder::create_merkle_proof(std::move(proof_[0]))); + CHECK(tree.total_size_ == total_size_); + return tree; +} + +void MicrochunkTree::Builder::add_microchunk(td::Slice s) { + CHECK(s.size() == MICROCHUNK_SIZE); + td::Ref node = vm::CellBuilder().store_zeroes(2).store_bytes(s).finalize_novm(); + while (!proof_.empty() && proof_.back()->get_depth(0) == node->get_depth(0)) { + td::Ref left = std::move(proof_.back()); + proof_.pop_back(); + node = vm::CellBuilder().store_zeroes(2).store_ref(std::move(left)).store_ref(std::move(node)).finalize_novm(); + if ((MICROCHUNK_SIZE << node->get_depth(0)) <= prun_size_) { + node = prun(node); + } + } + proof_.push_back(std::move(node)); +} + +MicrochunkTree::MicrochunkTree(td::Ref root_proof) : root_proof_(root_proof) { + td::Ref virt_root = vm::MerkleProof::virtualize(root_proof_, 1); + CHECK(!virt_root.is_null()); + CHECK(virt_root->get_depth() <= 50); + total_size_ = MICROCHUNK_SIZE << virt_root->get_depth(); + root_hash_ = virt_root->get_hash().bits(); +} + +class GetMicrochunkProof { + public: + GetMicrochunkProof(td::uint64 l, td::uint64 r, Torrent &torrent) : l(l), r(r), torrent(torrent) { + } + + td::Result> unprun(td::uint64 il, td::uint64 ir) { + if (ir - il == MicrochunkTree::MICROCHUNK_SIZE) { + TRY_RESULT(data, get_microchunk(il)); + return vm::CellBuilder().store_zeroes(2).store_bytes(data).finalize_novm(); + } + td::uint64 imid = (il + ir) / 2; + TRY_RESULT(node_l, unprun(il, imid)); + TRY_RESULT(node_r, unprun(imid, ir)); + td::Ref node = + vm::CellBuilder().store_zeroes(2).store_ref(std::move(node_l)).store_ref(std::move(node_r)).finalize_novm(); + if (l >= ir || il >= r) { + node = prun(node); + } + return node; + } + + td::Result> 
unprun(const td::Ref<vm::Cell> &node, td::uint64 il, td::uint64 ir) { + vm::CellSlice cs(vm::NoVm(), node); + if (!cs.is_special()) { + return node; + } + TRY_RESULT(result, unprun(il, ir)); + if (result->get_hash(0) != node->get_hash(0)) { + return td::Status::Error("Hash mismatch"); + } + return result; + } + + td::Result<td::Ref<vm::Cell>> get_proof(td::Ref<vm::Cell> node, td::uint64 il, td::uint64 ir) { + if (l >= ir || il >= r) { + return prun(node); + } + if (ir - il == MicrochunkTree::MICROCHUNK_SIZE) { + return unprun(node, il, ir); + } + if (l <= il && ir <= r) { + return prun(node); + } + td::uint64 imid = (il + ir) / 2; + TRY_RESULT_ASSIGN(node, unprun(node, il, ir)); + vm::CellSlice cs(vm::NoVm(), node); + if (cs.size_ext() != 2 + (2 << 16)) { + return td::Status::Error("Invalid node in microchunk tree"); + } + TRY_RESULT(node_l, get_proof(cs.prefetch_ref(0), il, imid)); + TRY_RESULT(node_r, get_proof(cs.prefetch_ref(1), imid, ir)); + return vm::CellBuilder().store_zeroes(2).store_ref(std::move(node_l)).store_ref(std::move(node_r)).finalize_novm(); + } + + private: + td::uint64 l, r; + Torrent &torrent; + + td::uint64 cache_offset = 0; + std::string cache; + + td::Result<td::Slice> get_microchunk(td::uint64 l) { + DCHECK(l % MicrochunkTree::MICROCHUNK_SIZE == 0); + td::uint64 r = l + MicrochunkTree::MICROCHUNK_SIZE; + if (!(cache_offset <= l && r <= cache_offset + cache.size())) { + td::uint64 piece_size = torrent.get_info().piece_size; + td::uint64 piece_i = l / piece_size; + if (piece_i < torrent.get_info().pieces_count()) { + TRY_RESULT(piece, torrent.get_piece_data(piece_i)); + piece.resize(piece_size, '\0'); + cache = std::move(piece); + } else { + cache = std::string(piece_size, '\0'); + } + cache_offset = piece_i * piece_size; + } + return td::Slice{cache.data() + (l - cache_offset), MicrochunkTree::MICROCHUNK_SIZE}; + } +}; + +td::Result<td::Ref<vm::Cell>> MicrochunkTree::get_proof(td::uint64 l, td::uint64 r, Torrent &torrent) const { + if (root_proof_.is_null()) { + return td::Status::Error("Empty microchunk tree"); + } + if (l % MICROCHUNK_SIZE != 0 || r % MICROCHUNK_SIZE != 0 || l >= r || r > total_size_) { + return td::Status::Error("Invalid range"); + } + if (!torrent.inited_info()) { + return td::Status::Error("Torrent info is not ready"); + } + if (torrent.get_info().piece_size % MICROCHUNK_SIZE != 0) { + return td::Status::Error("Invalid piece size in torrent"); + } + td::Ref<vm::Cell> root_raw = vm::CellSlice(vm::NoVm(), root_proof_).prefetch_ref(); + TRY_RESULT(result, GetMicrochunkProof(l, r, torrent).get_proof(std::move(root_raw), 0, total_size_)); + return vm::CellBuilder::create_merkle_proof(std::move(result)); +} + +td::Result<MicrochunkTree> MicrochunkTree::Builder::build_for_torrent(Torrent &torrent, td::uint64 prun_size) { + if (!torrent.inited_info()) { + return td::Status::Error("Torrent info is not available"); + } + const TorrentInfo &info = torrent.get_info(); + Builder builder(info.file_size, prun_size); + td::uint64 pieces_count = info.pieces_count(); + for (td::uint64 i = 0; i < pieces_count; ++i) { + TRY_RESULT(piece, torrent.get_piece_data(i)); + builder.add_data(piece); + } + MicrochunkTree tree = builder.finalize(); + return tree; +} + +} // namespace ton diff --git a/storage/MicrochunkTree.h b/storage/MicrochunkTree.h new file mode 100644 index 00000000..a1661640 --- /dev/null +++ b/storage/MicrochunkTree.h @@ -0,0 +1,74 @@ +/* + This file is part of TON Blockchain Library. 
+ + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ + +#pragma once + +#include "td/utils/optional.h" +#include "td/utils/Slice.h" +#include "vm/cells.h" + +#include "Bitset.h" +#include + +namespace ton { + +class Torrent; + +class MicrochunkTree { + public: + static const size_t MICROCHUNK_SIZE = 64; + + class Builder { + public: + explicit Builder(td::uint64 file_size, td::uint64 prun_size = 1 << 17); + void add_data(td::Slice s); + MicrochunkTree finalize(); + + static td::Result build_for_torrent(Torrent &torrent, td::uint64 prun_size = 1 << 17); + private: + td::uint64 file_size_; + td::uint64 prun_size_; + td::uint64 total_size_; + std::vector> proof_; + unsigned char cur_microchunk_[MICROCHUNK_SIZE]; + td::uint64 cur_size_ = 0; + + void add_microchunk(td::Slice s); + }; + + MicrochunkTree() = default; + MicrochunkTree(td::Ref root_proof); + + td::Result> get_proof(td::uint64 l, td::uint64 r, Torrent &torrent) const; + + td::Ref get_root() const { + return root_proof_; + } + td::Bits256 get_root_hash() const { + return root_hash_; + } + td::uint64 get_total_size() const { + return total_size_; + } + + private: + td::Bits256 root_hash_ = td::Bits256::zero(); + td::uint64 total_size_ = 0; + td::Ref root_proof_ = {}; +}; + +} // namespace ton diff --git a/storage/NodeActor.cpp b/storage/NodeActor.cpp index 434a3d62..cfc3ef18 100644 --- a/storage/NodeActor.cpp +++ b/storage/NodeActor.cpp @@ -20,17 +20,38 @@ #include "NodeActor.h" #include "vm/boc.h" +#include "vm/cellslice.h" #include "td/utils/Enumerator.h" #include "td/utils/tests.h" +#include "td/utils/overloaded.h" +#include "tl-utils/common-utils.hpp" +#include "tl-utils/tl-utils.hpp" +#include "auto/tl/ton_api.hpp" +#include "td/actor/MultiPromise.h" namespace ton { -NodeActor::NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr callback, bool should_download) +NodeActor::NodeActor(PeerId self_id, Torrent torrent, td::unique_ptr callback, + td::unique_ptr node_callback, std::shared_ptr db, bool should_download) : self_id_(self_id) , torrent_(std::move(torrent)) , callback_(std::move(callback)) + , node_callback_(std::move(node_callback)) + , db_(std::move(db)) + , should_download_(should_download) { +} + +NodeActor::NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr callback, + td::unique_ptr node_callback, std::shared_ptr db, bool should_download, + DbInitialData db_initial_data) + : self_id_(self_id) + , torrent_(std::move(torrent)) + , callback_(std::move(callback)) + , node_callback_(std::move(node_callback)) + , db_(std::move(db)) , should_download_(should_download) - , parts_helper_(torrent.get_info().pieces_count()) { + , pending_set_file_priority_(std::move(db_initial_data.priorities)) + , pieces_in_db_(std::move(db_initial_data.pieces_in_db)) { } void NodeActor::start_peer(PeerId peer_id, td::Promise> promise) { @@ -45,25 +66,103 @@ void NodeActor::start_peer(PeerId peer_id, td::Promisesecond); } void 
NodeActor::start_up() { - callback_->register_self(actor_id(this)); + node_callback_->register_self(actor_id(this)); + db_store_torrent(); + if (torrent_.inited_info()) { + init_torrent(); + } + loop(); +} + +void NodeActor::init_torrent() { auto pieces_count = torrent_.get_info().pieces_count(); + parts_helper_.init_parts_count(pieces_count); parts_.parts.resize(pieces_count); auto header = torrent_.get_header_parts_range(); - for (td::uint32 i = static_cast(header.begin); i < header.end; i++) { + for (auto i = static_cast(header.begin); i < header.end; i++) { parts_helper_.set_part_priority(i, 255); } for (td::uint32 i = 0; i < pieces_count; i++) { if (torrent_.is_piece_ready(i)) { - parts_helper_.on_self_part_ready(i); - parts_.parts[i].ready = true; + on_part_ready(i); + } + } + + torrent_info_str_ = + std::make_shared(vm::std_boc_serialize(torrent_.get_info().as_cell()).move_as_ok()); + for (auto &p : peers_) { + auto &state = p.second.state; + state->torrent_info_str_ = torrent_info_str_; + CHECK(!state->torrent_info_ready_.exchange(true)); + } + LOG(INFO) << "Inited torrent info for " << torrent_.get_hash().to_hex() << ": size=" << torrent_.get_info().file_size + << ", pieces=" << torrent_.get_info().pieces_count(); + if (torrent_.inited_header()) { + init_torrent_header(); + } +} + +void NodeActor::init_torrent_header() { + if (header_ready_) { + return; + } + header_ready_ = true; + size_t files_count = torrent_.get_files_count().unwrap(); + for (size_t i = 0; i < files_count; ++i) { + file_name_to_idx_[torrent_.get_file_name(i).str()] = i; + } + db_store_priorities_paused_ = true; + file_priority_.resize(files_count, 1); + for (auto &s : pending_set_file_priority_) { + td::Promise P = [](td::Result) {}; + s.file.visit( + td::overloaded([&](const PendingSetFilePriority::All &) { set_all_files_priority(s.priority, std::move(P)); }, + [&](const size_t &i) { set_file_priority_by_idx(i, s.priority, std::move(P)); }, + [&](const std::string &name) { set_file_priority_by_name(name, s.priority, std::move(P)); })); + } + pending_set_file_priority_.clear(); + torrent_.enable_write_to_files(); + db_store_priorities_paused_ = false; + db_store_priorities(); + + auto pieces = pieces_in_db_; + for (td::uint64 p : pieces) { + if (!torrent_.is_piece_in_memory(p)) { + db_erase_piece(p); + } + } + for (td::uint64 p : torrent_.get_pieces_in_memory()) { + if (!pieces_in_db_.count(p)) { + db_store_piece(p, torrent_.get_piece_data(p).move_as_ok()); + } + } + db_update_pieces_list(); + recheck_parts(Torrent::PartsRange{0, torrent_.get_info().pieces_count()}); + db_store_torrent_meta(); + + LOG(INFO) << "Inited torrent header for " << torrent_.get_hash().to_hex() + << ": files=" << torrent_.get_files_count().value() << ", included_size=" << torrent_.get_included_size(); +} + +void NodeActor::recheck_parts(Torrent::PartsRange range) { + CHECK(torrent_.inited_info()); + for (size_t i = range.begin; i < range.end; ++i) { + if (parts_.parts[i].ready && !torrent_.is_piece_ready(i)) { + parts_helper_.on_self_part_not_ready(i); + parts_.parts[i].ready = false; + } else if (!parts_.parts[i].ready && torrent_.is_piece_ready(i)) { + on_part_ready((PartId)i); } } - loop(); } void NodeActor::loop_will_upload() { @@ -80,12 +179,12 @@ void NodeActor::loop_will_upload() { alarm_timestamp().relax(will_upload_at_); std::vector> peers; for (auto &it : peers_) { - auto state = it.second.state.lock(); + auto &state = it.second.state; bool needed = false; - if (state->peer_state_) { - needed = 
state->peer_state_.value().want_download; + if (state->peer_state_ready_) { + needed = state->peer_state_.load().want_download; } - peers.emplace_back(!needed, !state->node_state_.want_download, -state->download.speed(), it.first); + peers.emplace_back(!needed, !state->node_state_.load().want_download, -it.second.download_speed.speed(), it.first); } std::sort(peers.begin(), peers.end()); @@ -101,9 +200,11 @@ void NodeActor::loop_will_upload() { for (auto &it : peers_) { auto will_upload = peers_set.count(it.first) > 0; - auto state = it.second.state.lock(); - if (state->node_state_.will_upload != will_upload) { - state->node_state_.will_upload = will_upload; + auto &state = it.second.state; + auto node_state = state->node_state_.load(); + if (node_state.will_upload != will_upload) { + node_state.will_upload = will_upload; + state->node_state_.exchange(node_state); state->notify_peer(); } } @@ -112,43 +213,64 @@ void NodeActor::loop_will_upload() { void NodeActor::loop() { loop_get_peers(); loop_start_stop_peers(); - loop_queries(); - loop_will_upload(); + if (torrent_.inited_info()) { + loop_queries(); + loop_will_upload(); + } if (!ready_parts_.empty()) { for (auto &it : peers_) { - auto state = it.second.state.lock(); - state->node_ready_parts_.insert(state->node_ready_parts_.end(), ready_parts_.begin(), ready_parts_.end()); + auto &state = it.second.state; + state->node_ready_parts_.add_elements(ready_parts_); state->notify_peer(); } ready_parts_.clear(); } - if (torrent_.is_completed() && !is_completed_) { - is_completed_ = true; - callback_->on_completed(); + if (next_db_store_meta_at_ && next_db_store_meta_at_.is_in_past()) { + db_store_torrent_meta(); + } + + if (torrent_.get_fatal_error().is_error()) { + for (auto &promise : wait_for_completion_) { + promise.set_error(torrent_.get_fatal_error().clone()); + } + wait_for_completion_.clear(); + } else if (torrent_.is_completed()) { + db_store_torrent_meta(); + if (!is_completed_) { + for (auto &promise : wait_for_completion_) { + promise.set_result(td::Unit()); + } + wait_for_completion_.clear(); + is_completed_ = true; + callback_->on_completed(); + } } } std::string NodeActor::get_stats_str() { td::StringBuilder sb; - sb << "Node " << self_id_ << " " << torrent_.get_ready_parts_count() << "\t" << download_; + sb << "Node " << self_id_ << " " << torrent_.get_ready_parts_count() << "\t" << download_speed_; sb << "\toutq " << parts_.total_queries; sb << "\n"; for (auto &it : peers_) { - auto state = it.second.state.lock(); + auto &state = it.second.state; sb << "\tPeer " << it.first; - sb << "\t" << parts_helper_.get_ready_parts(it.second.peer_token).ones_count(); - sb << "\t" << state->download; - if (state->peer_state_) { - auto &peer_state = state->peer_state_.value(); + if (torrent_.inited_info()) { + sb << "\t" << parts_helper_.get_ready_parts(it.second.peer_token).ones_count(); + } + sb << "\t" << it.second.download_speed; + if (state->peer_state_ready_) { + auto peer_state = state->peer_state_.load(); sb << "\t up:" << peer_state.will_upload; sb << "\tdown:" << peer_state.want_download; - sb << "\tcnt:" << parts_helper_.get_want_download_count(it.second.peer_token); + if (torrent_.inited_info()) { + sb << "\tcnt:" << parts_helper_.get_want_download_count(it.second.peer_token); + } } - sb << "\toutq:" << state->node_queries_.size(); - sb << "\tinq:" << state->peer_queries_.size(); - auto &node_state = state->node_state_; + sb << "\toutq:" << state->node_queries_active_.size(); + auto node_state = state->node_state_.load(); sb << 
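Review note: `loop_will_upload` now ranks peers by a tuple of (does the peer want pieces from us, do we want pieces from it, how fast it serves us) and grants `will_upload` only to the best-ranked ones, reading and writing `node_state_` through the new atomic load/exchange instead of the old `SharedState` lock. A standalone sketch of just the ranking step; the number of upload slots is a parameter here, since the actual constant is outside this hunk.

```cpp
#include <algorithm>
#include <cstdint>
#include <tuple>
#include <vector>

namespace sketch {

struct PeerView {
  uint64_t peer_id;
  bool peer_wants_download;  // peer asked us for pieces
  bool we_want_download;     // we are asking the peer for pieces
  double download_speed;     // bytes/s we receive from this peer
};

// Return the peer ids that should get will_upload == true.
// The sort key mirrors the emplace_back above: (!needed, !want_download, -speed, id).
std::vector<uint64_t> pick_upload_slots(const std::vector<PeerView>& peers, size_t slots) {
  std::vector<std::tuple<bool, bool, double, uint64_t>> order;
  order.reserve(peers.size());
  for (const auto& p : peers) {
    order.emplace_back(!p.peer_wants_download, !p.we_want_download, -p.download_speed, p.peer_id);
  }
  std::sort(order.begin(), order.end());
  std::vector<uint64_t> chosen;
  for (size_t i = 0; i < order.size() && i < slots; ++i) {
    chosen.push_back(std::get<3>(order[i]));
  }
  return chosen;
}

}  // namespace sketch
```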
"\tNup:" << node_state.will_upload; sb << "\tNdown:" << node_state.want_download; sb << "\n"; @@ -170,35 +292,57 @@ std::string NodeActor::get_stats_str() { return sb.as_cslice().str(); } -void NodeActor::set_file_priority(size_t i, td::uint8 priority) { - auto o_files_count = torrent_.get_files_count(); - if (!o_files_count) { +void NodeActor::set_all_files_priority(td::uint8 priority, td::Promise promise) { + if (!header_ready_) { + pending_set_file_priority_.clear(); + pending_set_file_priority_.push_back(PendingSetFilePriority{PendingSetFilePriority::All(), priority}); + db_store_priorities(); + promise.set_result(false); return; } - auto files_count = o_files_count.unwrap(); - if (file_priority_.size() != files_count) { - // by default all parts priority == 1 - file_priority_.resize(files_count, 1); + auto header_range = torrent_.get_header_parts_range(); + for (td::uint32 i = 0; i < torrent_.get_info().pieces_count(); i++) { + if (!header_range.contains(i)) { + parts_helper_.set_part_priority(i, priority); + } } + for (size_t i = 0; i < file_priority_.size(); ++i) { + file_priority_[i] = priority; + torrent_.set_file_excluded(i, priority == 0); + } + recheck_parts(Torrent::PartsRange{0, torrent_.get_info().pieces_count()}); + db_store_priorities(); + update_pieces_in_db(0, torrent_.get_info().pieces_count()); + if (!torrent_.is_completed()) { + is_completed_ = false; + } + promise.set_result(true); + yield(); +} +void NodeActor::set_file_priority_by_idx(size_t i, td::uint8 priority, td::Promise promise) { + if (!header_ready_) { + pending_set_file_priority_.push_back(PendingSetFilePriority{i, priority}); + db_store_priorities(); + promise.set_result(false); + return; + } + auto files_count = torrent_.get_files_count().unwrap(); if (i >= files_count) { - for (td::uint32 part_i = 0; part_i < torrent_.get_info().pieces_count(); part_i++) { - parts_helper_.set_part_priority(part_i, priority); - } - for (auto &p : file_priority_) { - p = priority; - } + promise.set_error(td::Status::Error("File index is too big")); return; } if (file_priority_[i] == priority) { + promise.set_result(true); return; } file_priority_[i] = priority; + torrent_.set_file_excluded(i, priority == 0); auto range = torrent_.get_file_parts_range(i); - td::uint32 begin = static_cast(range.begin); - td::uint32 end = static_cast(range.end); - for (td::uint32 i = begin; i < end; i++) { - if (i == begin || i + 1 == end) { + recheck_parts(range); + update_pieces_in_db(range.begin, range.end); + for (auto i = range.begin; i < range.end; i++) { + if (i == range.begin || i + 1 == range.end) { auto chunks = torrent_.chunks_by_piece(i); td::uint8 max_priority = 0; for (auto chunk_id : chunks) { @@ -213,15 +357,95 @@ void NodeActor::set_file_priority(size_t i, td::uint8 priority) { parts_helper_.set_part_priority(i, priority); } } + db_store_priorities(); + if (!torrent_.is_completed()) { + is_completed_ = false; + } + promise.set_result(true); yield(); } +void NodeActor::set_file_priority_by_name(std::string name, td::uint8 priority, td::Promise promise) { + if (!header_ready_) { + pending_set_file_priority_.push_back(PendingSetFilePriority{name, priority}); + db_store_priorities(); + promise.set_result(false); + return; + } + auto it = file_name_to_idx_.find(name); + if (it == file_name_to_idx_.end()) { + promise.set_error(td::Status::Error("No such file")); + return; + } + set_file_priority_by_idx(it->second, priority, std::move(promise)); +} + +void NodeActor::wait_for_completion(td::Promise promise) { + if 
(torrent_.get_fatal_error().is_error()) { + promise.set_error(torrent_.get_fatal_error().clone()); + } else if (is_completed_) { + promise.set_result(td::Unit()); + } else { + wait_for_completion_.push_back(std::move(promise)); + } +} + void NodeActor::set_should_download(bool should_download) { + if (should_download == should_download_) { + return; + } should_download_ = should_download; + db_store_torrent(); yield(); } +void NodeActor::load_from(td::optional meta, std::string files_path, td::Promise promise) { + auto S = [&]() -> td::Status { + if (meta) { + TorrentInfo &info = meta.value().info; + if (info.get_hash() != torrent_.get_hash()) { + return td::Status::Error("Incorrect hash in meta"); + } + if (!torrent_.inited_info()) { + LOG(INFO) << "Loading torrent info for " << torrent_.get_hash().to_hex(); + TRY_STATUS(torrent_.init_info(std::move(info))); + init_torrent(); + } + auto &header = meta.value().header; + if (header && !torrent_.inited_header()) { + LOG(INFO) << "Loading torrent header for " << torrent_.get_hash().to_hex(); + TRY_STATUS(torrent_.set_header(header.unwrap())); + init_torrent_header(); + } + auto proof = std::move(meta.value().root_proof); + if (!proof.is_null()) { + LOG(INFO) << "Loading proof for " << torrent_.get_hash().to_hex(); + TRY_STATUS(torrent_.add_proof(std::move(proof))); + } + } + TRY_STATUS_PREFIX(torrent_.get_fatal_error().clone(), "Fatal error: "); + if (torrent_.inited_header() && !files_path.empty()) { + torrent_.load_from_files(std::move(files_path)); + } + TRY_STATUS_PREFIX(torrent_.get_fatal_error().clone(), "Fatal error: "); + return td::Status::OK(); + }(); + if (S.is_error()) { + LOG(WARNING) << "Load from failed: " << S; + promise.set_error(std::move(S)); + } else { + promise.set_result(td::Unit()); + } + if (torrent_.inited_header()) { + recheck_parts(Torrent::PartsRange{0, torrent_.get_info().pieces_count()}); + } + loop(); +} + void NodeActor::tear_down() { + for (auto &promise : wait_for_completion_) { + promise.set_error(td::Status::Error("Torrent closed")); + } callback_->on_closed(std::move(torrent_)); } @@ -235,16 +459,24 @@ void NodeActor::loop_start_stop_peers() { } if (peer.actor.empty()) { - LOG(ERROR) << "Init Peer " << self_id_ << " -> " << peer_id; - auto state = peer.state.lock(); - state->node = peer.notifier.get(); - for (td::uint32 i = 0; i < parts_.parts.size(); i++) { - if (parts_.parts[i].ready) { - state->node_ready_parts_.push_back(i); + auto &state = peer.state = std::make_shared(peer.notifier.get()); + if (torrent_.inited_info()) { + std::vector node_ready_parts; + for (td::uint32 i = 0; i < parts_.parts.size(); i++) { + if (parts_.parts[i].ready) { + node_ready_parts.push_back(i); + } } + state->node_ready_parts_.add_elements(std::move(node_ready_parts)); + state->torrent_info_str_ = torrent_info_str_; + state->torrent_info_ready_ = true; + } else { + state->torrent_info_response_callback_ = [SelfId = actor_id(this)](td::BufferSlice data) { + td::actor::send_closure(SelfId, &NodeActor::got_torrent_info_str, std::move(data)); + }; } peer.peer_token = parts_helper_.register_peer(peer_id); - peer.actor = callback_->create_peer(self_id_, peer_id, peer.state); + peer.actor = node_callback_->create_peer(self_id_, peer_id, peer.state); } } } @@ -255,29 +487,31 @@ void NodeActor::loop_queries() { } for (auto &it : peers_) { auto peer_token = it.second.peer_token; - auto state = it.second.state.lock(); - if (!state->peer_state_) { + auto &state = it.second.state; + if (!state->peer_state_ready_) { 
parts_helper_.set_peer_limit(peer_token, 0); continue; } - if (!state->peer_state_.value().will_upload) { + if (!state->peer_state_.load().will_upload) { parts_helper_.set_peer_limit(peer_token, 0); continue; } - parts_helper_.set_peer_limit(peer_token, - td::narrow_cast(MAX_PEER_TOTAL_QUERIES - state->node_queries_.size())); + parts_helper_.set_peer_limit( + peer_token, td::narrow_cast(MAX_PEER_TOTAL_QUERIES - state->node_queries_active_.size())); } auto parts = parts_helper_.get_rarest_parts(MAX_TOTAL_QUERIES); for (auto &part : parts) { auto it = peers_.find(part.peer_id); CHECK(it != peers_.end()); - auto state = it->second.state.lock(); - CHECK(state->peer_state_); - CHECK(state->peer_state_.value().will_upload); - CHECK(state->node_queries_.size() < MAX_PEER_TOTAL_QUERIES); + auto &state = it->second.state; + CHECK(state->peer_state_ready_); + CHECK(state->peer_state_.load().will_upload); + CHECK(state->node_queries_active_.size() < MAX_PEER_TOTAL_QUERIES); auto part_id = part.part_id; - state->node_queries_[static_cast(part_id)]; + if (state->node_queries_active_.insert(static_cast(part_id)).second) { + state->node_queries_.add_element(static_cast(part_id)); + } parts_helper_.lock_part(part_id); parts_.total_queries++; parts_.parts[part_id].query_to_peer = part.peer_id; @@ -290,7 +524,7 @@ void NodeActor::loop_get_peers() { return; } if (next_get_peers_at_.is_in_past()) { - callback_->get_peers(promise_send_closure(td::actor::actor_id(this), &NodeActor::got_peers)); + node_callback_->get_peers(self_id_, promise_send_closure(td::actor::actor_id(this), &NodeActor::got_peers)); has_get_peers_ = true; return; } @@ -315,67 +549,75 @@ void NodeActor::got_peers(td::Result> r_peers) { } void NodeActor::loop_peer(const PeerId &peer_id, Peer &peer) { - auto state = peer.state.lock(); - CHECK(!state->peer.empty()); + auto &state = peer.state; + if (!state->peer_ready_ || !torrent_.inited_info()) { + return; + } - for (auto part_id : state->peer_ready_parts_) { + for (auto part_id : state->peer_ready_parts_.read()) { parts_helper_.on_peer_part_ready(peer.peer_token, part_id); } - state->peer_ready_parts_.clear(); // Answer queries from peer bool should_notify_peer = false; auto want_download = parts_helper_.get_want_download_count(peer.peer_token) > 0; - if (state->node_state_.want_download != want_download) { - state->node_state_.want_download = want_download; + auto node_state = state->node_state_.load(); + if (node_state.want_download != want_download) { + node_state.want_download = want_download; + state->node_state_.exchange(node_state); should_notify_peer = true; } - for (auto it = state->peer_queries_.begin(); it != state->peer_queries_.end();) { - if (it->second) { - it++; - } else { - should_notify_peer = true; - it->second = [&]() -> td::Result { - if (!state->node_state_.will_upload) { - return td::Status::Error("Won't upload"); - } - TRY_RESULT(proof, torrent_.get_piece_proof(it->first)); - TRY_RESULT(data, torrent_.get_piece_data(it->first)); - PeerState::Part res; - TRY_RESULT(proof_serialized, vm::std_boc_serialize(std::move(proof))); - res.proof = std::move(proof_serialized); - res.data = td::BufferSlice(std::move(data)); - return std::move(res); - }(); + std::vector>> results; + for (td::uint32 part_id : state->peer_queries_.read()) { + should_notify_peer = true; + auto res = [&]() -> td::Result { + if (!node_state.will_upload) { + return td::Status::Error("Won't upload"); + } + TRY_RESULT(proof, torrent_.get_piece_proof(part_id)); + TRY_RESULT(data, 
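Review note: `loop_queries` now derives each peer's remaining capacity from `node_queries_active_` (instead of the old query map), caps the per-peer budget at `MAX_PEER_TOTAL_QUERIES`, and lets `PartsHelper` hand back the rarest missing parts until `MAX_TOTAL_QUERIES` are in flight overall. A simplified standalone planner with the same two limits — the real `PartsHelper` additionally randomizes among equally rare parts and honours per-part priorities, which this sketch leaves out.

```cpp
#include <algorithm>
#include <cstdint>
#include <map>
#include <set>
#include <utility>
#include <vector>

namespace sketch {

constexpr size_t MAX_TOTAL_QUERIES = 20;      // same limits as NodeActor.h
constexpr size_t MAX_PEER_TOTAL_QUERIES = 5;

struct Peer {
  uint64_t id;
  std::set<uint32_t> ready_parts;  // parts this peer can serve
  size_t active_queries = 0;       // queries already in flight to this peer
  bool will_upload = false;
};

// Pick (part, peer) pairs: rarest parts first, each assigned to a peer that has
// the part, is willing to upload, and still has per-peer capacity.
std::vector<std::pair<uint32_t, uint64_t>> plan_queries(const std::vector<Peer>& peers,
                                                        const std::set<uint32_t>& missing_parts,
                                                        size_t in_flight_total) {
  std::map<uint32_t, size_t> availability;  // part -> number of peers that have it
  for (const auto& peer : peers) {
    for (uint32_t part : peer.ready_parts) {
      if (missing_parts.count(part)) {
        ++availability[part];
      }
    }
  }
  std::vector<std::pair<size_t, uint32_t>> by_rarity;  // (owner count, part)
  for (const auto& [part, owners] : availability) {
    by_rarity.emplace_back(owners, part);
  }
  std::sort(by_rarity.begin(), by_rarity.end());

  std::map<uint64_t, size_t> extra_load;  // queries planned per peer in this round
  std::vector<std::pair<uint32_t, uint64_t>> plan;
  for (const auto& [owners, part] : by_rarity) {
    if (in_flight_total + plan.size() >= MAX_TOTAL_QUERIES) {
      break;
    }
    for (const auto& peer : peers) {
      size_t load = peer.active_queries + extra_load[peer.id];
      if (peer.will_upload && peer.ready_parts.count(part) && load < MAX_PEER_TOTAL_QUERIES) {
        plan.emplace_back(part, peer.id);
        ++extra_load[peer.id];
        break;
      }
    }
  }
  return plan;
}

}  // namespace sketch
```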
torrent_.get_piece_data(part_id)); + PeerState::Part res; + TRY_RESULT(proof_serialized, vm::std_boc_serialize(std::move(proof))); + res.proof = std::move(proof_serialized); + res.data = td::BufferSlice(std::move(data)); + td::uint64 size = res.data.size() + res.proof.size(); + upload_speed_.add(size); + peer.upload_speed.add(size); + return std::move(res); + }(); + results.emplace_back(part_id, std::move(res)); + } + state->peer_queries_results_.add_elements(std::move(results)); + + // Handle results from peer + for (auto &p : state->node_queries_results_.read()) { + auto part_id = p.first; + if (!state->node_queries_active_.count(part_id)) { + continue; + } + auto r_unit = p.second.move_fmap([&](PeerState::Part part) -> td::Result { + TRY_RESULT(proof, vm::std_boc_deserialize(part.proof)); + TRY_STATUS(torrent_.add_piece(part_id, part.data.as_slice(), std::move(proof))); + update_pieces_in_db(part_id, part_id + 1); + download_speed_.add(part.data.size()); + peer.download_speed.add(part.data.size()); + return td::Unit(); + }); + + parts_.parts[part_id].query_to_peer = {}; + parts_.total_queries--; + state->node_queries_active_.erase(part_id); + parts_helper_.unlock_part(part_id); + + if (r_unit.is_ok()) { + on_part_ready(part_id); } } - // Handle results from peer - for (auto it = state->node_queries_.begin(); it != state->node_queries_.end();) { - if (it->second) { - auto part_id = it->first; - auto r_unit = it->second.unwrap().move_fmap([&](PeerState::Part part) -> td::Result { - TRY_RESULT(proof, vm::std_boc_deserialize(part.proof)); - TRY_STATUS(torrent_.add_piece(part_id, part.data.as_slice(), std::move(proof))); - download_.add(part.data.size(), td::Timestamp::now()); - return td::Unit(); - }); - - parts_.parts[part_id].query_to_peer = {}; - parts_.total_queries--; - it = state->node_queries_.erase(it); - parts_helper_.unlock_part(part_id); - - if (r_unit.is_ok()) { - on_part_ready(part_id); - } else { - //LOG(ERROR) << "Failed " << part_id; - } - } else { - it++; - } + if (!header_ready_ && torrent_.inited_info() && torrent_.inited_header()) { + init_torrent_header(); } if (should_notify_peer) { @@ -391,8 +633,421 @@ void NodeActor::on_part_ready(PartId part_id) { parts_.parts[part_id].ready = true; for (auto &peer : peers_) { // TODO: notify only peer want_download_count == 0 - peer.second.state.unsafe()->notify_node(); + peer.second.state->notify_peer(); } ready_parts_.push_back(part_id); } + +void NodeActor::got_torrent_info_str(td::BufferSlice data) { + if (torrent_.inited_info()) { + return; + } + auto r_info_cell = vm::std_boc_deserialize(data.as_slice()); + if (r_info_cell.is_error()) { + return; + } + TorrentInfo info; + vm::CellSlice cs = vm::load_cell_slice(r_info_cell.move_as_ok()); + if (!info.unpack(cs)) { + return; + } + info.init_cell(); + if (torrent_.init_info(std::move(info)).is_error()) { + return; + } + init_torrent(); + loop(); +} + +void NodeActor::update_pieces_in_db(td::uint64 begin, td::uint64 end) { + bool changed = false; + for (auto i = begin; i < end; ++i) { + bool stored = pieces_in_db_.count(i); + bool need_store = torrent_.is_piece_in_memory(i); + if (need_store == stored) { + continue; + } + changed = true; + if (need_store) { + db_store_piece(i, torrent_.get_piece_data(i).move_as_ok()); + } else { + db_erase_piece(i); + } + } + if (changed) { + db_update_pieces_list(); + } +} + +void NodeActor::db_store_torrent() { + if (!db_) { + return; + } + auto obj = create_tl_object(); + obj->active_download_ = should_download_; + obj->root_dir_ = 
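Review note: `update_pieces_in_db` keeps the persisted piece set aligned with whatever the torrent currently buffers in memory, so an interrupted download can be resumed from the database before files exist on disk. The same reconciliation written against plain callbacks, to make the crash-recovery intent explicit; the callback signatures are illustrative only.

```cpp
#include <cstdint>
#include <functional>
#include <set>

namespace sketch {

// Store newly buffered pieces, erase pieces that were flushed to files, and
// rewrite the piece-index list only when something actually changed.
void sync_pieces_in_db(std::set<uint64_t>& pieces_in_db, uint64_t begin, uint64_t end,
                       const std::function<bool(uint64_t)>& is_piece_in_memory,
                       const std::function<void(uint64_t)>& db_store_piece,
                       const std::function<void(uint64_t)>& db_erase_piece,
                       const std::function<void()>& db_update_pieces_list) {
  bool changed = false;
  for (uint64_t i = begin; i < end; ++i) {
    bool stored = pieces_in_db.count(i) > 0;
    bool need_store = is_piece_in_memory(i);
    if (need_store == stored) {
      continue;
    }
    changed = true;
    if (need_store) {
      pieces_in_db.insert(i);
      db_store_piece(i);
    } else {
      pieces_in_db.erase(i);
      db_erase_piece(i);
    }
  }
  if (changed) {
    db_update_pieces_list();
  }
}

}  // namespace sketch
```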
torrent_.get_root_dir(); + db_->set(create_hash_tl_object(torrent_.get_hash()), serialize_tl_object(obj, true), + [](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to save torrent to db: " << R.move_as_error(); + } + }); +} + +void NodeActor::db_store_priorities() { + if (!db_ || db_store_priorities_paused_) { + return; + } + auto obj = create_tl_object(); + if (file_priority_.empty()) { + for (auto &s : pending_set_file_priority_) { + s.file.visit(td::overloaded( + [&](const PendingSetFilePriority::All &) { + obj->actions_.push_back(create_tl_object(s.priority)); + }, + [&](const size_t &i) { + obj->actions_.push_back(create_tl_object(i, s.priority)); + }, + [&](const std::string &name) { + obj->actions_.push_back(create_tl_object(name, s.priority)); + })); + } + } else { + size_t prior_cnt[256]; + std::fill(prior_cnt, prior_cnt + 256, 0); + for (td::uint8 p : file_priority_) { + ++prior_cnt[p]; + } + auto base_priority = (td::uint8)(std::max_element(prior_cnt, prior_cnt + 256) - prior_cnt); + obj->actions_.push_back(create_tl_object(base_priority)); + for (size_t i = 0; i < file_priority_.size(); ++i) { + if (file_priority_[i] != base_priority) { + obj->actions_.push_back(create_tl_object(i, file_priority_[i])); + } + } + } + db_->set(create_hash_tl_object(torrent_.get_hash()), + serialize_tl_object(obj, true), [](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to save torrent priorities to db: " << R.move_as_error(); + } + }); +} + +void NodeActor::db_store_torrent_meta() { + if (!db_ || !torrent_.inited_info() || (td::int64)torrent_.get_ready_parts_count() == last_stored_meta_count_) { + after_db_store_torrent_meta(last_stored_meta_count_); + return; + } + next_db_store_meta_at_ = td::Timestamp::never(); + auto meta = torrent_.get_meta_str(); + db_->set(create_hash_tl_object(torrent_.get_hash()), td::BufferSlice(meta), + [new_count = (td::int64)torrent_.get_ready_parts_count(), SelfId = actor_id(this)](td::Result R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &NodeActor::after_db_store_torrent_meta, R.move_as_error()); + } else { + td::actor::send_closure(SelfId, &NodeActor::after_db_store_torrent_meta, new_count); + } + }); +} + +void NodeActor::after_db_store_torrent_meta(td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to save torrent meta to db: " << R.move_as_error(); + } else { + last_stored_meta_count_ = R.move_as_ok(); + } + next_db_store_meta_at_ = td::Timestamp::in(td::Random::fast(10.0, 20.0)); + alarm_timestamp().relax(next_db_store_meta_at_); +} + +void NodeActor::db_store_piece(td::uint64 i, std::string s) { + pieces_in_db_.insert(i); + if (!db_) { + return; + } + db_->set(create_hash_tl_object(torrent_.get_hash(), i), td::BufferSlice(s), + [](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to store piece to db: " << R.move_as_error(); + } + }); +} + +void NodeActor::db_erase_piece(td::uint64 i) { + pieces_in_db_.erase(i); + if (!db_) { + return; + } + db_->erase(create_hash_tl_object(torrent_.get_hash(), i), + [](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to store piece to db: " << R.move_as_error(); + } + }); +} + +void NodeActor::db_update_pieces_list() { + if (!db_) { + return; + } + auto obj = create_tl_object(); + for (td::uint64 p : pieces_in_db_) { + obj->pieces_.push_back(p); + } + db_->set(create_hash_tl_object(torrent_.get_hash()), + serialize_tl_object(obj, true), [](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to store list of pieces to db: " << 
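Review note: `db_store_priorities` stores priorities compactly — the most frequent value becomes a single `priorityAction_all` record and only the files that deviate from it get `priorityAction_idx` records, which the `Loader` later replays. A standalone encode/decode pair for that scheme, with plain structs in place of the TL objects.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

namespace sketch {

struct PriorityActions {
  uint8_t base_priority;                               // "set all files to this"
  std::vector<std::pair<size_t, uint8_t>> exceptions;  // per-file overrides
};

// Compact encoding: store the most common priority once, then only the files
// that differ from it.
PriorityActions encode_priorities(const std::vector<uint8_t>& file_priority) {
  size_t count[256] = {};
  for (uint8_t p : file_priority) {
    ++count[p];
  }
  auto base = static_cast<uint8_t>(std::max_element(count, count + 256) - count);
  PriorityActions actions{base, {}};
  for (size_t i = 0; i < file_priority.size(); ++i) {
    if (file_priority[i] != base) {
      actions.exceptions.emplace_back(i, file_priority[i]);
    }
  }
  return actions;
}

// Replay on load: mirrors how priorityAction_all / priorityAction_idx records
// are turned back into per-file priorities.
std::vector<uint8_t> decode_priorities(const PriorityActions& actions, size_t files_count) {
  std::vector<uint8_t> file_priority(files_count, actions.base_priority);
  for (const auto& [idx, priority] : actions.exceptions) {
    if (idx < files_count) {
      file_priority[idx] = priority;
    }
  }
  return file_priority;
}

}  // namespace sketch
```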
R.move_as_error(); + } + }); +} + +void NodeActor::load_from_db(std::shared_ptr db, td::Bits256 hash, td::unique_ptr callback, + td::unique_ptr node_callback, + td::Promise> promise) { + class Loader : public td::actor::Actor { + public: + Loader(std::shared_ptr db, td::Bits256 hash, td::unique_ptr callback, + td::unique_ptr node_callback, td::Promise> promise) + : db_(std::move(db)) + , hash_(hash) + , callback_(std::move(callback)) + , node_callback_(std::move(node_callback)) + , promise_(std::move(promise)) { + } + + void finish(td::Result> R) { + promise_.set_result(std::move(R)); + stop(); + } + + void start_up() override { + db::db_get( + *db_, create_hash_tl_object(hash_), false, + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &Loader::finish, R.move_as_error_prefix("Torrent: ")); + } else { + td::actor::send_closure(SelfId, &Loader::got_torrent, R.move_as_ok()); + } + }); + } + + void got_torrent(tl_object_ptr obj) { + root_dir_ = std::move(obj->root_dir_); + active_download_ = obj->active_download_; + db_->get(create_hash_tl_object(hash_), + [SelfId = actor_id(this)](td::Result R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &Loader::finish, R.move_as_error_prefix("Meta: ")); + return; + } + auto r = R.move_as_ok(); + if (r.status == td::KeyValueReader::GetStatus::NotFound) { + td::actor::send_closure(SelfId, &Loader::got_meta_str, td::optional()); + } else { + td::actor::send_closure(SelfId, &Loader::got_meta_str, std::move(r.value)); + } + }); + } + + void got_meta_str(td::optional meta_str) { + auto r_torrent = [&]() -> td::Result { + Torrent::Options options; + options.root_dir = std::move(root_dir_); + options.in_memory = false; + options.validate = false; + if (meta_str) { + TRY_RESULT(meta, TorrentMeta::deserialize(meta_str.value().as_slice())); + options.validate = true; + return Torrent::open(std::move(options), std::move(meta)); + } else { + return Torrent::open(std::move(options), hash_); + } + }(); + if (r_torrent.is_error()) { + finish(r_torrent.move_as_error()); + return; + } + torrent_ = r_torrent.move_as_ok(); + + db::db_get( + *db_, create_hash_tl_object(hash_), true, + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &Loader::finish, R.move_as_error_prefix("Priorities: ")); + } else { + td::actor::send_closure(SelfId, &Loader::got_priorities, R.move_as_ok()); + } + }); + } + + void got_priorities(tl_object_ptr priorities) { + if (priorities != nullptr) { + for (auto &p : priorities->actions_) { + td::Variant file; + int priority = 0; + ton_api::downcast_call(*p, td::overloaded( + [&](ton_api::storage_priorityAction_all &obj) { + file = PendingSetFilePriority::All(); + priority = obj.priority_; + }, + [&](ton_api::storage_priorityAction_idx &obj) { + file = (size_t)obj.idx_; + priority = obj.priority_; + }, + [&](ton_api::storage_priorityAction_name &obj) { + file = std::move(obj.name_); + priority = obj.priority_; + })); + auto R = td::narrow_cast_safe(priority); + if (R.is_error()) { + LOG(ERROR) << "Invalid priority in db: " << R.move_as_error(); + continue; + } + priorities_.push_back(PendingSetFilePriority{std::move(file), R.move_as_ok()}); + } + } + + db::db_get( + *db_, create_hash_tl_object(hash_), true, + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &Loader::finish, R.move_as_error_prefix("Pieces in db: ")); + } else { + td::actor::send_closure(SelfId, &Loader::got_pieces_in_db, 
R.move_as_ok()); + } + }); + } + + void got_pieces_in_db(tl_object_ptr list) { + for (auto idx : list == nullptr ? std::vector() : list->pieces_) { + ++remaining_pieces_in_db_; + db_->get(create_hash_tl_object(hash_, idx), + [SelfId = actor_id(this), idx](td::Result R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &Loader::finish, R.move_as_error_prefix("Piece in db: ")); + return; + } + auto r = R.move_as_ok(); + td::optional piece; + if (r.status == td::KeyValueReader::GetStatus::Ok) { + piece = std::move(r.value); + } + td::actor::send_closure(SelfId, &Loader::got_piece_in_db, idx, std::move(piece)); + }); + } + if (remaining_pieces_in_db_ == 0) { + finished_db_read(); + } + } + + void got_piece_in_db(size_t idx, td::optional data) { + if (data) { + auto r_proof = torrent_.value().get_piece_proof(idx); + if (r_proof.is_ok()) { + torrent_.value().add_piece(idx, data.unwrap(), r_proof.move_as_ok()); + } + pieces_in_db_.insert(idx); + } + if (--remaining_pieces_in_db_ == 0) { + finished_db_read(); + } + } + + void finished_db_read() { + DbInitialData data; + data.priorities = std::move(priorities_); + data.pieces_in_db = std::move(pieces_in_db_); + finish(td::actor::create_actor("Node", 1, torrent_.unwrap(), std::move(callback_), + std::move(node_callback_), std::move(db_), active_download_, + std::move(data))); + } + + private: + std::shared_ptr db_; + td::Bits256 hash_; + td::unique_ptr callback_; + td::unique_ptr node_callback_; + td::Promise> promise_; + + std::string root_dir_; + bool active_download_{false}; + td::optional torrent_; + std::vector priorities_; + std::set pieces_in_db_; + size_t remaining_pieces_in_db_ = 0; + }; + td::actor::create_actor("loader", std::move(db), hash, std::move(callback), std::move(node_callback), + std::move(promise)) + .release(); +} + +void NodeActor::cleanup_db(std::shared_ptr db, td::Bits256 hash, td::Promise promise) { + td::MultiPromise mp; + auto ig = mp.init_guard(); + ig.add_promise(std::move(promise)); + db->erase(create_hash_tl_object(hash), ig.get_promise()); + db->erase(create_hash_tl_object(hash), ig.get_promise()); + db->erase(create_hash_tl_object(hash), ig.get_promise()); + db::db_get( + *db, create_hash_tl_object(hash), true, + [db, promise = ig.get_promise(), hash](td::Result> R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + return; + } + auto pieces = R.move_as_ok(); + if (pieces == nullptr) { + promise.set_result(td::Unit()); + return; + } + td::MultiPromise mp; + auto ig = mp.init_guard(); + ig.add_promise(std::move(promise)); + db->erase(create_hash_tl_object(hash), ig.get_promise()); + for (auto idx : pieces->pieces_) { + db->erase(create_hash_tl_object(hash, idx), ig.get_promise()); + } + }); +} + +void NodeActor::get_peers_info(td::Promise> promise) { + auto result = std::make_shared>>(); + td::MultiPromise mp; + auto ig = mp.init_guard(); + ig.add_promise([result, promise = std::move(promise), download_speed = download_speed_.speed(), + upload_speed = upload_speed_.speed(), parts = parts_.parts.size()](td::Result R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + return; + } + promise.set_result( + create_tl_object(std::move(*result), download_speed, upload_speed, parts)); + }); + + result->reserve(peers_.size()); + size_t i = 0; + for (auto &peer : peers_) { + if (!peer.second.state->peer_online_) { + continue; + } + result->push_back(create_tl_object()); + auto &obj = *result->back(); + obj.download_speed_ = peer.second.download_speed.speed(); + 
obj.upload_speed_ = peer.second.upload_speed.speed(); + obj.ready_parts_ = parts_helper_.get_ready_parts(peer.second.peer_token).ones_count(); + node_callback_->get_peer_info( + self_id_, peer.first, + [result, i, promise = ig.get_promise()](td::Result> R) mutable { + TRY_RESULT_PROMISE(promise, r, std::move(R)); + result->at(i)->adnl_id_ = r.first; + result->at(i)->ip_str_ = r.second; + promise.set_result(td::Unit()); + }); + ++i; + } +} + } // namespace ton diff --git a/storage/NodeActor.h b/storage/NodeActor.h index 79c78b7b..f7fe1c39 100644 --- a/storage/NodeActor.h +++ b/storage/NodeActor.h @@ -25,42 +25,87 @@ #include "Torrent.h" #include "td/utils/Random.h" +#include "td/utils/Variant.h" #include +#include "db.h" namespace ton { class NodeActor : public td::actor::Actor { public: + class NodeCallback { + public: + virtual ~NodeCallback() = default; + virtual td::actor::ActorOwn create_peer(PeerId self_id, PeerId peer_id, + std::shared_ptr state) = 0; + virtual void get_peers(PeerId src, td::Promise> peers) = 0; + virtual void register_self(td::actor::ActorId self) = 0; + virtual void get_peer_info(PeerId src, PeerId peer, td::Promise> promise) { + promise.set_error(td::Status::Error("Not implemented")); + } + }; + class Callback { public: - virtual ~Callback() { - } - virtual td::actor::ActorOwn create_peer(PeerId self_id, PeerId peer_id, - td::SharedState state) = 0; - virtual void get_peers(td::Promise> peers) = 0; - virtual void register_self(td::actor::ActorId self) = 0; - - //TODO: proper callbacks + virtual ~Callback() = default; virtual void on_completed() = 0; virtual void on_closed(ton::Torrent torrent) = 0; }; - NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr callback, bool should_download = true); + struct PendingSetFilePriority { + struct All {}; + td::Variant file; + td::uint8 priority; + }; + struct DbInitialData { + std::vector priorities; + std::set pieces_in_db; + }; + + NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr callback, + td::unique_ptr node_callback, std::shared_ptr db, bool should_download = true); + NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr callback, + td::unique_ptr node_callback, std::shared_ptr db, bool should_download, + DbInitialData db_initial_data); void start_peer(PeerId peer_id, td::Promise> promise); - ton::Torrent *with_torrent() { - return &torrent_; + struct NodeState { + Torrent &torrent; + bool active_download; + double download_speed; + double upload_speed; + const std::vector &file_priority; + }; + void with_torrent(td::Promise promise) { + promise.set_value( + NodeState{torrent_, should_download_, download_speed_.speed(), upload_speed_.speed(), file_priority_}); } std::string get_stats_str(); - void set_file_priority(size_t i, td::uint8 priority); void set_should_download(bool should_download); + void set_all_files_priority(td::uint8 priority, td::Promise promise); + void set_file_priority_by_idx(size_t i, td::uint8 priority, td::Promise promise); + void set_file_priority_by_name(std::string name, td::uint8 priority, td::Promise promise); + + void load_from(td::optional meta, std::string files_path, td::Promise promise); + + void wait_for_completion(td::Promise promise); + void get_peers_info(td::Promise> promise); + + static void load_from_db(std::shared_ptr db, td::Bits256 hash, td::unique_ptr callback, + td::unique_ptr node_callback, + td::Promise> promise); + static void cleanup_db(std::shared_ptr db, td::Bits256 hash, td::Promise promise); + private: PeerId self_id_; ton::Torrent 
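Review note: `NodeActor` now takes two callbacks — `NodeCallback` wires the actor to the network layer (peer creation, peer discovery, peer info), while `Callback` is only told about lifecycle events. Going by the pure-virtual methods declared above, a minimal completion listener could look like the following; the include paths and the logging are assumptions for illustration, and the network side is covered by `PeerManager::create_callback` further down in this diff.

```cpp
#include "storage/NodeActor.h"  // path assumed from this diff

#include "td/utils/logging.h"

namespace sketch {

// Reports torrent lifecycle events only; everything network-related lives in
// NodeActor::NodeCallback.
class LoggingCallback : public ton::NodeActor::Callback {
 public:
  void on_completed() override {
    LOG(INFO) << "torrent download completed";
  }
  void on_closed(ton::Torrent torrent) override {
    LOG(INFO) << "torrent " << torrent.get_hash().to_hex() << " closed";
  }
};

}  // namespace sketch
```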
torrent_; + std::shared_ptr torrent_info_str_; std::vector file_priority_; td::unique_ptr callback_; + td::unique_ptr node_callback_; + std::shared_ptr db_; bool should_download_{false}; class Notifier : public td::actor::Actor { @@ -80,24 +125,13 @@ class NodeActor : public td::actor::Actor { struct Peer { td::actor::ActorOwn actor; td::actor::ActorOwn notifier; - td::SharedState state; + std::shared_ptr state; PartsHelper::PeerToken peer_token; + LoadSpeed download_speed, upload_speed; }; std::map peers_; - struct QueryId { - PeerId peer; - PartId part; - - auto key() const { - return std::tie(peer, part); - } - bool operator<(const QueryId &other) const { - return key() < other.key(); - } - }; - struct PartsSet { struct Info { td::optional query_to_peer; @@ -110,7 +144,7 @@ class NodeActor : public td::actor::Actor { PartsSet parts_; PartsHelper parts_helper_; std::vector ready_parts_; - LoadSpeed download_; + LoadSpeed download_speed_, upload_speed_; td::Timestamp next_get_peers_at_; bool has_get_peers_{false}; @@ -118,9 +152,22 @@ class NodeActor : public td::actor::Actor { static constexpr double GET_PEER_EACH = 5; bool is_completed_{false}; + std::vector> wait_for_completion_; td::Timestamp will_upload_at_; + std::vector pending_set_file_priority_; + bool header_ready_ = false; + std::map file_name_to_idx_; + std::set pieces_in_db_; + bool db_store_priorities_paused_ = false; + td::int64 last_stored_meta_count_ = -1; + td::Timestamp next_db_store_meta_at_ = td::Timestamp::now(); + + void init_torrent(); + void init_torrent_header(); + void recheck_parts(Torrent::PartsRange range); + void on_signal_from_peer(PeerId peer_id); void start_up() override; @@ -134,13 +181,23 @@ class NodeActor : public td::actor::Actor { static constexpr size_t MAX_TOTAL_QUERIES = 20; static constexpr size_t MAX_PEER_TOTAL_QUERIES = 5; void loop_queries(); - bool try_send_query(); - bool try_send_part(PartId part_id); void loop_get_peers(); void got_peers(td::Result> r_peers); void loop_peer(const PeerId &peer_id, Peer &peer); void on_part_ready(PartId part_id); void loop_will_upload(); + + void got_torrent_info_str(td::BufferSlice data); + + void update_pieces_in_db(td::uint64 begin, td::uint64 end); + + void db_store_torrent(); + void db_store_priorities(); + void db_store_torrent_meta(); + void after_db_store_torrent_meta(td::Result R); + void db_store_piece(td::uint64 i, std::string s); + void db_erase_piece(td::uint64 i); + void db_update_pieces_list(); }; } // namespace ton diff --git a/storage/PartsHelper.h b/storage/PartsHelper.h index 1fe0558d..6ad4e0b3 100644 --- a/storage/PartsHelper.h +++ b/storage/PartsHelper.h @@ -26,12 +26,16 @@ namespace ton { struct PartsHelper { public: - PartsHelper(size_t parts_count) : parts_(parts_count), peers_(64) { + explicit PartsHelper(size_t parts_count = 0) : parts_(parts_count), peers_(64) { peers_[0].is_valid = true; } using PartId = size_t; using PeerToken = size_t; + void init_parts_count(size_t parts_count) { + CHECK(parts_.empty()); + parts_.resize(parts_count); + } PeerToken register_self() { return self_token_; } @@ -123,6 +127,22 @@ struct PartsHelper { change_key(part_id, part->rnd, part->peers_count, 0, part->priority, part->priority); } + void on_self_part_not_ready(PartId part_id) { + auto peer = get_peer(self_token_); + if (!peer->ready_parts.set_zero(part_id)) { + return; + } + auto part = get_part(part_id); + CHECK(part->is_ready); + part->is_ready = false; + for (auto &peer : peers_) { + if (peer.ready_parts.get(part_id)) { + 
peer.want_download_count++; + } + } + change_key(part_id, part->rnd, 0, part->peers_count, part->priority, part->priority); + } + struct RarePart { PartId part_id; PeerId peer_id; diff --git a/storage/PeerActor.cpp b/storage/PeerActor.cpp index 51ec20d2..b2bf5a89 100644 --- a/storage/PeerActor.cpp +++ b/storage/PeerActor.cpp @@ -38,7 +38,7 @@ ton::ton_api::object_ptr to_ton_api(const PeerState return ton::ton_api::make_object(state.will_upload, state.want_download); } -PeerActor::PeerActor(td::unique_ptr callback, td::SharedState state) +PeerActor::PeerActor(td::unique_ptr callback, std::shared_ptr state) : callback_(std::move(callback)), state_(std::move(state)) { CHECK(callback_); } @@ -50,7 +50,6 @@ td::uint64 PeerActor::create_and_send_query(ArgsT &&... args) { td::uint64 PeerActor::send_query(td::BufferSlice query) { auto query_id = next_query_id_++; - //LOG(ERROR) << "send_query " << to_string(ton::fetch_tl_object(std::move(query), true).ok()); callback_->send_query(query_id, std::move(query)); return query_id; } @@ -64,8 +63,8 @@ void PeerActor::notify_node() { } void PeerActor::execute_query(td::BufferSlice query, td::Promise promise) { + on_pong(); TRY_RESULT_PROMISE(promise, f, ton::fetch_tl_object(std::move(query), true)); - //LOG(ERROR) << "execute_query " << to_string(f); ton::ton_api::downcast_call( *f, td::overloaded( [&](ton::ton_api::storage_ping &ping) { @@ -73,6 +72,7 @@ void PeerActor::execute_query(td::BufferSlice query, td::Promise r_answer) { } void PeerActor::on_pong() { - wait_pong_till_ = td::Timestamp::in(4); - state_.lock()->peer_online_ = true; + wait_pong_till_ = td::Timestamp::in(10); + state_->peer_online_ = true; notify_node(); } void PeerActor::on_update_result(td::Result r_answer) { update_query_id_ = {}; if (r_answer.is_ok()) { - peer_is_inited_ = true; - have_pieces_list_.clear(); + if (!peer_is_inited_) { + peer_init_offset_ += UPDATE_INIT_BLOCK_SIZE; + if (peer_init_offset_ >= have_pieces_.as_slice().size()) { + peer_is_inited_ = true; + } + } + } else { + have_pieces_list_.insert(have_pieces_list_.end(), sent_have_pieces_list_.begin(), sent_have_pieces_list_.end()); } + sent_have_pieces_list_.clear(); } void PeerActor::on_get_piece_result(PartId piece_id, td::Result r_answer) { - auto state = state_.lock(); - auto it = state->node_queries_.find(piece_id); - if (it == state->node_queries_.end()) { - LOG(ERROR) << "???"; - return; - } //TODO: handle errors ??? 
- it->second = [&]() -> td::Result { + auto res = [&]() -> td::Result { TRY_RESULT(slice, std::move(r_answer)); TRY_RESULT(piece, ton::fetch_result(slice.as_slice())); PeerState::Part res; @@ -114,6 +115,7 @@ void PeerActor::on_get_piece_result(PartId piece_id, td::Result res.proof = std::move(piece->proof_); return std::move(res); }(); + state_->node_queries_results_.add_element(std::make_pair(piece_id, std::move(res))); notify_node(); } @@ -123,10 +125,26 @@ void PeerActor::on_update_state_result(td::Result r_answer) { } } +void PeerActor::on_get_info_result(td::Result r_answer) { + get_info_query_id_ = {}; + next_get_info_at_ = td::Timestamp::in(5.0); + alarm_timestamp().relax(next_get_info_at_); + if (r_answer.is_error()) { + return; + } + auto R = fetch_tl_object(r_answer.move_as_ok(), true); + if (R.is_error()) { + return; + } + td::BufferSlice data = std::move(R.ok_ref()->data_); + if (!data.empty() && !state_->torrent_info_ready_) { + state_->torrent_info_response_callback_(std::move(data)); + } +} + void PeerActor::on_query_result(td::uint64 query_id, td::Result r_answer) { if (r_answer.is_ok()) { on_pong(); - state_.lock()->download.add(r_answer.ok().size(), td::Timestamp::now()); } if (ping_query_id_ && ping_query_id_.value() == query_id) { on_ping_result(std::move(r_answer)); @@ -134,11 +152,14 @@ void PeerActor::on_query_result(td::uint64 query_id, td::Result on_update_result(std::move(r_answer)); } else if (update_state_query_.query_id && update_state_query_.query_id.value() == query_id) { on_update_state_result(std::move(r_answer)); + } else if (get_info_query_id_ && get_info_query_id_.value() == query_id) { + on_get_info_result(std::move(r_answer)); } else { for (auto &query_it : node_get_piece_) { if (query_it.second.query_id && query_it.second.query_id.value() == query_id) { on_get_piece_result(query_it.first, std::move(r_answer)); - query_it.second.query_id = {}; + node_get_piece_.erase(query_it.first); + break; } } } @@ -151,8 +172,8 @@ void PeerActor::start_up() { node_session_id_ = td::Random::secure_uint64(); - auto state = state_.lock(); - state->peer = actor_id(this); + state_->peer = actor_id(this); + state_->peer_ready_ = true; notify_node(); schedule_loop(); @@ -165,6 +186,7 @@ void PeerActor::loop() { loop_update_init(); loop_update_state(); loop_update_pieces(); + loop_get_torrent_info(); loop_node_get_piece(); loop_peer_get_piece(); @@ -175,8 +197,8 @@ void PeerActor::loop() { void PeerActor::loop_pong() { if (wait_pong_till_ && wait_pong_till_.is_in_past()) { wait_pong_till_ = {}; - LOG(INFO) << "Disconnected"; - state_.lock()->peer_online_ = false; + LOG(DEBUG) << "Disconnected from peer"; + state_->peer_online_ = false; notify_node(); } alarm_timestamp().relax(wait_pong_till_); @@ -203,25 +225,24 @@ td::BufferSlice PeerActor::create_update_query(ton::tl_object_ptrnode_state_.load(); + auto s = have_pieces_.as_slice(); + if (s.size() <= peer_init_offset_) { + peer_is_inited_ = true; + return; + } + s = s.substr(peer_init_offset_, UPDATE_INIT_BLOCK_SIZE); auto query = create_update_query(ton::create_tl_object( - td::BufferSlice(have_pieces_.as_slice()), to_ton_api(state->node_state_))); + td::BufferSlice(s), (int)peer_init_offset_, to_ton_api(node_state))); // take care about update_state_query initial state - update_state_query_.state = state->node_state_; + update_state_query_.state = node_state; update_state_query_.query_id = 0; update_query_id_ = send_query(std::move(query)); @@ -232,9 +253,9 @@ void PeerActor::loop_update_state() { return; } - auto 
state = state_.lock(); - if (!(update_state_query_.state == state->node_state_)) { - update_state_query_.state = state->node_state_; + auto node_state = state_->node_state_.load(); + if (!(update_state_query_.state == node_state)) { + update_state_query_.state = node_state; update_state_query_.query_id = {}; } @@ -248,49 +269,45 @@ void PeerActor::loop_update_state() { } void PeerActor::update_have_pieces() { - auto state = state_.lock(); - have_pieces_list_.insert(have_pieces_list_.end(), state->node_ready_parts_.begin(), state->node_ready_parts_.end()); - for (auto piece_id : state->node_ready_parts_) { + auto node_ready_parts = state_->node_ready_parts_.read(); + for (auto piece_id : node_ready_parts) { + if (piece_id < peer_init_offset_ + UPDATE_INIT_BLOCK_SIZE) { + have_pieces_list_.push_back(piece_id); + } have_pieces_.set_one(piece_id); } - state->node_ready_parts_.clear(); } void PeerActor::loop_update_pieces() { - if (update_query_id_) { - return; - } - - if (!peer_is_inited_) { + if (update_query_id_ || !peer_is_inited_) { return; } update_have_pieces(); if (!have_pieces_list_.empty()) { + size_t count = std::min(have_pieces_list_.size(), 1500); + sent_have_pieces_list_.assign(have_pieces_list_.end() - count, have_pieces_list_.end()); + have_pieces_list_.erase(have_pieces_list_.end() - count, have_pieces_list_.end()); auto query = create_update_query(ton::create_tl_object( - td::transform(have_pieces_list_, [](auto x) { return static_cast(x); }))); + td::transform(sent_have_pieces_list_, [](auto x) { return static_cast(x); }))); update_query_id_ = send_query(std::move(query)); } } -void PeerActor::loop_node_get_piece() { - auto state = state_.lock(); - - for (auto it = node_get_piece_.begin(); it != node_get_piece_.end();) { - auto other_it = state->node_queries_.find(it->first); - if (other_it == state->node_queries_.end() || other_it->second) { - it = node_get_piece_.erase(it); - } else { - it++; - } +void PeerActor::loop_get_torrent_info() { + if (get_info_query_id_ || state_->torrent_info_ready_) { + return; } + if (next_get_info_at_ && !next_get_info_at_.is_in_past()) { + return; + } + get_info_query_id_ = create_and_send_query(); +} - for (auto &query_it : state->node_queries_) { - if (query_it.second) { - continue; - } - node_get_piece_.emplace(query_it.first, NodePieceQuery{}); +void PeerActor::loop_node_get_piece() { + for (auto part : state_->node_queries_.read()) { + node_get_piece_.emplace(part, NodePieceQuery{}); } for (auto &query_it : node_get_piece_) { @@ -304,40 +321,29 @@ void PeerActor::loop_node_get_piece() { } void PeerActor::loop_peer_get_piece() { - auto state = state_.lock(); - // process answers - for (auto &it : state->peer_queries_) { - if (!it.second) { - continue; - } - auto promise_it = peer_get_piece_.find(it.first); + for (auto &p : state_->peer_queries_results_.read()) { + state_->peer_queries_active_.erase(p.first); + auto promise_it = peer_get_piece_.find(p.first); if (promise_it == peer_get_piece_.end()) { continue; } - promise_it->second.promise.set_result(it.second.unwrap().move_map([](PeerState::Part part) { + promise_it->second.promise.set_result(p.second.move_map([](PeerState::Part part) { return ton::create_serialize_tl_object(std::move(part.proof), std::move(part.data)); })); peer_get_piece_.erase(promise_it); - } - - // erase unneeded queries - for (auto it = state->peer_queries_.begin(); it != state->peer_queries_.end();) { - if (peer_get_piece_.count(it->first) == 0) { - it = state->peer_queries_.erase(it); - notify_node(); - } else { 
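Review note: piece announcements are now split into bounded batches — `loop_update_pieces` sends at most 1500 ids per `storage.updateHavePieces` query, remembers them in `sent_have_pieces_list_`, and `on_update_result` re-queues the batch if the query fails, so announcements are never silently dropped. A standalone sketch of that queue, with one batch in flight at a time to match the `update_query_id_` guard.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

namespace sketch {

class HavePiecesQueue {
 public:
  explicit HavePiecesQueue(size_t max_batch = 1500) : max_batch_(max_batch) {
  }

  void add_ready_piece(uint32_t piece_id) {
    pending_.push_back(piece_id);
  }

  // Take the next batch to send; call only when no batch is in flight
  // (the actor enforces this via update_query_id_). Empty result means
  // there is nothing to announce.
  std::vector<uint32_t> take_batch() {
    size_t count = std::min(pending_.size(), max_batch_);
    in_flight_.assign(pending_.end() - count, pending_.end());
    pending_.erase(pending_.end() - count, pending_.end());
    return in_flight_;
  }

  void on_query_result(bool success) {
    if (!success) {
      // Re-queue the batch; it will be retried by the next take_batch().
      pending_.insert(pending_.end(), in_flight_.begin(), in_flight_.end());
    }
    in_flight_.clear();
  }

 private:
  size_t max_batch_;
  std::vector<uint32_t> pending_;    // have_pieces_list_
  std::vector<uint32_t> in_flight_;  // sent_have_pieces_list_
};

}  // namespace sketch
```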
- it++; - } + notify_node(); } // create queries + std::vector new_peer_queries; for (auto &query_it : peer_get_piece_) { - auto res = state->peer_queries_.emplace(query_it.first, td::optional>()); - if (res.second) { + if (state_->peer_queries_active_.insert(query_it.first).second) { + new_peer_queries.push_back(query_it.first); notify_node(); } } + state_->peer_queries_.add_elements(std::move(new_peer_queries)); } void PeerActor::loop_notify_node() { @@ -345,13 +351,14 @@ void PeerActor::loop_notify_node() { return; } need_notify_node_ = false; - state_.lock()->notify_node(); + state_->notify_node(); } void PeerActor::execute_ping(td::uint64 session_id, td::Promise promise) { if (!peer_session_id_ || peer_session_id_.value() != session_id) { peer_session_id_ = session_id; peer_is_inited_ = false; + peer_init_offset_ = 0; update_query_id_ = {}; update_state_query_.query_id = {}; @@ -369,11 +376,11 @@ void PeerActor::execute_add_update(ton::ton_api::storage_addUpdate &add_update, promise.set_value(ton::create_serialize_tl_object()); - auto state = state_.lock(); + std::vector new_peer_ready_parts; auto add_piece = [&](PartId id) { if (!peer_have_pieces_.get(id)) { peer_have_pieces_.set_one(id); - state->peer_ready_parts_.push_back(id); + new_peer_ready_parts.push_back(id); notify_node(); } }; @@ -383,15 +390,15 @@ void PeerActor::execute_add_update(ton::ton_api::storage_addUpdate &add_update, if (peer_seqno_ >= seqno) { return; } - if (state->peer_state_ && state->peer_state_.value() == peer_state) { + if (state_->peer_state_ready_ && state_->peer_state_.load() == peer_state) { return; } peer_seqno_ = seqno; - state->peer_state_ = peer_state; + state_->peer_state_.exchange(peer_state); + state_->peer_state_ready_ = true; notify_node(); }; - //LOG(ERROR) << "Got " << to_string(add_update); downcast_call(*add_update.update_, td::overloaded( [&](ton::ton_api::storage_updateHavePieces &have_pieces) { @@ -404,16 +411,24 @@ void PeerActor::execute_add_update(ton::ton_api::storage_addUpdate &add_update, update_peer_state(from_ton_api(*init.state_)); td::Bitset new_bitset; new_bitset.set_raw(init.have_pieces_.as_slice().str()); + size_t offset = init.have_pieces_offset_ * 8; for (auto size = new_bitset.size(), i = size_t(0); i < size; i++) { if (new_bitset.get(i)) { - add_piece(static_cast(i)); + add_piece(static_cast(offset + i)); } } })); + state_->peer_ready_parts_.add_elements(std::move(new_peer_ready_parts)); } void PeerActor::execute_get_piece(ton::ton_api::storage_getPiece &get_piece, td::Promise promise) { PartId piece_id = get_piece.piece_id_; peer_get_piece_[piece_id] = {std::move(promise)}; } + +void PeerActor::execute_get_torrent_info(td::Promise promise) { + td::BufferSlice result = create_serialize_tl_object( + state_->torrent_info_ready_ ? 
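Review note: the init update now carries a window of the sender's have-pieces bitset plus its byte offset, and `execute_add_update` translates that offset into a bit index (`have_pieces_offset_ * 8`) before merging, so large bitmaps are exchanged in `UPDATE_INIT_BLOCK_SIZE` slices instead of one oversized query. A standalone sketch of the receiving side; the bit order within a byte is an assumption here, since the real code round-trips through `td::Bitset` and never unpacks bytes by hand.

```cpp
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

namespace sketch {

// OR one window of a remote have-pieces bitset into our local view and report
// which pieces became newly available from that peer.
std::vector<uint32_t> apply_init_window(std::vector<bool>& peer_have_pieces,
                                        const std::string& window_bytes, size_t byte_offset) {
  std::vector<uint32_t> newly_ready;
  size_t bit_offset = byte_offset * 8;
  for (size_t i = 0; i < window_bytes.size() * 8; ++i) {
    // Assumed LSB-first bit order inside each byte (illustrative only).
    bool bit = (static_cast<unsigned char>(window_bytes[i / 8]) >> (i % 8)) & 1;
    size_t piece = bit_offset + i;
    if (!bit || piece >= peer_have_pieces.size()) {
      continue;
    }
    if (!peer_have_pieces[piece]) {
      peer_have_pieces[piece] = true;
      newly_ready.push_back(static_cast<uint32_t>(piece));
    }
  }
  return newly_ready;
}

}  // namespace sketch
```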
state_->torrent_info_str_->clone() : td::BufferSlice()); + promise.set_result(std::move(result)); +} } // namespace ton diff --git a/storage/PeerActor.h b/storage/PeerActor.h index cc72496d..fd34bc88 100644 --- a/storage/PeerActor.h +++ b/storage/PeerActor.h @@ -21,7 +21,6 @@ #include "Bitset.h" #include "PeerState.h" -#include "SharedState.h" #include "td/utils/optional.h" @@ -38,14 +37,14 @@ class PeerActor : public td::actor::Actor { virtual void send_query(td::uint64 query_id, td::BufferSlice query) = 0; }; - PeerActor(td::unique_ptr callback, td::SharedState state); + PeerActor(td::unique_ptr callback, std::shared_ptr state); void execute_query(td::BufferSlice query, td::Promise promise); void on_query_result(td::uint64 query_id, td::Result r_answer); private: td::unique_ptr callback_; - td::SharedState state_; + std::shared_ptr state_; bool need_notify_node_{false}; td::uint64 next_query_id_{0}; @@ -53,7 +52,9 @@ class PeerActor : public td::actor::Actor { // ping td::Timestamp next_ping_at_; td::optional ping_query_id_; + td::optional get_info_query_id_; td::Timestamp wait_pong_till_; + td::Timestamp next_get_info_at_; // startSession td::uint64 node_session_id_; @@ -63,15 +64,17 @@ class PeerActor : public td::actor::Actor { td::optional peer_session_id_; td::optional update_query_id_; bool peer_is_inited_{false}; + size_t peer_init_offset_{0}; td::uint32 node_seqno_{0}; td::Bitset have_pieces_; std::vector have_pieces_list_; + std::vector sent_have_pieces_list_; td::uint32 peer_seqno_{0}; // update state struct UpdateState { td::optional query_id; - PeerState::State state; + PeerState::State state{false, false}; }; UpdateState update_state_query_; @@ -102,6 +105,7 @@ class PeerActor : public td::actor::Actor { void loop_update_init(); void loop_update_pieces(); void update_have_pieces(); + void loop_get_torrent_info(); void loop_update_state(); @@ -112,14 +116,14 @@ class PeerActor : public td::actor::Actor { void loop_peer_get_piece(); void execute_add_update(ton::ton_api::storage_addUpdate &add_update, td::Promise promise); - void execute_get_piece(ton::ton_api::storage_getPiece &get_piece, td::Promise promise); + void execute_get_torrent_info(td::Promise promise); void on_update_result(td::Result r_answer); void on_get_piece_result(PartId piece_id, td::Result r_answer); - void on_update_state_result(td::Result r_answer); + void on_get_info_result(td::Result r_answer); template td::uint64 create_and_send_query(ArgsT &&... args); @@ -127,5 +131,7 @@ class PeerActor : public td::actor::Actor { void schedule_loop(); void notify_node(); + + static const size_t UPDATE_INIT_BLOCK_SIZE = 6000; }; } // namespace ton diff --git a/storage/PeerManager.h b/storage/PeerManager.h new file mode 100644 index 00000000..52297ac5 --- /dev/null +++ b/storage/PeerManager.h @@ -0,0 +1,275 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ + +#include "rldp2/rldp.h" +#include "td/actor/actor.h" +#include "overlay/overlay.h" +#include "NodeActor.h" + +namespace ton_rldp = ton::rldp2; + +class PeerManager : public td::actor::Actor { + public: + PeerManager(ton::adnl::AdnlNodeIdShort adnl_id, ton::overlay::OverlayIdFull overlay_id, bool client_mode, + td::actor::ActorId overlays, td::actor::ActorId adnl, + td::actor::ActorId rldp) + : overlay_id_(std::move(overlay_id)) + , client_mode_(client_mode) + , overlays_(std::move(overlays)) + , adnl_(std::move(adnl)) + , rldp_(std::move(rldp)) { + CHECK(register_adnl_id(adnl_id) == 1); + } + void start_up() override { + } + void tear_down() override { + for (const auto& p : subscribed_peers_) { + if (p.second > 0) { + auto adnl_id = peer_to_andl(p.first); + if (adnl_id.is_ok()) { + send_closure(overlays_, &ton::overlay::Overlays::delete_overlay, adnl_id.move_as_ok(), + overlay_id_.compute_short_id()); + } + } + } + } + void send_query(ton::PeerId src, ton::PeerId dst, td::BufferSlice query, td::Promise promise) { + TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); + TRY_RESULT_PROMISE(promise, dst_id, peer_to_andl(dst)); + send_closure(overlays_, &ton::overlay::Overlays::send_query_via, dst_id, src_id, overlay_id_.compute_short_id(), "", + std::move(promise), td::Timestamp::in(10), std::move(query), 1 << 25, rldp_); + } + + void execute_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data, + td::Promise promise) { + auto src_id = register_adnl_id(src); + auto dst_id = register_adnl_id(dst); + auto it = peers_.find(std::make_pair(dst_id, src_id)); + if (it == peers_.end()) { + auto node_it = nodes_.find(dst_id); + if (node_it == nodes_.end()) { + LOG(ERROR) << "Unknown query destination"; + promise.set_error(td::Status::Error("Unknown query destination")); + return; + } + if (!node_it->second.is_alive()) { + LOG(ERROR) << "Expired query destination"; + promise.set_error(td::Status::Error("Unknown query destination")); + return; + } + send_closure(node_it->second, &ton::NodeActor::start_peer, src_id, + [promise = std::move(promise), + data = std::move(data)](td::Result> r_peer) mutable { + TRY_RESULT_PROMISE(promise, peer, std::move(r_peer)); + send_closure(peer, &ton::PeerActor::execute_query, std::move(data), std::move(promise)); + }); + return; + } + send_closure(it->second, &ton::PeerActor::execute_query, std::move(data), std::move(promise)); + } + + void register_peer(ton::PeerId src, ton::PeerId dst, td::actor::ActorId peer) { + peers_[std::make_pair(src, dst)] = std::move(peer); + register_src(src, [](td::Result res) { res.ensure(); }); + } + + void register_node(ton::PeerId src, td::actor::ActorId node) { + nodes_[src] = std::move(node); + register_src(src, [](td::Result res) { res.ensure(); }); + } + + void unregister_node(ton::PeerId src, td::actor::ActorId node) { + auto it = nodes_.find(src); + CHECK(it != nodes_.end()); + if (it->second == node) { + nodes_.erase(it); + } + unregister_src(src, [](td::Result res) { res.ensure(); }); + } + + void unregister_peer(ton::PeerId src, ton::PeerId dst, td::actor::ActorId peer) { + auto it = peers_.find(std::make_pair(src, dst)); + CHECK(it != peers_.end()); + if (it->second == peer) { + peers_.erase(it); + } + unregister_src(src, [](td::Result res) { res.ensure(); }); + } + + void unregister_src(ton::PeerId src, td::Promise promise) { + TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); + if (--subscribed_peers_[src] == 0) { + subscribed_peers_.erase(src); + send_closure(overlays_, 
&ton::overlay::Overlays::delete_overlay, src_id, overlay_id_.compute_short_id()); + } + promise.set_value({}); + } + void register_src(ton::PeerId src, td::Promise promise) { + TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); + if (subscribed_peers_[src]++ == 0) { + auto rules = ton::overlay::OverlayPrivacyRules{}; + class Callback : public ton::overlay::Overlays::Callback { + public: + explicit Callback(td::actor::ActorId peer_manager, ton::adnl::AdnlNodeIdShort dst) + : peer_manager_(std::move(peer_manager)), dst_(dst) { + } + void receive_message(ton::adnl::AdnlNodeIdShort src, ton::overlay::OverlayIdShort overlay_id, + td::BufferSlice data) override { + } + void receive_query(ton::adnl::AdnlNodeIdShort src, ton::overlay::OverlayIdShort overlay_id, + td::BufferSlice data, td::Promise promise) override { + td::actor::send_closure(peer_manager_, &PeerManager::execute_query, src, dst_, std::move(data), + std::move(promise)); + } + void receive_broadcast(ton::PublicKeyHash src, ton::overlay::OverlayIdShort overlay_id, + td::BufferSlice data) override { + } + + private: + td::actor::ActorId peer_manager_; + ton::adnl::AdnlNodeIdShort dst_; + }; + send_closure(overlays_, &ton::overlay::Overlays::create_public_overlay_ex, src_id, overlay_id_.clone(), + std::make_unique(actor_id(this), src_id), rules, R"({ "type": "storage" })", + !client_mode_); + } + promise.set_value({}); + } + + td::Result peer_to_andl(ton::PeerId id) { + if (id <= 0 || id > adnl_ids_.size()) { + return td::Status::Error(PSLICE() << "Invalid peer id " << id); + } + return adnl_ids_[id - 1]; + } + + ton::PeerId register_adnl_id(ton::adnl::AdnlNodeIdShort id) { + auto it = adnl_to_peer_id_.emplace(id, next_peer_id_); + if (it.second) { + adnl_ids_.push_back(id); + next_peer_id_++; + } + return it.first->second; + } + + void get_peers(ton::PeerId src, td::Promise> promise) { + TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); + send_closure(overlays_, &ton::overlay::Overlays::get_overlay_random_peers, src_id, overlay_id_.compute_short_id(), + 30, promise.send_closure(actor_id(this), &PeerManager::got_overlay_random_peers)); + } + + void get_peer_info(ton::PeerId src, ton::PeerId peer, td::Promise> promise) { + TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); + TRY_RESULT_PROMISE(promise, peer_id, peer_to_andl(peer)); + td::actor::send_closure( + adnl_, &ton::adnl::Adnl::get_conn_ip_str, src_id, peer_id, + promise.wrap([peer_id](std::string s) { return std::make_pair(peer_id.bits256_value(), std::move(s)); })); + } + + static td::unique_ptr create_callback(td::actor::ActorId peer_manager) { + class Context : public ton::NodeActor::NodeCallback { + public: + Context(td::actor::ActorId peer_manager) : peer_manager_(peer_manager) { + } + void get_peers(ton::PeerId src, td::Promise> promise) override { + send_closure(peer_manager_, &PeerManager::get_peers, src, std::move(promise)); + } + void register_self(td::actor::ActorId self) override { + CHECK(self_.empty()); + self_ = self; + send_closure(peer_manager_, &PeerManager::register_node, 1, self_); + } + ~Context() override { + if (!self_.empty()) { + send_closure(peer_manager_, &PeerManager::unregister_node, 1, self_); + } + } + td::actor::ActorOwn create_peer(ton::PeerId self_id, ton::PeerId peer_id, + std::shared_ptr state) override { + CHECK(self_id == 1); + class PeerCallback : public ton::PeerActor::Callback { + public: + PeerCallback(ton::PeerId self_id, ton::PeerId peer_id, td::actor::ActorId peer_manager) + : self_id_(self_id), peer_id_(peer_id), 
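// PeerCallback ties a PeerActor to the PeerManager: it registers the actor under
// its (self_id, peer_id) pair on startup, routes outgoing queries through
// PeerManager::send_query over the overlay, and unregisters itself on destruction.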
peer_manager_(std::move(peer_manager)) { + } + void register_self(td::actor::ActorId self) override { + CHECK(self_.empty()); + self_ = std::move(self); + send_closure(peer_manager_, &PeerManager::register_peer, self_id_, peer_id_, self_); + } + void send_query(td::uint64 query_id, td::BufferSlice query) override { + send_closure(peer_manager_, &PeerManager::send_query, self_id_, peer_id_, std::move(query), + promise_send_closure(self_, &ton::PeerActor::on_query_result, query_id)); + } + + ~PeerCallback() { + if (!self_.empty()) { + send_closure(peer_manager_, &PeerManager::unregister_peer, self_id_, peer_id_, self_); + } + } + + private: + td::actor::ActorId self_; + ton::PeerId self_id_; + ton::PeerId peer_id_; + td::actor::ActorId peer_manager_; + }; + return td::actor::create_actor(PSLICE() << "PeerActor " << peer_id, + td::make_unique(self_id, peer_id, peer_manager_), + std::move(state)); + } + + void get_peer_info(ton::PeerId src, ton::PeerId peer, + td::Promise> promise) override { + td::actor::send_closure(peer_manager_, &PeerManager::get_peer_info, src, peer, std::move(promise)); + } + + private: + td::actor::ActorId peer_manager_; + std::vector peers_; + td::actor::ActorId self_; + }; + return td::make_unique(std::move(peer_manager)); + } + + private: + ton::overlay::OverlayIdFull overlay_id_; + bool client_mode_ = false; + td::actor::ActorId overlays_; + td::actor::ActorId adnl_; + td::actor::ActorId rldp_; + + std::map, td::actor::ActorId> peers_; + std::map> nodes_; + ton::PeerId next_peer_id_{1}; + std::map adnl_to_peer_id_; + std::vector adnl_ids_; + + std::map subscribed_peers_; + + void got_overlay_random_peers(td::Result> r_peers, + td::Promise> promise) { + TRY_RESULT_PROMISE(promise, peers, std::move(r_peers)); + + std::vector res; + for (auto peer : peers) { + res.push_back(register_adnl_id(peer)); + } + + promise.set_value(std::move(res)); + } +}; diff --git a/storage/PeerState.cpp b/storage/PeerState.cpp index 35d27e43..a760024f 100644 --- a/storage/PeerState.cpp +++ b/storage/PeerState.cpp @@ -27,7 +27,7 @@ void PeerState::notify_node() { } void PeerState::notify_peer() { - if (peer.empty()) { + if (!peer_ready_) { return; } td::actor::send_signals_later(peer, td::actor::ActorSignals::wakeup()); diff --git a/storage/PeerState.h b/storage/PeerState.h index 3a3595f2..c578b544 100644 --- a/storage/PeerState.h +++ b/storage/PeerState.h @@ -25,17 +25,69 @@ #include "td/actor/actor.h" #include - -#include "LoadSpeed.h" +#include namespace ton { using PeerId = td::uint64; using PartId = td::uint32; +// Concurrent buffer for messages with one writer and one reader +// Reader reads all existing messages at once +// TODO: Use some better algorithm here, or maybe a concurrent queue +template +class MessageBuffer { + public: + MessageBuffer() = default; + MessageBuffer(const MessageBuffer&) = delete; + MessageBuffer& operator=(const MessageBuffer&) = delete; + ~MessageBuffer() { + delete ptr_.load(); + } + + void add_element(T x) { + std::vector* vec = ptr_.exchange(nullptr); + if (vec == nullptr) { + vec = new std::vector(); + } + vec->push_back(std::move(x)); + CHECK(ptr_.exchange(vec) == nullptr); + } + + void add_elements(std::vector elements) { + if (elements.empty()) { + return; + } + std::vector* vec = ptr_.exchange(nullptr); + if (vec == nullptr) { + vec = new std::vector(std::move(elements)); + } else { + for (auto& x : elements) { + vec->push_back(std::move(x)); + } + } + CHECK(ptr_.exchange(vec) == nullptr); + } + + std::vector read() { + std::vector* vec = 
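// Ownership of the vector is transferred with a single atomic exchange: the
// writer (add_element/add_elements) swaps the pointer out, appends, and swaps it
// back in; the reader (read) swaps it out, drains the whole batch and frees it.
// Illustrative sketch (hypothetical element type and names, not from this code):
//   MessageBuffer<td::uint32> queue;
//   queue.add_element(42);                   // producer side
//   for (auto x : queue.read()) { use(x); }  // consumer drains everything at once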
ptr_.exchange(nullptr); + std::vector result; + if (vec != nullptr) { + result = std::move(*vec); + delete vec; + } + return result; + } + private: + std::atomic*> ptr_{nullptr}; +}; + struct PeerState { + explicit PeerState(td::actor::ActorId<> node) : node(std::move(node)) { + } + struct State { - bool will_upload{false}; - bool want_download{false}; + bool will_upload; + bool want_download; auto key() const { return std::tie(will_upload, want_download); } @@ -43,31 +95,39 @@ struct PeerState { return key() == other.key(); } }; - State node_state_; - td::optional peer_state_; - bool peer_online_{false}; + // Thread-safe fields + std::atomic node_state_{State{false, false}}; + std::atomic_bool peer_state_ready_{false}; + std::atomic peer_state_{State{false, false}}; + std::atomic_bool peer_online_{false}; struct Part { td::BufferSlice proof; td::BufferSlice data; }; - std::map>> node_queries_; - std::map>> peer_queries_; + + std::set node_queries_active_; // Node only + MessageBuffer node_queries_; // Node -> Peer + MessageBuffer>> node_queries_results_; // Peer -> Node + + std::set peer_queries_active_; // Peer only + MessageBuffer peer_queries_; // Peer -> Node + MessageBuffer>> peer_queries_results_; // Node -> Peer // Peer -> Node - // update are added to this vector, so reader will be able to process all changes - std::vector peer_ready_parts_; + MessageBuffer peer_ready_parts_; + // Node -> Peer + MessageBuffer node_ready_parts_; // Node -> Peer - // writer writes all new parts to this vector. This state will be eventually synchornized with a peer - std::vector node_ready_parts_; + std::atomic_bool torrent_info_ready_{false}; + std::shared_ptr torrent_info_str_; + std::function torrent_info_response_callback_; - td::actor::ActorId<> node; + const td::actor::ActorId<> node; + std::atomic_bool peer_ready_{false}; td::actor::ActorId<> peer; - LoadSpeed upload; - LoadSpeed download; - void notify_node(); void notify_peer(); }; diff --git a/storage/SharedState.h b/storage/SharedState.h deleted file mode 100644 index 75b02e91..00000000 --- a/storage/SharedState.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - This file is part of TON Blockchain Library. - - TON Blockchain Library is free software: you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation, either version 2 of the License, or - (at your option) any later version. - - TON Blockchain Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with TON Blockchain Library. If not, see . 
- - Copyright 2017-2020 Telegram Systems LLP -*/ - -#pragma once - -#include -#include "td/utils/MovableValue.h" -#include - -namespace td { -template -class SharedState { - public: - friend class Guard; - class Guard { - public: - Guard(Guard &&) = default; - Guard(SharedState *self) : self(self) { - CHECK(!self->data_->is_locked.exchange(true)); - } - ~Guard() { - if (self.get()) { - CHECK(self.get()->data_->is_locked.exchange(false)); - } - } - T *get() { - return &self.get()->data_->data; - } - T *operator->() { - return get(); - } - - private: - td::MovableValue *> self; - }; - - auto lock() { - return Guard{this}; - } - auto unsafe() { - return &data_->data; - } - - private: - struct Data { - std::atomic is_locked{}; - T data; - }; - std::shared_ptr data_{std::make_shared()}; -}; -} // namespace td diff --git a/storage/Torrent.cpp b/storage/Torrent.cpp index 2012d053..ce0a9d0b 100644 --- a/storage/Torrent.cpp +++ b/storage/Torrent.cpp @@ -23,29 +23,30 @@ #include "td/utils/crypto.h" #include "td/utils/port/Stat.h" #include "td/utils/tl_helpers.h" +#include "td/utils/port/path.h" namespace ton { -Torrent::Torrent(TorrentMeta meta) - : info_(meta.info) - , merkle_tree_(info_.pieces_count(), info_.root_hash) - , piece_is_ready_(info_.pieces_count(), false) { - not_ready_piece_count_ = piece_is_ready_.size(); - header_pieces_count_ = (info_.header_size + info_.piece_size - 1) / info_.piece_size; - not_ready_pending_piece_count_ = header_pieces_count_; - - if (meta.header) { - set_header(meta.header.unwrap()); - } else { - header_str_ = td::BufferSlice(info_.header_size); - } - if (meta.root_proof.not_null()) { - merkle_tree_.add_proof(meta.root_proof); +td::Result Torrent::open(Options options, td::Bits256 hash) { + Torrent res(hash); + if (!options.in_memory) { + if (options.root_dir.empty()) { + options.root_dir = "."; + } + res.set_root_dir(options.root_dir); } + return std::move(res); } td::Result Torrent::open(Options options, TorrentMeta meta) { - Torrent res(std::move(meta)); + Torrent res(td::Bits256(meta.info.get_hash())); + TRY_STATUS(res.init_info(std::move(meta.info))); + if (meta.header) { + TRY_STATUS(res.set_header(meta.header.unwrap())); + } + if (meta.root_proof.not_null()) { + TRY_STATUS(res.merkle_tree_.add_proof(meta.root_proof)); + } if (!options.in_memory) { if (options.root_dir.empty()) { options.root_dir = "."; @@ -64,6 +65,7 @@ td::Result Torrent::open(Options options, td::Slice meta_str) { } const Torrent::Info &Torrent::get_info() const { + CHECK(inited_info_); return info_; } @@ -103,6 +105,9 @@ td::Status Torrent::iterate_piece(Info::PieceInfo piece, F &&f) { } bool Torrent::is_piece_ready(td::uint64 piece_i) const { + if (!inited_info_) { + return false; + } CHECK(piece_i < info_.pieces_count()); return piece_is_ready_[piece_i]; } @@ -133,7 +138,8 @@ Torrent::PartsRange Torrent::get_file_parts_range(size_t i) { return res; } -Torrent::PartsRange Torrent::get_header_parts_range() { +Torrent::PartsRange Torrent::get_header_parts_range() const { + CHECK(inited_info_); PartsRange res; res.begin = 0; res.end = header_pieces_count_; @@ -184,11 +190,14 @@ std::string Torrent::get_stats_str() const { } void Torrent::validate() { - CHECK(header_); + if (!inited_info_ || !header_) { + return; + } std::fill(piece_is_ready_.begin(), piece_is_ready_.end(), false); not_ready_piece_count_ = info_.pieces_count(); + included_ready_size_ = 0; for (auto &chunk : chunks_) { chunk.ready_size = 0; if (root_dir_) { @@ -200,31 +209,25 @@ void Torrent::validate() { } std::vector 
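// validate() now re-hashes pieces from disk and feeds them to the Merkle tree in
// batches; MerkleTree::add_pieces returns the indices of pieces whose hashes
// matched, and only those are marked ready below.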
hashes; - std::vector chunks; + std::vector> pieces; auto flush = [&] { - td::Bitset bitmask; - merkle_tree_.add_chunks(chunks, bitmask); - for (size_t i = 0; i < chunks.size(); i++) { - if (!bitmask.get(i)) { - continue; - } - - auto piece_i = chunks[i].index; + for (size_t piece_i : merkle_tree_.add_pieces(std::move(pieces))) { auto piece = info_.get_piece_info(piece_i); iterate_piece(piece, [&](auto it, auto info) { it->ready_size += info.size; + if (!it->excluded) { + included_ready_size_ += info.size; + } return td::Status::OK(); }); piece_is_ready_[piece_i] = true; ready_parts_count_++; - CHECK(not_ready_piece_count_); not_ready_piece_count_--; } - hashes.clear(); - chunks.clear(); + pieces.clear(); }; td::BufferSlice buf(info_.piece_size); @@ -246,24 +249,23 @@ void Torrent::validate() { auto dest = buf.as_slice().truncate(info.size); TRY_STATUS(it->get_piece(dest, info.chunk_offset, &cache)); sha256.feed(dest); - //LOG(ERROR) << dest; return td::Status::OK(); }); if (is_ok.is_error()) { LOG_IF(ERROR, !skipped) << "Failed: " << is_ok; - LOG(ERROR) << "Failed: " << is_ok; continue; } - MerkleTree::Chunk chunk; - chunk.index = piece_i; - sha256.extract(chunk.hash.as_slice()); - - chunks.push_back(chunk); + td::Bits256 hash; + sha256.extract(hash.as_slice()); + pieces.emplace_back(piece_i, hash); } flush(); } td::Result Torrent::get_piece_data(td::uint64 piece_i) { + if (!inited_info_) { + return td::Status::Error("Torrent info not inited"); + } CHECK(piece_i < info_.pieces_count()); if (!piece_is_ready_[piece_i]) { return td::Status::Error("Piece is not ready"); @@ -272,6 +274,10 @@ td::Result Torrent::get_piece_data(td::uint64 piece_i) { if (it != pending_pieces_.end()) { return it->second; } + auto it2 = in_memory_pieces_.find(piece_i); + if (it2 != in_memory_pieces_.end()) { + return it2->second.data; + } auto piece = info_.get_piece_info(piece_i); std::string res(piece.size, '\0'); @@ -282,68 +288,114 @@ td::Result Torrent::get_piece_data(td::uint64 piece_i) { } td::Result> Torrent::get_piece_proof(td::uint64 piece_i) { + if (!inited_info_) { + return td::Status::Error("Torrent info not inited"); + } CHECK(piece_i < info_.pieces_count()); return merkle_tree_.gen_proof(piece_i, piece_i); } td::Status Torrent::add_piece(td::uint64 piece_i, td::Slice data, td::Ref proof) { - TRY_STATUS(merkle_tree_.add_proof(proof)); - //LOG(ERROR) << "Add piece #" << piece_i; + if (fatal_error_.is_error()) { + return fatal_error_.clone().move_as_error_prefix("Fatal error: "); + } + if (!inited_info_) { + return td::Status::Error("Torrent info not inited"); + } + if (!proof.is_null()) { + TRY_STATUS(merkle_tree_.add_proof(proof)); + } CHECK(piece_i < info_.pieces_count()); if (piece_is_ready_[piece_i]) { return td::Status::OK(); } + td::Bits256 hash; + td::sha256(data, hash.as_slice()); + TRY_RESULT(expected_hash, merkle_tree_.get_piece_hash(piece_i)); + if (expected_hash != hash) { + return td::Status::Error("Hash mismatch"); + } piece_is_ready_[piece_i] = true; ready_parts_count_++; - ton::MerkleTree::Chunk chunk; - chunk.index = piece_i; - td::sha256(data, chunk.hash.as_slice()); - TRY_STATUS(merkle_tree_.try_add_chunks({chunk})); - if (chunks_.empty()) { - return add_header_piece(piece_i, data); + if (chunks_.empty() || !enabled_wirte_to_files_) { + return add_pending_piece(piece_i, data); } - return add_validated_piece(piece_i, data); } -td::Status Torrent::add_header_piece(td::uint64 piece_i, td::Slice data) { +td::Status Torrent::add_proof(td::Ref proof) { + if (!inited_info_) { + return 
td::Status::Error("Torrent info not inited"); + } + return merkle_tree_.add_proof(std::move(proof)); +} + +td::Status Torrent::add_pending_piece(td::uint64 piece_i, td::Slice data) { pending_pieces_[piece_i] = data.str(); if (piece_i < header_pieces_count_) { - //LOG(ERROR) << "Add header piece #" << piece_i; auto piece = info_.get_piece_info(piece_i); auto dest = header_str_.as_slice().substr(piece.offset); data.truncate(dest.size()); dest.copy_from(data); not_ready_pending_piece_count_--; if (not_ready_pending_piece_count_ == 0) { - //LOG(ERROR) << "Got full header"; TorrentHeader header; - TRY_STATUS(td::unserialize(header, header_str_.as_slice())); // TODO: it is a fatal error - set_header(header); - for (auto &it : pending_pieces_) { - TRY_STATUS(add_validated_piece(it.first, it.second)); + auto S = td::unserialize(header, header_str_.as_slice()); + if (S.is_ok()) { + S = set_header(std::move(header)); + } + if (S.is_error()) { + S = S.move_as_error_prefix("Invalid torrent header: "); + fatal_error_ = S.clone(); + return S; + } + if (enabled_wirte_to_files_) { + add_pending_pieces(); } - pending_pieces_.clear(); } - } else { - LOG(ERROR) << "PENDING"; } return td::Status::OK(); } -std::string Torrent::get_chunk_path(td::Slice name) { +void Torrent::enable_write_to_files() { + if (enabled_wirte_to_files_) { + return; + } + enabled_wirte_to_files_ = true; + if (header_) { + add_pending_pieces(); + } +} + +void Torrent::add_pending_pieces() { + for (auto &p : pending_pieces_) { + td::Status S = add_validated_piece(p.first, std::move(p.second)); + if (S.is_error()) { + LOG(WARNING) << "Failed to add pending piece #" << p.first << ": " << S; + } + } + pending_pieces_.clear(); +} + +std::string Torrent::get_chunk_path(td::Slice name) const { return PSTRING() << root_dir_.value() << TD_DIR_SLASH << header_.value().dir_name << TD_DIR_SLASH << name; } +std::string Torrent::get_file_path(size_t i) const { + return get_chunk_path(chunks_.at(i + 1).name); +} + td::Status Torrent::init_chunk_data(ChunkState &chunk) { if (chunk.data) { return td::Status::OK(); } if (root_dir_) { - TRY_RESULT(data, td::FileNoCacheBlobView::create(get_chunk_path(chunk.name), chunk.size, true)); + std::string path = get_chunk_path(chunk.name); + TRY_STATUS(td::mkpath(path)); + TRY_RESULT(data, td::FileNoCacheBlobView::create(path, chunk.size, true)); chunk.data = std::move(data); } else { chunk.data = td::BufferSliceBlobView::create(td::BufferSlice(chunk.size)); @@ -355,22 +407,41 @@ td::Status Torrent::add_validated_piece(td::uint64 piece_i, td::Slice data) { CHECK(!chunks_.empty()); auto piece = info_.get_piece_info(piece_i); + std::set excluded; TRY_STATUS(iterate_piece(piece, [&](auto it, auto info) { + if (it->excluded) { + excluded.insert(it - chunks_.begin()); + return td::Status::OK(); + } TRY_STATUS(init_chunk_data(*it)); - return it->add_piece(data.substr(info.piece_offset, info.size), info.chunk_offset); + TRY_STATUS(it->write_piece(data.substr(info.piece_offset, info.size), info.chunk_offset)); + return td::Status::OK(); + })); + TRY_STATUS(iterate_piece(piece, [&](auto it, auto info) { + if (!it->excluded) { + it->ready_size += info.size; + included_ready_size_ += info.size; + } + return td::Status::OK(); })); piece_is_ready_[piece_i] = true; not_ready_piece_count_--; + if (!excluded.empty()) { + in_memory_pieces_[piece_i] = {data.str(), std::move(excluded)}; + } return td::Status::OK(); } bool Torrent::is_completed() const { - return not_ready_piece_count_ == 0; + return inited_info_ && 
enabled_wirte_to_files_ && included_ready_size_ == included_size_; } td::Result Torrent::read_file(td::Slice name) { + if (!inited_info_) { + return td::Status::Error("Torrent info not inited"); + } for (auto &chunk : chunks_) { if (chunk.name == name) { td::BufferSlice res(chunk.size); @@ -383,10 +454,12 @@ td::Result Torrent::read_file(td::Slice name) { Torrent::GetMetaOptions::GetMetaOptions() = default; std::string Torrent::get_meta_str(const GetMetaOptions &options) const { + CHECK(inited_info_); return get_meta(options).serialize(); } TorrentMeta Torrent::get_meta(const GetMetaOptions &options) const { + CHECK(inited_info_); TorrentMeta torrent_file; if (options.with_header) { torrent_file.header = header_; @@ -399,17 +472,36 @@ TorrentMeta Torrent::get_meta(const GetMetaOptions &options) const { return torrent_file; } -Torrent::Torrent(Info info, td::optional header, ton::MerkleTree tree, std::vector chunks) - : info_(info) +Torrent::Torrent(td::Bits256 hash) : hash_(hash), inited_info_(false) { +} + +Torrent::Torrent(Info info, td::optional header, ton::MerkleTree tree, std::vector chunks, + std::string root_dir) + : hash_(info.get_hash()) + , inited_info_(true) + , info_(info) + , root_dir_(std::move(root_dir)) , header_(std::move(header)) + , enabled_wirte_to_files_(true) , merkle_tree_(std::move(tree)) , piece_is_ready_(info_.pieces_count(), true) , ready_parts_count_{info_.pieces_count()} - , chunks_(std::move(chunks)) { + , chunks_(std::move(chunks)) + , included_size_(info_.file_size) + , included_ready_size_(info_.file_size) { } -void Torrent::set_header(const TorrentHeader &header) { - header_ = header; +td::Status Torrent::set_header(TorrentHeader header) { + if (header_) { + return td::Status::OK(); + } + auto header_str = header.serialize(); + td::Bits256 header_hash; + td::sha256(header_str.as_slice(), header_hash.as_slice()); + if (header_hash != info_.header_hash) { + return td::Status::Error("Incorrect header hash"); + } + TRY_STATUS_PREFIX(header.validate(info_.file_size, info_.header_size), "Invalid torrent header: "); auto add_chunk = [&](td::Slice name, td::uint64 offset, td::uint64 size) { ChunkState chunk; chunk.name = name.str(); @@ -417,14 +509,17 @@ void Torrent::set_header(const TorrentHeader &header) { chunk.size = size; chunk.offset = offset; chunks_.push_back(std::move(chunk)); + included_size_ += size; }; - add_chunk("", 0, header.serialization_size()); - chunks_.back().data = td::BufferSliceBlobView::create(header.serialize()); + add_chunk("", 0, header_str.size()); + chunks_.back().data = td::BufferSliceBlobView::create(std::move(header_str)); for (size_t i = 0; i < header.files_count; i++) { auto l = header.get_data_begin(i); auto r = header.get_data_end(i); add_chunk(header.get_name(i), l, r - l); } + header_ = std::move(header); + return td::Status::OK(); } size_t Torrent::get_ready_parts_count() const { @@ -432,6 +527,7 @@ size_t Torrent::get_ready_parts_count() const { } std::vector Torrent::chunks_by_piece(td::uint64 piece_id) { + CHECK(inited_info_); std::vector res; auto piece = info_.get_piece_info(piece_id); auto is_ok = iterate_piece(piece, [&](auto it, auto info) { @@ -441,4 +537,165 @@ std::vector Torrent::chunks_by_piece(td::uint64 piece_id) { return res; } +td::Status Torrent::init_info(Info info) { + if (hash_ != info.get_hash()) { + return td::Status::Error("Hash mismatch"); + } + if (inited_info_) { + return td::Status::OK(); + } + auto S = info.validate(); + if (S.is_error()) { + S = S.move_as_error_prefix("Invalid torrent info: "); 
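// A failed validation is remembered in fatal_error_, so later calls such as
// add_piece() can reject data immediately instead of working with a broken info.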
+ fatal_error_ = S.clone(); + return S; + } + inited_info_ = true; + info_ = std::move(info); + merkle_tree_ = MerkleTree(info_.pieces_count(), info_.root_hash); + piece_is_ready_.resize(info_.pieces_count(), false); + not_ready_piece_count_ = piece_is_ready_.size(); + header_pieces_count_ = (info_.header_size + info_.piece_size - 1) / info_.piece_size; + not_ready_pending_piece_count_ = header_pieces_count_; + header_str_ = td::BufferSlice(info_.header_size); + return td::Status::OK(); +} + +void Torrent::set_file_excluded(size_t i, bool excluded) { + CHECK(header_); + CHECK(i + 1 < chunks_.size()); + if (!root_dir_) { + return; // All files are in-memory, nothing to do + } + size_t chunk_i = i + 1; + auto &chunk = chunks_[chunk_i]; + if (chunk.excluded == excluded) { + return; + } + if (excluded) { + included_size_ -= chunk.size; + included_ready_size_ -= chunk.ready_size; + } else { + included_size_ += chunk.size; + included_ready_size_ += chunk.ready_size; + } + chunk.excluded = excluded; + if (!enabled_wirte_to_files_ || excluded) { + return; + } + auto range = get_file_parts_range(i); + for (auto it = in_memory_pieces_.lower_bound(range.begin); it != in_memory_pieces_.end() && it->first < range.end;) { + if (!it->second.pending_chunks.count(chunk_i)) { + ++it; + continue; + } + auto piece_i = it->first; + auto piece = info_.get_piece_info(piece_i); + auto S = [&]() { + auto l = td::max(chunk.offset, piece.offset); + auto r = td::min(chunk.offset + chunk.size, piece.offset + piece.size); + TRY_STATUS(init_chunk_data(chunk)); + TRY_STATUS(chunk.write_piece(it->second.data.substr(l - piece.offset, r - l), l - chunk.offset)); + chunk.ready_size += r - l; + included_ready_size_ += r - l; + return td::Status::OK(); + }(); + if (S.is_error()) { + // Erase piece completely + piece_is_ready_[piece_i] = false; + not_ready_piece_count_++; + iterate_piece(piece, [&](auto it2, auto info) { + if (!it2->excluded) { + included_ready_size_ -= info.size; + } + if (!it2->excluded || !it->second.pending_chunks.count(it2 - chunks_.begin())) { + it2->ready_size -= info.size; + } + return td::Status::OK(); + }); + it = in_memory_pieces_.erase(it); + continue; + } + it->second.pending_chunks.erase(chunk_i); + if (it->second.pending_chunks.empty()) { + it = in_memory_pieces_.erase(it); + } else { + ++it; + } + } +} + +void Torrent::load_from_files(std::string files_path) { + CHECK(inited_header()); + std::vector> new_blobs; + new_blobs.push_back(td::BufferSliceBlobView::create(get_header().serialize())); + size_t files_count = get_files_count().unwrap(); + for (size_t i = 0; i < files_count; ++i) { + std::string new_path = PSTRING() << files_path << TD_DIR_SLASH << header_.value().dir_name << TD_DIR_SLASH + << get_file_name(i); + auto R = td::FileNoCacheBlobView::create(new_path, get_file_size(i), false); + if (R.is_error() && files_count == 1) { + R = td::FileNoCacheBlobView::create(files_path, get_file_size(i), false); + } + if (R.is_error()) { + new_blobs.emplace_back(); + } else { + new_blobs.push_back(R.move_as_ok()); + } + } + auto load_new_piece = [&](size_t piece_i) -> td::Result { + auto piece = info_.get_piece_info(piece_i); + bool included = false; + TRY_STATUS(iterate_piece(piece, [&](auto it, IterateInfo info) { + if (!it->excluded) { + included = true; + } + return td::Status::OK(); + })); + if (!included) { + return td::Status::Error("Piece is excluded"); + } + std::string data(piece.size, '\0'); + TRY_STATUS(iterate_piece(piece, [&](auto it, IterateInfo info) { + size_t chunk_i = it - 
chunks_.begin(); + if (!new_blobs[chunk_i]) { + return td::Status::Error("No such file"); + } + TRY_RESULT(s, new_blobs[chunk_i].value().view_copy(td::MutableSlice(data).substr(info.piece_offset, info.size), + info.chunk_offset)); + if (s != info.size) { + return td::Status::Error("Can't read file"); + } + return td::Status::OK(); + })); + return data; + }; + std::vector> new_pieces; + for (size_t i = 0; i < piece_is_ready_.size(); ++i) { + if (piece_is_ready_[i]) { + continue; + } + auto r_data = load_new_piece(i); + if (r_data.is_error()) { + continue; + } + td::Bits256 hash; + td::sha256(r_data.ok(), hash.as_slice()); + new_pieces.emplace_back(i, hash); + } + size_t added_cnt = 0; + for (size_t i : merkle_tree_.add_pieces(std::move(new_pieces))) { + auto r_data = load_new_piece(i); + if (r_data.is_error()) { + continue; + } + if (add_piece(i, r_data.ok(), {}).is_ok()) { + ++added_cnt; + } + } + if (added_cnt > 0) { + LOG(INFO) << "Loaded " << added_cnt << " new pieces for " << get_hash().to_hex(); + } +} + } // namespace ton diff --git a/storage/Torrent.h b/storage/Torrent.h index a335c4b4..c776c36e 100644 --- a/storage/Torrent.h +++ b/storage/Torrent.h @@ -25,6 +25,7 @@ #include "td/db/utils/BlobView.h" #include +#include namespace ton { class Torrent { @@ -40,6 +41,7 @@ class Torrent { }; // creation + static td::Result open(Options options, td::Bits256 hash); static td::Result open(Options options, TorrentMeta meta); static td::Result open(Options options, td::Slice meta_str); void validate(); @@ -54,7 +56,8 @@ class Torrent { // add piece (with an optional proof) td::Status add_piece(td::uint64 piece_i, td::Slice data, td::Ref proof); - //TODO: add multiple chunks? Merkle tree supports much more general interface + //TODO: add multiple pieces? Merkle tree supports much more general interface + td::Status add_proof(td::Ref proof); bool is_completed() const; @@ -91,19 +94,74 @@ class Torrent { td::CSlice get_file_name(size_t i) const; td::uint64 get_file_size(size_t i) const; td::uint64 get_file_ready_size(size_t i) const; + std::string get_file_path(size_t i) const; struct PartsRange { td::uint64 begin{0}; td::uint64 end{0}; + bool contains(td::uint64 i) const { + return begin <= i && i < end; + } }; PartsRange get_file_parts_range(size_t i); - PartsRange get_header_parts_range(); + PartsRange get_header_parts_range() const; size_t get_ready_parts_count() const; std::vector chunks_by_piece(td::uint64 piece_id); + bool inited_info() const { + return inited_info_; + } + bool inited_header() const { + return (bool)header_; + } + td::Bits256 get_hash() const { + return hash_; + } + std::string get_root_dir() const { + return root_dir_ ? root_dir_.value() : ""; + } + td::Status init_info(Info info); + td::Status set_header(TorrentHeader header); + + void enable_write_to_files(); + void set_file_excluded(size_t i, bool excluded); + bool file_is_excluded(size_t i) const { + return chunks_.at(i).excluded; + } + td::uint64 get_included_size() const { + return header_ ? 
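// While the header (and therefore the file list) is still unknown, the whole
// file_size counts as included; afterwards included_size_ reflects only the
// files that have not been excluded via set_file_excluded().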
included_size_ : info_.file_size; + } + td::uint64 get_included_ready_size() const { + return included_ready_size_; + } + + bool is_piece_in_memory(td::uint64 i) const { + return in_memory_pieces_.count(i); + } + std::set get_pieces_in_memory() const { + std::set pieces; + for (const auto &p : in_memory_pieces_) { + pieces.insert(p.first); + } + return pieces; + } + + const td::Status &get_fatal_error() const { + return fatal_error_; + } + + const TorrentHeader &get_header() const { + CHECK(inited_header()) + return header_.value(); + } + + void load_from_files(std::string files_path); + private: + td::Bits256 hash_; + bool inited_info_ = false; Info info_; td::optional root_dir_; @@ -113,6 +171,12 @@ class Torrent { size_t not_ready_pending_piece_count_{0}; size_t header_pieces_count_{0}; std::map pending_pieces_; + bool enabled_wirte_to_files_ = false; + struct InMemoryPiece { + std::string data; + std::set pending_chunks; + }; + std::map in_memory_pieces_; // Pieces that overlap excluded files ton::MerkleTree merkle_tree_; @@ -120,12 +184,15 @@ class Torrent { size_t not_ready_piece_count_{0}; size_t ready_parts_count_{0}; + td::Status fatal_error_ = td::Status::OK(); + struct ChunkState { std::string name; td::uint64 offset{0}; td::uint64 size{0}; td::uint64 ready_size{0}; td::BlobView data; + bool excluded{false}; struct Cache { td::uint64 offset{0}; @@ -137,10 +204,11 @@ class Torrent { return ready_size == size; } - TD_WARN_UNUSED_RESULT td::Status add_piece(td::Slice piece, td::uint64 offset) { + TD_WARN_UNUSED_RESULT td::Status write_piece(td::Slice piece, td::uint64 offset) { TRY_RESULT(written, data.write(piece, offset)); - CHECK(written == piece.size()); - ready_size += written; + if (written != piece.size()) { + return td::Status::Error("Written less than expected"); + } return td::Status::OK(); } bool has_piece(td::uint64 offset, td::uint64 size) { @@ -149,21 +217,24 @@ class Torrent { TD_WARN_UNUSED_RESULT td::Status get_piece(td::MutableSlice dest, td::uint64 offset, Cache *cache = nullptr); }; std::vector chunks_; + td::uint64 included_size_{0}; + td::uint64 included_ready_size_{0}; - explicit Torrent(Info info, td::optional header, ton::MerkleTree tree, std::vector chunk); - explicit Torrent(TorrentMeta meta); + explicit Torrent(td::Bits256 hash); + explicit Torrent(Info info, td::optional header, ton::MerkleTree tree, std::vector chunk, + std::string root_dir); void set_root_dir(std::string root_dir) { root_dir_ = std::move(root_dir); } - std::string get_chunk_path(td::Slice name); + std::string get_chunk_path(td::Slice name) const; td::Status init_chunk_data(ChunkState &chunk); template td::Status iterate_piece(Info::PieceInfo piece, F &&f); + void add_pending_pieces(); - td::Status add_header_piece(td::uint64 piece_i, td::Slice data); + td::Status add_pending_piece(td::uint64 piece_i, td::Slice data); td::Status add_validated_piece(td::uint64 piece_i, td::Slice data); - void set_header(const TorrentHeader &header); }; } // namespace ton diff --git a/storage/TorrentCreator.cpp b/storage/TorrentCreator.cpp index cdd2e4cf..d9d97df7 100644 --- a/storage/TorrentCreator.cpp +++ b/storage/TorrentCreator.cpp @@ -25,11 +25,20 @@ #include "td/utils/PathView.h" #include "td/utils/port/path.h" #include "td/utils/tl_helpers.h" +#include "MicrochunkTree.h" +#include "TorrentHeader.hpp" namespace ton { td::Result Torrent::Creator::create_from_path(Options options, td::CSlice raw_path) { TRY_RESULT(path, td::realpath(raw_path)); TRY_RESULT(stat, td::stat(path)); + std::string root_dir 
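// The torrent's root directory is derived from the source path below: trailing
// slashes are stripped, then the last path component is dropped, so root_dir
// ends up being the parent of the file or directory that is being packed.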
= path; + while (!root_dir.empty() && root_dir.back() == TD_DIR_SLASH) { + root_dir.pop_back(); + } + while (!root_dir.empty() && root_dir.back() != TD_DIR_SLASH) { + root_dir.pop_back(); + } if (stat.is_dir_) { if (!path.empty() && path.back() != TD_DIR_SLASH) { path += TD_DIR_SLASH; @@ -50,17 +59,21 @@ td::Result Torrent::Creator::create_from_path(Options options, td::CSli }); TRY_STATUS(std::move(status)); TRY_STATUS(std::move(walk_status)); + creator.root_dir_ = std::move(root_dir); + std::sort(creator.files_.begin(), creator.files_.end(), + [](const Torrent::Creator::File& a, const Torrent::Creator::File& b) { return a.name < b.name; }); return creator.finalize(); } else { Torrent::Creator creator(options); TRY_STATUS(creator.add_file(td::PathView(path).file_name(), path)); + creator.root_dir_ = std::move(root_dir); return creator.finalize(); } } td::Result Torrent::Creator::create_from_blobs(Options options, td::Span blobs) { Torrent::Creator creator(options); - for (auto &blob : blobs) { + for (auto& blob : blobs) { TRY_STATUS(creator.add_blob(blob.name, blob.data)); } return creator.finalize(); @@ -79,7 +92,7 @@ td::Status Torrent::Creator::add_blob(td::Slice name, td::BlobView blob) { } TD_WARN_UNUSED_RESULT td::Status Torrent::Creator::add_file(td::Slice name, td::CSlice path) { - LOG(INFO) << "Add file " << name << " " << path; + LOG(DEBUG) << "Add file " << name << " " << path; TRY_RESULT(data, td::FileNoCacheBlobView::create(path)); return add_blob(name, std::move(data)); } @@ -115,11 +128,9 @@ td::Result Torrent::Creator::finalize() { auto header_size = header.serialization_size(); auto file_size = header_size + data_offset; - auto chunks_count = (file_size + options_.piece_size - 1) / options_.piece_size; - ton::MerkleTree tree; - tree.init_begin(chunks_count); + auto pieces_count = (file_size + options_.piece_size - 1) / options_.piece_size; std::vector chunks; - size_t chunk_i = 0; + std::vector pieces; auto flush_reader = [&](bool force) { while (true) { auto slice = reader.prepare_read(); @@ -127,16 +138,14 @@ td::Result Torrent::Creator::finalize() { if (slice.empty() || (slice.size() != options_.piece_size && !force)) { break; } - td::UInt256 hash; + td::Bits256 hash; sha256(slice, hash.as_slice()); - CHECK(chunk_i < chunks_count); - tree.init_add_chunk(chunk_i, hash.as_slice()); - chunk_i++; + pieces.push_back(hash); reader.confirm_read(slice.size()); } }; td::uint64 offset = 0; - auto add_blob = [&](auto &&data, td::Slice name) { + auto add_blob = [&](auto data, td::Slice name) { td::uint64 data_offset = 0; while (data_offset < data.size()) { auto dest = writer.prepare_write(); @@ -168,24 +177,25 @@ td::Result Torrent::Creator::finalize() { td::sha256(header_str, info.header_hash.as_slice()); add_blob(td::BufferSliceBlobView::create(td::BufferSlice(header_str)), "").ensure(); - for (auto &file : files_) { + for (auto& file : files_) { add_blob(std::move(file.data), file.name).ensure(); } flush_reader(true); - tree.init_finish(); - CHECK(chunk_i == chunks_count); + CHECK(pieces.size() == pieces_count); CHECK(offset == file_size); + MerkleTree tree(std::move(pieces)); info.header_size = header.serialization_size(); info.piece_size = options_.piece_size; info.description = options_.description; info.file_size = file_size; - info.depth = tree.get_depth(); info.root_hash = tree.get_root_hash(); info.init_cell(); + TRY_STATUS_PREFIX(info.validate(), "Invalid torrent info: "); + TRY_STATUS_PREFIX(header.validate(info.file_size, info.header_size), "Invalid torrent header: 
"); - Torrent torrent(info, std::move(header), std::move(tree), std::move(chunks)); + Torrent torrent(info, std::move(header), std::move(tree), std::move(chunks), root_dir_); return std::move(torrent); } diff --git a/storage/TorrentCreator.h b/storage/TorrentCreator.h index 5d8d7a1c..b54157c1 100644 --- a/storage/TorrentCreator.h +++ b/storage/TorrentCreator.h @@ -55,13 +55,12 @@ class Torrent::Creator { td::Result finalize(); private: - td::Status init(); - Options options_; struct File { std::string name; td::BlobView data; }; std::vector files_; + std::string root_dir_; }; } // namespace ton diff --git a/storage/TorrentHeader.cpp b/storage/TorrentHeader.cpp index dc3ae265..2d7867e0 100644 --- a/storage/TorrentHeader.cpp +++ b/storage/TorrentHeader.cpp @@ -66,4 +66,76 @@ td::Slice TorrentHeader::get_name(td::uint64 file_i) const { return td::Slice(names).substr(from, till - from); } +static td::Status validate_name(td::Slice name, bool is_dir_name = false) { + if (name.empty()) { + return td::Status::Error("Name can't be empty"); + } + if (name[0] == '/') { + return td::Status::Error("Name can't start with '/'"); + } + if (name.back() == '/' && !is_dir_name) { + return td::Status::Error("Name can't end with '/'"); + } + for (size_t l = 0; l < name.size();) { + size_t r = l + 1; + while (r < name.size() && name[r] != '/') { + ++r; + } + td::Slice s = name.substr(l, r - l); + if (s == "") { + return td::Status::Error("Name can't contain consequitive '/'"); + } + if (s == ".") { + return td::Status::Error("Name can't contain component \".\""); + } + if (s == "..") { + return td::Status::Error("Name can't contain component \"..\""); + } + l = r + 1; + } + return td::Status::OK(); +} + +td::Status TorrentHeader::validate(td::uint64 total_size, td::uint64 header_size) const { + if (serialization_size() != header_size) { + return td::Status::Error("Invalid size"); + } + for (size_t i = 0; i + 1 < files_count; ++i) { + if (name_index[i] > name_index[i + 1]) { + return td::Status::Error("Invalid name offset"); + } + } + if (name_index.back() != names.size()) { + return td::Status::Error("Invalid name offset"); + } + for (size_t i = 0; i < files_count; ++i) { + if (get_data_offset(i) > get_data_offset(i + 1)) { + return td::Status::Error("Invalid data offset"); + } + } + if (get_data_offset(files_count) != total_size) { + return td::Status::Error("Invalid data offset"); + } + + std::set names; + for (size_t i = 0; i < files_count; ++i) { + auto name = get_name(i); + TRY_STATUS_PREFIX(validate_name(name), PSTRING() << "Invalid filename " << name << ": "); + if (!names.insert(name.str()).second) { + return td::Status::Error(PSTRING() << "Duplicate filename " << name); + } + } + if (!dir_name.empty()) { + TRY_STATUS_PREFIX(validate_name(dir_name, true), "Invalid dir_name: "); + } + for (const std::string& name : names) { + std::string name1 = name + '/'; + auto it = names.lower_bound(name1); + if (it != names.end() && it->substr(0, name1.size()) == name1) { + return td::Status::Error(PSTRING() << "Filename " << name << " coincides with directory name"); + } + } + return td::Status::OK(); +} + } // namespace ton diff --git a/storage/TorrentHeader.h b/storage/TorrentHeader.h index 6a13855d..65ea859f 100644 --- a/storage/TorrentHeader.h +++ b/storage/TorrentHeader.h @@ -21,6 +21,7 @@ #include "td/utils/Slice.h" #include "td/utils/buffer.h" +#include "td/utils/Status.h" namespace ton { // fec_info_none#c82a1964 = FecInfo; @@ -37,6 +38,13 @@ namespace ton { // names:(file_names_size * [uint8]) // 
data:(tot_data_size * [uint8]) // = TorrentHeader; +// +// Filename rules: +// 1) Name can't be empty +// 2) Names in a torrent should be unique +// 3) Name can't start or end with '/' or contain two consequitive '/' +// 4) Components of name can't be equal to "." or ".." +// 5) If there's a name aaa/bbb/ccc, no other name can start with aaa/bbb/ccc/ struct TorrentHeader { td::uint32 files_count{0}; @@ -64,5 +72,7 @@ struct TorrentHeader { void store(StorerT &storer) const; template void parse(ParserT &parser); + + td::Status validate(td::uint64 total_size, td::uint64 header_size) const; }; } // namespace ton diff --git a/storage/TorrentHeader.hpp b/storage/TorrentHeader.hpp index 2b84ccb0..d8f8f719 100644 --- a/storage/TorrentHeader.hpp +++ b/storage/TorrentHeader.hpp @@ -53,7 +53,7 @@ void TorrentHeader::parse(ParserT &parser) { td::uint32 got_type; parse(got_type, parser); if (got_type != type) { - parser.set_error("Unknown fec type"); + parser.set_error("Unknown type"); return; } parse(files_count, parser); diff --git a/storage/TorrentInfo.cpp b/storage/TorrentInfo.cpp index 46c1cf69..48b0d592 100644 --- a/storage/TorrentInfo.cpp +++ b/storage/TorrentInfo.cpp @@ -26,19 +26,18 @@ namespace ton { bool TorrentInfo::pack(vm::CellBuilder &cb) const { - return cb.store_long_bool(depth, 32) && cb.store_long_bool(piece_size, 32) && cb.store_long_bool(file_size, 64) && - cb.store_bits_bool(root_hash) && cb.store_long_bool(header_size, 64) && cb.store_bits_bool(header_hash) && + return cb.store_long_bool(piece_size, 32) && cb.store_long_bool(file_size, 64) && cb.store_bits_bool(root_hash) && + cb.store_long_bool(header_size, 64) && cb.store_bits_bool(header_hash) && vm::CellText::store(cb, description).is_ok(); } bool TorrentInfo::unpack(vm::CellSlice &cs) { - return cs.fetch_uint_to(32, depth) && cs.fetch_uint_to(32, piece_size) && cs.fetch_uint_to(64, file_size) && - cs.fetch_bits_to(root_hash) && cs.fetch_uint_to(64, header_size) && cs.fetch_bits_to(header_hash) && - vm::CellText::fetch_to(cs, description); + return cs.fetch_uint_to(32, piece_size) && cs.fetch_uint_to(64, file_size) && cs.fetch_bits_to(root_hash) && + cs.fetch_uint_to(64, header_size) && cs.fetch_bits_to(header_hash) && vm::CellText::fetch_to(cs, description); } -vm::Cell::Hash TorrentInfo::get_hash() const { - return as_cell()->get_hash(); +td::Bits256 TorrentInfo::get_hash() const { + return as_cell()->get_hash().bits(); } void TorrentInfo::init_cell() { @@ -48,7 +47,7 @@ void TorrentInfo::init_cell() { } td::Ref TorrentInfo::as_cell() const { - CHECK(cell_.not_null()); + CHECK(cell_.not_null()) return cell_; } @@ -63,4 +62,17 @@ TorrentInfo::PieceInfo TorrentInfo::get_piece_info(td::uint64 piece_i) const { info.size = td::min(static_cast(piece_size), file_size - info.offset); return info; } + +td::Status TorrentInfo::validate() const { + if (piece_size == 0) { + return td::Status::Error("Piece size is 0"); + } + if (header_size > file_size) { + return td::Status::Error("Header is too big"); + } + if (description.size() > 1024) { + return td::Status::Error("Description is too long"); + } + return td::Status::OK(); +} } // namespace ton diff --git a/storage/TorrentInfo.h b/storage/TorrentInfo.h index fd4778fa..2aee4858 100644 --- a/storage/TorrentInfo.h +++ b/storage/TorrentInfo.h @@ -23,11 +23,12 @@ #include "td/utils/UInt.h" #include "vm/cells.h" +#include "td/utils/optional.h" namespace ton { -// torrent_info depth:# piece_size:uint32 file_size:uint64 root_hash:(## 256) header_size:uint64 header_hash:(## 256) 
description:Text = TorrentInfo; +// torrent_info piece_size:uint32 file_size:uint64 root_hash:(## 256) header_size:uint64 header_hash:(## 256) +// description:Text = TorrentInfo; struct TorrentInfo { - td::uint32 depth{0}; td::uint32 piece_size{768 * 128}; td::uint64 file_size{0}; td::Bits256 root_hash; @@ -39,7 +40,7 @@ struct TorrentInfo { bool unpack(vm::CellSlice &cs); void init_cell(); - vm::Cell::Hash get_hash() const; + td::Bits256 get_hash() const; td::Ref as_cell() const; struct PieceInfo { @@ -50,6 +51,8 @@ struct TorrentInfo { td::uint64 pieces_count() const; PieceInfo get_piece_info(td::uint64 piece_i) const; + td::Status validate() const; + private: td::Ref cell_; }; diff --git a/storage/TorrentMeta.cpp b/storage/TorrentMeta.cpp index 0e677715..1a8fdd89 100644 --- a/storage/TorrentMeta.cpp +++ b/storage/TorrentMeta.cpp @@ -40,8 +40,6 @@ td::Result TorrentMeta::deserialize(td::Slice data) { if (header_hash != res.info.header_hash) { return td::Status::Error("Header hash mismatch"); } - } else { - LOG(ERROR) << "NO HEADER"; } if (res.root_proof.not_null()) { auto root = vm::MerkleProof::virtualize(res.root_proof, 1); diff --git a/storage/db.h b/storage/db.h new file mode 100644 index 00000000..269be0d9 --- /dev/null +++ b/storage/db.h @@ -0,0 +1,57 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ + +#pragma once +#include "td/db/KeyValueAsync.h" +#include "tl-utils/common-utils.hpp" + +namespace db { + +using DbType = td::KeyValueAsync; + +template +inline void db_get(DbType& db, td::Bits256 key, bool allow_not_found, + td::Promise> promise) { + db.get(key, [allow_not_found, promise = std::move(promise)](td::Result R) mutable { + TRY_RESULT_PROMISE(promise, r, std::move(R)); + if (r.status == td::KeyValueReader::GetStatus::NotFound) { + if (allow_not_found) { + promise.set_value(nullptr); + } else { + promise.set_error(td::Status::Error("Key not found")); + } + return; + } + promise.set_result(ton::fetch_tl_object(r.value, true)); + }); +} + +template +inline td::Result> db_get(td::KeyValue& db, td::Bits256 key, bool allow_not_found) { + std::string value; + TRY_RESULT(r, db.get(key.as_slice(), value)); + if (r == td::KeyValue::GetStatus::NotFound) { + if (allow_not_found) { + return nullptr; + } else { + return td::Status::Error("Key not found"); + } + } + return ton::fetch_tl_object(td::Slice(value), true); +} + +} // namespace db \ No newline at end of file diff --git a/storage/storage-cli.cpp b/storage/storage-cli.cpp index 06cb0055..43ddbeaf 100644 --- a/storage/storage-cli.cpp +++ b/storage/storage-cli.cpp @@ -23,8 +23,6 @@ #include "dht/dht.h" #include "keys/encryptor.h" #include "overlay/overlay.h" -#include "rldp/rldp.h" -#include "rldp2/rldp.h" #include "td/utils/JsonBuilder.h" #include "td/utils/port/signals.h" @@ -37,13 +35,12 @@ #include "td/utils/filesystem.h" #include "td/utils/port/path.h" -#include "td/actor/actor.h" #include "td/actor/MultiPromise.h" #include "terminal/terminal.h" #include "Torrent.h" #include "TorrentCreator.h" -#include "NodeActor.h" +#include "PeerManager.h" #include "auto/tl/ton_api_json.h" @@ -53,8 +50,6 @@ #include #include "git.h" -namespace ton_rldp = ton::rldp2; - struct StorageCliOptions { std::string config; bool enable_readline{true}; @@ -67,199 +62,6 @@ struct StorageCliOptions { using AdnlCategory = td::int32; -class PeerManager : public td::actor::Actor { - public: - PeerManager(ton::adnl::AdnlNodeIdShort adnl_id, ton::overlay::OverlayIdFull overlay_id, - td::actor::ActorId overlays, td::actor::ActorId adnl, - td::actor::ActorId rldp) - : adnl_id_(std::move(adnl_id)) - , overlay_id_(std::move(overlay_id)) - , overlays_(std::move(overlays)) - , adnl_(std::move(adnl)) - , rldp_(std::move(rldp)) { - CHECK(register_adnl_id(adnl_id_) == 1); - } - void start_up() override { - // TODO: forbid broadcasts? 
- auto rules = ton::overlay::OverlayPrivacyRules{ton::overlay::Overlays::max_fec_broadcast_size()}; - class Callback : public ton::overlay::Overlays::Callback { - public: - void receive_message(ton::adnl::AdnlNodeIdShort src, ton::overlay::OverlayIdShort overlay_id, - td::BufferSlice data) override { - } - void receive_query(ton::adnl::AdnlNodeIdShort src, ton::overlay::OverlayIdShort overlay_id, td::BufferSlice data, - td::Promise promise) override { - } - void receive_broadcast(ton::PublicKeyHash src, ton::overlay::OverlayIdShort overlay_id, - td::BufferSlice data) override { - } - }; - send_closure(overlays_, &ton::overlay::Overlays::create_public_overlay, adnl_id_, overlay_id_.clone(), - std::make_unique(), rules, "{ \"type\": \"storage\" }"); - } - void tear_down() override { - send_closure(overlays_, &ton::overlay::Overlays::delete_overlay, adnl_id_, overlay_id_.compute_short_id()); - } - void send_query(ton::PeerId src, ton::PeerId dst, td::BufferSlice query, td::Promise promise) { - TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); - TRY_RESULT_PROMISE(promise, dst_id, peer_to_andl(dst)); - query = ton::create_serialize_tl_object_suffix( - std::move(query), overlay_id_.compute_short_id().bits256_value()); - send_closure(rldp_, &ton_rldp::Rldp::send_query_ex, src_id, dst_id, "", std::move(promise), td::Timestamp::in(10), - std::move(query), 1 << 25); - } - - void execute_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data, - td::Promise promise) { - data = data.from_slice(data.as_slice().substr(4 + 32)); - auto src_id = register_adnl_id(src); - auto dst_id = register_adnl_id(dst); - auto it = peers_.find(std::make_pair(dst_id, src_id)); - if (it == peers_.end()) { - auto node_it = nodes_.find(dst_id); - if (node_it == nodes_.end()) { - LOG(ERROR) << "Unknown query destination"; - promise.set_error(td::Status::Error("Unknown query destination")); - return; - } - if (!node_it->second.is_alive()) { - LOG(ERROR) << "Expired query destination"; - promise.set_error(td::Status::Error("Unknown query destination")); - return; - } - send_closure(node_it->second, &ton::NodeActor::start_peer, src_id, - [promise = std::move(promise), - data = std::move(data)](td::Result> r_peer) mutable { - TRY_RESULT_PROMISE(promise, peer, std::move(r_peer)); - send_closure(peer, &ton::PeerActor::execute_query, std::move(data), std::move(promise)); - }); - return; - } - send_closure(it->second, &ton::PeerActor::execute_query, std::move(data), std::move(promise)); - } - - void register_peer(ton::PeerId src, ton::PeerId dst, td::actor::ActorId peer) { - peers_[std::make_pair(src, dst)] = std::move(peer); - register_src(src, [](td::Result res) { res.ensure(); }); - } - - void register_node(ton::PeerId src, td::actor::ActorId node) { - nodes_[src] = std::move(node); - register_src(src, [](td::Result res) { res.ensure(); }); - } - - void unregister_node(ton::PeerId src, td::actor::ActorId node) { - auto it = nodes_.find(src); - CHECK(it != nodes_.end()); - if (it->second == node) { - nodes_.erase(it); - } - unregister_src(src, [](td::Result res) { res.ensure(); }); - } - - void unregister_peer(ton::PeerId src, ton::PeerId dst, td::actor::ActorId peer) { - auto it = peers_.find(std::make_pair(src, dst)); - CHECK(it != peers_.end()); - if (it->second == peer) { - peers_.erase(it); - } - unregister_src(src, [](td::Result res) { res.ensure(); }); - } - - void unregister_src(ton::PeerId src, td::Promise promise) { - TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); - if 
(--subscribed_peers_[src] == 0) { - LOG(ERROR) << "Unsubscribe " << src_id; - subscribed_peers_.erase(src); - send_closure(adnl_, &ton::adnl::Adnl::unsubscribe, src_id, - ton::create_serialize_tl_object( - overlay_id_.compute_short_id().bits256_value()) - .as_slice() - .str()); - } - promise.set_value({}); - } - void register_src(ton::PeerId src, td::Promise promise) { - TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); - class Callback : public ton::adnl::Adnl::Callback { - public: - Callback(td::actor::ActorId peer_manager) : peer_manager_(std::move(peer_manager)) { - } - void receive_message(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, - td::BufferSlice data) override { - } - void receive_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data, - td::Promise promise) override { - send_closure(peer_manager_, &PeerManager::execute_query, std::move(src), std::move(dst), std::move(data), - std::move(promise)); - } - - private: - td::actor::ActorId peer_manager_; - }; - - if (subscribed_peers_[src]++ == 0) { - LOG(ERROR) << "Subscribe " << src_id; - send_closure(adnl_, &ton::adnl::Adnl::subscribe, src_id, - ton::create_serialize_tl_object( - overlay_id_.compute_short_id().bits256_value()) - .as_slice() - .str(), - std::make_unique(actor_id(this))); - } - promise.set_value({}); - } - - td::Result peer_to_andl(ton::PeerId id) { - if (id <= 0 || id > adnl_ids_.size()) { - return td::Status::Error(PSLICE() << "Invalid peer id " << id); - } - return adnl_ids_[id - 1]; - } - - ton::PeerId register_adnl_id(ton::adnl::AdnlNodeIdShort id) { - auto it = adnl_to_peer_id_.emplace(id, next_peer_id_); - if (it.second) { - LOG(ERROR) << "Register AndlId " << id << " -> " << it.first->second; - adnl_ids_.push_back(id); - next_peer_id_++; - } - return it.first->second; - } - - void get_peers(td::Promise> promise) { - send_closure(overlays_, &ton::overlay::Overlays::get_overlay_random_peers, adnl_id_, overlay_id_.compute_short_id(), - 30, promise.send_closure(actor_id(this), &PeerManager::got_overlay_random_peers)); - } - - private: - ton::adnl::AdnlNodeIdShort adnl_id_; - ton::overlay::OverlayIdFull overlay_id_; - td::actor::ActorId overlays_; - td::actor::ActorId adnl_; - td::actor::ActorId rldp_; - - std::map, td::actor::ActorId> peers_; - std::map> nodes_; - ton::PeerId next_peer_id_{1}; - std::map adnl_to_peer_id_; - std::vector adnl_ids_; - - std::map subscribed_peers_; - - void got_overlay_random_peers(td::Result> r_peers, - td::Promise> promise) { - TRY_RESULT_PROMISE(promise, peers, std::move(r_peers)); - - std::vector res; - for (auto peer : peers) { - res.push_back(register_adnl_id(peer)); - } - - promise.set_value(std::move(res)); - } -}; - class StorageCli : public td::actor::Actor { public: explicit StorageCli(StorageCliOptions options) : options_(std::move(options)) { @@ -338,7 +140,8 @@ class StorageCli : public td::actor::Actor { td::mkdir(options_.db_root).ignore(); keyring_ = ton::keyring::Keyring::create(options_.db_root + "/keyring"); - adnl_network_manager_ = ton::adnl::AdnlNetworkManager::create(td::narrow_cast(options_.addr.get_port())); + adnl_network_manager_ = + ton::adnl::AdnlNetworkManager::create(td::narrow_cast(options_.addr.get_port())); adnl_ = ton::adnl::Adnl::create(options_.db_root, keyring_.get()); td::actor::send_closure(adnl_, &ton::adnl::Adnl::register_network_manager, adnl_network_manager_.get()); rldp_ = ton_rldp::Rldp::create(adnl_.get()); @@ -430,6 +233,7 @@ class StorageCli : public td::actor::Actor { 
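// The new "addhash" command registers a torrent from its 256-bit hash alone
// (see torrent_add_by_hash below); its info, header and pieces are then expected
// to be fetched from peers once the torrent is started.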
td::TerminalIO::out() << "create \tCreate torrent from a directory\n"; td::TerminalIO::out() << "info \tPrint info about loaded torrent\n"; td::TerminalIO::out() << "load \tLoad torrent file in memory\n"; + td::TerminalIO::out() << "addhash \tAdd torrent by hash (in hex)\n"; td::TerminalIO::out() << "save \tSave torrent file\n"; td::TerminalIO::out() << "start \tStart torrent downloading/uploading\n"; td::TerminalIO::out() << "seed \tStart torrent uploading\n"; @@ -451,6 +255,8 @@ class StorageCli : public td::actor::Actor { torrent_info(parser.read_all(), std::move(cmd_promise)); } else if (cmd == "load") { cmd_promise.set_result(torrent_load(parser.read_all()).move_map([](auto &&x) { return td::Unit(); })); + } else if (cmd == "addhash") { + cmd_promise.set_result(torrent_add_by_hash(parser.read_all()).move_map([](auto &&x) { return td::Unit(); })); } else if (cmd == "save") { auto id = parser.read_word(); parser.skip_whitespaces(); @@ -544,7 +350,7 @@ class StorageCli : public td::actor::Actor { ton::Torrent::Creator::Options options; options.piece_size = 128 * 1024; TRY_RESULT_PROMISE(promise, torrent, ton::Torrent::Creator::create_from_path(options, path)); - auto hash = torrent.get_info().header_hash; + auto hash = torrent.get_hash(); for (auto &it : infos_) { if (it.second.hash == hash) { promise.set_error(td::Status::Error(PSLICE() << "Torrent already loaded (#" << it.first << ")")); @@ -552,6 +358,7 @@ class StorageCli : public td::actor::Actor { } } td::TerminalIO::out() << "Torrent #" << torrent_id_ << " created\n"; + td::TerminalIO::out() << "Torrent hash: " << torrent.get_hash().to_hex() << "\n"; infos_.emplace(torrent_id_, Info{torrent_id_, hash, std::move(torrent), td::actor::ActorOwn(), td::actor::ActorOwn()}); torrent_id_++; @@ -600,12 +407,12 @@ class StorageCli : public td::actor::Actor { } } - td::actor::ActorOwn create_peer_manager(vm::Cell::Hash hash) { + td::actor::ActorOwn create_peer_manager(td::Bits256 hash) { // create overlay network td::BufferSlice hash_str(hash.as_slice()); ton::overlay::OverlayIdFull overlay_id(std::move(hash_str)); auto adnl_id = ton::adnl::AdnlNodeIdShort{public_key_.compute_short_id()}; - return td::actor::create_actor("PeerManager", adnl_id, std::move(overlay_id), overlays_.get(), + return td::actor::create_actor("PeerManager", adnl_id, std::move(overlay_id), false, overlays_.get(), adnl_.get(), rldp_.get()); } @@ -616,68 +423,17 @@ class StorageCli : public td::actor::Actor { return; } if (ptr->peer_manager.empty()) { - ptr->peer_manager = create_peer_manager(ptr->torrent.value().get_info().get_hash()); + ptr->peer_manager = create_peer_manager(ptr->torrent.value().get_hash()); } ton::PeerId self_id = 1; - class Context : public ton::NodeActor::Callback { + class Callback : public ton::NodeActor::Callback { public: - Context(td::actor::ActorId peer_manager, td::actor::ActorId storage_cli, - ton::PeerId self_id, td::uint32 torrent_id, td::Promise on_completed) - : peer_manager_(peer_manager) - , storage_cli_(std::move(storage_cli)) - , self_id_(self_id) + Callback(td::actor::ActorId storage_cli, td::uint32 torrent_id, td::Promise on_completed) + : storage_cli_(std::move(storage_cli)) , torrent_id_(std::move(torrent_id)) , on_completed_(std::move(on_completed)) { } - void get_peers(td::Promise> promise) override { - send_closure(peer_manager_, &PeerManager::get_peers, std::move(promise)); - } - void register_self(td::actor::ActorId self) override { - CHECK(self_.empty()); - self_ = self; - send_closure(peer_manager_, 
&PeerManager::register_node, self_id_, self_); - } - ~Context() { - if (!self_.empty()) { - send_closure(peer_manager_, &PeerManager::unregister_node, self_id_, self_); - } - } - td::actor::ActorOwn create_peer(ton::PeerId self_id, ton::PeerId peer_id, - td::SharedState state) override { - CHECK(self_id == self_id_); - class PeerCallback : public ton::PeerActor::Callback { - public: - PeerCallback(ton::PeerId self_id, ton::PeerId peer_id, td::actor::ActorId peer_manager) - : self_id_(self_id), peer_id_(peer_id), peer_manager_(std::move(peer_manager)) { - } - void register_self(td::actor::ActorId self) override { - CHECK(self_.empty()); - self_ = std::move(self); - send_closure(peer_manager_, &PeerManager::register_peer, self_id_, peer_id_, self_); - } - void send_query(td::uint64 query_id, td::BufferSlice query) override { - send_closure(peer_manager_, &PeerManager::send_query, self_id_, peer_id_, std::move(query), - promise_send_closure(self_, &ton::PeerActor::on_query_result, query_id)); - } - - ~PeerCallback() { - if (!self_.empty()) { - send_closure(peer_manager_, &PeerManager::unregister_peer, self_id_, peer_id_, self_); - } - } - - private: - td::actor::ActorId self_; - ton::PeerId self_id_; - ton::PeerId peer_id_; - td::actor::ActorId peer_manager_; - }; - - return td::actor::create_actor(PSLICE() << "ton::PeerActor " << self_id << "->" << peer_id, - td::make_unique(self_id, peer_id, peer_manager_), - std::move(state)); - } void on_completed() override { if (on_completed_) { @@ -691,23 +447,20 @@ class StorageCli : public td::actor::Actor { } private: - td::actor::ActorId peer_manager_; td::actor::ActorId storage_cli_; - ton::PeerId self_id_; td::uint32 torrent_id_; - std::vector peers_; td::Promise on_completed_; - td::actor::ActorId self_; }; td::Promise on_completed; if (wait_download) { on_completed = std::move(promise); } - auto context = - td::make_unique(ptr->peer_manager.get(), actor_id(this), self_id, ptr->id, std::move(on_completed)); - ptr->node = td::actor::create_actor(PSLICE() << "Node#" << self_id, self_id, ptr->torrent.unwrap(), - std::move(context), should_download); + auto callback = td::make_unique(actor_id(this), ptr->id, std::move(on_completed)); + auto context = PeerManager::create_callback(ptr->peer_manager.get()); + ptr->node = + td::actor::create_actor(PSLICE() << "Node#" << self_id, self_id, ptr->torrent.unwrap(), + std::move(callback), std::move(context), nullptr, should_download); td::TerminalIO::out() << "Torrent #" << ptr->id << " started\n"; promise.release().release(); if (promise) { @@ -748,8 +501,11 @@ class StorageCli : public td::actor::Actor { return; } auto file_id_str = parser.read_word(); - size_t file_id = std::numeric_limits::max(); - if (file_id_str != "*") { + size_t file_id = 0; + bool all = false; + if (file_id_str == "*") { + all = true; + } else { TRY_RESULT_PROMISE_ASSIGN(promise, file_id, td::to_integer_safe(file_id_str)); } TRY_RESULT_PROMISE(promise, priority, td::to_integer_safe(parser.read_word())); @@ -757,7 +513,13 @@ class StorageCli : public td::actor::Actor { promise.set_error(td::Status::Error("Priority = 255 is reserved")); return; } - send_closure(ptr->node, &ton::NodeActor::set_file_priority, file_id, priority); + if (all) { + send_closure(ptr->node, &ton::NodeActor::set_all_files_priority, priority, + promise.wrap([](bool) { return td::Unit(); })); + } else { + send_closure(ptr->node, &ton::NodeActor::set_file_priority_by_idx, file_id, priority, + promise.wrap([](bool) { return td::Unit(); })); + } 
promise.set_value(td::Unit()); } @@ -779,13 +541,40 @@ class StorageCli : public td::actor::Actor { TRY_RESULT(torrent, ton::Torrent::open(options, data)); - auto hash = torrent.get_info().header_hash; + auto hash = torrent.get_hash(); for (auto &it : infos_) { if (it.second.hash == hash) { return td::Status::Error(PSLICE() << "Torrent already loaded (#" << it.first << ")"); } } td::TerminalIO::out() << "Torrent #" << torrent_id_ << " created\n"; + td::TerminalIO::out() << "Torrent hash: " << torrent.get_hash().to_hex() << "\n"; + auto res = + infos_.emplace(torrent_id_, Info{torrent_id_, hash, std::move(torrent), td::actor::ActorOwn(), + td::actor::ActorOwn()}); + torrent_id_++; + return &res.first->second; + } + + td::Result torrent_add_by_hash(td::Slice hash_hex) { + td::Bits256 hash; + if (hash.from_hex(hash_hex) != 256) { + return td::Status::Error("Failed to parse torrent hash"); + } + ton::Torrent::Options options; + options.in_memory = false; + options.root_dir = "."; + options.validate = false; + + TRY_RESULT(torrent, ton::Torrent::open(options, hash)); + + for (auto &it : infos_) { + if (it.second.hash == hash) { + return td::Status::Error(PSLICE() << "Torrent already loaded (#" << it.first << ")"); + } + } + td::TerminalIO::out() << "Torrent #" << torrent_id_ << " created\n"; + td::TerminalIO::out() << "Torrent hash: " << torrent.get_hash().to_hex() << "\n"; auto res = infos_.emplace(torrent_id_, Info{torrent_id_, hash, std::move(torrent), td::actor::ActorOwn(), td::actor::ActorOwn()}); @@ -839,7 +628,8 @@ int main(int argc, char *argv[]) { return (verbosity >= 0 && verbosity <= 20) ? td::Status::OK() : td::Status::Error("verbosity must be 0..20"); }); p.add_option('V', "version", "shows storage-cli build information", [&]() { - std::cout << "storage-cli build information: [ Commit: " << GitMetadata::CommitSHA1() << ", Date: " << GitMetadata::CommitDate() << "]\n"; + std::cout << "storage-cli build information: [ Commit: " << GitMetadata::CommitSHA1() + << ", Date: " << GitMetadata::CommitDate() << "]\n"; std::exit(0); }); p.add_option('C', "config", "set ton config", [&](td::Slice arg) { options.config = arg.str(); }); @@ -859,7 +649,7 @@ int main(int argc, char *argv[]) { std::_Exit(2); } - td::actor::Scheduler scheduler({0}); + td::actor::Scheduler scheduler({3}); scheduler.run_in_context([&] { td::actor::create_actor("console", options).release(); }); scheduler.run(); return 0; diff --git a/storage/storage-daemon/CMakeLists.txt b/storage/storage-daemon/CMakeLists.txt new file mode 100644 index 00000000..4880eece --- /dev/null +++ b/storage/storage-daemon/CMakeLists.txt @@ -0,0 +1,32 @@ +cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) + +add_executable(embed-provider-code smartcont/embed-provider-code.cpp) + +add_custom_command( + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + COMMAND embed-provider-code smartcont/storage-provider-code.boc smartcont/provider-code.h + COMMENT "Generate provider-code.h" + DEPENDS embed-provider-code smartcont/storage-provider-code.boc + OUTPUT smartcont/provider-code.h +) + +set(STORAGE_DAEMON_SOURCE + storage-daemon.cpp + StorageManager.h + StorageManager.cpp + StorageProvider.h + StorageProvider.cpp + smc-util.h + smc-util.cpp + smartcont/provider-code.h) + +set(STORAGE_DAEMON_CLI_SOURCE + storage-daemon-cli.cpp + ) + +add_executable(storage-daemon ${STORAGE_DAEMON_SOURCE}) +target_link_libraries(storage-daemon storage overlay tdutils tdactor adnl tl_api dht + rldp rldp2 fift-lib memprof git tonlib) + +add_executable(storage-daemon-cli 
${STORAGE_DAEMON_CLI_SOURCE}) +target_link_libraries(storage-daemon-cli tdutils tdactor adnllite tl_api tl_lite_api tl-lite-utils ton_block terminal git) \ No newline at end of file diff --git a/storage/storage-daemon/StorageManager.cpp b/storage/storage-daemon/StorageManager.cpp new file mode 100644 index 00000000..a62560cf --- /dev/null +++ b/storage/storage-daemon/StorageManager.cpp @@ -0,0 +1,268 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ + +#include "StorageManager.h" +#include "td/utils/filesystem.h" +#include "td/utils/port/path.h" +#include "td/db/RocksDb.h" +#include "td/actor/MultiPromise.h" + +static overlay::OverlayIdFull get_overlay_id(td::Bits256 hash) { + td::BufferSlice hash_str(hash.as_slice()); + return overlay::OverlayIdFull(std::move(hash_str)); +} + +StorageManager::StorageManager(adnl::AdnlNodeIdShort local_id, std::string db_root, td::unique_ptr callback, + bool client_mode, td::actor::ActorId adnl, + td::actor::ActorId rldp, td::actor::ActorId overlays) + : local_id_(local_id) + , db_root_(std::move(db_root)) + , callback_(std::move(callback)) + , client_mode_(client_mode) + , adnl_(std::move(adnl)) + , rldp_(std::move(rldp)) + , overlays_(std::move(overlays)) { +} + +void StorageManager::start_up() { + CHECK(db_root_ != ""); + td::mkdir(db_root_).ensure(); + db_root_ = td::realpath(db_root_).move_as_ok(); + td::mkdir(db_root_ + "/torrent-db").ensure(); + td::mkdir(db_root_ + "/torrent-files").ensure(); + LOG(INFO) << "Starting Storage manager. 
DB = " << db_root_; + + db_ = std::make_shared( + std::make_shared(td::RocksDb::open(db_root_ + "/torrent-db").move_as_ok())); + db::db_get( + *db_, create_hash_tl_object(), true, + [SelfId = actor_id(this)](td::Result> R) { + std::vector torrents; + if (R.is_error()) { + LOG(ERROR) << "Failed to load torrent list from db: " << R.move_as_error(); + } else { + auto r = R.move_as_ok(); + if (r != nullptr) { + torrents = std::move(r->torrents_); + } + } + td::actor::send_closure(SelfId, &StorageManager::load_torrents_from_db, std::move(torrents)); + }); +} + +void StorageManager::load_torrents_from_db(std::vector torrents) { + td::MultiPromise mp; + auto ig = mp.init_guard(); + ig.add_promise([SelfId = actor_id(this)](td::Result R) { + td::actor::send_closure(SelfId, &StorageManager::after_load_torrents_from_db); + }); + for (auto hash : torrents) { + CHECK(!torrents_.count(hash)) + auto& entry = torrents_[hash]; + entry.peer_manager = td::actor::create_actor("PeerManager", local_id_, get_overlay_id(hash), + client_mode_, overlays_, adnl_, rldp_); + NodeActor::load_from_db( + db_, hash, create_callback(hash, entry.closing_state), PeerManager::create_callback(entry.peer_manager.get()), + [SelfId = actor_id(this), hash, + promise = ig.get_promise()](td::Result> R) mutable { + td::actor::send_closure(SelfId, &StorageManager::loaded_torrent_from_db, hash, std::move(R)); + promise.set_result(td::Unit()); + }); + } +} + +void StorageManager::loaded_torrent_from_db(td::Bits256 hash, td::Result> R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to load torrent " << hash.to_hex() << " from db: " << R.move_as_error(); + torrents_.erase(hash); + } else { + auto it = torrents_.find(hash); + CHECK(it != torrents_.end()); + it->second.actor = R.move_as_ok(); + LOG(INFO) << "Loaded torrent " << hash.to_hex() << " from db"; + } +} + +void StorageManager::after_load_torrents_from_db() { + LOG(INFO) << "Finished loading torrents from db (" << torrents_.size() << " torrents)"; + db_store_torrent_list(); + callback_->on_ready(); +} + +td::unique_ptr StorageManager::create_callback( + td::Bits256 hash, std::shared_ptr closing_state) { + class Callback : public NodeActor::Callback { + public: + Callback(td::actor::ActorId id, td::Bits256 hash, + std::shared_ptr closing_state) + : id_(std::move(id)), hash_(hash), closing_state_(std::move(closing_state)) { + } + void on_completed() override { + } + void on_closed(Torrent torrent) override { + CHECK(torrent.get_hash() == hash_); + td::actor::send_closure(id_, &StorageManager::on_torrent_closed, std::move(torrent), closing_state_); + } + + private: + td::actor::ActorId id_; + td::Bits256 hash_; + std::shared_ptr closing_state_; + }; + return td::make_unique(actor_id(this), hash, std::move(closing_state)); +} + +void StorageManager::add_torrent(Torrent torrent, bool start_download, td::Promise promise) { + TRY_STATUS_PROMISE(promise, add_torrent_impl(std::move(torrent), start_download)); + db_store_torrent_list(); + promise.set_result(td::Unit()); +} + +td::Status StorageManager::add_torrent_impl(Torrent torrent, bool start_download) { + td::Bits256 hash = torrent.get_hash(); + if (torrents_.count(hash)) { + return td::Status::Error("Cannot add torrent: duplicate hash"); + } + TorrentEntry& entry = torrents_[hash]; + entry.hash = hash; + entry.peer_manager = td::actor::create_actor("PeerManager", local_id_, get_overlay_id(hash), + client_mode_, overlays_, adnl_, rldp_); + auto context = PeerManager::create_callback(entry.peer_manager.get()); + LOG(INFO) << "Added 
torrent " << hash.to_hex() << " , root_dir = " << torrent.get_root_dir(); + entry.actor = + td::actor::create_actor("Node", 1, std::move(torrent), create_callback(hash, entry.closing_state), + std::move(context), db_, start_download); + return td::Status::OK(); +} + +void StorageManager::add_torrent_by_meta(TorrentMeta meta, std::string root_dir, bool start_download, + td::Promise promise) { + td::Bits256 hash(meta.info.get_hash()); + Torrent::Options options; + options.root_dir = root_dir.empty() ? db_root_ + "/torrent-files/" + hash.to_hex() : root_dir; + TRY_RESULT_PROMISE(promise, torrent, Torrent::open(std::move(options), std::move(meta))); + add_torrent(std::move(torrent), start_download, std::move(promise)); +} + +void StorageManager::add_torrent_by_hash(td::Bits256 hash, std::string root_dir, bool start_download, + td::Promise promise) { + Torrent::Options options; + options.root_dir = root_dir.empty() ? db_root_ + "/torrent-files/" + hash.to_hex() : root_dir; + TRY_RESULT_PROMISE(promise, torrent, Torrent::open(std::move(options), hash)); + add_torrent(std::move(torrent), start_download, std::move(promise)); +} + +void StorageManager::set_active_download(td::Bits256 hash, bool active, td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::set_should_download, active); + promise.set_result(td::Unit()); +} + +void StorageManager::with_torrent(td::Bits256 hash, td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::with_torrent, std::move(promise)); +} + +void StorageManager::get_all_torrents(td::Promise> promise) { + std::vector result; + for (const auto& p : torrents_) { + result.push_back(p.first); + } + promise.set_result(std::move(result)); +} + +void StorageManager::db_store_torrent_list() { + std::vector torrents; + for (const auto& p : torrents_) { + torrents.push_back(p.first); + } + db_->set(create_hash_tl_object(), + create_serialize_tl_object(std::move(torrents)), + [](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to save torrent list to db: " << R.move_as_error(); + } + }); +} + +void StorageManager::set_all_files_priority(td::Bits256 hash, td::uint8 priority, td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::set_all_files_priority, priority, std::move(promise)); +} + +void StorageManager::set_file_priority_by_idx(td::Bits256 hash, size_t idx, td::uint8 priority, + td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::set_file_priority_by_idx, idx, priority, std::move(promise)); +} + +void StorageManager::set_file_priority_by_name(td::Bits256 hash, std::string name, td::uint8 priority, + td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::set_file_priority_by_name, std::move(name), priority, + std::move(promise)); +} + +void StorageManager::remove_torrent(td::Bits256 hash, bool remove_files, td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + LOG(INFO) << "Removing torrent " << hash.to_hex(); + entry->closing_state->removing = true; + entry->closing_state->remove_files = remove_files; + entry->closing_state->promise = std::move(promise); + torrents_.erase(hash); + db_store_torrent_list(); +} + +void StorageManager::load_from(td::Bits256 
hash, td::optional meta, std::string files_path, + td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::load_from, std::move(meta), std::move(files_path), + std::move(promise)); +} + +void StorageManager::on_torrent_closed(Torrent torrent, std::shared_ptr closing_state) { + if (!closing_state->removing) { + return; + } + if (closing_state->remove_files && torrent.inited_header()) { + size_t files_count = torrent.get_files_count().unwrap(); + for (size_t i = 0; i < files_count; ++i) { + std::string path = torrent.get_file_path(i); + td::unlink(path).ignore(); + // TODO: Check errors, remove empty directories + } + } + td::rmrf(db_root_ + "/torrent-files/" + torrent.get_hash().to_hex()).ignore(); + NodeActor::cleanup_db(db_, torrent.get_hash(), + [promise = std::move(closing_state->promise)](td::Result R) mutable { + if (R.is_error()) { + LOG(ERROR) << "Failed to cleanup database: " << R.move_as_error(); + } + promise.set_result(td::Unit()); + }); +} + +void StorageManager::wait_for_completion(td::Bits256 hash, td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::wait_for_completion, std::move(promise)); +} + +void StorageManager::get_peers_info(td::Bits256 hash, + td::Promise> promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::get_peers_info, std::move(promise)); +} diff --git a/storage/storage-daemon/StorageManager.h b/storage/storage-daemon/StorageManager.h new file mode 100644 index 00000000..38e371d0 --- /dev/null +++ b/storage/storage-daemon/StorageManager.h @@ -0,0 +1,107 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ + +#pragma once +#include "td/actor/actor.h" +#include "adnl/adnl.h" +#include "rldp2/rldp.h" +#include "overlay/overlays.h" +#include "storage/PeerManager.h" +#include "storage/db.h" + +using namespace ton; + +class StorageManager : public td::actor::Actor { + public: + class Callback { + public: + virtual ~Callback() = default; + virtual void on_ready() = 0; + }; + + StorageManager(adnl::AdnlNodeIdShort local_id, std::string db_root, td::unique_ptr callback, + bool client_mode, td::actor::ActorId adnl, td::actor::ActorId rldp, + td::actor::ActorId overlays); + + void start_up() override; + + void add_torrent(Torrent torrent, bool start_download, td::Promise promise); + void add_torrent_by_meta(TorrentMeta meta, std::string root_dir, bool start_download, td::Promise promise); + void add_torrent_by_hash(td::Bits256 hash, std::string root_dir, bool start_download, td::Promise promise); + + void set_active_download(td::Bits256 hash, bool active, td::Promise promise); + + void with_torrent(td::Bits256 hash, td::Promise promise); + void get_all_torrents(td::Promise> promise); + + void set_all_files_priority(td::Bits256 hash, td::uint8 priority, td::Promise promise); + void set_file_priority_by_idx(td::Bits256 hash, size_t idx, td::uint8 priority, td::Promise promise); + void set_file_priority_by_name(td::Bits256 hash, std::string name, td::uint8 priority, td::Promise promise); + + void remove_torrent(td::Bits256 hash, bool remove_files, td::Promise promise); + void load_from(td::Bits256 hash, td::optional meta, std::string files_path, + td::Promise promise); + + void wait_for_completion(td::Bits256 hash, td::Promise promise); + void get_peers_info(td::Bits256 hash, td::Promise> promise); + + private: + adnl::AdnlNodeIdShort local_id_; + std::string db_root_; + td::unique_ptr callback_; + bool client_mode_ = false; + td::actor::ActorId adnl_; + td::actor::ActorId rldp_; + td::actor::ActorId overlays_; + + std::shared_ptr db_; + + struct TorrentEntry { + td::Bits256 hash; + td::actor::ActorOwn actor; + td::actor::ActorOwn peer_manager; + + struct ClosingState { + bool removing = false; + td::Promise promise; + bool remove_files = false; + }; + std::shared_ptr closing_state = std::make_shared(); + }; + + std::map torrents_; + + td::Status add_torrent_impl(Torrent torrent, bool start_download); + + td::Result get_torrent(td::Bits256 hash) { + auto it = torrents_.find(hash); + if (it == torrents_.end()) { + return td::Status::Error("No such torrent"); + } + return &it->second; + } + + td::unique_ptr create_callback(td::Bits256 hash, + std::shared_ptr closing_state); + + void load_torrents_from_db(std::vector torrents); + void loaded_torrent_from_db(td::Bits256 hash, td::Result> R); + void after_load_torrents_from_db(); + void db_store_torrent_list(); + + void on_torrent_closed(Torrent torrent, std::shared_ptr closing_state); +}; \ No newline at end of file diff --git a/storage/storage-daemon/StorageProvider.cpp b/storage/storage-daemon/StorageProvider.cpp new file mode 100644 index 00000000..013327c9 --- /dev/null +++ b/storage/storage-daemon/StorageProvider.cpp @@ -0,0 +1,840 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. 
+ + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ + +#include "StorageProvider.h" +#include "td/db/RocksDb.h" +#include "td/utils/JsonBuilder.h" +#include "auto/tl/ton_api_json.h" +#include "td/utils/port/path.h" +#include "block/block-auto.h" +#include "common/delay.h" +#include "td/actor/MultiPromise.h" + +td::Result ProviderParams::create(const tl_object_ptr& obj) { + ProviderParams p; + p.accept_new_contracts = obj->accept_new_contracts_; + p.rate_per_mb_day = td::string_to_int256(obj->rate_per_mb_day_); + if (p.rate_per_mb_day.is_null() || p.rate_per_mb_day->sgn() < 0) { + return td::Status::Error("Invalid rate"); + } + p.max_span = obj->max_span_; + p.minimal_file_size = obj->minimal_file_size_; + p.maximal_file_size = obj->maximal_file_size_; + return p; +} + +tl_object_ptr ProviderParams::tl() const { + return create_tl_object( + accept_new_contracts, rate_per_mb_day->to_dec_string(), max_span, minimal_file_size, maximal_file_size); +} + +bool ProviderParams::to_builder(vm::CellBuilder& b) const { + return b.store_long_bool(accept_new_contracts, 1) && store_coins(b, rate_per_mb_day) && + b.store_long_bool(max_span, 32) && b.store_long_bool(minimal_file_size, 64) && + b.store_long_bool(maximal_file_size, 64); +} + +StorageProvider::StorageProvider(ContractAddress account_address, std::string db_root, + td::actor::ActorId tonlib_client, + td::actor::ActorId storage_manager, + td::actor::ActorId keyring) + + : main_address_(account_address) + , db_root_(std::move(db_root)) + , tonlib_client_(std::move(tonlib_client)) + , storage_manager_(std::move(storage_manager)) + , keyring_(std::move(keyring)) { +} + +void StorageProvider::start_up() { + LOG(INFO) << "Initing storage provider, account address: " << main_address_.to_string(); + td::mkdir(db_root_).ensure(); + db_ = std::make_unique(td::RocksDb::open(db_root_).move_as_ok()); + + auto r_state = db::db_get( + *db_, create_hash_tl_object(), true); + r_state.ensure(); + auto state = r_state.move_as_ok(); + if (state) { + last_processed_lt_ = state->last_processed_lt_; + LOG(INFO) << "Loaded storage provider state"; + LOG(INFO) << "Last processed lt: " << last_processed_lt_; + } + + class Callback : public FabricContractWrapper::Callback { + public: + explicit Callback(td::actor::ActorId id) : id_(std::move(id)) { + } + void on_transaction(tl_object_ptr transaction) override { + td::actor::send_closure(id_, &StorageProvider::process_transaction, std::move(transaction)); + } + + private: + td::actor::ActorId id_; + }; + contract_wrapper_ = + td::actor::create_actor("ContractWrapper", main_address_, tonlib_client_, keyring_, + td::make_unique(actor_id(this)), last_processed_lt_); + + auto r_config = db::db_get( + *db_, create_hash_tl_object(), true); + r_config.ensure(); + auto config_obj = r_config.move_as_ok(); + if (config_obj) { + LOG(INFO) << "Loaded config from db"; + config_ = Config(config_obj); + } else { + LOG(INFO) << "Using default config"; + db_store_config(); + } + LOG(INFO) << "Config: max_contracts=" << config_.max_contracts << ", max_total_size=" << config_.max_total_size; + + auto r_contract_list = db::db_get( + *db_, create_hash_tl_object(), true); + r_contract_list.ensure(); + auto 
contract_list = r_contract_list.move_as_ok(); + if (contract_list) { + LOG(INFO) << "Loading " << contract_list->contracts_.size() << " contracts from db"; + for (auto& c : contract_list->contracts_) { + ContractAddress address(c->wc_, c->addr_); + if (contracts_.count(address)) { + LOG(ERROR) << "Duplicate contract in db: " << address.to_string(); + continue; + } + auto r_contract = db::db_get( + *db_, create_hash_tl_object(address.wc, address.addr), + true); + r_contract.ensure(); + auto db_contract = r_contract.move_as_ok(); + if (!db_contract) { + LOG(ERROR) << "Missing contract in db: " << address.to_string(); + continue; + } + StorageContract& contract = contracts_[address]; + contract.torrent_hash = db_contract->torrent_hash_; + contract.microchunk_hash = db_contract->microchunk_hash_; + contract.created_time = db_contract->created_time_; + contract.state = (StorageContract::State)db_contract->state_; + contract.file_size = db_contract->file_size_; + contract.max_span = db_contract->max_span_; + contract.rate = td::string_to_int256(db_contract->rate_); + contracts_total_size_ += contract.file_size; + + auto r_tree = db::db_get( + *db_, create_hash_tl_object(address.wc, address.addr), true); + r_tree.ensure(); + auto tree = r_tree.move_as_ok(); + if (tree) { + contract.microchunk_tree = std::make_shared(vm::std_boc_deserialize(tree->data_).move_as_ok()); + } + + LOG(INFO) << "Loaded contract from db: " << address.to_string() << ", torrent=" << contract.torrent_hash.to_hex() + << ", state=" << contract.state; + } + } + + for (auto& p : contracts_) { + const ContractAddress& address = p.first; + StorageContract& contract = p.second; + switch (contract.state) { + case StorageContract::st_downloading: + init_new_storage_contract(address, contract); + break; + case StorageContract::st_downloaded: + check_contract_active(address); + break; + case StorageContract::st_active: + contract.check_next_proof_at = td::Timestamp::now(); + break; + case StorageContract::st_closing: + check_storage_contract_deleted(address); + break; + default: + LOG(FATAL) << "Invalid contract state in db"; + } + } + LOG(INFO) << "Loaded contracts from db"; + + alarm(); +} + +void StorageProvider::get_params(td::Promise promise) { + return get_provider_params(tonlib_client_, main_address_, std::move(promise)); +} + +void StorageProvider::get_provider_params(td::actor::ActorId client, + ContractAddress address, td::Promise promise) { + run_get_method( + address, client, "get_storage_params", std::vector>(), + promise.wrap([](std::vector> stack) -> td::Result { + if (stack.size() != 5) { + return td::Status::Error(PSTRING() << "Method returned " << stack.size() << " values, 5 expected"); + } + TRY_RESULT_PREFIX(accept_new_contracts, entry_to_int(stack[0]), "Invalid accept_new_contracts: "); + TRY_RESULT_PREFIX(rate_per_mb_day, entry_to_int(stack[1]), "Invalid rate_per_mb_day: "); + TRY_RESULT_PREFIX(max_span, entry_to_int(stack[2]), "Invalid max_span: "); + TRY_RESULT_PREFIX(minimal_file_size, entry_to_int(stack[3]), "Invalid minimal_file_size: "); + TRY_RESULT_PREFIX(maximal_file_size, entry_to_int(stack[4]), "Invalid maximal_file_size: "); + ProviderParams params; + params.accept_new_contracts = accept_new_contracts; + params.rate_per_mb_day = rate_per_mb_day; + params.max_span = max_span; + params.minimal_file_size = minimal_file_size; + params.maximal_file_size = maximal_file_size; + return params; + })); +} + +void StorageProvider::set_params(ProviderParams params, td::Promise promise) { + vm::CellBuilder b; + 
b.store_long(0x54cbf19b, 32); // const op::update_storage_params = 0x54cbf19b; + b.store_long(0, 64); // query_id + if (!params.to_builder(b)) { + promise.set_error(td::Status::Error("Failed to store params to builder")); + return; + } + LOG(INFO) << "Sending external message to update provider parameters: " << params.accept_new_contracts << ", " + << params.max_span << ", " << params.rate_per_mb_day << ", " << params.minimal_file_size << ", " + << params.maximal_file_size; + td::actor::send_closure(contract_wrapper_, &FabricContractWrapper::send_internal_message, main_address_, + td::make_refint(100'000'000), b.as_cellslice(), std::move(promise)); +} + +void StorageProvider::db_store_state() { + LOG(DEBUG) << "db_store_state last_lt=" << last_processed_lt_; + db_->begin_transaction().ensure(); + db_->set(create_hash_tl_object().as_slice(), + create_serialize_tl_object(last_processed_lt_)) + .ensure(); + db_->commit_transaction().ensure(); +} + +void StorageProvider::db_store_config() { + LOG(DEBUG) << "db_store_config"; + db_->begin_transaction().ensure(); + db_->set(create_hash_tl_object().as_slice(), + serialize_tl_object(config_.tl(), true)) + .ensure(); + db_->commit_transaction().ensure(); +} + +void StorageProvider::alarm() { + for (auto& p : contracts_) { + if (p.second.check_next_proof_at && p.second.check_next_proof_at.is_in_past()) { + p.second.check_next_proof_at = td::Timestamp::never(); + check_next_proof(p.first, p.second); + } + alarm_timestamp().relax(p.second.check_next_proof_at); + } +} + +void StorageProvider::process_transaction(tl_object_ptr transaction) { + std::string new_contract_address; + for (auto& message : transaction->out_msgs_) { + auto data = dynamic_cast(message->msg_data_.get()); + if (data == nullptr) { + continue; + } + auto r_body = vm::std_boc_deserialize(data->body_); + if (r_body.is_error()) { + LOG(ERROR) << "Invalid message body in tonlib response: " << r_body.move_as_error(); + continue; + } + td::Ref body = r_body.move_as_ok(); + vm::CellSlice cs = vm::load_cell_slice(body); + // const op::offer_storage_contract = 0x107c49ef; + if (cs.size() >= 32 && cs.prefetch_long(32) == 0x107c49ef) { + new_contract_address = message->destination_->account_address_; + } + } + if (!new_contract_address.empty()) { + auto P = td::PromiseCreator::lambda( + [SelfId = actor_id(this), lt = (td::uint64)transaction->transaction_id_->lt_](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Error during processing new storage contract, skipping: " << R.move_as_error(); + } + }); + on_new_storage_contract(ContractAddress::parse(new_contract_address).move_as_ok(), std::move(P)); + } + + last_processed_lt_ = transaction->transaction_id_->lt_; + db_store_state(); +} + +void StorageProvider::on_new_storage_contract(ContractAddress address, td::Promise promise, int max_retries) { + LOG(INFO) << "Processing new storage contract: " << address.to_string(); + get_storage_contract_data( + address, tonlib_client_, + [SelfId = actor_id(this), address, promise = std::move(promise), + max_retries](td::Result R) mutable { + if (R.is_error()) { + if (max_retries > 0) { + LOG(WARNING) << "Processing new storage contract: " << R.move_as_error() << ", retrying"; + delay_action( + [SelfId = std::move(SelfId), promise = std::move(promise), address = std::move(address), + max_retries]() mutable { + td::actor::send_closure(SelfId, &StorageProvider::on_new_storage_contract, std::move(address), + std::move(promise), max_retries - 1); + }, + td::Timestamp::in(5.0)); + } else { + 
promise.set_error(R.move_as_error()); + } + return; + } + td::actor::send_closure(SelfId, &StorageProvider::on_new_storage_contract_cont, address, R.move_as_ok(), + std::move(promise)); + }); +} + +void StorageProvider::on_new_storage_contract_cont(ContractAddress address, StorageContractData data, + td::Promise promise) { + auto it = contracts_.emplace(address, StorageContract()); + if (!it.second) { + promise.set_error(td::Status::Error(PSTRING() << "Storage contract already registered: " << address.to_string())); + return; + } + LOG(INFO) << "New storage contract " << address.to_string() << ", torrent hash: " << data.torrent_hash.to_hex(); + LOG(DEBUG) << "Stoage contract data: microchunk_hash=" << data.microchunk_hash << ", balance=" << data.balance + << ", file_size=" << data.file_size << ", next_proof=" << data.next_proof + << ", rate=" << data.rate_per_mb_day << ", max_span=" << data.max_span; + StorageContract& contract = it.first->second; + contract.torrent_hash = data.torrent_hash; + contract.microchunk_hash = data.microchunk_hash; + contract.state = StorageContract::st_downloading; + contract.created_time = (td::uint32)td::Clocks::system(); + contract.file_size = data.file_size; + contract.max_span = data.max_span; + contract.rate = data.rate_per_mb_day; + contracts_total_size_ += contract.file_size; + promise.set_result(td::Unit()); + + if (contracts_.size() <= config_.max_contracts && contracts_total_size_ <= config_.max_total_size) { + db_update_storage_contract(address, true); + init_new_storage_contract(address, contract); + } else { + if (contracts_.size() > config_.max_contracts) { + LOG(WARNING) << "Cannot add new storage contract: too many contracts (limit = " << config_.max_contracts << ")"; + } else { + LOG(WARNING) << "Cannot add new storage contract: total size exceeded (limit = " + << td::format::as_size(config_.max_total_size) << ")"; + } + contract.state = StorageContract::st_closing; + db_update_storage_contract(address, true); + do_close_storage_contract(address); + } +} + +void StorageProvider::db_update_storage_contract(const ContractAddress& address, bool update_list) { + LOG(DEBUG) << "db_update_storage_contract " << address.to_string() << " " << update_list; + db_->begin_transaction().ensure(); + if (update_list) { + std::vector> list; + for (const auto& t : contracts_) { + list.push_back(create_tl_object(t.first.wc, t.first.addr)); + } + db_->set(create_hash_tl_object().as_slice(), + create_serialize_tl_object(std::move(list))) + .ensure(); + } + auto key = create_hash_tl_object(address.wc, address.addr); + auto it = contracts_.find(address); + if (it == contracts_.end()) { + db_->erase(key.as_slice()).ensure(); + } else { + const StorageContract& contract = it->second; + db_->set(key.as_slice(), + create_serialize_tl_object( + contract.torrent_hash, contract.microchunk_hash, contract.created_time, (int)contract.state, + contract.file_size, contract.rate->to_dec_string(), contract.max_span)); + } + db_->commit_transaction().ensure(); +} + +void StorageProvider::db_update_microchunk_tree(const ContractAddress& address) { + LOG(DEBUG) << "db_update_microchunk_tree " << address.to_string(); + db_->begin_transaction().ensure(); + auto key = create_hash_tl_object(address.wc, address.addr); + auto it = contracts_.find(address); + if (it == contracts_.end() || it->second.microchunk_tree == nullptr) { + db_->erase(key.as_slice()).ensure(); + } else { + db_->set(key.as_slice(), create_serialize_tl_object( + 
vm::std_boc_serialize(it->second.microchunk_tree->get_root()).move_as_ok())); + } + db_->commit_transaction().ensure(); +} + +void StorageProvider::init_new_storage_contract(ContractAddress address, StorageContract& contract) { + CHECK(contract.state == StorageContract::st_downloading); + td::actor::send_closure(storage_manager_, &StorageManager::add_torrent_by_hash, contract.torrent_hash, "", false, + [](td::Result R) { + // Ignore errors: error can mean that the torrent already exists, other errors will be caught later + if (R.is_error()) { + LOG(DEBUG) << "Add torrent: " << R.move_as_error(); + } else { + LOG(DEBUG) << "Add torrent: OK"; + } + }); + td::actor::send_closure(storage_manager_, &StorageManager::set_active_download, contract.torrent_hash, true, + [SelfId = actor_id(this), address](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to init storage contract: " << R.move_as_error(); + td::actor::send_closure(SelfId, &StorageProvider::do_close_storage_contract, address); + return; + } + LOG(DEBUG) << "Set active download: OK"; + }); + td::actor::send_closure( + storage_manager_, &StorageManager::wait_for_completion, contract.torrent_hash, + [SelfId = actor_id(this), address, hash = contract.torrent_hash, microchunk_hash = contract.microchunk_hash, + manager = storage_manager_](td::Result R) { + if (R.is_error()) { + LOG(WARNING) << "Failed to download torrent " << hash.to_hex() << ": " << R.move_as_error(); + td::actor::send_closure(SelfId, &StorageProvider::do_close_storage_contract, address); + return; + } + LOG(DEBUG) << "Downloaded torrent " << hash; + td::actor::send_closure( + manager, &StorageManager::with_torrent, hash, + [SelfId, address, hash, microchunk_hash](td::Result R) { + auto r_microchunk_tree = [&]() -> td::Result { + TRY_RESULT(state, std::move(R)); + Torrent& torrent = state.torrent; + if (!torrent.is_completed() || torrent.get_included_size() != torrent.get_info().file_size) { + return td::Status::Error("unknown error"); + } + LOG(DEBUG) << "Building microchunk tree for " << hash; + TRY_RESULT(tree, MicrochunkTree::Builder::build_for_torrent(torrent)); + if (tree.get_root_hash() != microchunk_hash) { + return td::Status::Error("microchunk tree hash mismatch"); + } + return tree; + }(); + if (r_microchunk_tree.is_error()) { + LOG(WARNING) << "Failed to download torrent " << hash.to_hex() << ": " << R.move_as_error(); + td::actor::send_closure(SelfId, &StorageProvider::do_close_storage_contract, address); + } else { + td::actor::send_closure(SelfId, &StorageProvider::downloaded_torrent, address, + r_microchunk_tree.move_as_ok()); + } + }); + }); +} + +void StorageProvider::downloaded_torrent(ContractAddress address, MicrochunkTree microchunk_tree) { + auto it = contracts_.find(address); + if (it == contracts_.end()) { + LOG(WARNING) << "Contract " << address.to_string() << " does not exist anymore"; + return; + } + auto& contract = it->second; + LOG(INFO) << "Finished downloading torrent " << contract.torrent_hash.to_hex() << " for contract " + << address.to_string(); + contract.state = StorageContract::st_downloaded; + contract.microchunk_tree = std::make_shared(std::move(microchunk_tree)); + db_update_microchunk_tree(address); + db_update_storage_contract(address, false); + check_contract_active(address); +} + +void StorageProvider::check_contract_active(ContractAddress address, td::Timestamp retry_until, + td::Timestamp retry_false_until) { + get_storage_contract_data(address, tonlib_client_, + [=, SelfId = actor_id(this)](td::Result R) mutable { 
+ if (R.is_error()) { + LOG(WARNING) << "Failed to check that contract is active: " << R.move_as_error(); + if (retry_until && retry_until.is_in_past()) { + delay_action( + [=]() { + td::actor::send_closure(SelfId, &StorageProvider::check_contract_active, + address, retry_until, retry_false_until); + }, + td::Timestamp::in(5.0)); + } + return; + } + if (R.ok().active) { + td::actor::send_closure(SelfId, &StorageProvider::activated_storage_contract, address); + } else if (retry_false_until && retry_false_until.is_in_past()) { + delay_action( + [=]() { + td::actor::send_closure(SelfId, &StorageProvider::check_contract_active, address, + retry_until, retry_false_until); + }, + td::Timestamp::in(5.0)); + } else { + td::actor::send_closure(SelfId, &StorageProvider::activate_contract_cont, address); + } + }); +} + +void StorageProvider::activate_contract_cont(ContractAddress address) { + vm::CellBuilder b; + b.store_long(0x7a361688, 32); // const op::accept_storage_contract = 0x7a361688; + b.store_long(0, 64); // query_id + LOG(DEBUG) << "Sending op::accept_storage_contract to " << address.to_string(); + td::actor::send_closure( + contract_wrapper_, &FabricContractWrapper::send_internal_message, address, td::make_refint(100'000'000), + b.as_cellslice(), [SelfId = actor_id(this), address](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to send activate message, retrying later: " << R.move_as_error(); + delay_action([=]() { td::actor::send_closure(SelfId, &StorageProvider::activate_contract_cont, address); }, + td::Timestamp::in(10.0)); + return; + } + td::actor::send_closure(SelfId, &StorageProvider::check_contract_active, address, td::Timestamp::in(60.0), + td::Timestamp::in(40.0)); + }); +} + +void StorageProvider::activated_storage_contract(ContractAddress address) { + auto it = contracts_.find(address); + if (it == contracts_.end()) { + LOG(WARNING) << "Contract " << address.to_string() << " does not exist anymore"; + return; + } + LOG(INFO) << "Storage contract " << address.to_string() << " is active"; + auto& contract = it->second; + contract.state = StorageContract::st_active; + db_update_storage_contract(address, false); + alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(1.0)); +} + +void StorageProvider::do_close_storage_contract(ContractAddress address) { + auto it = contracts_.find(address); + if (it == contracts_.end()) { + LOG(WARNING) << "Contract " << address.to_string() << " does not exist anymore"; + return; + } + LOG(INFO) << "Closing storage contract " << address.to_string(); + auto& contract = it->second; + contract.state = StorageContract::st_closing; + db_update_storage_contract(address, false); + check_storage_contract_deleted(address); +} + +void StorageProvider::send_close_storage_contract(ContractAddress address) { + vm::CellBuilder b; + b.store_long(0x79f937ea, 32); // const op::close_contract = 0x79f937ea; + b.store_long(0, 64); // query_id + LOG(DEBUG) << "Sending op::close_contract to " << address.to_string(); + td::actor::send_closure( + contract_wrapper_, &FabricContractWrapper::send_internal_message, address, td::make_refint(100'000'000), + b.as_cellslice(), [SelfId = actor_id(this), address](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to send close message, retrying later: " << R.move_as_error(); + delay_action([=]() { td::actor::send_closure(SelfId, &StorageProvider::activate_contract_cont, address); }, + td::Timestamp::in(10.0)); + return; + } + td::actor::send_closure(SelfId, 
&StorageProvider::check_storage_contract_deleted, address, + td::Timestamp::in(40.0)); + }); +} + +void StorageProvider::check_storage_contract_deleted(ContractAddress address, td::Timestamp retry_false_until) { + check_contract_exists(address, tonlib_client_, [=, SelfId = actor_id(this)](td::Result R) { + if (R.is_error()) { + delay_action( + [=]() { + td::actor::send_closure(SelfId, &StorageProvider::check_storage_contract_deleted, address, + retry_false_until); + }, + td::Timestamp::in(10.0)); + return; + } + if (!R.move_as_ok()) { + td::actor::send_closure(SelfId, &StorageProvider::storage_contract_deleted, address); + } else if (retry_false_until && !retry_false_until.is_in_past()) { + delay_action( + [=]() { + td::actor::send_closure(SelfId, &StorageProvider::check_storage_contract_deleted, address, + retry_false_until); + }, + td::Timestamp::in(5.0)); + } else { + td::actor::send_closure(SelfId, &StorageProvider::send_close_storage_contract, address); + } + }); +} + +void StorageProvider::storage_contract_deleted(ContractAddress address) { + auto it = contracts_.find(address); + if (it == contracts_.end()) { + return; + } + LOG(INFO) << "Storage contract " << address.to_string() << " was deleted"; + td::Bits256 hash = it->second.torrent_hash; + contracts_total_size_ -= it->second.file_size; + contracts_.erase(it); + bool delete_torrent = true; + for (const auto& p : contracts_) { + if (p.second.torrent_hash == hash) { + delete_torrent = false; + break; + } + } + if (delete_torrent) { + LOG(INFO) << "Deleting torrent " << hash.to_hex(); + td::actor::send_closure(storage_manager_, &StorageManager::remove_torrent, hash, true, + [](td::Result R) {}); + } + db_update_storage_contract(address, true); +} + +void StorageProvider::check_next_proof(ContractAddress address, StorageContract& contract) { + if (contract.state != StorageContract::st_active) { + return; + } + CHECK(contract.microchunk_tree != nullptr); + get_storage_contract_data( + address, tonlib_client_, [SelfId = actor_id(this), address](td::Result R) { + td::actor::send_closure(SelfId, &StorageProvider::got_next_proof_info, address, std::move(R)); + }); +} + +void StorageProvider::got_next_proof_info(ContractAddress address, td::Result R) { + auto it = contracts_.find(address); + if (it == contracts_.end() || it->second.state != StorageContract::st_active) { + return; + } + auto& contract = it->second; + if (R.is_error()) { + LOG(ERROR) << "get_next_proof_info for " << address.to_string() << ": " << R.move_as_error(); + check_contract_exists(address, tonlib_client_, [SelfId = actor_id(this), address](td::Result R) { + td::actor::send_closure(SelfId, &StorageProvider::got_contract_exists, address, std::move(R)); + }); + return; + } + auto data = R.move_as_ok(); + if (data.balance->sgn() == 0) { + LOG(INFO) << "Balance of contract " << address.to_string() << " is zero, closing"; + do_close_storage_contract(address); + return; + } + td::uint32 send_at = data.last_proof_time + data.max_span / 2, now = (td::uint32)td::Clocks::system(); + if (now < send_at) { + LOG(DEBUG) << "Will send proof in " << send_at - now << "s (last_proof_time=" << data.last_proof_time + << ", max_span=" << data.max_span << ")"; + alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(send_at - now + 2)); + return; + } + + LOG(INFO) << "Sending proof for " << address.to_string() << ": next_proof=" << data.next_proof + << ", max_span=" << data.max_span << ", last_proof_time=" << data.last_proof_time << " (" + << now - data.last_proof_time 
<< "s ago)"; + td::actor::send_closure( + storage_manager_, &StorageManager::with_torrent, contract.torrent_hash, + [=, SelfId = actor_id(this), tree = contract.microchunk_tree](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Missing torrent for " << address.to_string(); + return; + } + auto state = R.move_as_ok(); + td::uint64 l = data.next_proof / MicrochunkTree::MICROCHUNK_SIZE * MicrochunkTree::MICROCHUNK_SIZE; + td::uint64 r = l + MicrochunkTree::MICROCHUNK_SIZE; + auto proof = tree->get_proof(l, r, state.torrent); + td::actor::send_closure(SelfId, &StorageProvider::got_next_proof, address, std::move(proof)); + }); +} + +void StorageProvider::got_contract_exists(ContractAddress address, td::Result R) { + auto it = contracts_.find(address); + if (it == contracts_.end() || it->second.state != StorageContract::st_active) { + return; + } + auto& contract = it->second; + if (R.is_error()) { + LOG(ERROR) << "Check contract exists for " << address.to_string() << ": " << R.move_as_error(); + alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(10.0)); + return; + } + if (R.ok()) { + alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(10.0)); + return; + } + storage_contract_deleted(address); +} + +void StorageProvider::got_next_proof(ContractAddress address, td::Result> R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to build proof: " << R.move_as_error(); + return; + } + LOG(INFO) << "Got proof, sending"; + + vm::CellBuilder b; + b.store_long(0x419d5d4d, 32); // const op::proof_storage = 0x419d5d4d; + b.store_long(0, 64); // query_id + b.store_ref(R.move_as_ok()); + td::actor::send_closure(contract_wrapper_, &FabricContractWrapper::send_internal_message, address, + td::make_refint(100'000'000), b.as_cellslice(), + [SelfId = actor_id(this), address](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to send proof message: " << R.move_as_error(); + } else { + LOG(DEBUG) << "Proof for " << address.to_string() << " was sent"; + } + td::actor::send_closure(SelfId, &StorageProvider::sent_next_proof, address); + }); +} + +void StorageProvider::sent_next_proof(ContractAddress address) { + auto it = contracts_.find(address); + if (it == contracts_.end() || it->second.state != StorageContract::st_active) { + return; + } + auto& contract = it->second; + alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(30.0)); +} + +void StorageProvider::get_provider_info(bool with_balances, bool with_contracts, + td::Promise> promise) { + auto result = std::make_shared(); + td::MultiPromise mp; + auto ig = mp.init_guard(); + ig.add_promise(promise.wrap( + [result](td::Unit) { return create_tl_object(std::move(*result)); })); + result->address_ = main_address_.to_string(); + result->config_ = config_.tl(); + result->contracts_count_ = (int)contracts_.size(); + result->contracts_total_size_ = contracts_total_size_; + if (with_balances) { + get_contract_balance(main_address_, tonlib_client_, ig.get_promise().wrap([result](td::RefInt256 balance) { + result->balance_ = balance->to_dec_string(); + return td::Unit(); + })); + } else { + result->balance_ = "-1"; + } + if (with_contracts) { + for (const auto& p : contracts_) { + auto obj = create_tl_object(); + const StorageContract& contract = p.second; + obj->address_ = p.first.to_string(); + obj->state_ = (int)contract.state; + obj->torrent_ = contract.torrent_hash; + obj->created_time_ = contract.created_time; + obj->rate_ = contract.rate->to_dec_string(); + obj->max_span_ = 
contract.max_span; + obj->file_size_ = contract.file_size; + obj->downloaded_size_ = obj->file_size_; + obj->client_balance_ = "-1"; + obj->contract_balance_ = "-1"; + result->contracts_.push_back(std::move(obj)); + } + size_t i = 0; + for (const auto& p : contracts_) { + const StorageContract& contract = p.second; + if (contract.state == StorageContract::st_downloading) { + td::actor::send_closure(storage_manager_, &StorageManager::with_torrent, contract.torrent_hash, + [i, result, promise = ig.get_promise()](td::Result R) mutable { + if (R.is_error()) { + result->contracts_[i]->downloaded_size_ = 0; + } else { + auto state = R.move_as_ok(); + result->contracts_[i]->downloaded_size_ = state.torrent.get_included_ready_size(); + } + promise.set_result(td::Unit()); + }); + } + if (with_balances) { + get_contract_balance(p.first, tonlib_client_, + [i, result, promise = ig.get_promise()](td::Result R) mutable { + if (R.is_ok()) { + result->contracts_[i]->contract_balance_ = R.ok()->to_dec_string(); + } + promise.set_result(td::Unit()); + }); + get_storage_contract_data(p.first, tonlib_client_, + [i, result, promise = ig.get_promise()](td::Result R) mutable { + auto S = [&]() -> td::Status { + TRY_RESULT(data, std::move(R)); + result->contracts_[i]->client_balance_ = data.balance->to_dec_string(); + return td::Status::OK(); + }(); + promise.set_result(td::Unit()); + }); + } + i += 1; + } + } +} + +void StorageProvider::set_provider_config(Config config, td::Promise promise) { + config_ = config; + LOG(INFO) << "Changing provider config: max_contracts=" << config_.max_contracts + << ", max_total_size=" << config_.max_total_size; + db_store_config(); + promise.set_result(td::Unit()); +} + +void StorageProvider::withdraw(ContractAddress address, td::Promise promise) { + auto it = contracts_.find(address); + if (it == contracts_.end() || it->second.state != StorageContract::st_active) { + promise.set_error(td::Status::Error("No such storage contract")); + return; + } + if (it->second.state != StorageContract::st_active) { + promise.set_error(td::Status::Error("Storage contract is not active")); + return; + } + vm::CellBuilder b; + b.store_long(0x46ed2e94, 32); // const op::withdraw = 0x46ed2e94; + b.store_long(0, 64); // query_id + LOG(INFO) << "Sending op::withdraw to storage contract " << address.to_string(); + td::actor::send_closure(contract_wrapper_, &FabricContractWrapper::send_internal_message, address, + td::make_refint(100'000'000), b.as_cellslice(), std::move(promise)); +} + +void StorageProvider::send_coins(ContractAddress dest, td::RefInt256 amount, std::string message, + td::Promise promise) { + if (amount->sgn() < 0) { + promise.set_error(td::Status::Error("Amount is negative")); + return; + } + vm::CellBuilder b; + if (!message.empty()) { + b.store_long(0, 32); + if (b.remaining_bits() < message.size() * 8) { + promise.set_error(td::Status::Error("Message is too long (max 122 bytes)")); + return; + } + b.store_bytes(td::Slice(message)); + } + LOG(INFO) << "Sending " << amount << " nanoTON to " << dest.to_string(); + td::actor::send_closure(contract_wrapper_, &FabricContractWrapper::send_internal_message, dest, amount, + b.finalize_novm(), std::move(promise)); +} + +void StorageProvider::close_storage_contract(ContractAddress address, td::Promise promise) { + if (!contracts_.count(address)) { + promise.set_error(td::Status::Error("No such storage contract")); + return; + } + do_close_storage_contract(address); + promise.set_result(td::Unit()); +} + 
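// Illustration only (not part of the diff): every op-bearing message body built
// above -- set_params, activate_contract_cont, send_close_storage_contract,
// got_next_proof and withdraw -- starts with the same prefix: a 32-bit op code
// from smartcont/constants.fc followed by a 64-bit query_id of 0; the
// op-specific payload is appended afterwards and the result is handed to
// FabricContractWrapper::send_internal_message as a cell slice.
// A hypothetical helper capturing that convention (a sketch, assuming the
// surrounding StorageProvider.cpp includes already provide vm::CellBuilder):
static void store_op_prefix(vm::CellBuilder& b, unsigned op) {
  b.store_long(op, 32);  // op code, e.g. op::proof_storage = 0x419d5d4d
  b.store_long(0, 64);   // query_id; the daemon always sends 0
}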
+StorageProvider::Config::Config(const tl_object_ptr& obj) + : max_contracts(obj->max_contracts_), max_total_size(obj->max_total_size_) { +} + +tl_object_ptr StorageProvider::Config::tl() const { + return create_tl_object(max_contracts, max_total_size); +} diff --git a/storage/storage-daemon/StorageProvider.h b/storage/storage-daemon/StorageProvider.h new file mode 100644 index 00000000..a7d63f88 --- /dev/null +++ b/storage/storage-daemon/StorageProvider.h @@ -0,0 +1,127 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ + +#pragma once +#include "td/actor/actor.h" +#include "storage/db.h" +#include "tonlib/tonlib/TonlibClientWrapper.h" +#include "StorageManager.h" +#include "keyring/keyring.h" +#include "smc-util.h" +#include "storage/MicrochunkTree.h" + +using namespace ton; + +struct ProviderParams { + bool accept_new_contracts = false; + td::RefInt256 rate_per_mb_day = td::zero_refint(); + td::uint32 max_span = 0; + td::uint64 minimal_file_size = 0; + td::uint64 maximal_file_size = 0; + + static td::Result create(const tl_object_ptr& obj); + tl_object_ptr tl() const; + bool to_builder(vm::CellBuilder& b) const; +}; + +class StorageProvider : public td::actor::Actor { + public: + struct Config { + td::uint32 max_contracts = 1000; + td::uint64 max_total_size = 128LL << 30; + Config() = default; + explicit Config(const tl_object_ptr& obj); + tl_object_ptr tl() const; + }; + + StorageProvider(ContractAddress address, std::string db_root, + td::actor::ActorId tonlib_client, + td::actor::ActorId storage_manager, td::actor::ActorId keyring); + + void start_up() override; + void alarm() override; + void get_params(td::Promise promise); + static void get_provider_params(td::actor::ActorId, ContractAddress address, + td::Promise promise); + void set_params(ProviderParams params, td::Promise promise); + + void get_provider_info(bool with_balances, bool with_contracts, + td::Promise> promise); + void set_provider_config(Config config, td::Promise promise); + void withdraw(ContractAddress address, td::Promise promise); + void send_coins(ContractAddress dest, td::RefInt256 amount, std::string message, td::Promise promise); + void close_storage_contract(ContractAddress address, td::Promise promise); + + private: + ContractAddress main_address_; + std::string db_root_; + td::actor::ActorId tonlib_client_; + td::actor::ActorId storage_manager_; + td::actor::ActorId keyring_; + td::Promise init_promise_; + Config config_; + + std::unique_ptr db_; + td::actor::ActorOwn contract_wrapper_; + td::uint64 last_processed_lt_ = 0; + + struct StorageContract { + enum State { st_downloading = 0, st_downloaded = 1, st_active = 2, st_closing = 3 }; + td::Bits256 torrent_hash; + td::Bits256 microchunk_hash; + td::uint32 created_time; + State state; + + td::uint64 file_size = 0; + td::uint32 max_span = 0; + td::RefInt256 rate = td::zero_refint(); + + // TODO: Compute 
and store only one tree for duplicating torrents + std::shared_ptr microchunk_tree; + + td::Timestamp check_next_proof_at = td::Timestamp::never(); + }; + std::map contracts_; + td::uint64 contracts_total_size_ = 0; + + void process_transaction(tl_object_ptr transaction); + + void db_store_state(); + void db_store_config(); + void db_update_storage_contract(const ContractAddress& address, bool update_list); + void db_update_microchunk_tree(const ContractAddress& address); + + void on_new_storage_contract(ContractAddress address, td::Promise promise, int max_retries = 10); + void on_new_storage_contract_cont(ContractAddress address, StorageContractData data, td::Promise promise); + void init_new_storage_contract(ContractAddress address, StorageContract& contract); + void downloaded_torrent(ContractAddress address, MicrochunkTree microchunk_tree); + void check_contract_active(ContractAddress address, td::Timestamp retry_until = td::Timestamp::in(30.0), + td::Timestamp retry_false_until = td::Timestamp::never()); + void activate_contract_cont(ContractAddress address); + void activated_storage_contract(ContractAddress address); + void do_close_storage_contract(ContractAddress address); + void check_storage_contract_deleted(ContractAddress address, + td::Timestamp retry_false_until = td::Timestamp::never()); + void send_close_storage_contract(ContractAddress address); + void storage_contract_deleted(ContractAddress address); + + void check_next_proof(ContractAddress address, StorageContract& contract); + void got_next_proof_info(ContractAddress address, td::Result R); + void got_contract_exists(ContractAddress address, td::Result R); + void got_next_proof(ContractAddress address, td::Result> R); + void sent_next_proof(ContractAddress address); +}; diff --git a/storage/storage-daemon/smartcont/.gitignore b/storage/storage-daemon/smartcont/.gitignore new file mode 100644 index 00000000..5fa1d61f --- /dev/null +++ b/storage/storage-daemon/smartcont/.gitignore @@ -0,0 +1 @@ +provider-code.h diff --git a/storage/storage-daemon/smartcont/README.md b/storage/storage-daemon/smartcont/README.md new file mode 100644 index 00000000..e8950bda --- /dev/null +++ b/storage/storage-daemon/smartcont/README.md @@ -0,0 +1,23 @@ +# Storage Provider +A simple smart-contract system for concluding storage agreements. +- guarantees that the provider stores the file +- no storage - no payment +- no penalties: if the provider doesn't store the file, the client can stop payment at any time +- no control over whether the provider uploads the file: the client can stop payment at any time if not satisfied + +## Storage Agreements Fabric + +The storage provider deploys a storage agreements fabric. Any client may request the fabric to deploy a storage agreement contract. +The fabric provides the get-method `get_storage_params`, which returns +- `accept_new_contracts?` - whether the provider accepts new contracts +- `rate_per_mb_day` - price in nanoTON per megabyte per day +- `max_span` - maximal paid time span between proofs of file storage +- `minimal_file_size` - minimal file size accepted by the provider +- `maximal_file_size` - maximal file size accepted by the provider + +## Storage agreement +The agreement contract holds a client account and accepts deposits to this account. + +It also stores the merkle root of the file and allows the provider to withdraw money from the client account by presenting a merkle proof of file storage. + +The client can stop the agreement at any time. 
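To make the pricing model above concrete, here is a minimal C++ sketch of the reward a provider earns for one accepted storage proof. It is not part of this patch and all names are illustrative; it only mirrors the integer formula used by `storage-contract.fc` further below, `bounty = muldiv(file_size * rate_per_mb_day, actual_span, 24 * 60 * 60 * 1024 * 1024)`, where a megabyte is 2^20 bytes and a day is 86400 seconds.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

// Illustrative sketch (not part of the contract code): reward paid to the
// provider for one accepted storage proof, following storage-contract.fc.
// rate_per_mb_day is in nanoTON per megabyte (2^20 bytes) per day.
std::uint64_t proof_bounty(std::uint64_t file_size_bytes, std::uint64_t rate_per_mb_day,
                           std::uint64_t seconds_since_last_proof, std::uint64_t max_span) {
  // The contract caps the paid span at max_span.
  std::uint64_t actual_span = std::min(seconds_since_last_proof, max_span);
  // bounty = file_size * rate_per_mb_day * actual_span / (86400 * 1024 * 1024).
  // unsigned __int128 (gcc/clang extension) avoids overflow of the intermediate product.
  unsigned __int128 bounty = (unsigned __int128)file_size_bytes * rate_per_mb_day * actual_span /
                             (86400ULL * 1024 * 1024);
  return (std::uint64_t)bounty;
}

int main() {
  // Example: 1 GiB file, 1'000'000 nanoTON per MB per day, one proof per hour (max_span = 3600 s).
  std::uint64_t bounty = proof_bounty(1ULL << 30, 1'000'000, 3600, 3600);
  std::cout << "bounty per proof: " << bounty << " nanoTON\n";  // prints 42666666 (about 0.0427 TON)
  return 0;
}
```

Put differently, as long as proofs keep arriving at least every `max_span` seconds, the provider's income for a fully proven file works out to `file_size / 2^20 * rate_per_mb_day` nanoTON per day.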
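The "merkle proof of file storage" mentioned above is, in `storage-contract.fc` below, a merkle proof over a dictionary keyed by 64-byte chunk indices (`CHUNK_SIZE = 64`): the key width is the smallest `key_len` with `64 << key_len >= file_size`, and the provider must exhibit the chunk containing the contract's current `next_proof` byte. A small sketch of that index arithmetic, with illustrative names and assuming nothing beyond what the contract code states:

```cpp
#include <cstdint>
#include <iostream>

constexpr std::uint64_t kChunkSize = 64;  // CHUNK_SIZE in storage-contract.fc

// Smallest key_len (in bits) such that (kChunkSize << key_len) >= file_size,
// i.e. every chunk index of the file fits into key_len bits.
int dict_key_len(std::uint64_t file_size) {
  int key_len = 0;
  while ((kChunkSize << key_len) < file_size) {
    ++key_len;
  }
  return key_len;
}

int main() {
  std::uint64_t file_size = 1ULL << 30;       // 1 GiB file
  std::uint64_t next_proof_byte = 123456789;  // next_proof value stored in the contract
  std::cout << "dict key length: " << dict_key_len(file_size) << " bits\n";  // 24
  std::cout << "chunk to prove:  " << next_proof_byte / kChunkSize << "\n";  // 1929012
  return 0;
}
```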
diff --git a/storage/storage-daemon/smartcont/build.sh b/storage/storage-daemon/smartcont/build.sh new file mode 100755 index 00000000..3ebd3aa4 --- /dev/null +++ b/storage/storage-daemon/smartcont/build.sh @@ -0,0 +1,4 @@ +func -SPA -o storage-contract.fif ../../../crypto/smartcont/stdlib.fc storage-contract.fc && \ + (echo "\"storage-contract.fif\" include boc>B \"storage-contract-code.boc\" B>file" | fift) && \ + func -SPA -o storage-provider.fif ../../../crypto/smartcont/stdlib.fc storage-provider.fc && \ + (echo "\"storage-provider.fif\" include boc>B \"storage-provider-code.boc\" B>file" | fift) diff --git a/storage/storage-daemon/smartcont/constants.fc b/storage/storage-daemon/smartcont/constants.fc new file mode 100644 index 00000000..479cdfa3 --- /dev/null +++ b/storage/storage-daemon/smartcont/constants.fc @@ -0,0 +1,23 @@ +const op::offer_storage_contract = 0x107c49ef; +const op::close_contract = 0x79f937ea; +const op::contract_deployed = 0xbf7bd0c1; +const op::storage_contract_confirmed = 0xd4caedcd; +const op::reward_withdrawal = 0xa91baf56; +const op::storage_contract_terminated = 0xb6236d63; +const op::accept_storage_contract = 0x7a361688; +const op::withdraw = 0x46ed2e94; +const op::proof_storage = 0x419d5d4d; + +const op::update_pubkey = 0x53f34cd6; +const op::update_storage_params = 0x54cbf19b; + +const error::not_enough_money = 1001; +const error::unauthorized = 401; +const error::wrong_proof = 1002; +const error::contract_not_active = 1003; +const error::file_too_small = 1004; +const error::file_too_big = 1005; +const error::no_new_contracts = 1006; +const error::contract_already_active = 1007; +const error::no_microchunk_hash = 1008; +const error::provider_params_changed = 1009; diff --git a/storage/storage-daemon/smartcont/embed-provider-code.cpp b/storage/storage-daemon/smartcont/embed-provider-code.cpp new file mode 100644 index 00000000..9cc38930 --- /dev/null +++ b/storage/storage-daemon/smartcont/embed-provider-code.cpp @@ -0,0 +1,40 @@ +#include +#include +#include + +int main(int argc, char** argv) { + if (argc != 3) { + std::cerr << "Usage: generate-provider-code in.boc out.h\n"; + return 1; + } + std::ifstream in(argv[1], std::ios_base::ate | std::ios_base::binary); + size_t size = in.tellg(); + in.seekg(0, std::ios::beg); + std::vector buf(size); + if (!in.read(buf.data(), size)) { + std::cerr << "Error: cannot read input\n"; + return 1; + } + in.close(); + + std::ofstream out(argv[2]); + out << "// Auto-generated by embed-provider-code\n"; + out << "#pragma once\n"; + out << "const unsigned char STORAGE_PROVIDER_CODE[" << size << "] = {\n "; + for (size_t i = 0; i < size; ++i) { + if (i != 0) { + out << ","; + if (i % 32 == 31) { + out << "\n "; + } + } + out << (int)(unsigned char)buf[i]; + } + out << "\n};\n"; + if (!out) { + std::cerr << "Error: cannot write output\n"; + return 1; + } + out.close(); + return 0; +} diff --git a/storage/storage-daemon/smartcont/storage-contract-code.boc b/storage/storage-daemon/smartcont/storage-contract-code.boc new file mode 100644 index 00000000..03dd18ea Binary files /dev/null and b/storage/storage-daemon/smartcont/storage-contract-code.boc differ diff --git a/storage/storage-daemon/smartcont/storage-contract.fc b/storage/storage-daemon/smartcont/storage-contract.fc new file mode 100644 index 00000000..41e3ee5b --- /dev/null +++ b/storage/storage-daemon/smartcont/storage-contract.fc @@ -0,0 +1,266 @@ +#include "constants.fc"; + + +const CHUNK_SIZE = 64; +const fee::receipt_value = 20000000; +const fee::storage = 
10000000; + + + +{- + storage#_ active:Bool + balance:Coins provider:MsgAddress + merkle_hash:uint256 file_size:uint64 next_proof_byte:uint64 + rate_per_mb_day:Coins + max_span:uint32 last_proof_time:uint32 + ^[client:MsgAddress torrent_hash:uint256] = Storage; +-} + +(slice, int) begin_parse_special(cell c) asm "x{D739} s,"; + +int check_proof(int merkle_hash, int byte_to_proof, int file_size, cell file_dict_proof) { + (slice cs, int special) = file_dict_proof.begin_parse_special(); + if (~ special) { + return false; + } + if (cs~load_uint(8) != 3) { ;; Merkle proof + return false; + } + if (cs~load_uint(256) != merkle_hash) { + return false; + } + cell file_dict = cs~load_ref(); + int key_len = 0; + while ((CHUNK_SIZE << key_len) < file_size) { + key_len += 1; + } + (slice data, int found?) = file_dict.udict_get?(key_len, byte_to_proof / CHUNK_SIZE); + if(found?) { + return true; + } + return false; +} + +() add_to_balance(int amount) impure inline_ref { + var ds = get_data().begin_parse(); + var (active, balance, residue) = (ds~load_int(1), ds~load_grams(), ds); + balance += amount; + begin_cell() + .store_int(active, 1) + .store_coins(balance) + .store_slice(residue) + .end_cell().set_data(); +} + +(slice, int) get_client_data(ds) { + ds = ds.preload_ref().begin_parse(); + return (ds~load_msg_addr(), ds~load_uint(256)); +} + +() recv_internal(int msg_value, cell in_msg_full, slice in_msg_body) impure { + slice cs = in_msg_full.begin_parse(); + int flags = cs~load_uint(4); + + if (flags & 1) { ;; ignore all bounced messages + return (); + } + slice sender_address = cs~load_msg_addr(); + + if (in_msg_body.slice_empty?()) { + return add_to_balance(msg_value); + } + int op = in_msg_body~load_uint(32); + if (op == 0) { + return add_to_balance(msg_value); + } + + int query_id = in_msg_body~load_uint(64); + + if(op == op::offer_storage_contract) { + add_to_balance(msg_value - 2 * fee::receipt_value); + var (client, torrent_hash) = get_client_data(get_data().begin_parse()); + var msg = begin_cell() + .store_uint(0x18, 6) + .store_slice(client) + .store_coins(fee::receipt_value) + .store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1) + .store_uint(op::contract_deployed, 32) + .store_uint(query_id, 64) + .store_uint(torrent_hash, 256) + .end_cell(); + send_raw_message(msg, 0); + } + + if (op == op::accept_storage_contract) { + var ds = get_data().begin_parse(); + (int active, int balance, slice provider, slice rest) = + (ds~load_int(1), ds~load_coins(), ds~load_msg_addr(), ds); + throw_unless(error::contract_already_active, ~ active); + throw_unless(error::unauthorized, equal_slice_bits(sender_address, provider)); + begin_cell() + .store_int(true, 1) + .store_coins(balance) + .store_slice(provider) + .store_slice(rest) + .end_cell().set_data(); + var (client, torrent_hash) = get_client_data(rest); + var msg = begin_cell() + .store_uint(0x18, 6) + .store_slice(client) + .store_coins(fee::receipt_value) + .store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1) + .store_uint(op::storage_contract_confirmed, 32) + .store_uint(cur_lt(), 64) + .store_uint(torrent_hash, 256) + .end_cell(); + send_raw_message(msg, 0); + } + + if (op == op::close_contract) { + var ds = get_data().begin_parse(); + (int active, int balance, slice provider, slice rest) = + (ds~load_int(1), ds~load_coins(), ds~load_msg_addr(), ds); + var (client, torrent_hash) = get_client_data(rest); + throw_unless(error::unauthorized, equal_slice_bits(sender_address, provider) | equal_slice_bits(sender_address, client)); + var client_msg = begin_cell() + 
.store_uint(0x18, 6) + .store_slice(client) + .store_coins(balance) + .store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1) + .store_uint(op::storage_contract_terminated, 32) + .store_uint(cur_lt(), 64) + .store_uint(torrent_hash, 256) + .end_cell(); + if(~ active) { + return send_raw_message(client_msg, 128 + 32); + } + send_raw_message(client_msg, 64); + var provider_msg = begin_cell() + .store_uint(0x18, 6) + .store_slice(provider) + .store_coins(0) + .store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1) + .store_uint(op::storage_contract_terminated, 32) + .store_uint(cur_lt(), 64) + .store_uint(torrent_hash, 256) + .end_cell(); + return send_raw_message(provider_msg, 128 + 32); + } + + if (op == op::withdraw) { + var ds = get_data().begin_parse(); + (int active, int balance, slice provider) = (ds~load_int(1), ds~load_coins(), ds~load_msg_addr()); + throw_unless(error::contract_not_active, active); + throw_unless(error::unauthorized, equal_slice_bits(sender_address, provider)); + if(balance > 0) { + raw_reserve(balance + fee::storage, 2); + } + var msg = begin_cell() + .store_uint(0x18, 6) + .store_slice(provider) + .store_coins(fee::receipt_value) + .store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1) + .store_uint(op::reward_withdrawal, 32) + .store_uint(query_id, 64) + .end_cell(); + send_raw_message(msg, 128 + 32); + } + + if (op == op::proof_storage) { + cell file_dict_proof = in_msg_body~load_ref(); + var ds = get_data().begin_parse(); + var (active, + balance, + provider, + merkle_hash, + file_size, + next_proof, + rate_per_mb_day, + max_span, + last_proof_time, + client_data) = (ds~load_int(1), + ds~load_coins(), + ds~load_msg_addr(), + ds~load_uint(256), + ds~load_uint(64), + ds~load_uint(64), + ds~load_coins(), + ds~load_uint(32), + ds~load_uint(32), + ds~load_ref()); + throw_unless(error::contract_not_active, active); + throw_unless(error::unauthorized, equal_slice_bits(sender_address, provider)); + throw_unless(error::wrong_proof, check_proof(merkle_hash, next_proof, file_size, file_dict_proof)); + next_proof = rand(file_size); + int actual_span = min(now() - last_proof_time, max_span); + int bounty = muldiv(file_size * rate_per_mb_day, actual_span, 24 * 60 * 60 * 1024 * 1024); + balance = max(0, balance - bounty); + last_proof_time = now(); + begin_cell() + .store_int(true, 1) + .store_coins(balance) + .store_slice(provider) + .store_uint(merkle_hash, 256) + .store_uint(file_size, 64) + .store_uint(next_proof, 64) + .store_coins(rate_per_mb_day) + .store_uint(max_span, 32) + .store_uint(last_proof_time, 32) + .store_ref(client_data) + .end_cell().set_data(); + + ;; Send remaining balance back + cell msg = begin_cell() + .store_uint(0x18, 6) + .store_slice(sender_address) + .store_uint(0, 4 + 1 + 4 + 4 + 64 + 32 + 1 + 1) + .end_cell(); + send_raw_message(msg, 64 + 2); + } +} + +_ get_storage_contract_data() method_id { + var ds = get_data().begin_parse(); + var (active, + balance, + provider, + merkle_hash, + file_size, + next_proof, + rate_per_mb_day, + max_span, + last_proof_time, + rest) = (ds~load_int(1), + ds~load_coins(), + ds~load_msg_addr(), + ds~load_uint(256), + ds~load_uint(64), + ds~load_uint(64), + ds~load_coins(), + ds~load_uint(32), + ds~load_uint(32), + ds); + var (client, torrent_hash) = get_client_data(rest); + return (active, balance, provider, merkle_hash, file_size, + next_proof, rate_per_mb_day, max_span, last_proof_time, + client, torrent_hash); +} + +_ get_torrent_hash() method_id { + var (active, balance, provider, merkle_hash, file_size, + next_proof, rate_per_mb_day, max_span, 
last_proof_time, + client, torrent_hash) = get_storage_contract_data(); + return torrent_hash; +} + +_ is_active() method_id { + return get_data().begin_parse().preload_int(1); +} + +;; next_proof, last_proof_time, max_span +_ get_next_proof_info() method_id { + var (active, balance, provider, merkle_hash, file_size, + next_proof, rate_per_mb_day, max_span, last_proof_time, + client, torrent_hash) = get_storage_contract_data(); + return (next_proof, last_proof_time, max_span); +} \ No newline at end of file diff --git a/storage/storage-daemon/smartcont/storage-contract.fif b/storage/storage-daemon/smartcont/storage-contract.fif new file mode 100644 index 00000000..5dd86983 --- /dev/null +++ b/storage/storage-daemon/smartcont/storage-contract.fif @@ -0,0 +1,421 @@ +"Asm.fif" include +// automatically generated from `../../../crypto/smartcont/stdlib.fc` `storage-contract.fc` incl:`constants.fc` +PROGRAM{ + DECLPROC check_proof + DECLPROC add_to_balance + DECLPROC get_client_data + DECLPROC recv_internal + 86593 DECLMETHOD get_storage_contract_data + 71463 DECLMETHOD get_torrent_hash + 122058 DECLMETHOD is_active + 81490 DECLMETHOD get_next_proof_info + check_proof PROC:<{ + // merkle_hash byte_to_proof file_size file_dict_proof + x{D739} s, // merkle_hash byte_to_proof file_size cs special + NOT // merkle_hash byte_to_proof file_size cs _7 + IFJMP:<{ // merkle_hash byte_to_proof file_size cs + 4 BLKDROP // + FALSE // _8 + }> // merkle_hash byte_to_proof file_size cs + 8 LDU // merkle_hash byte_to_proof file_size _9 cs + SWAP // merkle_hash byte_to_proof file_size cs _9 + 3 NEQINT // merkle_hash byte_to_proof file_size cs _13 + IFJMP:<{ // merkle_hash byte_to_proof file_size cs + 4 BLKDROP // + FALSE // _14 + }> // merkle_hash byte_to_proof file_size cs + 256 LDU // merkle_hash byte_to_proof file_size _15 cs + s0 s4 XCHG // cs byte_to_proof file_size _15 merkle_hash + NEQ // cs byte_to_proof file_size _18 + IFJMP:<{ // cs byte_to_proof file_size + 3 BLKDROP // + FALSE // _19 + }> // cs byte_to_proof file_size + s0 s2 XCHG // file_size byte_to_proof cs + LDREF // file_size byte_to_proof _44 _43 + DROP // file_size byte_to_proof file_dict + 0 PUSHINT // file_size byte_to_proof file_dict key_len=0 + WHILE:<{ + 64 PUSHINT // file_size byte_to_proof file_dict key_len _25=64 + OVER // file_size byte_to_proof file_dict key_len _25=64 key_len + LSHIFT // file_size byte_to_proof file_dict key_len _26 + s4 PUSH // file_size byte_to_proof file_dict key_len _26 file_size + LESS // file_size byte_to_proof file_dict key_len _27 + }>DO<{ // file_size byte_to_proof file_dict key_len + INC // file_size byte_to_proof file_dict key_len + }> // file_size byte_to_proof file_dict key_len + s3 POP // key_len byte_to_proof file_dict + SWAP // key_len file_dict byte_to_proof + 6 RSHIFT# // key_len file_dict _33 + s0 s2 XCHG // _33 file_dict key_len + DICTUGET + NULLSWAPIFNOT // _45 _46 + NIP // found? 
+ IFJMP:<{ // + TRUE // _35 + }> // + FALSE // _36 + }> + add_to_balance PROCREF:<{ + // amount + c4 PUSH // amount _2 + CTOS // amount ds + 1 LDI // amount _7 ds + LDGRAMS // amount active balance residue + s0 s3 XCHG // residue active balance amount + ADD // residue active balance + SWAP + NEWC // residue balance active _13 + 1 STI // residue balance _15 + SWAP // residue _15 balance + STGRAMS // residue _16 + SWAP // _16 residue + STSLICER // _17 + ENDC // _18 + c4 POP + }> + get_client_data PROC:<{ + // ds + PLDREF // _1 + CTOS // ds + LDMSGADDR // _3 ds + 256 LDU // _3 _11 _10 + DROP // _3 _5 + }> + recv_internal PROC:<{ + // msg_value in_msg_full in_msg_body + SWAP // msg_value in_msg_body in_msg_full + CTOS // msg_value in_msg_body cs + 4 LDU // msg_value in_msg_body flags cs + SWAP + 1 PUSHINT // msg_value in_msg_body cs flags _9=1 + AND // msg_value in_msg_body cs _10 + IFJMP:<{ // msg_value in_msg_body cs + 3 BLKDROP // + }> // msg_value in_msg_body cs + LDMSGADDR // msg_value in_msg_body _421 _420 + DROP // msg_value in_msg_body sender_address + OVER // msg_value in_msg_body sender_address in_msg_body + SEMPTY // msg_value in_msg_body sender_address _14 + IFJMP:<{ // msg_value in_msg_body sender_address + 2DROP // msg_value + add_to_balance INLINECALLDICT + }> // msg_value in_msg_body sender_address + SWAP // msg_value sender_address in_msg_body + 32 LDU // msg_value sender_address op in_msg_body + OVER // msg_value sender_address op in_msg_body op + 0 EQINT // msg_value sender_address op in_msg_body _21 + IFJMP:<{ // msg_value sender_address op in_msg_body + 3 BLKDROP // msg_value + add_to_balance INLINECALLDICT + }> // msg_value sender_address op in_msg_body + 64 LDU // msg_value sender_address op query_id in_msg_body + s2 PUSH + 276580847 PUSHINT // msg_value sender_address op query_id in_msg_body op _27=276580847 + EQUAL // msg_value sender_address op query_id in_msg_body _28 + IF:<{ // msg_value sender_address op query_id in_msg_body + s0 s4 XCHG + 40000000 PUSHINT // in_msg_body sender_address op query_id msg_value _31 + SUB // in_msg_body sender_address op query_id _32 + add_to_balance INLINECALLDICT + c4 PUSH // in_msg_body sender_address op query_id _36 + CTOS // in_msg_body sender_address op query_id _37 + get_client_data CALLDICT // in_msg_body sender_address op query_id client torrent_hash + 3212562625 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _40=3212562625 + 0 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _40=3212562625 _41=0 + 24 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _40=3212562625 _41=0 _42=24 + NEWC // in_msg_body sender_address op query_id client torrent_hash _40=3212562625 _41=0 _42=24 _43 + 6 STU // in_msg_body sender_address op query_id client torrent_hash _40=3212562625 _41=0 _45 + s0 s4 XCHG2 // in_msg_body sender_address op query_id _41=0 torrent_hash _40=3212562625 _45 client + STSLICER // in_msg_body sender_address op query_id _41=0 torrent_hash _40=3212562625 _46 + 20000000 PUSHINT // in_msg_body sender_address op query_id _41=0 torrent_hash _40=3212562625 _46 _47=20000000 + STGRAMS // in_msg_body sender_address op query_id _41=0 torrent_hash _40=3212562625 _48 + s1 s3 XCHG // in_msg_body sender_address op query_id _40=3212562625 torrent_hash _41=0 _48 + 107 STU // in_msg_body sender_address op query_id _40=3212562625 torrent_hash _62 + s1 s2 XCHG // in_msg_body sender_address op query_id torrent_hash _40=3212562625 _62 + 32 STU // in_msg_body sender_address op query_id 
torrent_hash _64 + s2 s(-1) PUXC // in_msg_body sender_address op query_id torrent_hash query_id _64 + 64 STU // in_msg_body sender_address op query_id torrent_hash _66 + 256 STU // in_msg_body sender_address op query_id _68 + ENDC // in_msg_body sender_address op query_id msg + 0 PUSHINT // in_msg_body sender_address op query_id msg _70=0 + SENDRAWMSG + }>ELSE<{ + s4 POP // in_msg_body sender_address op query_id + }> + OVER + 2050365064 PUSHINT // in_msg_body sender_address op query_id op _72=2050365064 + EQUAL // in_msg_body sender_address op query_id _73 + IF:<{ // in_msg_body sender_address op query_id + c4 PUSH // in_msg_body sender_address op query_id _75 + CTOS // in_msg_body sender_address op query_id ds + 1 LDI // in_msg_body sender_address op query_id _81 ds + LDGRAMS // in_msg_body sender_address op query_id _81 _84 ds + LDMSGADDR // in_msg_body sender_address op query_id active balance provider rest + s0 s3 XCHG // in_msg_body sender_address op query_id rest balance provider active + NOT // in_msg_body sender_address op query_id rest balance provider _89 + 1007 THROWIFNOT + s5 s0 PUSH2 // in_msg_body sender_address op query_id rest balance provider sender_address provider + SDEQ // in_msg_body sender_address op query_id rest balance provider _92 + 401 THROWIFNOT + TRUE // in_msg_body sender_address op query_id rest balance provider _94 + NEWC // in_msg_body sender_address op query_id rest balance provider _94 _95 + 1 STI // in_msg_body sender_address op query_id rest balance provider _97 + ROT // in_msg_body sender_address op query_id rest provider _97 balance + STGRAMS // in_msg_body sender_address op query_id rest provider _98 + SWAP // in_msg_body sender_address op query_id rest _98 provider + STSLICER // in_msg_body sender_address op query_id rest _99 + OVER // in_msg_body sender_address op query_id rest _99 rest + STSLICER // in_msg_body sender_address op query_id rest _100 + ENDC // in_msg_body sender_address op query_id rest _101 + c4 POP + get_client_data CALLDICT // in_msg_body sender_address op query_id client torrent_hash + LTIME // in_msg_body sender_address op query_id client torrent_hash _107 + 3570068941 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _107 _108=3570068941 + 0 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _107 _108=3570068941 _109=0 + 24 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _107 _108=3570068941 _109=0 _110=24 + NEWC // in_msg_body sender_address op query_id client torrent_hash _107 _108=3570068941 _109=0 _110=24 _111 + 6 STU // in_msg_body sender_address op query_id client torrent_hash _107 _108=3570068941 _109=0 _113 + s0 s5 XCHG2 // in_msg_body sender_address op query_id _109=0 torrent_hash _107 _108=3570068941 _113 client + STSLICER // in_msg_body sender_address op query_id _109=0 torrent_hash _107 _108=3570068941 _114 + 20000000 PUSHINT // in_msg_body sender_address op query_id _109=0 torrent_hash _107 _108=3570068941 _114 _115=20000000 + STGRAMS // in_msg_body sender_address op query_id _109=0 torrent_hash _107 _108=3570068941 _116 + s1 s4 XCHG // in_msg_body sender_address op query_id _108=3570068941 torrent_hash _107 _109=0 _116 + 107 STU // in_msg_body sender_address op query_id _108=3570068941 torrent_hash _107 _130 + s1 s3 XCHG // in_msg_body sender_address op query_id _107 torrent_hash _108=3570068941 _130 + 32 STU // in_msg_body sender_address op query_id _107 torrent_hash _132 + s1 s2 XCHG // in_msg_body sender_address op query_id torrent_hash _107 _132 + 
64 STU // in_msg_body sender_address op query_id torrent_hash _134 + 256 STU // in_msg_body sender_address op query_id _136 + ENDC // in_msg_body sender_address op query_id msg + 0 PUSHINT // in_msg_body sender_address op query_id msg _138=0 + SENDRAWMSG + }> // in_msg_body sender_address op query_id + OVER + 2046375914 PUSHINT // in_msg_body sender_address op query_id op _140=2046375914 + EQUAL // in_msg_body sender_address op query_id _141 + IFJMP:<{ // in_msg_body sender_address op query_id + s2 s3 XCHG + 3 BLKDROP // sender_address + c4 PUSH // sender_address _143 + CTOS // sender_address ds + 1 LDI // sender_address _149 ds + LDGRAMS // sender_address _149 _152 ds + LDMSGADDR // sender_address active balance provider rest + get_client_data CALLDICT // sender_address active balance provider client torrent_hash + s5 s2 PUSH2 // sender_address active balance provider client torrent_hash sender_address provider + SDEQ // sender_address active balance provider client torrent_hash _160 + s6 s2 XCPU // _160 active balance provider client torrent_hash sender_address client + SDEQ // _160 active balance provider client torrent_hash _161 + s1 s6 XCHG // torrent_hash active balance provider client _160 _161 + OR // torrent_hash active balance provider client _162 + 401 THROWIFNOT + LTIME // torrent_hash active balance provider client _165 + 3055775075 PUSHINT // torrent_hash active balance provider client _165 _166=3055775075 + 0 PUSHINT // torrent_hash active balance provider client _165 _166=3055775075 _167=0 + 24 PUSHINT // torrent_hash active balance provider client _165 _166=3055775075 _167=0 _168=24 + NEWC // torrent_hash active balance provider client _165 _166=3055775075 _167=0 _168=24 _169 + 6 STU // torrent_hash active balance provider client _165 _166=3055775075 _167=0 _171 + s0 s4 XCHG2 // torrent_hash active balance provider _167=0 _165 _166=3055775075 _171 client + STSLICER // torrent_hash active balance provider _167=0 _165 _166=3055775075 _172 + s0 s5 XCHG2 // torrent_hash active _166=3055775075 provider _167=0 _165 _172 balance + STGRAMS // torrent_hash active _166=3055775075 provider _167=0 _165 _173 + s1 s2 XCHG // torrent_hash active _166=3055775075 provider _165 _167=0 _173 + 107 STU // torrent_hash active _166=3055775075 provider _165 _187 + s1 s3 XCHG // torrent_hash active _165 provider _166=3055775075 _187 + 32 STU // torrent_hash active _165 provider _189 + s1 s2 XCHG // torrent_hash active provider _165 _189 + 64 STU // torrent_hash active provider _191 + s3 s(-1) PUXC // torrent_hash active provider torrent_hash _191 + 256 STU // torrent_hash active provider _193 + ENDC // torrent_hash active provider client_msg + s0 s2 XCHG // torrent_hash client_msg provider active + NOT // torrent_hash client_msg provider _195 + IFJMP:<{ // torrent_hash client_msg provider + DROP + NIP // client_msg + 160 PUSHINT // client_msg _198 + SENDRAWMSG + }> // torrent_hash client_msg provider + SWAP + 64 PUSHINT // torrent_hash provider client_msg _200=64 + SENDRAWMSG + LTIME // torrent_hash provider _203 + 3055775075 PUSHINT // torrent_hash provider _203 _204=3055775075 + 0 PUSHINT // torrent_hash provider _203 _204=3055775075 _205=0 + 24 PUSHINT // torrent_hash provider _203 _204=3055775075 _205=0 _206=24 + NEWC // torrent_hash provider _203 _204=3055775075 _205=0 _206=24 _207 + 6 STU // torrent_hash provider _203 _204=3055775075 _205=0 _209 + s0 s4 XCHG2 // torrent_hash _205=0 _203 _204=3055775075 _209 provider + STSLICER // torrent_hash _205=0 _203 _204=3055775075 _210 + s3 PUSH // 
torrent_hash _205=0 _203 _204=3055775075 _210 _211=0 + STGRAMS // torrent_hash _205=0 _203 _204=3055775075 _212 + s1 s3 XCHG // torrent_hash _204=3055775075 _203 _205=0 _212 + 107 STU // torrent_hash _204=3055775075 _203 _226 + s1 s2 XCHG // torrent_hash _203 _204=3055775075 _226 + 32 STU // torrent_hash _203 _228 + 64 STU // torrent_hash _230 + 256 STU // _232 + ENDC // provider_msg + 160 PUSHINT // provider_msg _236 + SENDRAWMSG + }> // in_msg_body sender_address op query_id + OVER + 1189949076 PUSHINT // in_msg_body sender_address op query_id op _238=1189949076 + EQUAL // in_msg_body sender_address op query_id _239 + IF:<{ // in_msg_body sender_address op query_id + c4 PUSH // in_msg_body sender_address op query_id _241 + CTOS // in_msg_body sender_address op query_id ds + 1 LDI // in_msg_body sender_address op query_id _246 ds + LDGRAMS // in_msg_body sender_address op query_id _246 _249 ds + LDMSGADDR // in_msg_body sender_address op query_id _246 _249 _449 _448 + DROP // in_msg_body sender_address op query_id active balance provider + s0 s2 XCHG // in_msg_body sender_address op query_id provider balance active + 1003 THROWIFNOT + s4 s1 PUSH2 // in_msg_body sender_address op query_id provider balance sender_address provider + SDEQ // in_msg_body sender_address op query_id provider balance _256 + 401 THROWIFNOT + DUP // in_msg_body sender_address op query_id provider balance balance + 0 GTINT // in_msg_body sender_address op query_id provider balance _259 + IF:<{ // in_msg_body sender_address op query_id provider balance + 10000000 PUSHINT // in_msg_body sender_address op query_id provider balance _260=10000000 + ADD // in_msg_body sender_address op query_id provider _261 + 2 PUSHINT // in_msg_body sender_address op query_id provider _261 _262=2 + RAWRESERVE + }>ELSE<{ + DROP // in_msg_body sender_address op query_id provider + }> + 2837163862 PUSHINT // in_msg_body sender_address op query_id provider _265=2837163862 + 0 PUSHINT // in_msg_body sender_address op query_id provider _265=2837163862 _266=0 + 24 PUSHINT // in_msg_body sender_address op query_id provider _265=2837163862 _266=0 _267=24 + NEWC // in_msg_body sender_address op query_id provider _265=2837163862 _266=0 _267=24 _268 + 6 STU // in_msg_body sender_address op query_id provider _265=2837163862 _266=0 _270 + s0 s3 XCHG2 // in_msg_body sender_address op query_id _266=0 _265=2837163862 _270 provider + STSLICER // in_msg_body sender_address op query_id _266=0 _265=2837163862 _271 + 20000000 PUSHINT // in_msg_body sender_address op query_id _266=0 _265=2837163862 _271 _272=20000000 + STGRAMS // in_msg_body sender_address op query_id _266=0 _265=2837163862 _273 + s1 s2 XCHG // in_msg_body sender_address op query_id _265=2837163862 _266=0 _273 + 107 STU // in_msg_body sender_address op query_id _265=2837163862 _287 + 32 STU // in_msg_body sender_address op query_id _289 + 64 STU // in_msg_body sender_address op _291 + ENDC // in_msg_body sender_address op msg + 160 PUSHINT // in_msg_body sender_address op msg _295 + SENDRAWMSG + }>ELSE<{ + DROP // in_msg_body sender_address op + }> + 1100832077 PUSHINT // in_msg_body sender_address op _297=1100832077 + EQUAL // in_msg_body sender_address _298 + IF:<{ // in_msg_body sender_address + SWAP // sender_address in_msg_body + LDREF // sender_address _451 _450 + DROP // sender_address file_dict_proof + c4 PUSH // sender_address file_dict_proof _303 + CTOS // sender_address file_dict_proof ds + 1 LDI // sender_address file_dict_proof _315 ds + LDGRAMS // sender_address file_dict_proof 
_315 _318 ds + LDMSGADDR // sender_address file_dict_proof _315 _318 _320 ds + 256 LDU // sender_address file_dict_proof _315 _318 _320 _322 ds + 64 LDU // sender_address file_dict_proof _315 _318 _320 _322 _325 ds + 64 LDU // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 ds + LDGRAMS // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 _331 ds + 32 LDU // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 _331 _333 ds + 32 LDU // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 _331 _333 _336 ds + LDREF // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 _331 _333 _336 _471 _470 + DROP // sender_address file_dict_proof active balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time client_data + s0 s9 XCHG // sender_address file_dict_proof client_data balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time active + 1003 THROWIFNOT + s10 s6 PUSH2 // sender_address file_dict_proof client_data balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time sender_address provider + SDEQ // sender_address file_dict_proof client_data balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time _344 + 401 THROWIFNOT + s5 s3 s(-1) PUXC2 + s5 s10 PUXC // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time rate_per_mb_day merkle_hash next_proof file_size file_dict_proof + check_proof CALLDICT // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time rate_per_mb_day _347 + 1002 THROWIFNOT + s2 PUSH // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time rate_per_mb_day file_size + RAND // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time rate_per_mb_day next_proof + NOW // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time rate_per_mb_day next_proof _351 + s0 s3 XCHG2 // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day _351 last_proof_time + SUB // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day _352 + s8 PUSH // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day _352 max_span + MIN // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day actual_span + s3 s1 PUSH2 // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day actual_span file_size rate_per_mb_day + MUL // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day actual_span _355 + SWAP + 90596966400 PUSHINTX // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day _355 actual_span _364 + MULDIV // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day bounty + s0 s6 XCHG + 0 PUSHINT + s0 s7 XCHG // sender_address max_span client_data _366=0 provider merkle_hash file_size next_proof rate_per_mb_day balance bounty + SUB // sender_address max_span client_data _366=0 provider merkle_hash file_size next_proof rate_per_mb_day _367 + s1 s6 XCHG // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof _366=0 _367 + MAX // sender_address max_span 
client_data rate_per_mb_day provider merkle_hash file_size next_proof balance + NOW // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof balance last_proof_time + TRUE // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof balance last_proof_time _370 + NEWC // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof balance last_proof_time _370 _371 + 1 STI // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof balance last_proof_time _373 + ROT // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof last_proof_time _373 balance + STGRAMS // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof last_proof_time _374 + s0 s5 XCHG2 // sender_address max_span client_data rate_per_mb_day last_proof_time merkle_hash file_size next_proof _374 provider + STSLICER // sender_address max_span client_data rate_per_mb_day last_proof_time merkle_hash file_size next_proof _375 + s1 s3 XCHG // sender_address max_span client_data rate_per_mb_day last_proof_time next_proof file_size merkle_hash _375 + 256 STU // sender_address max_span client_data rate_per_mb_day last_proof_time next_proof file_size _377 + 64 STU // sender_address max_span client_data rate_per_mb_day last_proof_time next_proof _379 + 64 STU // sender_address max_span client_data rate_per_mb_day last_proof_time _381 + ROT // sender_address max_span client_data last_proof_time _381 rate_per_mb_day + STGRAMS // sender_address max_span client_data last_proof_time _382 + s1 s3 XCHG // sender_address last_proof_time client_data max_span _382 + 32 STU // sender_address last_proof_time client_data _384 + s1 s2 XCHG // sender_address client_data last_proof_time _384 + 32 STU // sender_address client_data _386 + STREF // sender_address _387 + ENDC // sender_address _388 + c4 POP + 0 PUSHINT // sender_address _391=0 + 24 PUSHINT // sender_address _391=0 _392=24 + NEWC // sender_address _391=0 _392=24 _393 + 6 STU // sender_address _391=0 _395 + ROT // _391=0 _395 sender_address + STSLICER // _391=0 _396 + 111 STU // _412 + ENDC // msg + 66 PUSHINT // msg _416 + SENDRAWMSG + }>ELSE<{ + 2DROP // + }> + }> + get_storage_contract_data PROC:<{ + // + c4 PUSH // _1 + CTOS // ds + 1 LDI // _13 ds + LDGRAMS // _13 _16 ds + LDMSGADDR // _13 _16 _18 ds + 256 LDU // _13 _16 _18 _20 ds + 64 LDU // _13 _16 _18 _20 _23 ds + 64 LDU // _13 _16 _18 _20 _23 _26 ds + LDGRAMS // _13 _16 _18 _20 _23 _26 _29 ds + 32 LDU // _13 _16 _18 _20 _23 _26 _29 _31 ds + 32 LDU // active balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time rest + get_client_data CALLDICT // active balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time client torrent_hash + }> + get_torrent_hash PROC:<{ + // + get_storage_contract_data CALLDICT // _12 _13 _14 _15 _16 _17 _18 _19 _20 _21 _22 + 10 1 BLKDROP2 // torrent_hash + }> + is_active PROC:<{ + // + c4 PUSH // _0 + CTOS // _1 + 1 PLDI // _3 + }> + get_next_proof_info PROC:<{ + // + get_storage_contract_data CALLDICT // _12 _13 _14 _15 _16 _17 _18 _19 _20 _21 _22 + 2DROP + s2 POP + 5 3 BLKDROP2 // next_proof last_proof_time max_span + }> +}END>c diff --git a/storage/storage-daemon/smartcont/storage-provider-code.boc b/storage/storage-daemon/smartcont/storage-provider-code.boc new file mode 100644 index 00000000..e7cee4ff Binary files 
/dev/null and b/storage/storage-daemon/smartcont/storage-provider-code.boc differ diff --git a/storage/storage-daemon/smartcont/storage-provider.fc b/storage/storage-daemon/smartcont/storage-provider.fc new file mode 100644 index 00000000..17223833 --- /dev/null +++ b/storage/storage-daemon/smartcont/storage-provider.fc @@ -0,0 +1,227 @@ +;; Storage contract fabric + +#include "constants.fc"; + +const min_deploy_amount = 50000000; + +cell storage_contract_code() asm """ "storage-contract-code.boc" file>B B>boc PUSHREF """; + +slice calculate_address_by_stateinit(cell state_init) { + return begin_cell().store_uint(4, 3) + .store_int(0, 8) + .store_uint(cell_hash(state_init), 256) + .end_cell() + .begin_parse(); +} + +cell build_storage_contract_stateinit(int merkle_hash, int file_size, int rate_per_mb_day, + int max_span, slice client, int torrent_hash) { + cell data = begin_cell() + .store_int(0, 1) ;; active + .store_coins(0) ;; client balance + .store_slice(my_address()) + .store_uint(merkle_hash, 256) + .store_uint(file_size, 64) + .store_uint(0, 64) ;; next_proof + .store_coins(rate_per_mb_day) + .store_uint(max_span, 32) + .store_uint(now(), 32) ;; last_proof_time + .store_ref(begin_cell() + .store_slice(client) + .store_uint(torrent_hash, 256) + .end_cell()) + .end_cell(); + + cell state_init = begin_cell() + .store_uint(0, 2) + .store_maybe_ref(storage_contract_code()) + .store_maybe_ref(data) + .store_uint(0, 1) .end_cell(); + return state_init; +} + +() deploy_storage_contract (slice client, int query_id, int file_size, int merkle_hash, int torrent_hash, + int expected_rate, int expected_max_span) impure { + var ds = get_data().begin_parse(); + var (wallet_data, + accept_new_contracts?, + rate_per_mb_day, + max_span, + minimal_file_size, + maximal_file_size) = (ds~load_bits(32 + 32 + 256), + ds~load_int(1), + ds~load_coins(), + ds~load_uint(32), + ds~load_uint(64), + ds~load_uint(64)); + throw_unless(error::no_new_contracts, accept_new_contracts?); + throw_unless(error::file_too_small, file_size >= minimal_file_size); + throw_unless(error::file_too_big, file_size <= maximal_file_size); + throw_unless(error::provider_params_changed, expected_rate == rate_per_mb_day); + throw_unless(error::provider_params_changed, expected_max_span == max_span); + cell state_init = build_storage_contract_stateinit(merkle_hash, file_size, rate_per_mb_day, + max_span, client, torrent_hash); + cell msg = begin_cell() + .store_uint(0x18, 6) + .store_slice(calculate_address_by_stateinit(state_init)) + .store_coins(0) + .store_uint(4 + 2, 1 + 4 + 4 + 64 + 32 + 1 + 1 + 1) + .store_ref(state_init) + .store_uint(op::offer_storage_contract, 32) + .store_uint(query_id, 64) + .end_cell(); + send_raw_message(msg, 64); +} + +() recv_internal(int msg_value, cell in_msg_full, slice in_msg_body) impure { + slice cs = in_msg_full.begin_parse(); + int flags = cs~load_uint(4); + + if ((flags & 1) | in_msg_body.slice_empty?()) { ;; ignore all bounced and empty messages + return (); + } + slice sender_address = cs~load_msg_addr(); + + int op = in_msg_body~load_uint(32); + if (op == 0) { ;; transfer with text message + return (); + } + int query_id = in_msg_body~load_uint(64); + + if(op == op::offer_storage_contract) { + throw_unless(error::not_enough_money, msg_value >= min_deploy_amount); + ;; torrent_info piece_size:uint32 file_size:uint64 root_hash:(## 256) header_size:uint64 header_hash:(## 256) + ;; microchunk_hash:(Maybe (## 256)) description:Text = TorrentInfo; + ;; + ;; new_storage_contract#00000001 
query_id:uint64 info:(^ TorrentInfo) microchunk_hash:uint256 + ;; expected_rate:Coins expected_max_span:uint32 = NewStorageContract; + cell torrent_info = in_msg_body~load_ref(); + int torrent_hash = cell_hash(torrent_info); + slice info_cs = torrent_info.begin_parse(); + info_cs~skip_bits(32); + int file_size = info_cs~load_uint(64); + int merkle_hash = in_msg_body~load_uint(256); + + int expected_rate = in_msg_body~load_coins(); + int expected_max_span = in_msg_body~load_uint(32); + deploy_storage_contract(sender_address, query_id, file_size, merkle_hash, torrent_hash, + expected_rate, expected_max_span); + return (); + } + if(op == op::storage_contract_terminated) { + return (); + } + + if(op == op::update_pubkey) { + if(~ equal_slice_bits(my_address(), sender_address)) { + return (); + } + var ds = get_data().begin_parse(); + var (seqno_subwallet, + _, + non_wallet_data) = (ds~load_bits(32 + 32), + ds~load_uint(256), + ds); + int new_pubkey = in_msg_body~load_uint(256); + set_data(begin_cell() + .store_slice(seqno_subwallet) + .store_uint(new_pubkey, 256) + .store_slice(non_wallet_data) + .end_cell()); + } + if(op == op::update_storage_params) { + if(~ equal_slice_bits(my_address(), sender_address)) { + return (); + } + var ds = get_data().begin_parse(); + var wallet_data = ds~load_bits(32 + 32 + 256); + var(accept_new_contracts?, + rate_per_mb_day, + max_span, + minimal_file_size, + maximal_file_size) = (in_msg_body~load_int(1), + in_msg_body~load_coins(), + in_msg_body~load_uint(32), + in_msg_body~load_uint(64), + in_msg_body~load_uint(64)); + set_data(begin_cell() + .store_slice(wallet_data) + .store_int(accept_new_contracts?, 1) + .store_coins(rate_per_mb_day) + .store_uint(max_span, 32) + .store_uint(minimal_file_size, 64) + .store_uint(maximal_file_size, 64) + .end_cell()); + } +} + +() recv_external(slice in_msg) impure { + var signature = in_msg~load_bits(512); + var cs = in_msg; + var (subwallet_id, valid_until, msg_seqno) = (cs~load_uint(32), cs~load_uint(32), cs~load_uint(32)); + throw_if(35, valid_until <= now()); + var ds = get_data().begin_parse(); + var (stored_seqno, + stored_subwallet, + public_key, + non_wallet_data) = (ds~load_uint(32), + ds~load_uint(32), + ds~load_uint(256), + ds); + throw_unless(33, msg_seqno == stored_seqno); + throw_unless(34, subwallet_id == stored_subwallet); + throw_unless(35, check_signature(slice_hash(in_msg), signature, public_key)); + accept_message(); + cs~touch(); + while (cs.slice_refs()) { + var mode = cs~load_uint(8); + send_raw_message(cs~load_ref(), mode); + } + set_data(begin_cell() + .store_uint(stored_seqno + 1, 32) + .store_uint(stored_subwallet, 32) + .store_uint(public_key, 256) + .store_slice(non_wallet_data) + .end_cell()); +} + +;; Get methods + +int seqno() method_id { + return get_data().begin_parse().preload_uint(32); +} + +int get_public_key() method_id { + var cs = get_data().begin_parse(); + cs~load_uint(64); + return cs.preload_uint(256); +} + +;; seqno, subwallet, key +_ get_wallet_params() method_id { + var ds = get_data().begin_parse(); + var (stored_seqno, stored_subwallet, public_key) = (ds~load_uint(32), ds~load_uint(32), ds~load_uint(256)); + return (stored_seqno, stored_subwallet, public_key); +} + +_ get_storage_params() method_id { + var ds = get_data().begin_parse(); + var (wallet_data, + accept_new_contracts?, + rate_per_mb_day, + max_span, + minimal_file_size, + maximal_file_size) = (ds~load_bits(32 + 32 + 256), + ds~load_int(1), + ds~load_coins(), + ds~load_uint(32), + ds~load_uint(64), + 
ds~load_uint(64)); + return (accept_new_contracts?, rate_per_mb_day, max_span, minimal_file_size, maximal_file_size); +} + +slice get_storage_contract_address(int merkle_hash, int file_size, slice client, int torrent_hash) method_id { + var (_, rate_per_mb_day, max_span, _, _) = get_storage_params(); + cell state_init = build_storage_contract_stateinit(merkle_hash, file_size, rate_per_mb_day, max_span, client, torrent_hash); + return calculate_address_by_stateinit(state_init); +} diff --git a/storage/storage-daemon/smartcont/storage-provider.fif b/storage/storage-daemon/smartcont/storage-provider.fif new file mode 100644 index 00000000..28dddc7c --- /dev/null +++ b/storage/storage-daemon/smartcont/storage-provider.fif @@ -0,0 +1,340 @@ +"Asm.fif" include +// automatically generated from `../../../crypto/smartcont/stdlib.fc` `storage-provider.fc` incl:`constants.fc` +PROGRAM{ + DECLPROC calculate_address_by_stateinit + DECLPROC build_storage_contract_stateinit + DECLPROC deploy_storage_contract + DECLPROC recv_internal + DECLPROC recv_external + 85143 DECLMETHOD seqno + 78748 DECLMETHOD get_public_key + 130271 DECLMETHOD get_wallet_params + 104346 DECLMETHOD get_storage_params + 119729 DECLMETHOD get_storage_contract_address + calculate_address_by_stateinit PROC:<{ + // state_init + HASHCU // _1 + 0 PUSHINT // _1 _2=0 + 4 PUSHINT // _1 _2=0 _3=4 + NEWC // _1 _2=0 _3=4 _4 + 3 STU // _1 _2=0 _6 + 8 STI // _1 _8 + 256 STU // _10 + ENDC // _11 + CTOS // _12 + }> + build_storage_contract_stateinit PROC:<{ + // merkle_hash file_size rate_per_mb_day max_span client torrent_hash + NEWC + ROT // merkle_hash file_size rate_per_mb_day max_span torrent_hash _7 client + STSLICER // merkle_hash file_size rate_per_mb_day max_span torrent_hash _8 + 256 STU // merkle_hash file_size rate_per_mb_day max_span _10 + ENDC // merkle_hash file_size rate_per_mb_day max_span _11 + NOW // merkle_hash file_size rate_per_mb_day max_span _11 _12 + 0 PUSHINT // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 + DUP // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _14=0 + NEWC // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _14=0 _15 + 1 STI // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _17 + OVER // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _17 _18=0 + STGRAMS // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _19 + MYADDR // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _19 _20 + STSLICER // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _21 + s1 s7 XCHG // _13=0 file_size rate_per_mb_day max_span _11 _12 merkle_hash _21 + 256 STU // _13=0 file_size rate_per_mb_day max_span _11 _12 _23 + s1 s5 XCHG // _13=0 _12 rate_per_mb_day max_span _11 file_size _23 + 64 STU // _13=0 _12 rate_per_mb_day max_span _11 _25 + s1 s5 XCHG // _11 _12 rate_per_mb_day max_span _13=0 _25 + 64 STU // _11 _12 rate_per_mb_day max_span _27 + ROT // _11 _12 max_span _27 rate_per_mb_day + STGRAMS // _11 _12 max_span _28 + 32 STU // _11 _12 _30 + 32 STU // _11 _32 + STREF // _33 + ENDC // data + 0 PUSHINT // data _36=0 + "storage-contract-code.boc" file>B B>boc PUSHREF // data _36=0 _37 + OVER // data _36=0 _37 _38=0 + NEWC // data _36=0 _37 _38=0 _39 + 2 STU // data _36=0 _37 _41 + STOPTREF // data _36=0 _42 + s1 s2 XCHG // _36=0 data _42 + STOPTREF // _36=0 _43 + 1 STU // _45 + ENDC // state_init + }> + deploy_storage_contract PROC:<{ + // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span + c4 
PUSH // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _8 + CTOS // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span ds + 320 PUSHINT // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span ds _21 + LDSLICEX // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _91 _90 + NIP // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span ds + 1 LDI // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 ds + LDGRAMS // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 _26 ds + 32 LDU // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 _26 _28 ds + 64 LDU // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 _26 _28 _31 ds + 64 LDU // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 _26 _28 _31 _101 _100 + DROP // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span accept_new_contracts? rate_per_mb_day max_span minimal_file_size maximal_file_size + s0 s4 XCHG // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span maximal_file_size rate_per_mb_day max_span minimal_file_size accept_new_contracts? + 1006 THROWIFNOT + s8 s(-1) PUXC // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span maximal_file_size rate_per_mb_day max_span file_size minimal_file_size + GEQ // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span maximal_file_size rate_per_mb_day max_span _40 + 1004 THROWIFNOT + s7 s2 PUXC // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span max_span rate_per_mb_day file_size maximal_file_size + LEQ // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span max_span rate_per_mb_day _43 + 1005 THROWIFNOT + s3 s3 XCPU // client query_id file_size merkle_hash torrent_hash rate_per_mb_day expected_max_span max_span expected_rate rate_per_mb_day + EQUAL // client query_id file_size merkle_hash torrent_hash rate_per_mb_day expected_max_span max_span _46 + 1009 THROWIFNOT + TUCK // client query_id file_size merkle_hash torrent_hash rate_per_mb_day max_span expected_max_span max_span + EQUAL // client query_id file_size merkle_hash torrent_hash rate_per_mb_day max_span _49 + 1009 THROWIFNOT + 2SWAP + s1 s5 XCHG + s1 s6 XCHG // query_id merkle_hash file_size rate_per_mb_day max_span client torrent_hash + build_storage_contract_stateinit CALLDICT // query_id state_init + 276580847 PUSHINT // query_id state_init _54=276580847 + 6 PUSHINT // query_id state_init _54=276580847 _57 + 24 PUSHINT // query_id state_init _54=276580847 _57 _58=24 + NEWC // query_id state_init _54=276580847 _57 _58=24 _59 + 6 STU // query_id state_init _54=276580847 _57 _61 + s3 PUSH // query_id state_init _54=276580847 _57 _61 state_init + calculate_address_by_stateinit CALLDICT // query_id state_init _54=276580847 _57 _61 _62 + STSLICER // query_id state_init _54=276580847 _57 _63 + 0 PUSHINT // query_id state_init _54=276580847 _57 _63 _64=0 + STGRAMS // query_id state_init _54=276580847 _57 _65 + 108 STU // query_id state_init _54=276580847 _81 + s1 s2 XCHG // query_id _54=276580847 state_init _81 + STREF // query_id _54=276580847 _82 + 32 STU // query_id _84 + 64 STU // _86 + ENDC // msg + 64 PUSHINT // msg _88=64 
+ SENDRAWMSG + }> + recv_internal PROC:<{ + SAMEALTSAVE // msg_value in_msg_full in_msg_body + SWAP // msg_value in_msg_body in_msg_full + CTOS // msg_value in_msg_body cs + 4 LDU // msg_value in_msg_body flags cs + SWAP + 1 PUSHINT // msg_value in_msg_body cs flags _9=1 + AND // msg_value in_msg_body cs _10 + s2 PUSH // msg_value in_msg_body cs _10 in_msg_body + SEMPTY // msg_value in_msg_body cs _10 _11 + OR // msg_value in_msg_body cs _12 + IFJMP:<{ // msg_value in_msg_body cs + 3 BLKDROP // + }> // msg_value in_msg_body cs + LDMSGADDR // msg_value in_msg_body _141 _140 + DROP // msg_value in_msg_body sender_address + SWAP // msg_value sender_address in_msg_body + 32 LDU // msg_value sender_address op in_msg_body + OVER // msg_value sender_address op in_msg_body op + 0 EQINT // msg_value sender_address op in_msg_body _21 + IFJMP:<{ // msg_value sender_address op in_msg_body + 4 BLKDROP // + }> // msg_value sender_address op in_msg_body + 64 LDU // msg_value sender_address op query_id in_msg_body + s2 PUSH + 276580847 PUSHINT // msg_value sender_address op query_id in_msg_body op _26=276580847 + EQUAL // msg_value sender_address op query_id in_msg_body _27 + IFJMP:<{ // msg_value sender_address op query_id in_msg_body + s2 POP // msg_value sender_address in_msg_body query_id + s0 s3 XCHG + 50000000 PUSHINT // query_id sender_address in_msg_body msg_value _29=50000000 + GEQ // query_id sender_address in_msg_body _30 + 1001 THROWIFNOT + LDREF // query_id sender_address torrent_info in_msg_body + OVER // query_id sender_address torrent_info in_msg_body torrent_info + HASHCU // query_id sender_address torrent_info in_msg_body torrent_hash + s0 s2 XCHG // query_id sender_address torrent_hash in_msg_body torrent_info + CTOS // query_id sender_address torrent_hash in_msg_body info_cs + 32 PUSHINT // query_id sender_address torrent_hash in_msg_body info_cs _40=32 + SDSKIPFIRST // query_id sender_address torrent_hash in_msg_body info_cs + 64 LDU // query_id sender_address torrent_hash in_msg_body _149 _148 + DROP // query_id sender_address torrent_hash in_msg_body file_size + SWAP // query_id sender_address torrent_hash file_size in_msg_body + 256 LDU // query_id sender_address torrent_hash file_size merkle_hash in_msg_body + LDGRAMS // query_id sender_address torrent_hash file_size merkle_hash expected_rate in_msg_body + 32 LDU // query_id sender_address torrent_hash file_size merkle_hash expected_rate _155 _154 + DROP // query_id sender_address torrent_hash file_size merkle_hash expected_rate expected_max_span + s5 s6 XCHG + s3 s4 XCHG + s2 s3 XCHG // sender_address query_id file_size merkle_hash torrent_hash expected_rate expected_max_span + deploy_storage_contract CALLDICT + }> // msg_value sender_address op query_id in_msg_body + NIP + s3 POP // in_msg_body sender_address op + DUP + 3055775075 PUSHINT // in_msg_body sender_address op op _58=3055775075 + EQUAL // in_msg_body sender_address op _59 + IFJMP:<{ // in_msg_body sender_address op + 3 BLKDROP // + }> // in_msg_body sender_address op + DUP + 1408453846 PUSHINT // in_msg_body sender_address op op _60=1408453846 + EQUAL // in_msg_body sender_address op _61 + IF:<{ // in_msg_body sender_address op + MYADDR // in_msg_body sender_address op _62 + s2 PUSH // in_msg_body sender_address op _62 sender_address + SDEQ // in_msg_body sender_address op _63 + NOT // in_msg_body sender_address op _64 + IFJMP:<{ // in_msg_body sender_address op + 3 BLKDROP // + RETALT + }> // in_msg_body sender_address op + c4 PUSH // in_msg_body sender_address op 
_66 + CTOS // in_msg_body sender_address op ds + 64 LDSLICE // in_msg_body sender_address op _71 ds + 256 LDU // in_msg_body sender_address op _71 _159 _158 + NIP // in_msg_body sender_address op seqno_subwallet non_wallet_data + s0 s4 XCHG // non_wallet_data sender_address op seqno_subwallet in_msg_body + 256 LDU // non_wallet_data sender_address op seqno_subwallet new_pubkey in_msg_body + NEWC // non_wallet_data sender_address op seqno_subwallet new_pubkey in_msg_body _83 + s0 s3 XCHG2 // non_wallet_data sender_address op in_msg_body new_pubkey _83 seqno_subwallet + STSLICER // non_wallet_data sender_address op in_msg_body new_pubkey _84 + 256 STU // non_wallet_data sender_address op in_msg_body _86 + s0 s4 XCHG2 // in_msg_body sender_address op _86 non_wallet_data + STSLICER // in_msg_body sender_address op _87 + ENDC // in_msg_body sender_address op _88 + c4 POP + }> // in_msg_body sender_address op + 1422651803 PUSHINT // in_msg_body sender_address op _90=1422651803 + EQUAL // in_msg_body sender_address _91 + IF:<{ // in_msg_body sender_address + MYADDR // in_msg_body sender_address _92 + SWAP // in_msg_body _92 sender_address + SDEQ // in_msg_body _93 + NOT // in_msg_body _94 + IFJMP:<{ // in_msg_body + DROP // + RETALT + }> // in_msg_body + c4 PUSH // in_msg_body _96 + CTOS // in_msg_body ds + 320 PUSHINT // in_msg_body ds _104 + LDSLICEX // in_msg_body _163 _162 + DROP // in_msg_body wallet_data + SWAP // wallet_data in_msg_body + 1 LDI // wallet_data _111 in_msg_body + LDGRAMS // wallet_data _111 _114 in_msg_body + 32 LDU // wallet_data _111 _114 _116 in_msg_body + 64 LDU // wallet_data _111 _114 _116 _119 in_msg_body + 64 LDU // wallet_data _111 _114 _116 _119 _173 _172 + DROP // wallet_data accept_new_contracts? rate_per_mb_day max_span minimal_file_size maximal_file_size + NEWC // wallet_data accept_new_contracts? rate_per_mb_day max_span minimal_file_size maximal_file_size _125 + s0 s6 XCHG2 // maximal_file_size accept_new_contracts? rate_per_mb_day max_span minimal_file_size _125 wallet_data + STSLICER // maximal_file_size accept_new_contracts? rate_per_mb_day max_span minimal_file_size _126 + s1 s4 XCHG // maximal_file_size minimal_file_size rate_per_mb_day max_span accept_new_contracts? 
_126 + 1 STI // maximal_file_size minimal_file_size rate_per_mb_day max_span _128 + ROT // maximal_file_size minimal_file_size max_span _128 rate_per_mb_day + STGRAMS // maximal_file_size minimal_file_size max_span _129 + 32 STU // maximal_file_size minimal_file_size _131 + 64 STU // maximal_file_size _133 + 64 STU // _135 + ENDC // _136 + c4 POP + }>ELSE<{ + 2DROP // + }> + }> + recv_external PROC:<{ + // in_msg + 9 PUSHPOW2 // in_msg _3=512 + LDSLICEX // signature in_msg + DUP // signature in_msg cs + 32 LDU // signature in_msg _9 cs + 32 LDU // signature in_msg _9 _12 cs + 32 LDU // signature in_msg subwallet_id valid_until msg_seqno cs + s0 s2 XCHG + NOW // signature in_msg subwallet_id cs msg_seqno valid_until _19 + LEQ // signature in_msg subwallet_id cs msg_seqno _20 + 35 THROWIF + c4 PUSH // signature in_msg subwallet_id cs msg_seqno _23 + CTOS // signature in_msg subwallet_id cs msg_seqno ds + 32 LDU // signature in_msg subwallet_id cs msg_seqno _29 ds + 32 LDU // signature in_msg subwallet_id cs msg_seqno _29 _32 ds + 256 LDU // signature in_msg subwallet_id cs msg_seqno stored_seqno stored_subwallet public_key non_wallet_data + s4 s3 XCPU // signature in_msg subwallet_id cs non_wallet_data stored_seqno stored_subwallet public_key msg_seqno stored_seqno + EQUAL // signature in_msg subwallet_id cs non_wallet_data stored_seqno stored_subwallet public_key _39 + 33 THROWIFNOT + s5 s1 XCPU // signature in_msg public_key cs non_wallet_data stored_seqno stored_subwallet subwallet_id stored_subwallet + EQUAL // signature in_msg public_key cs non_wallet_data stored_seqno stored_subwallet _42 + 34 THROWIFNOT + s0 s5 XCHG // signature stored_subwallet public_key cs non_wallet_data stored_seqno in_msg + HASHSU // signature stored_subwallet public_key cs non_wallet_data stored_seqno _45 + s0 s6 s4 XC2PU // stored_seqno stored_subwallet public_key cs non_wallet_data _45 signature public_key + CHKSIGNU // stored_seqno stored_subwallet public_key cs non_wallet_data _46 + 35 THROWIFNOT + ACCEPT + SWAP // stored_seqno stored_subwallet public_key non_wallet_data cs + WHILE:<{ + DUP // stored_seqno stored_subwallet public_key non_wallet_data cs cs + SREFS // stored_seqno stored_subwallet public_key non_wallet_data cs _51 + }>DO<{ // stored_seqno stored_subwallet public_key non_wallet_data cs + 8 LDU // stored_seqno stored_subwallet public_key non_wallet_data mode cs + LDREF // stored_seqno stored_subwallet public_key non_wallet_data mode _56 cs + s0 s2 XCHG // stored_seqno stored_subwallet public_key non_wallet_data cs _56 mode + SENDRAWMSG + }> // stored_seqno stored_subwallet public_key non_wallet_data cs + DROP // stored_seqno stored_subwallet public_key non_wallet_data + s0 s3 XCHG // non_wallet_data stored_subwallet public_key stored_seqno + INC // non_wallet_data stored_subwallet public_key _60 + NEWC // non_wallet_data stored_subwallet public_key _60 _61 + 32 STU // non_wallet_data stored_subwallet public_key _63 + s1 s2 XCHG // non_wallet_data public_key stored_subwallet _63 + 32 STU // non_wallet_data public_key _65 + 256 STU // non_wallet_data _67 + SWAP // _67 non_wallet_data + STSLICER // _68 + ENDC // _69 + c4 POP + }> + seqno PROC:<{ + // + c4 PUSH // _0 + CTOS // _1 + 32 PLDU // _3 + }> + get_public_key PROC:<{ + // + c4 PUSH // _1 + CTOS // cs + 64 LDU // _9 _8 + NIP // cs + 256 PLDU // _7 + }> + get_wallet_params PROC:<{ + // + c4 PUSH // _1 + CTOS // ds + 32 LDU // _6 ds + 32 LDU // _6 _9 ds + 256 LDU // _6 _9 _20 _19 + DROP // stored_seqno stored_subwallet public_key + }> + 
get_storage_params PROC:<{ + // + c4 PUSH // _1 + CTOS // ds + 320 PUSHINT // ds _14 + LDSLICEX // _31 _30 + NIP // ds + 1 LDI // _16 ds + LDGRAMS // _16 _19 ds + 32 LDU // _16 _19 _21 ds + 64 LDU // _16 _19 _21 _24 ds + 64 LDU // _16 _19 _21 _24 _41 _40 + DROP // accept_new_contracts? rate_per_mb_day max_span minimal_file_size maximal_file_size + }> + get_storage_contract_address PROC:<{ + // merkle_hash file_size client torrent_hash + get_storage_params CALLDICT // merkle_hash file_size client torrent_hash _13 _14 _15 _16 _17 + 2DROP + s2 POP // merkle_hash file_size client torrent_hash max_span rate_per_mb_day + s1 s3 s3 XCHG3 // merkle_hash file_size rate_per_mb_day max_span client torrent_hash + build_storage_contract_stateinit CALLDICT // state_init + calculate_address_by_stateinit CALLDICT // _12 + }> +}END>c diff --git a/storage/storage-daemon/smc-util.cpp b/storage/storage-daemon/smc-util.cpp new file mode 100644 index 00000000..32ba6227 --- /dev/null +++ b/storage/storage-daemon/smc-util.cpp @@ -0,0 +1,487 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ + +#include "smc-util.h" +#include "td/utils/filesystem.h" +#include "keys/encryptor.h" +#include "smartcont/provider-code.h" + +static void smc_forget(td::actor::ActorId client, td::int64 id) { + auto query = create_tl_object(id); + td::actor::send_closure(client, &tonlib::TonlibClientWrapper::send_request, std::move(query), + [](td::Result> R) mutable { + if (R.is_error()) { + LOG(WARNING) << "smc_forget failed: " << R.move_as_error(); + } + }); +} + +void run_get_method(ContractAddress address, td::actor::ActorId client, std::string method, + std::vector> args, + td::Promise>> promise) { + LOG(DEBUG) << "Running get method " << method << " on " << address.to_string(); + auto query = + create_tl_object(create_tl_object(address.to_string())); + td::actor::send_closure( + client, &tonlib::TonlibClientWrapper::send_request, std::move(query), + [client, method = std::move(method), args = std::move(args), + promise = std::move(promise)](td::Result> R) mutable { + TRY_RESULT_PROMISE(promise, obj, std::move(R)); + auto query = create_tl_object( + obj->id_, create_tl_object(std::move(method)), std::move(args)); + td::actor::send_closure( + client, &tonlib::TonlibClientWrapper::send_request, std::move(query), + [client, id = obj->id_, + promise = std::move(promise)](td::Result> R) mutable { + smc_forget(client, id); + TRY_RESULT_PROMISE(promise, obj, std::move(R)); + if (obj->exit_code_ != 0 && obj->exit_code_ != 1) { + promise.set_error( + td::Status::Error(PSTRING() << "Method execution finished with code " << obj->exit_code_)); + return; + } + promise.set_result(std::move(obj->stack_)); + }); + }); +} + +void check_contract_exists(ContractAddress address, td::actor::ActorId client, + td::Promise promise) { + auto query = + create_tl_object(create_tl_object(address.to_string())); + 
td::actor::send_closure( + client, &tonlib::TonlibClientWrapper::send_request, std::move(query), + [client, promise = std::move(promise)](td::Result> R) mutable { + TRY_RESULT_PROMISE(promise, obj, std::move(R)); + auto query = create_tl_object(obj->id_); + td::actor::send_closure( + client, &tonlib::TonlibClientWrapper::send_request, std::move(query), + [client, id = obj->id_, + promise = std::move(promise)](td::Result> R) mutable { + smc_forget(client, id); + TRY_RESULT_PROMISE(promise, r, std::move(R)); + promise.set_result(!r->bytes_.empty()); + }); + }); +} + +void get_contract_balance(ContractAddress address, td::actor::ActorId client, + td::Promise promise) { + auto query = + create_tl_object(create_tl_object(address.to_string())); + td::actor::send_closure( + client, &tonlib::TonlibClientWrapper::send_request, std::move(query), + promise.wrap([](tonlib_api::object_ptr r) -> td::Result { + return td::make_refint(r->balance_); + })); +} + +FabricContractWrapper::FabricContractWrapper(ContractAddress address, + td::actor::ActorId client, + td::actor::ActorId keyring, + td::unique_ptr callback, td::uint64 last_processed_lt) + : address_(address) + , client_(std::move(client)) + , keyring_(std::move(keyring)) + , callback_(std::move(callback)) + , last_processed_lt_(last_processed_lt) { +} + +void FabricContractWrapper::start_up() { + alarm(); +} + +void FabricContractWrapper::alarm() { + if (process_transactions_at_ && process_transactions_at_.is_in_past()) { + process_transactions_at_ = td::Timestamp::never(); + load_transactions(); + } + alarm_timestamp().relax(process_transactions_at_); + if (send_message_at_ && send_message_at_.is_in_past()) { + send_message_at_ = td::Timestamp::never(); + do_send_external_message(); + } + alarm_timestamp().relax(send_message_at_); +} + +void FabricContractWrapper::load_transactions() { + LOG(DEBUG) << "Loading transactions for " << address_.to_string() << ", last_lt=" << last_processed_lt_; + auto query = + create_tl_object(create_tl_object(address_.to_string())); + td::actor::send_closure( + client_, &tonlib::TonlibClientWrapper::send_request, std::move(query), + [SelfId = actor_id(this)](td::Result> R) mutable { + if (R.is_error()) { + td::actor::send_closure(SelfId, &FabricContractWrapper::loaded_last_transactions, R.move_as_error()); + return; + } + auto obj = R.move_as_ok(); + td::actor::send_closure(SelfId, &FabricContractWrapper::load_last_transactions, + std::vector>(), + std::move(obj->last_transaction_id_), (td::uint32)obj->sync_utime_); + }); +} + +void FabricContractWrapper::load_last_transactions(std::vector> transactions, + tl_object_ptr next_id, + td::uint32 utime) { + if ((td::uint64)next_id->lt_ <= last_processed_lt_) { + loaded_last_transactions(std::make_pair(std::move(transactions), utime)); + return; + } + auto query = create_tl_object( + nullptr, create_tl_object(address_.to_string()), std::move(next_id), 10, false); + td::actor::send_closure( + client_, &tonlib::TonlibClientWrapper::send_request, std::move(query), + [transactions = std::move(transactions), last_processed_lt = last_processed_lt_, SelfId = actor_id(this), + utime](td::Result> R) mutable { + if (R.is_error()) { + td::actor::send_closure(SelfId, &FabricContractWrapper::loaded_last_transactions, R.move_as_error()); + return; + } + auto obj = R.move_as_ok(); + for (auto& transaction : obj->transactions_) { + if ((td::uint64)transaction->transaction_id_->lt_ <= last_processed_lt || + (double)transaction->utime_ < td::Clocks::system() - 86400 || transactions.size() 
>= 1000) { + LOG(DEBUG) << "Stopping loading transactions (too many or too old)"; + td::actor::send_closure(SelfId, &FabricContractWrapper::loaded_last_transactions, + std::make_pair(std::move(transactions), utime)); + return; + } + LOG(DEBUG) << "Adding transaction, lt=" << transaction->transaction_id_->lt_; + transactions.push_back(std::move(transaction)); + } + td::actor::send_closure(SelfId, &FabricContractWrapper::load_last_transactions, std::move(transactions), + std::move(obj->previous_transaction_id_), utime); + }); +} + +void FabricContractWrapper::loaded_last_transactions( + td::Result>, td::uint32>> R) { + if (R.is_error()) { + LOG(ERROR) << "Error during loading last transactions: " << R.move_as_error(); + alarm_timestamp().relax(process_transactions_at_ = td::Timestamp::in(30.0)); + return; + } + auto r = R.move_as_ok(); + auto transactions = std::move(r.first); + td::uint32 utime = r.second; + LOG(DEBUG) << "Finished loading " << transactions.size() << " transactions. sync_utime=" << utime; + std::reverse(transactions.begin(), transactions.end()); + for (tl_object_ptr& transaction : transactions) { + LOG(DEBUG) << "Processing transaction lt=" << transaction->transaction_id_->lt_; + last_processed_lt_ = transaction->transaction_id_->lt_; + // transaction->in_msg_->source_->account_address_.empty() - message is external + if (current_ext_message_ && current_ext_message_.value().sent && + transaction->in_msg_->source_->account_address_.empty()) { + auto msg_data = dynamic_cast(transaction->in_msg_->msg_data_.get()); + if (msg_data == nullptr) { + continue; + } + auto r_body = vm::std_boc_deserialize(msg_data->body_); + if (r_body.is_error()) { + LOG(WARNING) << "Invalid response from tonlib: " << r_body.move_as_error(); + continue; + } + td::Ref body = r_body.move_as_ok(); + vm::CellSlice cs(vm::NoVm(), body); + if (cs.size() < 512 + 96) { + continue; + } + cs.skip_first(512 + 64); + auto seqno = (td::uint32)cs.fetch_ulong(32); + if (seqno != current_ext_message_.value().seqno) { + continue; + } + if (current_ext_message_.value().ext_msg_body_hash != body->get_hash().bits()) { + do_send_external_message_finish(td::Status::Error("Another external message with the same seqno was accepted")); + continue; + } + do_send_external_message_finish(&transaction->out_msgs_); + } + } + for (tl_object_ptr& transaction : transactions) { + callback_->on_transaction(std::move(transaction)); + } + if (current_ext_message_ && current_ext_message_.value().sent && current_ext_message_.value().timeout < utime) { + do_send_external_message_finish(td::Status::Error("Timeout")); + } + alarm_timestamp().relax(process_transactions_at_ = td::Timestamp::in(10.0)); +} + +void FabricContractWrapper::run_get_method( + std::string method, std::vector> args, + td::Promise>> promise) { + ::run_get_method(address_, client_, std::move(method), std::move(args), std::move(promise)); +} + +void FabricContractWrapper::send_internal_message(ContractAddress dest, td::RefInt256 coins, vm::CellSlice body, + td::Promise promise) { + td::Bits256 body_hash = vm::CellBuilder().append_cellslice(body).finalize_novm()->get_hash().bits(); + LOG(DEBUG) << "send_internal_message " << address_.to_string() << " -> " << dest.to_string() << ", " << coins + << " nanoTON, body=" << body_hash.to_hex(); + CHECK(coins->sgn() >= 0); + pending_messages_.push(PendingMessage{dest, std::move(coins), std::move(body), body_hash, std::move(promise)}); + if (!send_message_at_ && !current_ext_message_) { + alarm_timestamp().relax(send_message_at_ =
td::Timestamp::in(1.0)); + } +} + +void FabricContractWrapper::do_send_external_message() { + CHECK(!current_ext_message_); + LOG(DEBUG) << "do_send_external message: " << pending_messages_.size() << " messages in queue"; + if (pending_messages_.empty()) { + return; + } + current_ext_message_ = CurrentExtMessage(); + while (current_ext_message_.value().int_msgs.size() < 4 && !pending_messages_.empty()) { + PendingMessage msg = std::move(pending_messages_.front()); + current_ext_message_.value().int_msgs.push_back(std::move(msg)); + pending_messages_.pop(); + } + run_get_method( + "get_wallet_params", {}, + [SelfId = actor_id(this)](td::Result>> R) { + td::uint32 seqno = 0; + td::uint32 subwallet_id = 0; + td::Bits256 public_key = td::Bits256::zero(); + auto S = [&]() -> td::Status { + TRY_RESULT(stack, std::move(R)); + if (stack.size() != 3) { + return td::Status::Error(PSTRING() << "Method returned " << stack.size() << " values, 3 expected"); + } + TRY_RESULT_PREFIX_ASSIGN(seqno, entry_to_int(stack[0]), "Invalid seqno: "); + TRY_RESULT_PREFIX_ASSIGN(subwallet_id, entry_to_int(stack[1]), "Invalid subwallet_id: "); + TRY_RESULT_PREFIX_ASSIGN(public_key, entry_to_bits256(stack[2]), "Invalid public_key: "); + return td::Status::OK(); + }(); + if (S.is_error()) { + td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_finish, + S.move_as_error_prefix("Failed to get wallet params: ")); + return; + } + td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_cont, seqno, subwallet_id, + public_key); + }); +} + +void FabricContractWrapper::do_send_external_message_cont(td::uint32 seqno, td::uint32 subwallet_id, + td::Bits256 public_key) { + LOG(DEBUG) << "Got wallet params: seqno=" << seqno << ", subwallet_id=" << subwallet_id + << ", key=" << public_key.to_hex(); + CHECK(current_ext_message_); + current_ext_message_.value().seqno = seqno; + current_ext_message_.value().timeout = (td::uint32)td::Clocks::system() + 45; + vm::CellBuilder b; + b.store_long(subwallet_id, 32); // subwallet id. 
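+  // The unsigned body continues below with valid_until:uint32 and seqno:uint32, followed by up to four (mode:uint8, ^message) pairs, which is the same layout that the wallet's recv_external parses.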
+ b.store_long(current_ext_message_.value().timeout, 32); // valid until + b.store_long(seqno, 32); // seqno + for (const PendingMessage& msg : current_ext_message_.value().int_msgs) { + vm::CellBuilder b2; + b2.store_long(3 << 2, 6); // 0 ihr_disabled:Bool bounce:Bool bounced:Bool src:MsgAddressInt + b2.append_cellslice(msg.dest.to_cellslice()); // dest:MsgAddressInt + store_coins(b2, msg.value); // grams:Grams + b2.store_zeroes(1 + 4 + 4 + 64 + 32 + 1); // extre, ihr_fee, fwd_fee, created_lt, created_at, init + // body:(Either X ^X) + if (b2.remaining_bits() >= 1 + msg.body.size() && b2.remaining_refs() >= msg.body.size_refs()) { + b2.store_zeroes(1); + b2.append_cellslice(msg.body); + } else { + b2.store_ones(1); + b2.store_ref(vm::CellBuilder().append_cellslice(msg.body).finalize_novm()); + } + b.store_long(3, 8); // mode + b.store_ref(b2.finalize_novm()); // message + } + td::Ref to_sign = b.finalize_novm(); + td::BufferSlice hash(to_sign->get_hash().as_slice()); + LOG(DEBUG) << "Signing external message"; + td::actor::send_closure( + keyring_, &keyring::Keyring::sign_message, PublicKey(pubkeys::Ed25519(public_key)).compute_short_id(), + std::move(hash), [SelfId = actor_id(this), data = std::move(to_sign)](td::Result R) mutable { + if (R.is_error()) { + td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_finish, + R.move_as_error_prefix("Failed to sign message: ")); + return; + } + auto signature = R.move_as_ok(); + CHECK(signature.size() == 64); + vm::CellBuilder b; + b.store_bytes(signature); + b.append_cellslice(vm::load_cell_slice(data)); + td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_cont2, b.finalize_novm()); + }); +} + +void FabricContractWrapper::do_send_external_message_cont2(td::Ref ext_msg_body) { + CHECK(current_ext_message_); + LOG(DEBUG) << "Signed external message, sending: seqno=" << current_ext_message_.value().seqno; + current_ext_message_.value().sent = true; + current_ext_message_.value().ext_msg_body_hash = ext_msg_body->get_hash().bits(); + auto body = vm::std_boc_serialize(ext_msg_body).move_as_ok().as_slice().str(); + auto query = create_tl_object( + create_tl_object(address_.to_string()), "", std::move(body)); + td::actor::send_closure(client_, &tonlib::TonlibClientWrapper::send_request, + std::move(query), [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_finish, + R.move_as_error_prefix("Failed to send message: ")); + } else { + LOG(DEBUG) << "External message was sent to liteserver"; + } + }); +} + +void FabricContractWrapper::do_send_external_message_finish( + td::Result>*> R) { + CHECK(current_ext_message_); + if (R.is_error()) { + LOG(DEBUG) << "Failed to send external message seqno=" << current_ext_message_.value().seqno << ": " << R.error(); + for (auto& msg : current_ext_message_.value().int_msgs) { + msg.promise.set_error(R.error().clone()); + } + } else { + LOG(DEBUG) << "External message seqno=" << current_ext_message_.value().seqno << " was sent"; + const auto& out_msgs = *R.ok(); + auto& msgs = current_ext_message_.value().int_msgs; + for (const auto& out_msg : out_msgs) { + ContractAddress dest = ContractAddress::parse(out_msg->destination_->account_address_).move_as_ok(); + td::RefInt256 value = td::make_refint((td::uint64)out_msg->value_); + td::Bits256 body_hash; + body_hash.as_slice().copy_from(out_msg->body_hash_); + bool found = false; + for (size_t i = 0; i < msgs.size(); ++i) { 
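+        // An internal message counts as delivered only if destination, value and body hash all match the outgoing message recorded in the transaction.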
+ if (msgs[i].dest == dest && msgs[i].value->cmp(*value) == 0 && msgs[i].body_hash == body_hash) { + LOG(DEBUG) << "Internal message was sent dest=" << dest.to_string() << ", value=" << value + << ", body_hash=" << body_hash.to_hex(); + msgs[i].promise.set_result(td::Unit()); + msgs.erase(msgs.begin() + i); + found = true; + break; + } + } + if (!found) { + LOG(DEBUG) << "Unexpected internal message was sent: dest=" << dest.to_string() << " value=" << value + << " body_hash=" << body_hash; + } + } + for (auto& msg : msgs) { + LOG(DEBUG) << "Internal message WAS NOT SENT dest=" << msg.dest.to_string() << ", value=" << msg.value + << ", body_hash=" << msg.body_hash.to_hex(); + msg.promise.set_result(td::Status::Error("External message was accepted, but internal message was not sent")); + } + } + current_ext_message_ = {}; + if (!pending_messages_.empty()) { + do_send_external_message(); + } +} + +bool store_coins(vm::CellBuilder& b, const td::RefInt256& x) { + unsigned len = (((unsigned)x->bit_size(false) + 7) >> 3); + if (len >= 16) { + return false; + } + return b.store_long_bool(len, 4) && b.store_int256_bool(*x, len * 8, false); +} + +bool store_coins(vm::CellBuilder& b, td::uint64 x) { + return store_coins(b, td::make_refint(x)); +} + +td::Result generate_fabric_contract(td::actor::ActorId keyring) { + auto private_key = PrivateKey{privkeys::Ed25519::random()}; + td::Bits256 public_key = private_key.compute_public_key().ed25519_value().raw(); + + td::Slice code_boc(STORAGE_PROVIDER_CODE, sizeof(STORAGE_PROVIDER_CODE)); + TRY_RESULT(code, vm::std_boc_deserialize(code_boc)); + + LOG(DEBUG) << "Generating storage provider state init. code_hash=" << code->get_hash().to_hex() + << " public_key=" << public_key.to_hex(); + + vm::CellBuilder b; + b.store_long(0, 32); // seqno + b.store_long(0, 32); // subwallet_id + b.store_bytes(public_key.as_slice()); // public_key + b.store_long(0, 1); // accept_new_contracts (false by default) + store_coins(b, 1'000'000); // rate_per_mb_day + b.store_long(86400, 32); // max_span + b.store_long(1 << 20, 64); // min_file_size + b.store_long(1 << 30, 64); // max_file_size + td::Ref data = b.finalize_novm(); + + // _ split_depth:(Maybe (## 5)) special:(Maybe TickTock) + // code:(Maybe ^Cell) data:(Maybe ^Cell) + // library:(HashmapE 256 SimpleLib) = StateInit; + td::Ref state_init = + vm::CellBuilder().store_long(0b00110, 5).store_ref(std::move(code)).store_ref(std::move(data)).finalize_novm(); + ContractAddress address{basechainId, state_init->get_hash().bits()}; + + // Message body + b = vm::CellBuilder(); + b.store_long(0, 32); // subwallet_id + b.store_long((td::uint32)td::Clocks::system() + 3600 * 24 * 7, 32); // valid_until + b.store_long(0, 32); // seqno + td::Ref to_sign = b.finalize_novm(); + TRY_RESULT(decryptor, private_key.create_decryptor()); + TRY_RESULT(signature, decryptor->sign(to_sign->get_hash().as_slice())); + CHECK(signature.size() == 64); + td::Ref msg_body = + vm::CellBuilder().store_bytes(signature).append_cellslice(vm::CellSlice(vm::NoVm(), to_sign)).finalize_novm(); + + td::actor::send_closure(keyring, &keyring::Keyring::add_key, private_key, false, + [](td::Result R) { R.ensure(); }); + return FabricContractInit{address, state_init, msg_body}; +} + +td::Ref create_new_contract_message_body(td::Ref info, td::Bits256 microchunk_hash, + td::uint64 query_id, td::RefInt256 rate, td::uint32 max_span) { + // new_storage_contract#00000001 query_id:uint64 info:(^ TorrentInfo) microchunk_hash:uint256 + // expected_rate:Coins 
expected_max_span:uint32 = NewStorageContract; + vm::CellBuilder b; + b.store_long(0x107c49ef, 32); // const op::offer_storage_contract = 0x107c49ef; + b.store_long(query_id, 64); + b.store_ref(std::move(info)); + b.store_bytes(microchunk_hash.as_slice()); + store_coins(b, rate); + b.store_long(max_span, 32); + return b.finalize_novm(); +} + +void get_storage_contract_data(ContractAddress address, td::actor::ActorId client, + td::Promise promise) { + run_get_method( + address, client, "get_storage_contract_data", {}, + promise.wrap([](std::vector> stack) -> td::Result { + if (stack.size() < 11) { + return td::Status::Error("Too few entries"); + } + // active, balance, provider, merkle_hash, file_size, next_proof, rate_per_mb_day, max_span, last_proof_time, + // client, torrent_hash + TRY_RESULT(active, entry_to_int(stack[0])); + TRY_RESULT(balance, entry_to_int(stack[1])); + TRY_RESULT(microchunk_hash, entry_to_bits256(stack[3])); + TRY_RESULT(file_size, entry_to_int(stack[4])); + TRY_RESULT(next_proof, entry_to_int(stack[5])); + TRY_RESULT(rate_per_mb_day, entry_to_int(stack[6])); + TRY_RESULT(max_span, entry_to_int(stack[7])); + TRY_RESULT(last_proof_time, entry_to_int(stack[8])); + TRY_RESULT(torrent_hash, entry_to_bits256(stack[10])); + return StorageContractData{(bool)active, balance, microchunk_hash, file_size, next_proof, + rate_per_mb_day, max_span, last_proof_time, torrent_hash}; + })); +} diff --git a/storage/storage-daemon/smc-util.h b/storage/storage-daemon/smc-util.h new file mode 100644 index 00000000..0b921d4d --- /dev/null +++ b/storage/storage-daemon/smc-util.h @@ -0,0 +1,184 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ + +#pragma once +#include "ton/ton-types.h" +#include "crypto/vm/cellslice.h" +#include "block/block-parse.h" +#include "td/actor/actor.h" +#include "tonlib/tonlib/TonlibClientWrapper.h" +#include +#include "keyring/keyring.h" + +using namespace ton; + +struct ContractAddress { + WorkchainId wc = workchainIdNotYet; + td::Bits256 addr = td::Bits256::zero(); + + ContractAddress() = default; + ContractAddress(WorkchainId wc, td::Bits256 addr) : wc(wc), addr(addr) { + } + + std::string to_string() const { + return PSTRING() << wc << ":" << addr.to_hex(); + } + td::Ref to_cellslice() const { + return block::tlb::t_MsgAddressInt.pack_std_address(wc, addr); + } + + static td::Result parse(td::Slice s) { + TRY_RESULT(x, block::StdAddress::parse(s)); + return ContractAddress(x.workchain, x.addr); + } + + bool operator==(const ContractAddress& other) const { + return wc == other.wc && addr == other.addr; + } + bool operator!=(const ContractAddress& other) const { + return !(*this == other); + } + bool operator<(const ContractAddress& other) const { + return wc == other.wc ? 
addr < other.addr : wc < other.wc; + } +}; + +void run_get_method(ContractAddress address, td::actor::ActorId client, std::string method, + std::vector> args, + td::Promise>> promise); +void check_contract_exists(ContractAddress address, td::actor::ActorId client, + td::Promise promise); +void get_contract_balance(ContractAddress address, td::actor::ActorId client, + td::Promise promise); + +class FabricContractWrapper : public td::actor::Actor { + public: + class Callback { + public: + virtual ~Callback() = default; + virtual void on_transaction(tl_object_ptr transaction) = 0; + }; + + explicit FabricContractWrapper(ContractAddress address, td::actor::ActorId client, + td::actor::ActorId keyring, td::unique_ptr callback, + td::uint64 last_processed_lt); + + void start_up() override; + void alarm() override; + + void run_get_method(std::string method, std::vector> args, + td::Promise>> promise); + void send_internal_message(ContractAddress dest, td::RefInt256 coins, vm::CellSlice body, + td::Promise promise); + + private: + ContractAddress address_; + td::actor::ActorId client_; + td::actor::ActorId keyring_; + td::unique_ptr callback_; + + td::Timestamp process_transactions_at_ = td::Timestamp::now(); + td::uint64 last_processed_lt_ = 0; + + struct PendingMessage { + ContractAddress dest; + td::RefInt256 value; + vm::CellSlice body; + td::Bits256 body_hash; + td::Promise promise; + }; + struct CurrentExtMessage { + std::vector int_msgs; + td::uint32 seqno = 0; + bool sent = false; + td::Bits256 ext_msg_body_hash = td::Bits256::zero(); + td::uint32 timeout = 0; + }; + std::queue pending_messages_; + td::Timestamp send_message_at_ = td::Timestamp::never(); + td::optional current_ext_message_; + + void load_transactions(); + void load_last_transactions(std::vector> transactions, + tl_object_ptr next_id, td::uint32 utime); + void loaded_last_transactions( + td::Result>, td::uint32>> R); + + void do_send_external_message(); + void do_send_external_message_cont(td::uint32 seqno, td::uint32 subwallet_id, td::Bits256 public_key); + void do_send_external_message_cont2(td::Ref ext_msg_body); + void do_send_external_message_finish(td::Result>*> R); +}; + +template +inline td::Result entry_to_int(const tl_object_ptr& entry) { + auto num = dynamic_cast(entry.get()); + if (num == nullptr) { + return td::Status::Error("Unexpected value type"); + } + return td::to_integer_safe(num->number_->number_); +} + +template <> +inline td::Result entry_to_int(const tl_object_ptr& entry) { + auto num = dynamic_cast(entry.get()); + if (num == nullptr) { + return td::Status::Error("Unexpected value type"); + } + auto x = td::dec_string_to_int256(num->number_->number_); + if (x.is_null()) { + return td::Status::Error("Invalid integer value"); + } + return x; +} + +inline td::Result entry_to_bits256(const tl_object_ptr& entry) { + TRY_RESULT(x, entry_to_int(entry)); + td::Bits256 bits; + if (!x->export_bytes(bits.data(), 32, false)) { + return td::Status::Error("Invalid int256"); + } + return bits; +} + +bool store_coins(vm::CellBuilder& b, const td::RefInt256& x); +bool store_coins(vm::CellBuilder& b, td::uint64 x); + +struct FabricContractInit { + ContractAddress address; + td::Ref state_init; + td::Ref msg_body; +}; +td::Result generate_fabric_contract(td::actor::ActorId keyring); + +td::Ref create_new_contract_message_body(td::Ref info, td::Bits256 microchunk_hash, + td::uint64 query_id, td::RefInt256 rate, td::uint32 max_span); + +struct StorageContractData { + bool active; + td::RefInt256 balance; + td::Bits256 
microchunk_hash; + td::uint64 file_size; + td::uint64 next_proof; + td::RefInt256 rate_per_mb_day; + td::uint32 max_span; + td::uint32 last_proof_time; + td::Bits256 torrent_hash; +}; + +void get_storage_contract_data(ContractAddress address, td::actor::ActorId client, + td::Promise promise); \ No newline at end of file diff --git a/storage/storage-daemon/storage-daemon-cli.cpp b/storage/storage-daemon/storage-daemon-cli.cpp new file mode 100644 index 00000000..6fe14286 --- /dev/null +++ b/storage/storage-daemon/storage-daemon-cli.cpp @@ -0,0 +1,1747 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ +#include "common/bitstring.h" +#include "keys/encryptor.h" +#include "adnl/adnl-ext-client.h" + +#include "td/utils/port/signals.h" +#include "td/utils/Parser.h" +#include "td/utils/OptionParser.h" +#include "td/utils/PathView.h" +#include "td/utils/misc.h" +#include "td/utils/filesystem.h" +#include "td/utils/port/path.h" + +#include "td/actor/MultiPromise.h" +#include "terminal/terminal.h" + +#include "auto/tl/ton_api_json.h" + +#include +#include +#include +#include +#include "git.h" +#include "common/refint.h" +#include "crypto/block/block.h" + +using namespace ton; + +bool is_whitespace(char c) { + return strchr(" \t\n\r", c) != nullptr; +}; + +td::Result> tokenize(td::Slice s) { + const char* ptr = s.begin(); + auto skip_ws = [&]() { + while (ptr != s.end() && is_whitespace(*ptr)) { + ++ptr; + } + }; + std::vector tokens; + while (true) { + skip_ws(); + if (ptr == s.end()) { + break; + } + char quote = '\0'; + if (*ptr == '"' || *ptr == '\'') { + quote = *ptr; + ++ptr; + } + std::string token; + while (true) { + if (ptr == s.end()) { + if (quote) { + return td::Status::Error("Unmatched quote"); + } + break; + } else if (*ptr == '\\') { + ++ptr; + if (ptr == s.end()) { + return td::Status::Error("Backslash at the end of the line"); + } + switch (*ptr) { + case 'n': + token += '\n'; + break; + case 't': + token += '\t'; + break; + case 'r': + token += '\r'; + break; + default: + token += *ptr; + } + ++ptr; + } else if (*ptr == quote || (!quote && is_whitespace(*ptr))) { + ++ptr; + break; + } else { + token += *ptr++; + } + } + tokens.push_back(token); + } + return tokens; +} + +std::string size_to_str(td::uint64 size) { + td::StringBuilder s; + s << td::format::as_size(size); + return s.as_cslice().str(); +} + +std::string time_to_str(td::uint32 time) { + char time_buffer[80]; + time_t rawtime = time; + struct tm tInfo; +#if defined(_WIN32) || defined(_WIN64) + struct tm* timeinfo = localtime_s(&tInfo, &rawtime) ? 
nullptr : &tInfo; +#else + struct tm* timeinfo = localtime_r(&rawtime, &tInfo); +#endif + assert(timeinfo == &tInfo); + strftime(time_buffer, 80, "%c", timeinfo); + return time_buffer; +} + +std::string coins_to_str(const td::RefInt256& x) { + if (x.is_null() || x->sgn() < 0) { + return "???"; + } + char buf[12]; + snprintf(buf, sizeof(buf), ".%09llu", (x % td::make_refint(1'000'000'000))->to_long()); + return (x / td::make_refint(1'000'000'000))->to_dec_string() + buf; +} + +std::string coins_to_str(const std::string& s) { + return coins_to_str(td::string_to_int256(s)); +} + +template +void print_json(const tl_object_ptr& obj) { + std::string s = td::json_encode(td::ToJson(*obj), true); + td::TerminalIO::out() << s << "\n"; +} + +void print_table(const std::vector>& table, std::set left_cols = {}) { + if (table.empty()) { + return; + } + size_t cols = table[0].size(); + std::vector col_size(cols, 0); + for (const auto& row : table) { + CHECK(row.size() == cols); + for (size_t i = 0; i < cols; ++i) { + col_size[i] = std::max(col_size[i], row[i].size()); + } + } + for (const auto& row : table) { + std::string row_str; + for (size_t i = 0; i < cols; ++i) { + if (i != 0) { + row_str += " "; + } + size_t pad = col_size[i] - row[i].size(); + if (!left_cols.count(i)) { + while (pad--) { + row_str += ' '; + } + } + row_str += row[i]; + if (left_cols.count(i)) { + while (pad--) { + row_str += ' '; + } + } + } + td::TerminalIO::out() << row_str << "\n"; + } +} + +struct OptionalProviderParams { + td::optional accept_new_contracts; + td::optional rate_per_mb_day; + td::optional max_span; + td::optional minimal_file_size; + td::optional maximal_file_size; +}; + +struct OptionalProviderConfig { + td::optional max_contracts; + td::optional max_total_size; +}; + +class StorageDaemonCli : public td::actor::Actor { + public: + explicit StorageDaemonCli(td::IPAddress server_ip, PrivateKey client_private_key, PublicKey server_public_key, + std::vector commands) + : server_ip_(server_ip) + , client_private_key_(client_private_key) + , server_public_key_(server_public_key) + , commands_(std::move(commands)) + , batch_mode_(!commands_.empty()) { + } + + void start_up() override { + class ExtClientCallback : public adnl::AdnlExtClient::Callback { + public: + explicit ExtClientCallback(td::actor::ActorId id) : id_(id) { + } + void on_ready() override { + LOG(INFO) << "Connected"; + td::actor::send_closure(id_, &StorageDaemonCli::on_conn_status, true); + } + void on_stop_ready() override { + LOG(WARNING) << "Connection closed"; + td::actor::send_closure(id_, &StorageDaemonCli::on_conn_status, false); + } + + private: + td::actor::ActorId id_; + }; + CHECK(server_ip_.is_valid()); + client_ = adnl::AdnlExtClient::create(adnl::AdnlNodeIdFull{server_public_key_}, client_private_key_, server_ip_, + std::make_unique(actor_id(this))); + + if (!batch_mode_) { + class TerminalCallback : public td::TerminalIO::Callback { + public: + void line_cb(td::BufferSlice line) override { + td::actor::send_closure(id_, &StorageDaemonCli::parse_line, std::move(line)); + } + TerminalCallback(td::actor::ActorId id) : id_(std::move(id)) { + } + + private: + td::actor::ActorId id_; + }; + io_ = td::TerminalIO::create("> ", true, false, std::make_unique(actor_id(this))); + td::actor::send_closure(io_, &td::TerminalIO::set_log_interface); + } + } + + void on_conn_status(bool status) { + if (batch_mode_ && !batch_started_) { + batch_started_ = true; + parse_line(td::BufferSlice(commands_[cur_command_++])); + } + } + + void 
parse_line(td::BufferSlice line) { + td::Status S = parse_line_impl(std::move(line)); + if (S.is_error()) { + command_finished(std::move(S)); + } + } + + td::Status parse_line_impl(td::BufferSlice line) { + auto parse_hash = [](const std::string& s) -> td::Result { + td::Bits256 hash; + if (hash.from_hex(s) != 256) { + return td::Status::Error("Invalid BagID"); + } + return hash; + }; + auto parse_torrent = [&](const std::string& s) -> td::Result { + if (s.length() == 64) { + return parse_hash(s); + } + if (batch_mode_) { + return td::Status::Error("Indices are not available in batch mode"); + } + TRY_RESULT(id, td::to_integer_safe(s)); + auto it = id_to_hash_.find(id); + if (it == id_to_hash_.end()) { + return td::Status::Error(PSTRING() << "Unknown index " << id); + } + return it->second; + }; + + TRY_RESULT_PREFIX(tokens, tokenize(line), "Failed to parse line: "); + if (tokens.empty()) { + command_finished(td::Status::OK()); + return td::Status::OK(); + } + if (tokens[0] == "quit" || tokens[0] == "exit") { + if (tokens.size() != 1) { + return td::Status::Error("Unexpected tokens"); + } + std::_Exit(0); + } else if (tokens[0] == "help") { + if (tokens.size() != 1) { + return td::Status::Error("Unexpected tokens"); + } + return execute_help(); + } else if (tokens[0] == "setverbosity") { + if (tokens.size() != 2) { + return td::Status::Error("Expected level"); + } + TRY_RESULT_PREFIX(level, td::to_integer_safe(tokens[1]), "Invalid level: "); + return execute_set_verbosity(level); + } else if (tokens[0] == "create") { + std::string path; + bool found_path = false; + std::string description; + bool json = false; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "-d") { + ++i; + if (i == tokens.size()) { + return td::Status::Error("Unexpected EOLN"); + } + description = tokens[i]; + continue; + } + if (tokens[i] == "--json") { + json = true; + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (found_path) { + return td::Status::Error("Unexpected token"); + } + path = tokens[i]; + found_path = true; + } + if (!found_path) { + return td::Status::Error("Unexpected EOLN"); + } + return execute_create(std::move(path), std::move(description), json); + } else if (tokens[0] == "add-by-hash" || tokens[0] == "add-by-meta") { + td::optional param; + std::string root_dir; + bool paused = false; + bool json = false; + td::optional> partial; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "-d") { + ++i; + if (i == tokens.size()) { + return td::Status::Error("Unexpected EOLN"); + } + root_dir = tokens[i]; + continue; + } + if (tokens[i] == "--paused") { + paused = true; + continue; + } + if (tokens[i] == "--json") { + json = true; + continue; + } + if (tokens[i] == "--partial") { + partial = std::vector(tokens.begin() + i + 1, tokens.end()); + break; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (param) { + return td::Status::Error("Unexpected token"); + } + param = tokens[i]; + } + if (!param) { + return td::Status::Error("Unexpected EOLN"); + } + if (tokens[0] == "add-by-hash") { + TRY_RESULT(hash, parse_hash(param.value())); + return execute_add_by_hash(hash, std::move(root_dir), paused, std::move(partial), json); + } else { + return execute_add_by_meta(param.value(), std::move(root_dir), paused, std::move(partial), json); + } + } else if (tokens[0] == "list") { + bool with_hashes = false; 
+ bool json = false; + for (size_t i = 1; i < tokens.size(); ++i) { + if (tokens[i] == "--hashes") { + with_hashes = true; + continue; + } + if (tokens[i] == "--json") { + json = true; + continue; + } + return td::Status::Error(PSTRING() << "Unexpected argument " << tokens[i]); + } + return execute_list(with_hashes, json); + } else if (tokens[0] == "get") { + td::Bits256 hash; + bool found_hash = false; + bool json = false; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--json") { + json = true; + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (found_hash) { + return td::Status::Error("Unexpected token"); + } + TRY_RESULT_ASSIGN(hash, parse_torrent(tokens[i])); + found_hash = true; + } + if (!found_hash) { + return td::Status::Error("Unexpected EOLN"); + } + return execute_get(hash, json); + } else if (tokens[0] == "get-meta") { + if (tokens.size() != 3) { + return td::Status::Error("Expected bag and file"); + } + TRY_RESULT(hash, parse_torrent(tokens[1])); + return execute_get_meta(hash, tokens[2]); + } else if (tokens[0] == "get-peers") { + td::Bits256 hash; + bool found_hash = false; + bool json = false; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--json") { + json = true; + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (found_hash) { + return td::Status::Error("Unexpected token"); + } + TRY_RESULT_ASSIGN(hash, parse_torrent(tokens[i])); + found_hash = true; + } + if (!found_hash) { + return td::Status::Error("Unexpected EOLN"); + } + return execute_get_peers(hash, json); + } else if (tokens[0] == "download-pause" || tokens[0] == "download-resume") { + if (tokens.size() != 2) { + return td::Status::Error("Expected bag"); + } + TRY_RESULT(hash, parse_torrent(tokens[1])); + return execute_set_active_download(hash, tokens[0] == "download-resume"); + } else if (tokens[0] == "priority-all") { + if (tokens.size() != 3) { + return td::Status::Error("Expected bag and priority"); + } + TRY_RESULT(hash, parse_torrent(tokens[1])); + TRY_RESULT_PREFIX(priority, td::to_integer_safe(tokens[2]), "Invalid priority: "); + return execute_set_priority_all(hash, priority); + } else if (tokens[0] == "priority-idx") { + if (tokens.size() != 4) { + return td::Status::Error("Expected bag, idx and priority"); + } + TRY_RESULT(hash, parse_torrent(tokens[1])); + TRY_RESULT_PREFIX(idx, td::to_integer_safe(tokens[2]), "Invalid idx: "); + TRY_RESULT_PREFIX(priority, td::to_integer_safe(tokens[3]), "Invalid priority: "); + return execute_set_priority_idx(hash, idx, priority); + } else if (tokens[0] == "priority-name") { + if (tokens.size() != 4) { + return td::Status::Error("Expected bag, name and priority"); + } + TRY_RESULT(hash, parse_torrent(tokens[1])); + TRY_RESULT_PREFIX(priority, td::to_integer_safe(tokens[3]), "Invalid priority: "); + return execute_set_priority_name(hash, tokens[2], priority); + } else if (tokens[0] == "remove") { + td::Bits256 hash; + bool found_hash = false; + bool remove_files = false; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--remove-files") { + remove_files = true; + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (found_hash) { + return td::Status::Error("Unexpected token"); + } + TRY_RESULT_ASSIGN(hash, 
parse_torrent(tokens[i])); + found_hash = true; + } + if (!found_hash) { + return td::Status::Error("Unexpected EOLN"); + } + return execute_remove(hash, remove_files); + } else if (tokens[0] == "load-from") { + td::Bits256 hash; + std::string meta, path; + bool found_hash = false; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--meta") { + ++i; + meta = tokens[i]; + continue; + } + if (tokens[i] == "--files") { + ++i; + path = tokens[i]; + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (found_hash) { + return td::Status::Error("Unexpected token"); + } + TRY_RESULT_ASSIGN(hash, parse_torrent(tokens[i])); + found_hash = true; + } + if (!found_hash) { + return td::Status::Error("Unexpected EOLN"); + } + return execute_load_from(hash, std::move(meta), std::move(path)); + } else if (tokens[0] == "new-contract-message") { + td::Bits256 hash; + std::string file; + td::uint64 query_id = 0; + int cnt = 0; + td::optional provider_address; + td::optional rate; + td::optional max_span; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--query-id") { + ++i; + TRY_RESULT_PREFIX_ASSIGN(query_id, td::to_integer_safe(tokens[i]), "Invalid query id: "); + continue; + } + if (tokens[i] == "--provider") { + ++i; + provider_address = tokens[i]; + continue; + } + if (tokens[i] == "--rate") { + ++i; + rate = tokens[i]; + continue; + } + if (tokens[i] == "--max-span") { + ++i; + TRY_RESULT_PREFIX_ASSIGN(max_span, td::to_integer_safe(tokens[i]), "Invalid max span: "); + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (cnt == 0) { + TRY_RESULT_ASSIGN(hash, parse_torrent(tokens[i])); + } else if (cnt == 1) { + file = tokens[i]; + } + ++cnt; + } + if (cnt != 2) { + return td::Status::Error("Unexpected EOLN"); + } + return execute_new_contract_message(hash, std::move(file), query_id, std::move(provider_address), std::move(rate), + max_span); + } else if (tokens[0] == "import-pk") { + if (tokens.size() != 2) { + return td::Status::Error("Expected filename"); + } + return execute_import_pk(tokens[1]); + } else if (tokens[0] == "get-provider-params") { + bool json = false; + std::string address; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--json") { + json = true; + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (!address.empty()) { + return td::Status::Error("Unexpected token"); + } + address = tokens[i]; + } + return execute_get_provider_params(address, json); + } else if (tokens[0] == "deploy-provider") { + if (tokens.size() != 1) { + return td::Status::Error("Unexpected tokens"); + } + return execute_deploy_provider(); + } else if (tokens[0] == "init-provider") { + if (tokens.size() != 2) { + return td::Status::Error("Expected address"); + } + return execute_init_provider(tokens[1]); + } else if (tokens[0] == "remove-storage-provider") { + if (tokens.size() != 1) { + return td::Status::Error("Unexpected tokens"); + } + return execute_remove_storage_provider(); + } else if (tokens[0] == "set-provider-params") { + if (tokens.size() == 1) { + return td::Status::Error("No parameters specified"); + } + if (tokens.size() % 2 == 0) { + return td::Status::Error("Unexpected number of tokens"); + } + OptionalProviderParams new_params; + for (size_t i = 1; i < tokens.size(); i += 
2) { + if (tokens[i] == "--accept") { + if (tokens[i + 1] == "0") { + new_params.accept_new_contracts = false; + } else if (tokens[i + 1] == "1") { + new_params.accept_new_contracts = true; + } else { + return td::Status::Error("Invalid value for --accept"); + } + continue; + } + if (tokens[i] == "--rate") { + new_params.rate_per_mb_day = tokens[i + 1]; + continue; + } + if (tokens[i] == "--max-span") { + TRY_RESULT_PREFIX(x, td::to_integer_safe(tokens[i + 1]), "Invalid value for --max-span: "); + new_params.max_span = x; + continue; + } + if (tokens[i] == "--min-file-size") { + TRY_RESULT_PREFIX(x, td::to_integer_safe(tokens[i + 1]), "Invalid value for --min-file-size: "); + new_params.minimal_file_size = x; + continue; + } + if (tokens[i] == "--max-file-size") { + TRY_RESULT_PREFIX(x, td::to_integer_safe(tokens[i + 1]), "Invalid value for --max-file-size: "); + new_params.maximal_file_size = x; + continue; + } + return td::Status::Error(PSTRING() << "Unexpected token " << tokens[i]); + } + return execute_set_provider_params(std::move(new_params)); + } else if (tokens[0] == "get-provider-info") { + bool with_balances = false; + bool with_contracts = false; + bool json = false; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--balances") { + with_balances = true; + continue; + } + if (tokens[i] == "--contracts") { + with_contracts = true; + continue; + } + if (tokens[i] == "--json") { + json = true; + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + } + return execute_get_provider_info(with_balances, with_contracts, json); + } else if (tokens[0] == "set-provider-config") { + if (tokens.size() == 1) { + return td::Status::Error("No parameters specified"); + } + if (tokens.size() % 2 == 0) { + return td::Status::Error("Unexpected number of tokens"); + } + OptionalProviderConfig new_config; + for (size_t i = 1; i < tokens.size(); i += 2) { + if (tokens[i] == "--max-contracts") { + TRY_RESULT_PREFIX(x, td::to_integer_safe(tokens[i + 1]), "Invalid value for --max-contracts: "); + new_config.max_contracts = x; + continue; + } + if (tokens[i] == "--max-total-size") { + TRY_RESULT_PREFIX(x, td::to_integer_safe(tokens[i + 1]), "Invalid value for --max-total-size: "); + new_config.max_total_size = x; + continue; + } + return td::Status::Error(PSTRING() << "Unexpected token " << tokens[i]); + } + return execute_set_provider_config(std::move(new_config)); + } else if (tokens[0] == "withdraw") { + if (tokens.size() != 2) { + return td::Status::Error("Expected contract address"); + } + return execute_withdraw(tokens[1]); + } else if (tokens[0] == "withdraw-all") { + if (tokens.size() != 1) { + return td::Status::Error("Unexpected tokens"); + } + return execute_withdraw_all(); + } else if (tokens[0] == "send-coins") { + std::string address; + std::string amount; + int cnt = 0; + std::string message; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--message") { + ++i; + if (i == tokens.size()) { + return td::Status::Error("Expected message"); + } + message = tokens[i]; + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (cnt == 0) { + address = tokens[i]; + } else if (cnt == 1) { + amount = tokens[i]; + } else { + return td::Status::Error("Expected address and amount"); + } + ++cnt; + } + if (cnt != 2) { + return td::Status::Error("Expected address and amount"); + } + return 
execute_send_coins(address, amount, message); + } else if (tokens[0] == "close-contract") { + if (tokens.size() != 2) { + return td::Status::Error("Expected address"); + } + return execute_close_contract(tokens[1]); + } else { + return td::Status::Error(PSTRING() << "Error: unknown command " << tokens[0]); + } + } + + td::Status execute_help() { + td::TerminalIO::out() << "help\tPrint this help\n"; + td::TerminalIO::out() << "create [-d description] [--json] <path>\tCreate bag of files from <path>\n"; + td::TerminalIO::out() << "\t-d - Description will be stored in torrent info.\n"; + td::TerminalIO::out() << "\t--json\tOutput in json\n"; + td::TerminalIO::out() + << "add-by-hash <bag-id> [-d root_dir] [--paused] [--json] [--partial file1 file2 ...]\tAdd bag " "with given BagID (in hex)\n"; + td::TerminalIO::out() << "\t-d\tTarget directory, default is an internal directory of storage-daemon\n"; + td::TerminalIO::out() << "\t--paused\tDon't start download immediately\n"; + td::TerminalIO::out() + << "\t--partial\tEverything after this flag is a list of filenames. Only these files will be downloaded.\n"; + td::TerminalIO::out() << "\t--json\tOutput in json\n"; + td::TerminalIO::out() << "add-by-meta <meta-file> [-d root_dir] [--paused] [--json] [--partial file1 file2 ...]\tLoad " "meta from file and add bag\n"; + td::TerminalIO::out() << "\tFlags are the same as in add-by-hash\n"; + td::TerminalIO::out() << "list [--hashes] [--json]\tPrint list of bags\n"; + td::TerminalIO::out() << "\t--hashes\tPrint full BagID\n"; + td::TerminalIO::out() << "\t--json\tOutput in json\n"; + td::TerminalIO::out() << "get <bag> [--json]\tPrint information about <bag>\n"; + td::TerminalIO::out() << "\t--json\tOutput in json\n"; + td::TerminalIO::out() << "\tHere and below bags are identified by BagID (in hex) or index (see bag list)\n"; + td::TerminalIO::out() << "get-meta <bag> <file>\tSave bag meta of <bag> to <file>\n"; + td::TerminalIO::out() << "get-peers <bag> [--json]\tPrint a list of peers\n"; + td::TerminalIO::out() << "\t--json\tOutput in json\n"; + td::TerminalIO::out() << "download-pause <bag>\tPause download of <bag>\n"; + td::TerminalIO::out() << "download-resume <bag>\tResume download of <bag>\n"; + td::TerminalIO::out() << "priority-all <bag> <priority>\tSet priority of all files in <bag> to <priority>\n";
+ td::TerminalIO::out() << "\tPriority is in [0..255], 0 - don't download\n"; + td::TerminalIO::out() << "priority-idx <bag> <idx> <priority>\tSet priority of file #<idx> in <bag> to <priority>\n"; + td::TerminalIO::out() << "\tPriority is in [0..255], 0 - don't download\n"; + td::TerminalIO::out() << "priority-name <bag> <name> <priority>\tSet priority of file <name> in <bag> to <priority>\n";
+ td::TerminalIO::out() << "\tPriority is in [0..255], 0 - don't download\n"; + td::TerminalIO::out() << "remove <bag> [--remove-files]\tRemove <bag>\n"; + td::TerminalIO::out() << "\t--remove-files - also remove all files\n"; + td::TerminalIO::out() << "load-from <bag> [--meta meta] [--files path]\tProvide meta and data for an existing " "incomplete bag.\n"; + td::TerminalIO::out() << "\t--meta meta\ttorrent info and header will be inited (if not ready) from meta file\n"; + td::TerminalIO::out() << "\t--files path\tdata for files will be taken from here\n"; + td::TerminalIO::out() << "new-contract-message <bag> <file> [--query-id id] --provider <address>\tCreate " "\"new contract message\" for storage provider. Saves message body to <file>.\n"; + td::TerminalIO::out() << "\t<address>\tAddress of storage provider account to take parameters from.\n"; + td::TerminalIO::out() << "new-contract-message <bag> <file> [--query-id id] --rate <rate> --max-span <max-span> " "\tSame thing, but parameters are not fetched automatically.\n"; + td::TerminalIO::out() << "exit\tExit\n"; + td::TerminalIO::out() << "quit\tExit\n"; + td::TerminalIO::out() << "setverbosity <level>\tSet verbosity to <level> in [0..10]\n"; + td::TerminalIO::out() << "\nStorage provider control:\n"; + td::TerminalIO::out() << "import-pk <file>\tImport private key from <file>\n"; + td::TerminalIO::out() << "deploy-provider\tInit storage provider by deploying a new provider smart contract\n"; + td::TerminalIO::out() + << "init-provider <address>\tInit storage provider using the existing provider smart contract\n"; + td::TerminalIO::out() << "remove-storage-provider\tRemove storage provider\n"; + td::TerminalIO::out() + << "\tSmart contracts in blockchain and bags will remain intact, but they will not be managed anymore\n"; + td::TerminalIO::out() << "get-provider-params [address] [--json]\tPrint parameters of the smart contract\n"; + td::TerminalIO::out() + << "\taddress\tAddress of a smart contract. Default is the provider managed by this daemon.\n"; + td::TerminalIO::out() << "\t--json\tOutput in json\n"; + td::TerminalIO::out() << "set-provider-params [--accept x] [--rate x] [--max-span x] [--min-file-size x] " "[--max-file-size x]\tSet parameters of the smart contract\n"; + td::TerminalIO::out() << "\t--accept\tAccept new contracts: 0 (no) or 1 (yes)\n"; + td::TerminalIO::out() << "\t--rate\tPrice of storage, nanoTON per MB*day\n"; + td::TerminalIO::out() << "\t--max-span\n"; + td::TerminalIO::out() << "\t--min-file-size\tMinimal total size of a bag of files (bytes)\n"; + td::TerminalIO::out() << "\t--max-file-size\tMaximal total size of a bag of files (bytes)\n"; + td::TerminalIO::out() + << "get-provider-info [--balances] [--contracts] [--json]\tPrint information about storage provider\n"; + td::TerminalIO::out() << "\t--contracts\tPrint list of storage contracts\n"; + td::TerminalIO::out() << "\t--balances\tPrint balances of the main contract and storage contracts\n"; + td::TerminalIO::out() << "\t--json\tOutput in json\n"; + td::TerminalIO::out() + << "set-provider-config [--max-contracts x] [--max-total-size x]\tSet configuration parameters\n"; + td::TerminalIO::out() << "\t--max-contracts\tMaximal number of storage contracts\n"; + td::TerminalIO::out() << "\t--max-total-size\tMaximal total size of storage contracts (in bytes)\n"; + td::TerminalIO::out() << "withdraw <address>\tSend bounty from storage contract <address> to the main contract\n";

\tSend bounty from storage contract
to the main contract\n"; + td::TerminalIO::out() << "withdraw-all\tSend bounty from all storage contracts (where at least 1 TON is available) " + "to the main contract\n"; + td::TerminalIO::out() + << "send-coins
[--message msg]\tSend nanoTON to
from the main contract\n"; + td::TerminalIO::out() + << "close-contract
\tClose storage contract
and delete bag (if possible)\n"; + command_finished(td::Status::OK()); + return td::Status::OK(); + } + + td::Status execute_set_verbosity(int level) { + auto query = create_tl_object(level); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Success\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_create(std::string path, std::string description, bool json) { + TRY_RESULT_PREFIX_ASSIGN(path, td::realpath(path), "Invalid path: "); + auto query = create_tl_object(path, description); + send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + td::TerminalIO::out() << "Bag created\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::print_torrent_full, R.move_as_ok()); + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_add_by_hash(td::Bits256 hash, std::string root_dir, bool paused, + td::optional> partial, bool json) { + if (!root_dir.empty()) { + TRY_STATUS_PREFIX(td::mkpath(root_dir), "Failed to create directory: "); + TRY_STATUS_PREFIX(td::mkdir(root_dir), "Failed to create directory: "); + TRY_RESULT_PREFIX_ASSIGN(root_dir, td::realpath(root_dir), "Invalid path: "); + } + std::vector> priorities; + if (partial) { + priorities.push_back(create_tl_object(0)); + for (std::string& f : partial.value()) { + priorities.push_back(create_tl_object(std::move(f), 1)); + } + } + auto query = + create_tl_object(hash, std::move(root_dir), !paused, std::move(priorities)); + send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + td::TerminalIO::out() << "Bag added\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::print_torrent_full, R.move_as_ok()); + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_add_by_meta(std::string meta_file, std::string root_dir, bool paused, + td::optional> partial, bool json) { + TRY_RESULT_PREFIX(meta, td::read_file(meta_file), "Failed to read meta: "); + if (!root_dir.empty()) { + TRY_STATUS_PREFIX(td::mkpath(root_dir), "Failed to create directory: "); + TRY_STATUS_PREFIX(td::mkdir(root_dir), "Failed to create directory: "); + TRY_RESULT_PREFIX_ASSIGN(root_dir, td::realpath(root_dir), "Invalid path: "); + } + std::vector> priorities; + if (partial) { + priorities.push_back(create_tl_object(0)); + for (std::string& f : partial.value()) { + priorities.push_back(create_tl_object(std::move(f), 1)); + } + } + auto query = create_tl_object(std::move(meta), std::move(root_dir), !paused, + std::move(priorities)); + send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + td::TerminalIO::out() << "Bag added\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::print_torrent_full, R.move_as_ok()); + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_list(bool with_hashes, bool json) { + auto query = create_tl_object(); + send_query(std::move(query), + [=, SelfId = 
actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + td::actor::send_closure(SelfId, &StorageDaemonCli::print_torrent_list, R.move_as_ok(), with_hashes); + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_get(td::Bits256 hash, bool json) { + auto query = create_tl_object(hash); + send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + td::actor::send_closure(SelfId, &StorageDaemonCli::print_torrent_full, R.move_as_ok()); + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_get_meta(td::Bits256 hash, std::string meta_file) { + auto query = create_tl_object(hash); + send_query(std::move(query), + [SelfId = actor_id(this), meta_file](td::Result> R) { + if (R.is_error()) { + return; + } + auto data = std::move(R.ok_ref()->meta_); + auto S = td::write_file(meta_file, data); + if (S.is_error()) { + td::actor::send_closure( + SelfId, &StorageDaemonCli::command_finished, + S.move_as_error_prefix(PSTRING() << "Failed to write meta (" << data.size() << " B): ")); + return; + } + td::TerminalIO::out() << "Saved meta (" << data.size() << " B)\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_get_peers(td::Bits256 hash, bool json) { + auto query = create_tl_object(hash); + send_query( + std::move(query), [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + auto obj = R.move_as_ok(); + td::TerminalIO::out() << "BagID " << hash.to_hex() << "\n"; + td::TerminalIO::out() << "Download speed: " << td::format::as_size((td::uint64)obj->download_speed_) + << "/s\n"; + td::TerminalIO::out() << "Upload speed: " << td::format::as_size((td::uint64)obj->upload_speed_) << "/s\n"; + td::TerminalIO::out() << "Peers: " << obj->peers_.size() << "\n"; + std::vector> table; + table.push_back({"ADNL id", "Address", "Download", "Upload", "Ready"}); + for (auto& peer : obj->peers_) { + std::vector row; + row.push_back(PSTRING() << peer->adnl_id_); + row.push_back(peer->ip_str_); + row.push_back(PSTRING() << td::format::as_size((td::uint64)peer->download_speed_) << "/s"); + row.push_back(PSTRING() << td::format::as_size((td::uint64)peer->upload_speed_) << "/s"); + if (obj->total_parts_ > 0) { + char buf[10]; + snprintf(buf, sizeof(buf), "%5.1f%%", (double)peer->ready_parts_ / (double)obj->total_parts_ * 100); + row.push_back(buf); + } else { + row.push_back("???"); + } + table.push_back(std::move(row)); + } + print_table(table); + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_set_active_download(td::Bits256 hash, bool active) { + auto query = create_tl_object(hash, active); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Success\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_set_priority_all(td::Bits256 hash, td::uint8 priority) { + auto query = create_tl_object(hash, priority); + 
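// Priority semantics (per the help text above): 0 means "don't download", values 1..255 set the download priority for every file in the bag. +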
send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (R.ok()->get_id() == ton_api::storage_daemon_prioritySet::ID) { + td::TerminalIO::out() << "Priority was set\n"; + } else { + td::TerminalIO::out() << "Torrent header is not available, priority will be set later\n"; + } + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_set_priority_idx(td::Bits256 hash, td::uint64 idx, td::uint8 priority) { + auto query = create_tl_object(hash, idx, priority); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (R.ok()->get_id() == ton_api::storage_daemon_prioritySet::ID) { + td::TerminalIO::out() << "Priority was set\n"; + } else { + td::TerminalIO::out() << "Torrent header is not available, priority will be set later\n"; + } + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_set_priority_name(td::Bits256 hash, std::string name, td::uint8 priority) { + auto query = create_tl_object(hash, std::move(name), priority); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (R.ok()->get_id() == ton_api::storage_daemon_prioritySet::ID) { + td::TerminalIO::out() << "Priority was set\n"; + } else { + td::TerminalIO::out() << "Torrent header is not available, priority will be set later\n"; + } + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_remove(td::Bits256 hash, bool remove_files) { + auto query = create_tl_object(hash, remove_files); + send_query(std::move(query), + [SelfId = actor_id(this), hash](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Success\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::delete_id, hash); + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_load_from(td::Bits256 hash, std::string meta, std::string path) { + if (meta.empty() && path.empty()) { + return td::Status::Error("Expected meta or files"); + } + td::BufferSlice meta_data; + if (!meta.empty()) { + TRY_RESULT_PREFIX_ASSIGN(meta_data, td::read_file(meta), "Failed to read meta: "); + } + if (!path.empty()) { + TRY_RESULT_PREFIX_ASSIGN(path, td::realpath(path), "Invalid path: "); + } + auto query = create_tl_object(hash, std::move(meta_data), std::move(path)); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + auto torrent = R.move_as_ok(); + td::TerminalIO::out() << "Loaded data for bag " << torrent->hash_.to_hex() << "\n"; + if (torrent->flags_ & 4) { // fatal error + td::TerminalIO::out() << "FATAL ERROR: " << torrent->fatal_error_ << "\n"; + } + if (torrent->flags_ & 1) { // info ready + td::TerminalIO::out() << "Total size: " << td::format::as_size(torrent->total_size_) << "\n"; + if (torrent->flags_ & 2) { // header ready + td::TerminalIO::out() << "Ready: " << td::format::as_size(torrent->downloaded_size_) << "/" + << td::format::as_size(torrent->included_size_) + << (torrent->completed_ ? 
" (completed)" : "") << "\n"; + } else { + td::TerminalIO::out() << "Torrent header is not ready\n"; + } + } else { + td::TerminalIO::out() << "Torrent info is not ready\n"; + } + }); + return td::Status::OK(); + } + + td::Status execute_new_contract_message(td::Bits256 hash, std::string file, td::uint64 query_id, + td::optional provider_address, td::optional rate, + td::optional max_span) { + tl_object_ptr params; + if (provider_address) { + if (rate || max_span) { + return td::Status::Error("Incompatible flags"); + } + params = create_tl_object(provider_address.unwrap()); + } else { + if (!rate || !max_span) { + return td::Status::Error("No flags are set"); + } + params = create_tl_object(rate.unwrap(), max_span.unwrap()); + } + auto query = create_tl_object(hash, query_id, std::move(params)); + send_query(std::move(query), [SelfId = actor_id(this), + file](td::Result> R) { + if (R.is_error()) { + return; + } + auto obj = R.move_as_ok(); + auto S = td::write_file(file, obj->body_); + if (S.is_error()) { + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, + S.move_as_error_prefix(PSTRING() << "Failed to write to file: ")); + return; + } + td::TerminalIO::out() << "Saved message body to file\n"; + td::TerminalIO::out() << "Rate (nanoTON per mb*day): " << obj->rate_ << "\n"; + td::TerminalIO::out() << "Max span: " << obj->max_span_ << "\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_import_pk(std::string file) { + TRY_RESULT(data, td::read_file_secure(file)); + TRY_RESULT(pk, ton::PrivateKey::import(data.as_slice())); + auto query = create_tl_object(pk.tl()); + send_query( + std::move(query), [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Imported private key. 
Public key hash: " << R.ok()->key_hash_.to_hex() << "\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_deploy_provider() { + auto query = create_tl_object(); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + auto obj = R.move_as_ok(); + block::StdAddress std_address; + CHECK(std_address.parse_addr(obj->address_)); + std_address.bounceable = false; + td::TerminalIO::out() << "Address: " << obj->address_ << "\n"; + td::TerminalIO::out() << "Non-bounceable address: " << std_address.rserialize() << "\n"; + td::TerminalIO::out() + << "Send a non-bounceable message with 1 TON to this address to initialize smart contract.\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_init_provider(std::string address) { + auto query = create_tl_object(std::move(address)); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Address of the storage provider was set\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_remove_storage_provider() { + auto query = create_tl_object(); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Storage provider removed\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_get_provider_params(std::string address, bool json) { + auto query = create_tl_object(address); + send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + auto params = R.move_as_ok(); + td::TerminalIO::out() << "Storage provider parameters:\n"; + td::TerminalIO::out() << "Accept new contracts: " << params->accept_new_contracts_ << "\n"; + td::TerminalIO::out() << "Rate (nanoTON per day*MB): " << params->rate_per_mb_day_ << "\n"; + td::TerminalIO::out() << "Max span: " << (td::uint32)params->max_span_ << "\n"; + td::TerminalIO::out() << "Min file size: " << (td::uint64)params->minimal_file_size_ << "\n"; + td::TerminalIO::out() << "Max file size: " << (td::uint64)params->maximal_file_size_ << "\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_set_provider_params(OptionalProviderParams new_params) { + auto query_get = create_tl_object(); + send_query(std::move(query_get), [SelfId = actor_id(this), new_params = std::move(new_params)]( + td::Result> R) mutable { + if (R.is_error()) { + return; + } + td::actor::send_closure(SelfId, &StorageDaemonCli::execute_set_provider_params_cont, R.move_as_ok(), + std::move(new_params)); + }); + return td::Status::OK(); + } + + void execute_set_provider_params_cont(tl_object_ptr params, + OptionalProviderParams new_params) { + if (new_params.accept_new_contracts) { + params->accept_new_contracts_ = new_params.accept_new_contracts.unwrap(); + } + if (new_params.rate_per_mb_day) { + params->rate_per_mb_day_ = new_params.rate_per_mb_day.unwrap(); + } + if (new_params.max_span) { + params->max_span_ = new_params.max_span.unwrap(); + 
} + if (new_params.minimal_file_size) { + params->minimal_file_size_ = new_params.minimal_file_size.unwrap(); + } + if (new_params.maximal_file_size) { + params->maximal_file_size_ = new_params.maximal_file_size.unwrap(); + } + td::TerminalIO::out() << "Sending external message to update provider parameters...\n"; + auto query_set = create_tl_object(std::move(params)); + send_query(std::move(query_set), + [SelfId = actor_id(this)](td::Result> R) mutable { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Storage provider parameters were updated\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + } + + td::Status execute_get_provider_info(bool with_balances, bool with_contracts, bool json) { + auto query = create_tl_object(with_balances, with_contracts); + send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + auto info = R.move_as_ok(); + td::TerminalIO::out() << "Storage provider " << info->address_ << "\n"; + td::TerminalIO::out() << "Storage contracts: " << (td::uint32)info->contracts_count_ << " / " + << (td::uint32)info->config_->max_contracts_ << "\n"; + td::TerminalIO::out() << "Total size: " << size_to_str(info->contracts_total_size_) << " / " + << size_to_str(info->config_->max_total_size_) << "\n"; + if (with_balances) { + td::TerminalIO::out() << "Main contract balance: " << coins_to_str(info->balance_) << " TON\n"; + } + if (with_contracts) { + td::TerminalIO::out() << "Storage contracts: " << info->contracts_.size() << "\n"; + std::vector> table; + table.push_back({"Address", "BagID", "Created at", "Size", "State"}); + if (with_balances) { + table.back().push_back("Client$"); + table.back().push_back("Contract$"); + } + for (const auto& c : info->contracts_) { + table.emplace_back(); + table.back().push_back(c->address_); + table.back().push_back(c->torrent_.to_hex()); + table.back().push_back(time_to_str(c->created_time_)); + table.back().push_back(size_to_str(c->file_size_)); + // enum State { st_downloading = 0, st_downloaded = 1, st_active = 2, st_closing = 3 }; + switch (c->state_) { + case 0: + table.back().push_back("Downloading (" + size_to_str(c->downloaded_size_) + ")"); + break; + case 1: + table.back().push_back("Downloaded"); + break; + case 2: + table.back().push_back("Active"); + break; + case 3: + table.back().push_back("Closing"); + break; + default: + table.back().push_back("???"); + } + if (with_balances) { + table.back().push_back(coins_to_str(c->client_balance_)); + table.back().push_back(coins_to_str(c->contract_balance_)); + } + } + print_table(table); + } + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_set_provider_config(OptionalProviderConfig new_config) { + auto query_get = create_tl_object(false, false); + send_query(std::move(query_get), [SelfId = actor_id(this), new_config = std::move(new_config)]( + td::Result> R) mutable { + if (R.is_error()) { + return; + } + auto info = R.move_as_ok(); + td::actor::send_closure(SelfId, &StorageDaemonCli::execute_set_provider_config_cont, std::move(info->config_), + std::move(new_config)); + }); + return td::Status::OK(); + } + + void execute_set_provider_config_cont(tl_object_ptr config, + OptionalProviderConfig new_config) { + if (new_config.max_contracts) { + config->max_contracts_ = new_config.max_contracts.unwrap(); + } + if 
(new_config.max_total_size) { + config->max_total_size_ = new_config.max_total_size.unwrap(); + } + auto query_set = create_tl_object(std::move(config)); + send_query(std::move(query_set), + [SelfId = actor_id(this)](td::Result> R) mutable { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Storage provider config was updated\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + } + + td::Status execute_withdraw(std::string address) { + auto query = create_tl_object(std::move(address)); + td::TerminalIO::out() << "Sending external message...\n"; + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Bounty was withdrawn\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_withdraw_all() { + auto query = create_tl_object(true, true); + send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + auto info = R.move_as_ok(); + std::vector addresses; + for (auto& contract : info->contracts_) { + if (contract->state_ != 2) { + continue; + } + td::RefInt256 remaining = td::dec_string_to_int256(contract->contract_balance_) - + td::dec_string_to_int256(contract->client_balance_); + if (remaining < td::make_refint(1'000'000'000)) { + continue; + } + td::TerminalIO::out() << "Withdrawing from " << contract->address_ << " (" << coins_to_str(remaining) + << " TON)\n"; + addresses.push_back(contract->address_); + } + if (addresses.empty()) { + td::TerminalIO::out() << "Nothing to withdraw\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + } else { + td::actor::send_closure(SelfId, &StorageDaemonCli::execute_withdraw_all_cont, std::move(addresses)); + } + }); + return td::Status::OK(); + } + + void execute_withdraw_all_cont(std::vector addresses) { + td::MultiPromise mp; + auto ig = mp.init_guard(); + ig.add_promise([SelfId = actor_id(this), cnt = addresses.size()](td::Result R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, R.move_as_error()); + return; + } + td::TerminalIO::out() << "Sent bounty from " << cnt << " contracts\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + for (auto& address : addresses) { + auto query = create_tl_object(std::move(address)); + send_query(std::move(query), + ig.get_promise().wrap([](tl_object_ptr) { return td::Unit(); })); + } + } + + td::Status execute_send_coins(std::string address, std::string amount, std::string message) { + auto query = + create_tl_object(std::move(address), std::move(amount), std::move(message)); + td::TerminalIO::out() << "Sending external messages...\n"; + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Internal message was sent\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_close_contract(std::string address) { + auto query = create_tl_object(std::move(address)); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Closing storage contract\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, 
td::Status::OK()); + }); + return td::Status::OK(); + } + + template + void send_query(tl_object_ptr query, td::Promise promise, bool process_error = true) { + td::actor::send_closure( + client_, &adnl::AdnlExtClient::send_query, "q", serialize_tl_object(query, true), td::Timestamp::in(1800.0), + [SelfId = actor_id(this), promise = std::move(promise), process_error](td::Result R) mutable { + td::Result result; + if (R.is_error()) { + if (R.error().message().empty() && R.error().code() == ErrorCode::cancelled) { + result = td::Status::Error("Query error: failed to connect"); + } else { + result = R.move_as_error_prefix("Query error: "); + } + } else { + td::BufferSlice data = R.move_as_ok(); + result = fetch_tl_object(data, true); + if (result.is_error()) { + auto R3 = fetch_tl_object(data, true); + if (R3.is_ok()) { + result = td::Status::Error("Query error: " + R3.ok()->message_); + } + } + } + if (result.is_ok()) { + promise.set_value(result.move_as_ok()); + } else { + promise.set_error(result.error().clone()); + if (process_error) { + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, result.move_as_error()); + } + } + }); + } + + void command_finished(td::Status S) { + if (S.is_error()) { + td::TerminalIO::out() << S.message() << "\n"; + if (batch_mode_) { + std::exit(2); + } + } else if (batch_mode_) { + if (cur_command_ == commands_.size()) { + std::exit(0); + } else { + parse_line(td::BufferSlice(commands_[cur_command_++])); + } + } + } + + private: + td::IPAddress server_ip_; + PrivateKey client_private_key_; + PublicKey server_public_key_; + std::vector commands_; + bool batch_mode_ = false; + bool batch_started_ = false; + size_t cur_command_ = 0; + td::actor::ActorOwn client_; + td::actor::ActorOwn io_; + + std::map id_to_hash_; + std::map hash_to_id_; + td::uint32 cur_id_ = 0; + + void add_id(td::Bits256 hash) { + if (hash_to_id_.emplace(hash, cur_id_).second) { + id_to_hash_[cur_id_++] = hash; + } + } + + void delete_id(td::Bits256 hash) { + auto it = hash_to_id_.find(hash); + if (it != hash_to_id_.end()) { + id_to_hash_.erase(it->second); + hash_to_id_.erase(it); + } + } + + void update_ids(std::vector hashes) { + for (const td::Bits256& hash : hashes) { + add_id(hash); + } + std::sort(hashes.begin(), hashes.end()); + for (auto it = hash_to_id_.begin(); it != hash_to_id_.end();) { + if (std::binary_search(hashes.begin(), hashes.end(), it->first)) { + ++it; + } else { + id_to_hash_.erase(it->second); + it = hash_to_id_.erase(it); + } + } + } + + void print_torrent_full(tl_object_ptr ptr) { + auto& obj = *ptr; + add_id(obj.torrent_->hash_); + td::TerminalIO::out() << "BagID = " << obj.torrent_->hash_.to_hex() << "\n"; + td::TerminalIO::out() << "Index = " << hash_to_id_[obj.torrent_->hash_] << "\n"; + if (obj.torrent_->flags_ & 4) { // fatal error + td::TerminalIO::out() << "FATAL ERROR: " << obj.torrent_->fatal_error_ << "\n"; + } + if (obj.torrent_->flags_ & 1) { // info ready + if (!obj.torrent_->description_.empty()) { + td::TerminalIO::out() << "-----------------------------------\n"; + td::TerminalIO::out() << obj.torrent_->description_ << "\n"; + td::TerminalIO::out() << "-----------------------------------\n"; + } + if (obj.torrent_->flags_ & 2) { // header ready + td::TerminalIO::out() << "Downloaded: " << td::format::as_size(obj.torrent_->downloaded_size_) << "/" + << td::format::as_size(obj.torrent_->included_size_) + << (obj.torrent_->completed_ + ? 
" (completed)" + : " (remaining " + + size_to_str(obj.torrent_->included_size_ - obj.torrent_->downloaded_size_) + + ")") + << "\n"; + td::TerminalIO::out() << "Dir name: " << obj.torrent_->dir_name_ << "\n"; + } + td::TerminalIO::out() << "Total size: " << td::format::as_size(obj.torrent_->total_size_) << "\n"; + } else { + td::TerminalIO::out() << "Torrent info is not available\n"; + } + if (obj.torrent_->completed_) { + } else if (obj.torrent_->active_download_) { + td::TerminalIO::out() << "Download speed: " << td::format::as_size((td::uint64)obj.torrent_->download_speed_) + << "/s\n"; + } else { + td::TerminalIO::out() << "Download paused\n"; + } + td::TerminalIO::out() << "Upload speed: " << td::format::as_size((td::uint64)obj.torrent_->upload_speed_) << "/s\n"; + td::TerminalIO::out() << "Root dir: " << obj.torrent_->root_dir_ << "\n"; + if (obj.torrent_->flags_ & 2) { // header ready + td::TerminalIO::out() << obj.files_.size() << " files:\n"; + td::TerminalIO::out() << "###### Prior Ready/Size Name\n"; + td::uint32 i = 0; + for (const auto& f : obj.files_) { + char str[64]; + char priority[4] = "---"; + if (f->priority_ > 0) { + CHECK(f->priority_ <= 255); + snprintf(priority, sizeof(priority), "%03d", f->priority_); + } + snprintf(str, sizeof(str), "%6u: (%s) %7s/%-7s %s ", i, priority, + f->priority_ == 0 ? "---" : size_to_str(f->downloaded_size_).c_str(), size_to_str(f->size_).c_str(), + (f->downloaded_size_ == f->size_ ? "+" : " ")); + td::TerminalIO::out() << str << f->name_ << "\n"; + ++i; + } + } else { + td::TerminalIO::out() << "Torrent header is not available\n"; + } + } + + void print_torrent_list(tl_object_ptr ptr, bool with_hashes) { + auto& obj = *ptr; + std::vector hashes; + for (const auto& torrent : obj.torrents_) { + hashes.push_back(torrent->hash_); + } + update_ids(std::move(hashes)); + std::sort(obj.torrents_.begin(), obj.torrents_.end(), + [&](const tl_object_ptr& a, + const tl_object_ptr& b) { + return hash_to_id_[a->hash_] < hash_to_id_[b->hash_]; + }); + td::TerminalIO::out() << obj.torrents_.size() << " bags\n"; + std::vector> table; + table.push_back({"#####", "BagID", "Description", "Downloaded", "Total", "Speed"}); + for (const auto& torrent : obj.torrents_) { + std::vector row; + row.push_back(std::to_string(hash_to_id_[torrent->hash_])); + std::string hash_str = torrent->hash_.to_hex(); + if (!with_hashes) { + hash_str = hash_str.substr(0, 8) + "..."; + } + row.push_back(hash_str); + std::string description = torrent->description_; + for (size_t i = 0; i < description.size(); ++i) { + if (!is_whitespace(description[i])) { + description.erase(description.begin(), description.begin() + i); + break; + } + } + for (size_t i = 0; i < description.size(); ++i) { + if (description[i] == '\n') { + description.resize(i); + break; + } + } + if (description.size() > 45) { + description.resize(42); + description += "..."; + } + row.push_back(description); + bool info_ready = torrent->flags_ & 1; + bool header_ready = torrent->flags_ & 2; + std::string downloaded_size = size_to_str(torrent->downloaded_size_); + std::string included_size = header_ready ? size_to_str(torrent->included_size_) : "???"; + std::string total_size = info_ready ? size_to_str(torrent->total_size_) : "???"; + std::string status; + if (torrent->flags_ & 4) { // fatal error + status = "FATAL ERROR: " + torrent->fatal_error_; + } else { + status = + torrent->completed_ + ? "COMPLETED" + : (torrent->active_download_ ? 
size_to_str((td::uint64)torrent->download_speed_) + "/s" : "Paused"); + } + row.push_back(downloaded_size.append("/").append(included_size)); + row.push_back(total_size); + row.push_back(status); + table.push_back(std::move(row)); + } + print_table(table, {2}); + } +}; + +int main(int argc, char* argv[]) { + SET_VERBOSITY_LEVEL(verbosity_INFO); + td::set_default_failure_signal_handler(); + td::IPAddress ip_addr; + PrivateKey client_private_key; + PublicKey server_public_key; + std::vector commands; + td::OptionParser p; + p.set_description("command-line interface for storage-daemon"); + p.add_option('h', "help", "prints_help", [&]() { + std::cout << (PSLICE() << p).c_str(); + std::exit(2); + }); + p.add_checked_option('v', "verbosity", "set verbosity level", [&](td::Slice arg) { + auto verbosity = td::to_integer(arg); + SET_VERBOSITY_LEVEL(VERBOSITY_NAME(FATAL) + verbosity); + return (verbosity >= 0 && verbosity <= 20) ? td::Status::OK() : td::Status::Error("verbosity must be 0..20"); + }); + p.add_option('V', "version", "shows storage-daemon-cli build information", [&]() { + std::cout << "storage-daemon-cli build information: [ Commit: " << GitMetadata::CommitSHA1() + << ", Date: " << GitMetadata::CommitDate() << "]\n"; + std::exit(0); + }); + p.add_checked_option('I', "ip", "set ip:port of storage-daemon", [&](td::Slice arg) { + TRY_STATUS(ip_addr.init_host_port(arg.str())); + return td::Status::OK(); + }); + p.add_option('c', "cmd", "execute command", [&](td::Slice arg) { commands.push_back(arg.str()); }); + p.add_checked_option('k', "key", "private key", [&](td::Slice arg) { + TRY_RESULT_PREFIX(data, td::read_file(arg.str()), "failed to read: "); + TRY_RESULT_ASSIGN(client_private_key, PrivateKey::import(data)); + return td::Status::OK(); + }); + p.add_checked_option('p', "pub", "server public key", [&](td::Slice arg) { + TRY_RESULT_PREFIX(data, td::read_file(arg.str()), "failed to read: "); + TRY_RESULT_ASSIGN(server_public_key, PublicKey::import(data)); + return td::Status::OK(); + }); + + auto S = p.run(argc, argv); + if (S.is_error()) { + std::cerr << S.move_as_error().message().str() << std::endl; + std::_Exit(2); + } + LOG_IF(FATAL, client_private_key.empty()) << "Client private key is not set"; + LOG_IF(FATAL, server_public_key.empty()) << "Server public key is not set"; + + td::actor::Scheduler scheduler({0}); + scheduler.run_in_context([&] { + td::actor::create_actor("console", ip_addr, client_private_key, server_public_key, + std::move(commands)) + .release(); + }); + scheduler.run(); + return 0; +} diff --git a/storage/storage-daemon/storage-daemon.cpp b/storage/storage-daemon/storage-daemon.cpp new file mode 100644 index 00000000..1cdbb514 --- /dev/null +++ b/storage/storage-daemon/storage-daemon.cpp @@ -0,0 +1,937 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ +#include "td/utils/filesystem.h" +#include "td/actor/actor.h" +#include "td/actor/MultiPromise.h" +#include "td/utils/OptionParser.h" +#include "td/utils/port/path.h" +#include "td/utils/port/signals.h" +#include "td/utils/port/user.h" +#include "td/utils/port/IPAddress.h" +#include "td/utils/Random.h" +#include "td/utils/FileLog.h" +#include "checksum.h" +#include "git.h" +#include "auto/tl/ton_api_json.h" +#include "common/delay.h" + +#include "adnl/adnl.h" +#include "rldp2/rldp.h" +#include "dht/dht.h" +#include "overlay/overlays.h" + +#include "Torrent.h" +#include "TorrentCreator.h" +#include "StorageManager.h" +#include "StorageProvider.h" + +#if TD_DARWIN || TD_LINUX +#include +#endif +#include + +using namespace ton; + +td::BufferSlice create_query_error(td::CSlice message) { + return create_serialize_tl_object(message.str()); +} + +td::BufferSlice create_query_error(td::Status error) { + return create_query_error(error.message()); +} + +class StorageDaemon : public td::actor::Actor { + public: + StorageDaemon(td::IPAddress ip_addr, bool client_mode, std::string global_config, std::string db_root, + td::uint16 control_port, bool enable_storage_provider) + : ip_addr_(ip_addr) + , client_mode_(client_mode) + , global_config_(std::move(global_config)) + , db_root_(std::move(db_root)) + , control_port_(control_port) + , enable_storage_provider_(enable_storage_provider) { + } + + void start_up() override { + CHECK(db_root_ != ""); + td::mkdir(db_root_).ensure(); + keyring_ = keyring::Keyring::create(db_root_ + "/keyring"); + { + auto S = load_global_config(); + if (S.is_error()) { + LOG(FATAL) << "Failed to load global config: " << S; + } + } + { + auto S = load_daemon_config(); + if (S.is_error()) { + LOG(FATAL) << "Failed to load daemon config: " << S; + } + } + + init_adnl(); + + class Callback : public StorageManager::Callback { + public: + explicit Callback(td::actor::ActorId actor) : actor_(std::move(actor)) { + } + void on_ready() override { + td::actor::send_closure(actor_, &StorageDaemon::inited_storage_manager); + } + + private: + td::actor::ActorId actor_; + }; + manager_ = td::actor::create_actor("storage", local_id_, db_root_ + "/torrent", + td::make_unique(actor_id(this)), client_mode_, + adnl_.get(), rldp_.get(), overlays_.get()); + } + + td::Status load_global_config() { + TRY_RESULT_PREFIX(conf_data, td::read_file(global_config_), "failed to read: "); + TRY_RESULT_PREFIX(conf_json, td::json_decode(conf_data.as_slice()), "failed to parse json: "); + ton_api::config_global conf; + TRY_STATUS_PREFIX(ton_api::from_json(conf, conf_json.get_object()), "json does not fit TL scheme: "); + if (!conf.dht_) { + return td::Status::Error(ErrorCode::error, "does not contain [dht] section"); + } + TRY_RESULT_PREFIX(dht, dht::Dht::create_global_config(std::move(conf.dht_)), "bad [dht] section: "); + dht_config_ = std::move(dht); + return td::Status::OK(); + } + + td::Status load_daemon_config() { + daemon_config_ = create_tl_object(); + auto r_conf_data = td::read_file(daemon_config_file()); + if (r_conf_data.is_ok()) { + auto conf_data = r_conf_data.move_as_ok(); + TRY_RESULT_PREFIX(conf_json, td::json_decode(conf_data.as_slice()), "failed to parse json: "); + TRY_STATUS_PREFIX(ton_api::from_json(*daemon_config_, conf_json.get_object()), "json does not fit TL scheme: "); + return td::Status::OK(); + } + std::string keys_dir = db_root_ + "/cli-keys/"; + LOG(INFO) << "First launch, storing keys for storage-daemon-cli to " << keys_dir; + td::mkdir(keys_dir).ensure(); + auto 
generate_public_key = [&]() -> PublicKey { + auto pk = PrivateKey{privkeys::Ed25519::random()}; + auto pub = pk.compute_public_key(); + td::actor::send_closure(keyring_, &keyring::Keyring::add_key, std::move(pk), false, [](td::Unit) {}); + return pub; + }; + { + // Server key + daemon_config_->server_key_ = generate_public_key().tl(); + TRY_STATUS(td::write_file(keys_dir + "server.pub", serialize_tl_object(daemon_config_->server_key_, true))); + } + { + // Client key + auto pk = PrivateKey{privkeys::Ed25519::random()}; + daemon_config_->cli_key_hash_ = pk.compute_short_id().bits256_value(); + TRY_STATUS(td::write_file(keys_dir + "client", serialize_tl_object(pk.tl(), true))); + } + daemon_config_->adnl_id_ = generate_public_key().tl(); + daemon_config_->dht_id_ = generate_public_key().tl(); + return save_daemon_config(); + } + + td::Status save_daemon_config() { + auto s = td::json_encode(td::ToJson(*daemon_config_), true); + TRY_STATUS_PREFIX(td::write_file(daemon_config_file(), s), "Failed to write daemon config: "); + return td::Status::OK(); + } + + void init_adnl() { + CHECK(ip_addr_.is_valid()); + + adnl_network_manager_ = adnl::AdnlNetworkManager::create(static_cast(ip_addr_.get_port())); + adnl_ = adnl::Adnl::create(db_root_, keyring_.get()); + td::actor::send_closure(adnl_, &adnl::Adnl::register_network_manager, adnl_network_manager_.get()); + adnl::AdnlCategoryMask cat_mask; + cat_mask[0] = true; + td::actor::send_closure(adnl_network_manager_, &adnl::AdnlNetworkManager::add_self_addr, ip_addr_, + std::move(cat_mask), 0); + + adnl::AdnlAddressList addr_list; + if (!client_mode_) { + addr_list.add_udp_address(ip_addr_).ensure(); + } + addr_list.set_version(static_cast(td::Clocks::system())); + addr_list.set_reinit_date(adnl::Adnl::adnl_start_time()); + + adnl::AdnlNodeIdFull local_id_full = adnl::AdnlNodeIdFull::create(daemon_config_->adnl_id_).move_as_ok(); + local_id_ = local_id_full.compute_short_id(); + td::actor::send_closure(adnl_, &adnl::Adnl::add_id, local_id_full, addr_list, static_cast(0)); + adnl::AdnlNodeIdFull dht_id_full = adnl::AdnlNodeIdFull::create(daemon_config_->dht_id_).move_as_ok(); + dht_id_ = dht_id_full.compute_short_id(); + td::actor::send_closure(adnl_, &adnl::Adnl::add_id, dht_id_full, addr_list, static_cast(0)); + + if (client_mode_) { + auto D = dht::Dht::create_client(dht_id_, db_root_, dht_config_, keyring_.get(), adnl_.get()); + D.ensure(); + dht_ = D.move_as_ok(); + } else { + auto D = dht::Dht::create(dht_id_, db_root_, dht_config_, keyring_.get(), adnl_.get()); + D.ensure(); + dht_ = D.move_as_ok(); + } + td::actor::send_closure(adnl_, &adnl::Adnl::register_dht_node, dht_.get()); + + rldp_ = ton_rldp::Rldp::create(adnl_.get()); + td::actor::send_closure(rldp_, &ton_rldp::Rldp::add_id, local_id_); + overlays_ = overlay::Overlays::create(db_root_, keyring_.get(), adnl_.get(), dht_.get()); + } + + void inited_storage_manager() { + if (enable_storage_provider_) { + if (!daemon_config_->provider_address_.empty()) { + auto provider_account = ContractAddress::parse(daemon_config_->provider_address_).move_as_ok(); + init_tonlib_client(); + provider_ = td::actor::create_actor("provider", provider_account, db_root_ + "/provider", + tonlib_client_.get(), manager_.get(), keyring_.get()); + } else { + LOG(WARNING) << "Storage provider account is not set, it can be set in storage-daemon-cli"; + } + } + init_control_interface(); + } + + void init_control_interface() { + if (control_port_ == 0) { + return; + } + + auto adnl_id_full = 
adnl::AdnlNodeIdFull::create(daemon_config_->server_key_).move_as_ok(); + auto adnl_id = adnl_id_full.compute_short_id(); + td::actor::send_closure(adnl_, &adnl::Adnl::add_id, adnl_id_full, adnl::AdnlAddressList(), + static_cast(255)); + + class Callback : public adnl::Adnl::Callback { + public: + explicit Callback(td::actor::ActorId id) : self_id_(id) { + } + void receive_message(adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data) override { + } + void receive_query(adnl::AdnlNodeIdShort src, adnl::AdnlNodeIdShort dst, td::BufferSlice data, + td::Promise promise) override { + td::actor::send_closure(self_id_, &StorageDaemon::process_control_query, src, std::move(data), + std::move(promise)); + } + + private: + td::actor::ActorId self_id_; + }; + td::actor::send_closure(adnl_, &adnl::Adnl::subscribe, adnl_id, "", std::make_unique(actor_id(this))); + td::actor::send_closure(adnl_, &adnl::Adnl::create_ext_server, std::vector{adnl_id}, + std::vector{control_port_}, + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to init control interface: " << R.move_as_error(); + return; + } + td::actor::send_closure(SelfId, &StorageDaemon::created_ext_server, R.move_as_ok()); + }); + } + + void created_ext_server(td::actor::ActorOwn ext_server) { + ext_server_ = std::move(ext_server); + LOG(INFO) << "Started control interface on port " << control_port_; + } + + void process_control_query(adnl::AdnlNodeIdShort src, td::BufferSlice data, td::Promise promise) { + promise = [promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_value(create_query_error(R.move_as_error())); + } else { + promise.set_value(R.move_as_ok()); + } + }; + if (src.bits256_value() != daemon_config_->cli_key_hash_) { + promise.set_error(td::Status::Error("Not authorized")); + return; + } + auto F = fetch_tl_object(data, true); + if (F.is_error()) { + promise.set_error(F.move_as_error_prefix("failed to parse control query: ")); + return; + } + auto f = F.move_as_ok(); + LOG(DEBUG) << "Running control query " << f->get_id(); + ton_api::downcast_call(*f, [&](auto &obj) { run_control_query(obj, std::move(promise)); }); + } + + void run_control_query(ton_api::storage_daemon_setVerbosity &query, td::Promise promise) { + if (query.verbosity_ < 0 || query.verbosity_ > 10) { + promise.set_value(create_query_error("verbosity should be in range [0..10]")); + return; + } + SET_VERBOSITY_LEVEL(VERBOSITY_NAME(FATAL) + query.verbosity_); + promise.set_result(create_serialize_tl_object()); + } + + void run_control_query(ton_api::storage_daemon_createTorrent &query, td::Promise promise) { + // Run in a separate thread + delay_action( + [promise = std::move(promise), manager = manager_.get(), query = std::move(query)]() mutable { + Torrent::Creator::Options options; + options.piece_size = 128 * 1024; + options.description = std::move(query.description_); + TRY_RESULT_PROMISE(promise, torrent, Torrent::Creator::create_from_path(std::move(options), query.path_)); + td::Bits256 hash = torrent.get_hash(); + td::actor::send_closure(manager, &StorageManager::add_torrent, std::move(torrent), false, + [manager, hash, promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + } else { + get_torrent_info_full_serialized(manager, hash, std::move(promise)); + } + }); + }, + td::Timestamp::now()); + } + + void run_control_query(ton_api::storage_daemon_addByHash &query, td::Promise promise) { + td::Bits256 
hash = query.hash_; + bool start_download_now = query.start_download_ && query.priorities_.empty(); + td::actor::send_closure( + manager_, &StorageManager::add_torrent_by_hash, hash, std::move(query.root_dir_), start_download_now, + query_add_torrent_cont(hash, query.start_download_, std::move(query.priorities_), std::move(promise))); + } + + void run_control_query(ton_api::storage_daemon_addByMeta &query, td::Promise promise) { + TRY_RESULT_PROMISE(promise, meta, TorrentMeta::deserialize(query.meta_)); + td::Bits256 hash(meta.info.get_hash()); + bool start_download_now = query.start_download_ && query.priorities_.empty(); + td::actor::send_closure( + manager_, &StorageManager::add_torrent_by_meta, std::move(meta), std::move(query.root_dir_), start_download_now, + query_add_torrent_cont(hash, query.start_download_, std::move(query.priorities_), std::move(promise))); + } + + td::Promise query_add_torrent_cont(td::Bits256 hash, bool start_download, + std::vector> priorities, + td::Promise promise) { + return [manager = manager_.get(), hash, start_download = start_download, priorities = std::move(priorities), + promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + return; + } + if (!priorities.empty()) { + for (auto &p : priorities) { + ton_api::downcast_call( + *p, td::overloaded( + [&](ton_api::storage_priorityAction_all &obj) { + td::actor::send_closure(manager, &StorageManager::set_all_files_priority, hash, + (td::uint8)obj.priority_, [](td::Result) {}); + }, + [&](ton_api::storage_priorityAction_idx &obj) { + td::actor::send_closure(manager, &StorageManager::set_file_priority_by_idx, hash, obj.idx_, + (td::uint8)obj.priority_, [](td::Result) {}); + }, + [&](ton_api::storage_priorityAction_name &obj) { + td::actor::send_closure(manager, &StorageManager::set_file_priority_by_name, hash, + std::move(obj.name_), (td::uint8)obj.priority_, + [](td::Result) {}); + })); + } + if (start_download) { + td::actor::send_closure(manager, &StorageManager::set_active_download, hash, true, + [](td::Result) {}); + } + } + get_torrent_info_full_serialized(manager, hash, std::move(promise)); + }; + } + + void run_control_query(ton_api::storage_daemon_setActiveDownload &query, td::Promise promise) { + td::actor::send_closure( + manager_, &StorageManager::set_active_download, query.hash_, query.active_, + promise.wrap([](td::Unit &&) { return create_serialize_tl_object(); })); + } + + void run_control_query(ton_api::storage_daemon_getTorrents &query, td::Promise promise) { + td::actor::send_closure( + manager_, &StorageManager::get_all_torrents, + [manager = manager_.get(), promise = std::move(promise)](td::Result> R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + return; + } + std::vector torrents = R.move_as_ok(); + auto result = std::make_shared>>(torrents.size()); + td::MultiPromise mp; + auto ig = mp.init_guard(); + for (size_t i = 0; i < torrents.size(); ++i) { + get_torrent_info_short(manager, torrents[i], + [i, result, promise = ig.get_promise()]( + td::Result> R) mutable { + if (R.is_ok()) { + result->at(i) = R.move_as_ok(); + } + promise.set_result(td::Unit()); + }); + } + ig.add_promise([promise = std::move(promise), result](td::Result R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + return; + } + auto v = std::move(*result); + v.erase(std::remove(v.begin(), v.end(), nullptr), v.end()); + promise.set_result(create_serialize_tl_object(std::move(v))); + }); + }); + } + + void 
run_control_query(ton_api::storage_daemon_getTorrentFull &query, td::Promise promise) { + get_torrent_info_full_serialized(manager_.get(), query.hash_, std::move(promise)); + } + + void run_control_query(ton_api::storage_daemon_getTorrentMeta &query, td::Promise promise) { + td::actor::send_closure( + manager_, &StorageManager::with_torrent, query.hash_, + promise.wrap([](NodeActor::NodeState state) -> td::Result { + Torrent &torrent = state.torrent; + if (!torrent.inited_info()) { + return td::Status::Error("Torrent meta is not available"); + } + std::string meta_str = torrent.get_meta(Torrent::GetMetaOptions().with_proof_depth_limit(10)).serialize(); + return create_serialize_tl_object(td::BufferSlice(meta_str)); + })); + } + + void run_control_query(ton_api::storage_daemon_getTorrentPeers &query, td::Promise promise) { + td::actor::send_closure(manager_, &StorageManager::get_peers_info, query.hash_, + promise.wrap([](tl_object_ptr obj) -> td::BufferSlice { + return serialize_tl_object(obj, true); + })); + } + + void run_control_query(ton_api::storage_daemon_setFilePriorityAll &query, td::Promise promise) { + TRY_RESULT_PROMISE(promise, priority, td::narrow_cast_safe(query.priority_)); + td::actor::send_closure(manager_, &StorageManager::set_all_files_priority, query.hash_, priority, + promise.wrap([](bool done) -> td::Result { + if (done) { + return create_serialize_tl_object(); + } else { + return create_serialize_tl_object(); + } + })); + } + + void run_control_query(ton_api::storage_daemon_setFilePriorityByIdx &query, td::Promise promise) { + TRY_RESULT_PROMISE(promise, priority, td::narrow_cast_safe(query.priority_)); + td::actor::send_closure(manager_, &StorageManager::set_file_priority_by_idx, query.hash_, query.idx_, priority, + promise.wrap([](bool done) -> td::Result { + if (done) { + return create_serialize_tl_object(); + } else { + return create_serialize_tl_object(); + } + })); + } + + void run_control_query(ton_api::storage_daemon_setFilePriorityByName &query, td::Promise promise) { + TRY_RESULT_PROMISE(promise, priority, td::narrow_cast_safe(query.priority_)); + td::actor::send_closure(manager_, &StorageManager::set_file_priority_by_name, query.hash_, std::move(query.name_), + priority, promise.wrap([](bool done) -> td::Result { + if (done) { + return create_serialize_tl_object(); + } else { + return create_serialize_tl_object(); + } + })); + } + + void run_control_query(ton_api::storage_daemon_removeTorrent &query, td::Promise promise) { + td::actor::send_closure( + manager_, &StorageManager::remove_torrent, query.hash_, query.remove_files_, + promise.wrap([](td::Unit &&) { return create_serialize_tl_object(); })); + } + + void run_control_query(ton_api::storage_daemon_loadFrom &query, td::Promise promise) { + td::optional meta; + if (!query.meta_.empty()) { + TRY_RESULT_PROMISE_ASSIGN(promise, meta, TorrentMeta::deserialize(query.meta_)); + } + td::actor::send_closure( + manager_, &StorageManager::load_from, query.hash_, std::move(meta), std::move(query.path_), + [manager = manager_.get(), hash = query.hash_, promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + } else { + get_torrent_info_short(manager, hash, promise.wrap([](tl_object_ptr obj) { + return serialize_tl_object(obj, true); + })); + } + }); + } + + void run_control_query(ton_api::storage_daemon_getNewContractMessage &query, td::Promise promise) { + td::Promise> P = + [promise = std::move(promise), hash = query.hash_, query_id = query.query_id_, 
+ manager = manager_.get()](td::Result> R) mutable { + TRY_RESULT_PROMISE(promise, r, std::move(R)); + td::actor::send_closure( + manager, &StorageManager::with_torrent, hash, + promise.wrap([r = std::move(r), query_id](NodeActor::NodeState state) -> td::Result { + Torrent &torrent = state.torrent; + if (!torrent.is_completed()) { + return td::Status::Error("Torrent is not complete"); + } + TRY_RESULT(microchunk_tree, MicrochunkTree::Builder::build_for_torrent(torrent, 1LL << 60)); + td::Ref msg = create_new_contract_message_body( + torrent.get_info().as_cell(), microchunk_tree.get_root_hash(), query_id, r.first, r.second); + return create_serialize_tl_object( + vm::std_boc_serialize(msg).move_as_ok(), r.first->to_dec_string(), r.second); + })); + }; + + ton_api::downcast_call(*query.params_, + td::overloaded( + [&](ton_api::storage_daemon_newContractParams &obj) { + td::RefInt256 rate = td::string_to_int256(obj.rate_); + if (rate.is_null() || rate->sgn() < 0) { + P.set_error(td::Status::Error("Invalid rate")); + return; + } + P.set_result(std::make_pair(std::move(rate), (td::uint32)obj.max_span_)); + }, + [&](ton_api::storage_daemon_newContractParamsAuto &obj) { + TRY_RESULT_PROMISE(P, address, ContractAddress::parse(obj.provider_address_)); + init_tonlib_client(); + StorageProvider::get_provider_params( + tonlib_client_.get(), address, P.wrap([](ProviderParams params) { + return std::make_pair(std::move(params.rate_per_mb_day), params.max_span); + })); + })); + } + + void run_control_query(ton_api::storage_daemon_importPrivateKey &query, td::Promise promise) { + auto pk = ton::PrivateKey{query.key_}; + td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, std::move(pk), false, + promise.wrap([hash = pk.compute_short_id()](td::Unit) mutable { + return create_serialize_tl_object(hash.bits256_value()); + })); + } + + void run_control_query(ton_api::storage_daemon_deployProvider &query, td::Promise promise) { + if (!enable_storage_provider_) { + promise.set_error( + td::Status::Error("Storage provider is not enabled, run daemon with --storage-provider to enable it")); + return; + } + if (!provider_.empty() || deploying_provider_) { + promise.set_error(td::Status::Error("Storage provider already exists")); + return; + } + TRY_RESULT_PROMISE_ASSIGN(promise, deploying_provider_, generate_fabric_contract(keyring_.get())); + promise.set_result(create_serialize_tl_object( + deploying_provider_.value().address.to_string())); + do_deploy_provider(); + } + + void run_control_query(ton_api::storage_daemon_initProvider &query, td::Promise promise) { + if (!enable_storage_provider_) { + promise.set_error( + td::Status::Error("Storage provider is not enabled, run daemon with --storage-provider to enable it")); + return; + } + if (!provider_.empty() || deploying_provider_) { + promise.set_error(td::Status::Error("Storage provider already exists")); + return; + } + TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.account_address_), "Invalid address: "); + do_init_provider( + address, promise.wrap([](td::Unit) { return create_serialize_tl_object(); })); + } + + void do_init_provider(ContractAddress address, td::Promise promise, bool deploying = false) { + if (deploying && (!deploying_provider_ || deploying_provider_.value().address != address)) { + promise.set_error(td::Status::Error("Deploying was cancelled")); + return; + } + daemon_config_->provider_address_ = address.to_string(); + TRY_STATUS_PROMISE(promise, save_daemon_config()); + init_tonlib_client(); + provider_ 
= td::actor::create_actor("provider", address, db_root_ + "/provider", + tonlib_client_.get(), manager_.get(), keyring_.get()); + deploying_provider_ = {}; + promise.set_result(td::Unit()); + } + + void do_deploy_provider() { + if (!deploying_provider_) { + return; + } + init_tonlib_client(); + check_contract_exists( + deploying_provider_.value().address, tonlib_client_.get(), + [SelfId = actor_id(this), client = tonlib_client_.get(), + init = deploying_provider_.value()](td::Result R) mutable { + if (R.is_error()) { + LOG(INFO) << "Deploying storage contract: " << R.move_as_error(); + delay_action([=]() { td::actor::send_closure(SelfId, &StorageDaemon::do_deploy_provider); }, + td::Timestamp::in(5.0)); + return; + } + if (R.ok()) { + LOG(INFO) << "Deploying storage contract: DONE"; + td::actor::send_closure( + SelfId, &StorageDaemon::do_init_provider, init.address, [](td::Result) {}, true); + return; + } + ContractAddress address = init.address; + td::BufferSlice state_init_boc = vm::std_boc_serialize(init.state_init).move_as_ok(); + td::BufferSlice body_boc = vm::std_boc_serialize(init.msg_body).move_as_ok(); + auto query = create_tl_object( + create_tl_object(address.to_string()), state_init_boc.as_slice().str(), + body_boc.as_slice().str()); + td::actor::send_closure( + client, &tonlib::TonlibClientWrapper::send_request, + std::move(query), [=](td::Result> R) { + if (R.is_error()) { + LOG(INFO) << "Deploying storage contract: " << R.move_as_error(); + } + delay_action([=]() { td::actor::send_closure(SelfId, &StorageDaemon::do_deploy_provider); }, + td::Timestamp::in(5.0)); + }); + }); + } + + void run_control_query(ton_api::storage_daemon_removeStorageProvider &query, td::Promise promise) { + if (!enable_storage_provider_) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + if (provider_.empty() && !deploying_provider_) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + daemon_config_->provider_address_ = ""; + TRY_STATUS_PROMISE(promise, save_daemon_config()); + deploying_provider_ = {}; + provider_ = {}; + auto S = td::rmrf(db_root_ + "/provider"); + if (S.is_error()) { + LOG(ERROR) << "Failed to delete provider directory: " << S; + } + promise.set_result(create_serialize_tl_object()); + } + + void run_control_query(ton_api::storage_daemon_getProviderParams &query, td::Promise promise) { + if (!query.address_.empty()) { + TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.address_), "Invalid address: "); + init_tonlib_client(); + StorageProvider::get_provider_params(tonlib_client_.get(), address, promise.wrap([](ProviderParams params) { + return serialize_tl_object(params.tl(), true); + })); + } + if (provider_.empty()) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + td::actor::send_closure(provider_, &StorageProvider::get_params, promise.wrap([](ProviderParams params) { + return serialize_tl_object(params.tl(), true); + })); + } + + void run_control_query(ton_api::storage_daemon_setProviderParams &query, td::Promise promise) { + if (provider_.empty()) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + TRY_RESULT_PROMISE(promise, params, ProviderParams::create(query.params_)); + td::actor::send_closure( + provider_, &StorageProvider::set_params, std::move(params), + promise.wrap([](td::Unit) mutable { return create_serialize_tl_object(); })); + } + + template + void run_control_query(T &query, td::Promise promise) { + 
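// Catch-all handler: any control query without a dedicated overload above is rejected as "unknown query". +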
promise.set_error(td::Status::Error("unknown query")); + } + + void run_control_query(ton_api::storage_daemon_getProviderInfo &query, td::Promise promise) { + if (provider_.empty()) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + td::actor::send_closure(provider_, &StorageProvider::get_provider_info, query.with_balances_, query.with_contracts_, + promise.wrap([](tl_object_ptr info) { + return serialize_tl_object(info, true); + })); + } + + void run_control_query(ton_api::storage_daemon_setProviderConfig &query, td::Promise promise) { + if (provider_.empty()) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + td::actor::send_closure( + provider_, &StorageProvider::set_provider_config, StorageProvider::Config(query.config_), + promise.wrap([](td::Unit) { return create_serialize_tl_object(); })); + } + + void run_control_query(ton_api::storage_daemon_withdraw &query, td::Promise promise) { + if (provider_.empty()) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.contract_), "Invalid address: "); + td::actor::send_closure(provider_, &StorageProvider::withdraw, address, promise.wrap([](td::Unit) { + return create_serialize_tl_object(); + })); + } + + void run_control_query(ton_api::storage_daemon_sendCoins &query, td::Promise promise) { + if (provider_.empty()) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.address_), "Invalid address: "); + td::RefInt256 amount = td::string_to_int256(query.amount_); + if (amount.is_null()) { + promise.set_error(td::Status::Error("Invalid amount")); + return; + } + td::actor::send_closure( + provider_, &StorageProvider::send_coins, address, amount, std::move(query.message_), + promise.wrap([](td::Unit) { return create_serialize_tl_object(); })); + } + + void run_control_query(ton_api::storage_daemon_closeStorageContract &query, td::Promise promise) { + if (provider_.empty()) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.address_), "Invalid address: "); + td::actor::send_closure(provider_, &StorageProvider::close_storage_contract, address, promise.wrap([](td::Unit) { + return create_serialize_tl_object(); + })); + } + + private: + static void fill_torrent_info_short(Torrent &torrent, ton_api::storage_daemon_torrent &obj) { + obj.hash_ = torrent.get_hash(); + obj.root_dir_ = torrent.get_root_dir(); + if (torrent.inited_info()) { + const Torrent::Info &info = torrent.get_info(); + obj.flags_ = 1; + if (torrent.inited_header()) { + obj.flags_ |= 2; + } + obj.total_size_ = info.file_size; + obj.description_ = info.description; + if (torrent.inited_header()) { + obj.included_size_ = torrent.get_included_size(); + obj.files_count_ = torrent.get_files_count().unwrap(); + obj.dir_name_ = torrent.get_header().dir_name; + } + obj.downloaded_size_ = torrent.get_included_ready_size(); + obj.completed_ = torrent.is_completed(); + } else { + obj.flags_ = 0; + obj.downloaded_size_ = 0; + obj.completed_ = false; + } + if (torrent.get_fatal_error().is_error()) { + obj.flags_ |= 4; + obj.fatal_error_ = torrent.get_fatal_error().message().str(); + } + } + + static void fill_torrent_info_full(Torrent &torrent, ton_api::storage_daemon_torrentFull &obj) { + if (!obj.torrent_) { + obj.torrent_ = 
create_tl_object(); + } + fill_torrent_info_short(torrent, *obj.torrent_); + obj.files_.clear(); + auto count = torrent.get_files_count(); + if (!count) { + return; + } + for (size_t i = 0; i < count.value(); ++i) { + auto file = create_tl_object(); + file->name_ = torrent.get_file_name(i).str(); + file->size_ = torrent.get_file_size(i); + file->downloaded_size_ = torrent.get_file_ready_size(i); + obj.files_.push_back(std::move(file)); + } + } + + static void get_torrent_info_short(td::actor::ActorId manager, td::Bits256 hash, + td::Promise> promise) { + td::actor::send_closure(manager, &StorageManager::with_torrent, hash, + [promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_result(R.move_as_error()); + return; + } + auto state = R.move_as_ok(); + auto obj = create_tl_object(); + fill_torrent_info_short(state.torrent, *obj); + obj->active_download_ = state.active_download; + obj->download_speed_ = state.download_speed; + obj->upload_speed_ = state.upload_speed; + promise.set_result(std::move(obj)); + }); + } + + static void get_torrent_info_full_serialized(td::actor::ActorId manager, td::Bits256 hash, + td::Promise promise) { + td::actor::send_closure(manager, &StorageManager::with_torrent, hash, + [promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + } else { + auto state = R.move_as_ok(); + auto obj = create_tl_object(); + fill_torrent_info_full(state.torrent, *obj); + obj->torrent_->active_download_ = state.active_download; + obj->torrent_->download_speed_ = state.download_speed; + obj->torrent_->upload_speed_ = state.upload_speed; + for (size_t i = 0; i < obj->files_.size(); ++i) { + obj->files_[i]->priority_ = + (i < state.file_priority.size() ? 
state.file_priority[i] : 1); + } + promise.set_result(serialize_tl_object(obj, true)); + } + }); + } + + td::IPAddress ip_addr_; + bool client_mode_; + std::string global_config_; + std::string db_root_; + td::uint16 control_port_; + bool enable_storage_provider_; + + tl_object_ptr daemon_config_; + std::shared_ptr dht_config_; + adnl::AdnlNodeIdShort local_id_; + adnl::AdnlNodeIdShort dht_id_; + + td::actor::ActorOwn keyring_; + td::actor::ActorOwn adnl_network_manager_; + td::actor::ActorOwn adnl_; + td::actor::ActorOwn dht_; + td::actor::ActorOwn rldp_; + td::actor::ActorOwn overlays_; + td::actor::ActorOwn ext_server_; + + td::actor::ActorOwn manager_; + + td::actor::ActorOwn tonlib_client_; + td::actor::ActorOwn provider_; + td::optional deploying_provider_; + + void init_tonlib_client() { + if (!tonlib_client_.empty()) { + return; + } + auto r_conf_data = td::read_file(global_config_); + r_conf_data.ensure(); + auto tonlib_options = tonlib_api::make_object( + tonlib_api::make_object(r_conf_data.move_as_ok().as_slice().str(), "", false, false), + tonlib_api::make_object()); + tonlib_client_ = td::actor::create_actor("tonlibclient", std::move(tonlib_options)); + } + + std::string daemon_config_file() { + return db_root_ + "/config.json"; + } +}; + +int main(int argc, char *argv[]) { + SET_VERBOSITY_LEVEL(verbosity_WARNING); + td::set_default_failure_signal_handler().ensure(); + + td::unique_ptr logger_; + SCOPE_EXIT { + td::log_interface = td::default_log_interface; + }; + + td::IPAddress ip_addr; + bool client_mode = false; + std::string global_config, db_root; + td::uint16 control_port = 0; + bool enable_storage_provider = false; + + td::OptionParser p; + p.set_description("Server for seeding and downloading bags of files (torrents)\n"); + p.add_option('v', "verbosity", "set verbosity level", [&](td::Slice arg) { + int v = VERBOSITY_NAME(FATAL) + (td::to_integer(arg)); + SET_VERBOSITY_LEVEL(v); + }); + p.add_option('V', "version", "shows storage-daemon build information", [&]() { + std::cout << "storage-daemon build information: [ Commit: " << GitMetadata::CommitSHA1() + << ", Date: " << GitMetadata::CommitDate() << "]\n"; + std::exit(0); + }); + p.add_option('h', "help", "prints a help message", [&]() { + char b[10240]; + td::StringBuilder sb(td::MutableSlice{b, 10000}); + sb << p; + std::cout << sb.as_cslice().c_str(); + std::exit(2); + }); + p.add_checked_option('I', "ip", "set : for adnl. 
: for client mode", + [&](td::Slice arg) -> td::Status { + if (ip_addr.is_valid()) { + return td::Status::Error("Duplicate ip address"); + } + if (!arg.empty() && arg[0] == ':') { + TRY_RESULT(port, td::to_integer_safe(arg.substr(1))); + TRY_STATUS(ip_addr.init_ipv4_port("127.0.0.1", port)); + client_mode = true; + } else { + TRY_STATUS(ip_addr.init_host_port(arg.str())); + } + return td::Status::OK(); + }); + p.add_checked_option('p', "control-port", "port for control interface", [&](td::Slice arg) -> td::Status { + TRY_RESULT_ASSIGN(control_port, td::to_integer_safe(arg)); + return td::Status::OK(); + }); + p.add_option('C', "global-config", "global TON configuration file", + [&](td::Slice arg) { global_config = arg.str(); }); + p.add_option('D', "db", "db root", [&](td::Slice arg) { db_root = arg.str(); }); + p.add_option('d', "daemonize", "set SIGHUP", [&]() { + td::set_signal_handler(td::SignalType::HangUp, [](int sig) { +#if TD_DARWIN || TD_LINUX + close(0); + setsid(); +#endif + }).ensure(); + }); + p.add_option('l', "logname", "log to file", [&](td::Slice fname) { + logger_ = td::FileLog::create(fname.str()).move_as_ok(); + td::log_interface = logger_.get(); + }); + p.add_option('P', "storage-provider", "run storage provider", [&]() { enable_storage_provider = true; }); + + td::actor::Scheduler scheduler({7}); + + scheduler.run_in_context([&] { + p.run(argc, argv).ensure(); + td::actor::create_actor("storage-daemon", ip_addr, client_mode, global_config, db_root, control_port, + enable_storage_provider) + .release(); + }); + while (scheduler.run(1)) { + } +} diff --git a/storage/test/storage.cpp b/storage/test/storage.cpp index 7bdd03de..b4f67b9b 100644 --- a/storage/test/storage.cpp +++ b/storage/test/storage.cpp @@ -58,7 +58,6 @@ #include "Bitset.h" #include "PeerState.h" -#include "SharedState.h" #include "Torrent.h" #include "TorrentCreator.h" @@ -778,7 +777,7 @@ TEST(Rldp, Main) { RldpBasicTest::run(Options::create(1, 100 * MegaByte, NetChannel::Options::perfect_net())); } - +/* TEST(MerkleTree, Manual) { td::Random::Xorshift128plus rnd(123); // create big random file @@ -803,7 +802,7 @@ TEST(MerkleTree, Manual) { timer = {}; LOG(INFO) << "Init merkle tree"; size_t i = 0; - ton::MerkleTree tree(td::transform(hashes, [&i](auto &x) { return ton::MerkleTree::Chunk{i++, x}; })); + ton::MerkleTree tree(td::transform(hashes, [&i](auto &x) { return ton::MerkleTree::Piece{i++, x}; })); LOG(INFO) << timer; auto root_proof = tree.gen_proof(0, chunks_count - 1).move_as_ok(); @@ -830,15 +829,15 @@ TEST(MerkleTree, Manual) { other_new_tree.add_proof(tree.gen_proof(i, i + stride - 1).move_as_ok()).ensure(); other_new_tree.gen_proof(i, i + stride - 1).ensure(); other_new_tree.get_root(2); - std::vector chunks; + std::vector chunks; for (size_t j = 0; j < stride && i + j < chunks_count; j++) { chunks.push_back({i + j, hashes.at(i + j)}); } - new_tree.try_add_chunks(chunks).ensure(); + new_tree.try_add_pieces(chunks).ensure(); } if (stride == 1) { - std::vector chunks; + std::vector chunks; for (size_t i = 0; i < chunks_count; i++) { if (rnd.fast(0, 1) == 1) { @@ -848,7 +847,7 @@ TEST(MerkleTree, Manual) { } } td::Bitset bitmask; - other_new_tree.add_chunks(chunks, bitmask); + other_new_tree.add_pieces(chunks, bitmask); for (size_t i = 0; i < chunks_count; i++) { auto expected = chunks[i].hash == hashes[i]; auto got = bitmask.get(i); @@ -874,9 +873,9 @@ TEST(MerkleTree, Stress) { } } size_t i = 0; - ton::MerkleTree tree(td::transform(hashes, [&i](auto &x) { return ton::MerkleTree::Chunk{i++, 
x}; })); + ton::MerkleTree tree(td::transform(hashes, [&i](auto &x) { return ton::MerkleTree::Piece{i++, x}; })); for (int t2 = 0; t2 < 1000; t2++) { - std::vector chunks; + std::vector chunks; int mask = rnd.fast(0, (1 << chunks_count) - 1); for (size_t i = 0; i < chunks_count; i++) { @@ -889,8 +888,8 @@ TEST(MerkleTree, Stress) { td::Bitset bitmask_strict; td::Bitset bitmask; ton::MerkleTree new_tree(chunks_count, tree.get_root(rnd.fast(1, 5))); - tree.add_chunks(chunks, bitmask_strict); - new_tree.add_chunks(chunks, bitmask); + tree.add_pieces(chunks, bitmask_strict); + new_tree.add_pieces(chunks, bitmask); for (size_t i = 0; i < chunks_count; i++) { auto expected = chunks[i].hash == hashes[i]; auto strict_got = bitmask_strict.get(i); @@ -901,7 +900,7 @@ TEST(MerkleTree, Stress) { } } } -}; +};*/ struct TorrentMetas { td::optional torrent; @@ -985,6 +984,7 @@ TEST(Torrent, Meta) { torrent_file.header = {}; torrent_file.root_proof = {}; auto new_torrent = ton::Torrent::open(options, torrent_file).move_as_ok(); + new_torrent.enable_write_to_files(); std::vector order; for (size_t i = 0; i < torrent.get_info().pieces_count(); i++) { @@ -1039,6 +1039,7 @@ TEST(Torrent, OneFile) { ton::Torrent::Options options; options.root_dir = "second/"; auto other_torrent = ton::Torrent::open(options, meta).move_as_ok(); + other_torrent.enable_write_to_files(); CHECK(!other_torrent.is_completed()); other_torrent.add_piece(0, torrent.get_piece_data(0).move_as_ok(), torrent.get_piece_proof(0).move_as_ok()) .ensure(); @@ -1190,17 +1191,12 @@ TEST(Torrent, Peer) { } }; - class PeerCreator : public ton::NodeActor::Callback { + class PeerCreator : public ton::NodeActor::NodeCallback { public: - PeerCreator(td::actor::ActorId peer_manager, ton::PeerId self_id, std::vector peers, - std::shared_ptr stop_watcher, std::shared_ptr complete_watcher) - : peer_manager_(std::move(peer_manager)) - , peers_(std::move(peers)) - , self_id_(self_id) - , stop_watcher_(stop_watcher) - , complete_watcher_(complete_watcher) { + PeerCreator(td::actor::ActorId peer_manager, ton::PeerId self_id, std::vector peers) + : peer_manager_(std::move(peer_manager)), peers_(std::move(peers)), self_id_(self_id) { } - void get_peers(td::Promise> promise) override { + void get_peers(ton::PeerId src, td::Promise> promise) override { auto peers = peers_; promise.set_value(std::move(peers)); } @@ -1209,7 +1205,7 @@ TEST(Torrent, Peer) { send_closure(peer_manager_, &PeerManager::register_node, self_id_, self_); } td::actor::ActorOwn create_peer(ton::PeerId self_id, ton::PeerId peer_id, - td::SharedState state) override { + std::shared_ptr state) override { class PeerCallback : public ton::PeerActor::Callback { public: PeerCallback(ton::PeerId self_id, ton::PeerId peer_id, td::actor::ActorId peer_manager) @@ -1254,6 +1250,19 @@ TEST(Torrent, Peer) { std::move(state)); } + private: + td::actor::ActorId peer_manager_; + std::vector peers_; + ton::PeerId self_id_; + td::actor::ActorId self_; + }; + + class TorrentCallback : public ton::NodeActor::Callback { + public: + TorrentCallback(std::shared_ptr stop_watcher, std::shared_ptr complete_watcher) + : stop_watcher_(stop_watcher), complete_watcher_(complete_watcher) { + } + void on_completed() override { complete_watcher_.reset(); } @@ -1265,12 +1274,8 @@ TEST(Torrent, Peer) { } private: - td::actor::ActorId peer_manager_; - std::vector peers_; - ton::PeerId self_id_; std::shared_ptr stop_watcher_; std::shared_ptr complete_watcher_; - td::actor::ActorId self_; }; size_t peers_n = 20; @@ -1314,12 
+1319,11 @@ TEST(Torrent, Peer) { alarm_timestamp() = td::Timestamp::in(1); } void alarm() override { - send_closure(node_actor_, &ton::NodeActor::with_torrent, [](td::Result r_torrent) { - if (r_torrent.is_error()) { + send_closure(node_actor_, &ton::NodeActor::with_torrent, [](td::Result r_state) { + if (r_state.is_error()) { return; } - auto torrent = r_torrent.move_as_ok(); - print_debug(torrent); + print_debug(&r_state.ok().torrent); }); alarm_timestamp() = td::Timestamp::in(4); } @@ -1337,14 +1341,17 @@ TEST(Torrent, Peer) { auto peer_manager = td::actor::create_actor("PeerManager"); guard->push_back(td::actor::create_actor( "Node#1", 1, std::move(torrent), - td::make_unique(peer_manager.get(), 1, gen_peers(1, 2), stop_watcher, complete_watcher))); + td::make_unique(stop_watcher, complete_watcher), + td::make_unique(peer_manager.get(), 1, gen_peers(1, 2)), nullptr)); for (size_t i = 2; i <= peers_n; i++) { ton::Torrent::Options options; options.in_memory = true; auto other_torrent = ton::Torrent::open(options, ton::TorrentMeta(info)).move_as_ok(); auto node_actor = td::actor::create_actor( PSLICE() << "Node#" << i, i, std::move(other_torrent), - td::make_unique(peer_manager.get(), i, gen_peers(i, 2), stop_watcher, complete_watcher)); + td::make_unique(stop_watcher, complete_watcher), + td::make_unique(peer_manager.get(), i, gen_peers(i, 2)), + nullptr); if (i == 3) { td::actor::create_actor("StatsActor", node_actor.get()).release(); diff --git a/tdutils/td/utils/JsonBuilder.h b/tdutils/td/utils/JsonBuilder.h index 823cca4b..0fdaceb3 100644 --- a/tdutils/td/utils/JsonBuilder.h +++ b/tdutils/td/utils/JsonBuilder.h @@ -363,7 +363,6 @@ class JsonArrayScope : public JsonScope { void leave() { jb_->dec_offset(); if (jb_->is_pretty()) { - *sb_ << "\n"; jb_->print_offset(); } *sb_ << "]"; @@ -385,7 +384,6 @@ class JsonArrayScope : public JsonScope { is_first_ = true; } if (jb_->is_pretty()) { - *sb_ << "\n"; jb_->print_offset(); } return jb_->enter_value(); @@ -410,7 +408,6 @@ class JsonObjectScope : public JsonScope { void leave() { jb_->dec_offset(); if (jb_->is_pretty()) { - *sb_ << "\n"; jb_->print_offset(); } *sb_ << "}"; @@ -424,7 +421,6 @@ class JsonObjectScope : public JsonScope { is_first_ = true; } if (jb_->is_pretty()) { - *sb_ << "\n"; jb_->print_offset(); } jb_->enter_value() << key; diff --git a/third-party/abseil-cpp b/third-party/abseil-cpp index 21510581..8c6e53ef 160000 --- a/third-party/abseil-cpp +++ b/third-party/abseil-cpp @@ -1 +1 @@ -Subproject commit 215105818dfde3174fe799600bb0f3cae233d0bf +Subproject commit 8c6e53ef3adb1227fffa442c50349dab134a54bc diff --git a/tl/generate/scheme/lite_api.tl b/tl/generate/scheme/lite_api.tl index b2a72dc9..a01da11a 100644 --- a/tl/generate/scheme/lite_api.tl +++ b/tl/generate/scheme/lite_api.tl @@ -68,6 +68,7 @@ liteServer.getState id:tonNode.blockIdExt = liteServer.BlockState; liteServer.getBlockHeader id:tonNode.blockIdExt mode:# = liteServer.BlockHeader; liteServer.sendMessage body:bytes = liteServer.SendMsgStatus; liteServer.getAccountState id:tonNode.blockIdExt account:liteServer.accountId = liteServer.AccountState; +liteServer.getAccountStatePrunned id:tonNode.blockIdExt account:liteServer.accountId = liteServer.AccountState; liteServer.runSmcMethod mode:# id:tonNode.blockIdExt account:liteServer.accountId method_id:long params:bytes = liteServer.RunMethodResult; liteServer.getShardInfo id:tonNode.blockIdExt workchain:int shard:long exact:Bool = liteServer.ShardInfo; liteServer.getAllShardsInfo id:tonNode.blockIdExt = 
liteServer.AllShardsInfo; diff --git a/tl/generate/scheme/lite_api.tlo b/tl/generate/scheme/lite_api.tlo index a015d932..0572fd46 100644 Binary files a/tl/generate/scheme/lite_api.tlo and b/tl/generate/scheme/lite_api.tlo differ diff --git a/tl/generate/scheme/ton_api.tl b/tl/generate/scheme/ton_api.tl index 3a2ad1fc..5e91ebce 100644 --- a/tl/generate/scheme/ton_api.tl +++ b/tl/generate/scheme/ton_api.tl @@ -68,6 +68,7 @@ adnl.address.udp6 ip:int128 port:int = adnl.Address; //adnl.address.tcp6 ip:int128 port:int = adnl.Address; adnl.address.tunnel to:int256 pubkey:PublicKey = adnl.Address; +adnl.address.reverse = adnl.Address; adnl.addressList addrs:(vector adnl.Address) version:int reinit_date:int priority:int expire_at:int = adnl.AddressList; @@ -185,8 +186,12 @@ dht.pong random_id:long = dht.Pong; dht.valueNotFound nodes:dht.nodes = dht.ValueResult; dht.valueFound value:dht.Value = dht.ValueResult; +dht.clientNotFound nodes:dht.nodes = dht.ReversePingResult; +dht.reversePingOk = dht.ReversePingResult; + dht.stored = dht.Stored; dht.message node:dht.node = dht.Message; +dht.requestReversePingCont target:adnl.Node signature:bytes client:int256 = dht.RequestReversePingCont; dht.db.bucket nodes:dht.nodes = dht.db.Bucket; dht.db.key.bucket id:int = dht.db.Key; @@ -198,6 +203,8 @@ dht.store value:dht.value = dht.Stored; dht.findNode key:int256 k:int = dht.Nodes; dht.findValue key:int256 k:int = dht.ValueResult; dht.getSignedAddressList = dht.Node; +dht.registerReverseConnection node:PublicKey ttl:int signature:bytes = dht.Stored; +dht.requestReversePing target:adnl.Node signature:bytes client:int256 k:int = dht.ReversePingResult; dht.query node:dht.node = True; @@ -571,11 +578,12 @@ control.config.local priv:PrivateKey pub:int256 port:int = control.config.Local; config.local local_ids:(vector id.config.local) dht:(vector dht.config.Local) validators:(vector validator.config.Local) liteservers:(vector liteserver.config.Local) control:(vector control.config.local) = config.Local; dht.config.global static_nodes:dht.nodes k:int a:int = dht.config.Global; +dht.config.global_v2 static_nodes:dht.nodes k:int a:int network_id:int = dht.config.Global; adnl.config.global static_nodes:adnl.nodes = adnl.config.Global; catchain.config.global tag:int256 nodes:(vector PublicKey) = catchain.config.Global; dummyworkchain0.config.global zero_state_hash:int256 = dummyworkchain0.config.Global; validator.config.global zero_state:tonNode.blockIdExt init_block:tonNode.blockIdExt hardforks:(vector tonNode.blockIdExt) = validator.config.Global; -config.global adnl:adnl.config.global dht:dht.config.global validator:validator.config.global = config.Global; +config.global adnl:adnl.config.global dht:dht.config.Global validator:validator.config.global = config.Global; liteserver.desc id:PublicKey ip:int port:int = liteserver.Desc; liteserver.descV2 id:PublicKey ip:int port:int shards:(vector tonNode.shardId) = liteserver.DescV2; @@ -725,8 +733,9 @@ storage.ok = Ok; storage.state will_upload:Bool want_download:Bool = storage.State; storage.piece proof:bytes data:bytes = storage.Piece; +storage.torrentInfo data:bytes = storage.TorrentInfo; -storage.updateInit have_pieces:bytes state:storage.State = storage.Update; +storage.updateInit have_pieces:bytes have_pieces_offset:int state:storage.State = storage.Update; storage.updateHavePieces piece_id:(vector int) = storage.Update; storage.updateState state:storage.State = storage.Update; @@ -735,10 +744,9 @@ storage.updateState state:storage.State = storage.Update; storage.ping 
session_id:long = storage.Pong; storage.addUpdate session_id:long seqno:int update:storage.Update = Ok; +storage.getTorrentInfo = storage.TorrentInfo; storage.getPiece piece_id:int = storage.Piece; -storage.queryPrefix id:int256 = Object; - ---types--- http.header name:string value:string = http.Header; @@ -775,4 +783,115 @@ collatorNode.generateBlockError code:int message:string = collatorNode.GenerateB ---functions--- collatorNode.generateBlock workchain:int shard:long min_mc_id:tonNode.blockIdExt prev_blocks:(vector tonNode.blockIdExt) - creator:int256 = collatorNode.GenerateBlockResult; \ No newline at end of file + creator:int256 = collatorNode.GenerateBlockResult; + +---types--- + +storage.db.key.torrentList = storage.db.key.TorrentList; +storage.db.key.torrent hash:int256 = storage.db.key.TorrentShort; +storage.db.key.torrentMeta hash:int256 = storage.db.key.TorrentMeta; +storage.db.key.priorities hash:int256 = storage.db.key.Priorities; +storage.db.key.piecesInDb hash:int256 = storage.db.key.PiecesInDb; +storage.db.key.pieceInDb hash:int256 idx:long = storage.db.key.PieceInDb; + +storage.db.torrentList torrents:(vector int256) = storage.db.TorrentList; +storage.db.torrent root_dir:string active_download:Bool = storage.db.TorrentShort; +storage.db.priorities actions:(vector storage.PriorityAction) = storage.db.Priorities; +storage.db.piecesInDb pieces:(vector long) = storage.db.PiecesInDb; + +storage.priorityAction.all priority:int = storage.PriorityAction; +storage.priorityAction.idx idx:long priority:int = storage.PriorityAction; +storage.priorityAction.name name:string priority:int = storage.PriorityAction; + +storage.daemon.config server_key:PublicKey cli_key_hash:int256 provider_address:string adnl_id:PublicKey dht_id:PublicKey = storage.daemon.provider.Config; + +storage.daemon.provider.params accept_new_contracts:Bool rate_per_mb_day:string max_span:int + minimal_file_size:long maximal_file_size:long = storage.daemon.provider.Params; + +storage.provider.db.key.state = storage.provider.db.key.State; +storage.provider.db.key.contractList = storage.provider.db.key.ContractList; +storage.provider.db.key.storageContract wc:int addr:int256 = storage.provider.db.key.StorageContract; +storage.provider.db.key.microchunkTree wc:int addr:int256 = storage.provider.db.key.MicrochunkTree; +storage.provider.db.key.providerConfig = storage.provider.db.key.ProviderConfig; + +storage.provider.db.state last_processed_lt:long = storage.provider.db.State; +storage.provider.db.contractAddress wc:int addr:int256 = storage.db.ContractAddress; +storage.provider.db.contractList contracts:(vector storage.provider.db.contractAddress) = storage.db.ContractList; +storage.provider.db.storageContract torrent_hash:int256 microchunk_hash:int256 created_time:int state:int file_size:long + rate:string max_span:int = storage.provider.db.StorageContract; +storage.provider.db.microchunkTree data:bytes = storage.provider.db.MicrochunkTree; + +storage.daemon.queryError message:string = storage.daemon.QueryError; +storage.daemon.success = storage.daemon.Success; +storage.daemon.torrent + hash:int256 flags:# + // 0 - info ready + // 1 - header ready + // 2 - fatal error + total_size:flags.0?long description:flags.0?string + files_count:flags.1?long included_size:flags.1?long dir_name:flags.1?string + downloaded_size:long + root_dir:string active_download:Bool completed:Bool + download_speed:double upload_speed:double + fatal_error:flags.2?string + = storage.daemon.Torrent; +storage.daemon.fileInfo + name:string 
size:long + priority:int + downloaded_size:long + = storage.daemon.FileInfo; +storage.daemon.torrentFull torrent:storage.daemon.torrent files:(vector storage.daemon.fileInfo) = storage.daemon.TorrentFull; +storage.daemon.torrentList torrents:(vector storage.daemon.torrent) = storage.daemon.TorrentList; +storage.daemon.torrentMeta meta:bytes = storage.daemon.TorrentMeta; + +storage.daemon.newContractParams rate:string max_span:int = storage.daemon.NewContractParams; +storage.daemon.newContractParamsAuto provider_address:string = storage.daemon.NewContractParams; +storage.daemon.newContractMessage body:bytes rate:string max_span:int = storage.daemon.NewContractMessage; + +storage.daemon.peer adnl_id:int256 ip_str:string download_speed:double upload_speed:double ready_parts:long = storage.daemon.Peer; +storage.daemon.peerList peers:(vector storage.daemon.peer) download_speed:double upload_speed:double total_parts:long = storage.daemon.PeerList; + +storage.daemon.prioritySet = storage.daemon.SetPriorityStatus; +storage.daemon.priorityPending = storage.daemon.SetPriorityStatus; + +storage.daemon.keyHash key_hash:int256 = storage.daemon.KeyHash; + +storage.daemon.providerConfig max_contracts:int max_total_size:long = storage.daemon.ProviderConfig; +storage.daemon.contractInfo address:string state:int torrent:int256 created_time:int file_size:long downloaded_size:long + rate:string max_span:int client_balance:string contract_balance:string = storage.daemon.ContractInfo; +storage.daemon.providerInfo address:string balance:string config:storage.daemon.providerConfig + contracts_count:int contracts_total_size:long + contracts:(vector storage.daemon.contractInfo) = storage.daemon.ProviderInfo; +storage.daemon.providerAddress address:string = storage.daemon.ProviderAddress; + +---functions--- +storage.daemon.setVerbosity verbosity:int = storage.daemon.Success; +storage.daemon.createTorrent path:string description:string = storage.daemon.TorrentFull; +storage.daemon.addByHash hash:int256 root_dir:string start_download:Bool priorities:(vector storage.PriorityAction) = storage.daemon.TorrentFull; +storage.daemon.addByMeta meta:bytes root_dir:string start_download:Bool priorities:(vector storage.PriorityAction) = storage.daemon.TorrentFull; +storage.daemon.setActiveDownload hash:int256 active:Bool = storage.daemon.Success; + +storage.daemon.getTorrents = storage.daemon.TorrentList; +storage.daemon.getTorrentFull hash:int256 = storage.daemon.TorrentFull; +storage.daemon.getTorrentMeta hash:int256 = storage.daemon.TorrentMeta; +storage.daemon.getNewContractMessage hash:int256 query_id:long params:storage.daemon.NewContractParams = storage.daemon.NewContractMessage; +storage.daemon.getTorrentPeers hash:int256 = storage.daemon.PeerList; + +storage.daemon.setFilePriorityAll hash:int256 priority:int = storage.daemon.SetPriorityStatus; +storage.daemon.setFilePriorityByIdx hash:int256 idx:long priority:int = storage.daemon.SetPriorityStatus; +storage.daemon.setFilePriorityByName hash:int256 name:string priority:int = storage.daemon.SetPriorityStatus; + +storage.daemon.removeTorrent hash:int256 remove_files:Bool = storage.daemon.Success; +storage.daemon.loadFrom hash:int256 meta:bytes path:string = storage.daemon.Torrent; + +storage.daemon.importPrivateKey key:PrivateKey = storage.daemon.KeyHash; +storage.daemon.initProvider account_address:string = storage.daemon.Success; +storage.daemon.deployProvider = storage.daemon.ProviderAddress; +storage.daemon.getProviderParams address:string = 
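The storage.daemon.torrent constructor above uses a flags:# word to make groups of fields conditional (flags.0? for the info fields, flags.1? for header-dependent fields, flags.2? for the fatal error), which is why fill_torrent_info_short sets the values 1, 2 and 4 on the C++ side. The sketch below shows the generic bit-gating pattern a reader of such a TL object follows; the field set is reduced and TorrentSnapshot is an illustrative stand-in, not the generated parser.

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

// Reduced model of a TL object with a flags:# word and conditional fields:
//   total_size:flags.0?long  dir_name:flags.1?string  fatal_error:flags.2?string
struct TorrentSnapshot {
  std::uint32_t flags = 0;
  std::optional<long long> total_size;     // present iff flags & 1
  std::optional<std::string> dir_name;     // present iff flags & 2
  std::optional<std::string> fatal_error;  // present iff flags & 4
};

void describe(const TorrentSnapshot &t) {
  if (t.flags & 1) {
    std::cout << "total size: " << *t.total_size << "\n";
  }
  if (t.flags & 2) {
    std::cout << "dir name: " << *t.dir_name << "\n";
  }
  if (t.flags & 4) {
    std::cout << "fatal error: " << *t.fatal_error << "\n";
  }
}

int main() {
  TorrentSnapshot t;
  t.flags |= 1;  // info is known
  t.total_size = 1 << 20;
  t.flags |= 4;  // and a fatal error was recorded
  t.fatal_error = "disk full";
  describe(t);
}
```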
storage.daemon.provider.Params; +storage.daemon.setProviderParams params:storage.daemon.provider.params = storage.daemon.Success; +storage.daemon.getProviderInfo with_balances:Bool with_contracts:Bool = storage.daemon.ProviderInfo; +storage.daemon.setProviderConfig config:storage.daemon.providerConfig = storage.daemon.Success; +storage.daemon.withdraw contract:string = storage.daemon.Success; +storage.daemon.sendCoins address:string amount:string message:string = storage.daemon.Success; +storage.daemon.closeStorageContract address:string = storage.daemon.Success; +storage.daemon.removeStorageProvider = storage.daemon.Success; diff --git a/tl/generate/scheme/ton_api.tlo b/tl/generate/scheme/ton_api.tlo index 2820a789..b299fb5a 100644 Binary files a/tl/generate/scheme/ton_api.tlo and b/tl/generate/scheme/ton_api.tlo differ diff --git a/tl/generate/scheme/tonlib_api.tl b/tl/generate/scheme/tonlib_api.tl index e83ee784..a98b0769 100644 --- a/tl/generate/scheme/tonlib_api.tl +++ b/tl/generate/scheme/tonlib_api.tl @@ -291,7 +291,7 @@ query.estimateFees id:int53 ignore_chksig:Bool = query.Fees; query.getInfo id:int53 = query.Info; smc.load account_address:accountAddress = smc.Info; -//smc.forget id:int53 = Ok; +smc.forget id:int53 = Ok; smc.getCode id:int53 = tvm.Cell; smc.getData id:int53 = tvm.Cell; smc.getState id:int53 = tvm.Cell; diff --git a/tl/generate/scheme/tonlib_api.tlo b/tl/generate/scheme/tonlib_api.tlo index 5f3970a1..db7810e3 100644 Binary files a/tl/generate/scheme/tonlib_api.tlo and b/tl/generate/scheme/tonlib_api.tlo differ diff --git a/tl/generate/tl_writer_java.cpp b/tl/generate/tl_writer_java.cpp index 690d843d..d371c2f8 100644 --- a/tl/generate/tl_writer_java.cpp +++ b/tl/generate/tl_writer_java.cpp @@ -33,7 +33,7 @@ int TD_TL_writer_java::get_max_arity() const { } bool TD_TL_writer_java::is_built_in_simple_type(const std::string &name) const { - return name == "Bool" || name == "Int32" || name == "Int53" || name == "Int64" || name == "Double" || + return name == "Bool" || name == "Int32" || name == "Int53" || name == "Int64" || name == "Int128" || name == "Int256" || name == "Double" || name == "String" || name == "Bytes" || name == "SecureString" || name == "SecureBytes" || name == "Object" || name == "Function"; } @@ -93,9 +93,12 @@ std::string TD_TL_writer_java::gen_base_function_class_name() const { } std::string TD_TL_writer_java::gen_class_name(std::string name) const { - if (name == "Object" || name == "#") { + if (name == "Object") { assert(false); } + if (name == "#") { + return "int"; + } bool next_to_upper = true; std::string result; for (std::size_t i = 0; i < name.size(); i++) { @@ -149,7 +152,7 @@ std::string TD_TL_writer_java::gen_type_name(const tl::tl_tree_type *tree_type) const std::string &name = t->name; if (name == "#") { - assert(false); + return "int"; } if (name == "Bool") { return "boolean"; @@ -166,7 +169,7 @@ std::string TD_TL_writer_java::gen_type_name(const tl::tl_tree_type *tree_type) if (name == "String" || name == "SecureString") { return "String"; } - if (name == "Bytes" || name == "SecureBytes") { + if (name == "Bytes" || name == "SecureBytes" || name == "Int128" || name == "Int256") { return "byte[]"; } if (name == "Object") { @@ -269,9 +272,6 @@ std::string TD_TL_writer_java::gen_vars(const tl::tl_combinator *t, const tl::tl assert(t->args[i].type->get_type() != tl::NODE_TYPE_VAR_TYPE); } - for (std::size_t i = 0; i < vars.size(); i++) { - assert(vars[i].is_type); - } return ""; } @@ -286,12 +286,19 @@ std::string 
TD_TL_writer_java::gen_function_vars(const tl::tl_combinator *t, } for (std::size_t i = 0; i < t->args.size(); i++) { - assert(t->args[i].type->get_type() != tl::NODE_TYPE_VAR_TYPE); + const tl::arg &a = t->args[i]; + + int arg_type = a.type->get_type(); + if (arg_type == tl::NODE_TYPE_VAR_TYPE) { + const tl::tl_tree_var_type *var_type = static_cast(a.type); + assert(a.flags & tl::FLAG_EXCL); + assert(var_type->var_num >= 0); + assert(!vars[var_type->var_num].is_type); + vars[var_type->var_num].is_type = true; + vars[var_type->var_num].function_arg_num = static_cast(i); + } } - for (std::size_t i = 0; i < vars.size(); i++) { - assert(vars[i].is_type); - } return ""; } @@ -309,19 +316,41 @@ std::string TD_TL_writer_java::gen_field_fetch(int field_num, const tl::arg &a, bool flat, int parser_type) const { assert(parser_type >= 0); - assert(a.exist_var_num == -1); - assert(a.type->get_type() != tl::NODE_TYPE_VAR_TYPE); + if (a.type->get_type() == tl::NODE_TYPE_VAR_TYPE) { + assert(parser_type == 1); + + const tl::tl_tree_var_type *t = static_cast(a.type); + assert(a.flags == tl::FLAG_EXCL); + + assert(a.var_num == -1); + assert(a.exist_var_num == -1); + + assert(t->var_num >= 0); + assert(vars[t->var_num].is_type); + assert(!vars[t->var_num].is_stored); + vars[t->var_num].is_stored = true; + + return ""; + } assert(!(a.flags & tl::FLAG_EXCL)); assert(!(a.flags & tl::FLAG_OPT_VAR)); + if (a.exist_var_num != -1) { + assert(0 <= a.exist_var_num && a.exist_var_num < static_cast(vars.size())); + } + if (flat) { // TODO // return gen_field_fetch(const tl::arg &a, std::vector &vars, int num, bool flat); } - assert(a.var_num == -1); assert(a.type->get_type() == tl::NODE_TYPE_TYPE); + if (a.var_num >= 0) { + assert(static_cast(a.type)->type->id == tl::ID_VAR_NUM); + assert(0 <= a.var_num && a.var_num < static_cast(vars.size())); + } + return ""; } diff --git a/tl/generate/tl_writer_jni_cpp.cpp b/tl/generate/tl_writer_jni_cpp.cpp index 3eacfe77..20afaa89 100644 --- a/tl/generate/tl_writer_jni_cpp.cpp +++ b/tl/generate/tl_writer_jni_cpp.cpp @@ -25,7 +25,7 @@ namespace td { bool TD_TL_writer_jni_cpp::is_built_in_simple_type(const std::string &name) const { assert(name != "function"); - return name == "Bool" || name == "Int32" || name == "Int53" || name == "Int64" || name == "Double" || + return name == "Bool" || name == "Int32" || name == "Int53" || name == "Int64" || name == "Int128" || name == "Int256" || name == "Double" || name == "String" || name == "Bytes" || name == "SecureString" || name == "SecureBytes" || name == "Function" || name == "Object"; } @@ -126,6 +126,8 @@ std::string TD_TL_writer_jni_cpp::gen_vector_fetch(std::string field_name, const } else if (vector_type == bytes_type) { std::fprintf(stderr, "Vector of Bytes is not supported\n"); assert(false); + } else if (vector_type == "td::Bits128" || vector_type == "td::Bits256") { + template_type = vector_type; } else { assert(vector_type.compare(0, 10, "object_ptr") == 0); template_type = gen_main_class_name(t->type); @@ -152,17 +154,11 @@ std::string TD_TL_writer_jni_cpp::gen_type_fetch(const std::string &field_name, assert(is_type_bare(t)); } - std::string res_begin; - if (!field_name.empty()) { - res_begin = field_name + " = "; - } - std::string res; - assert(name != "#"); if (field_name.empty()) { if (name == "Bool") { return "env->CallObjectMethod(p, td::jni::BooleanGetValueMethodID)"; - } else if (name == "Int32") { + } else if (name == "Int32" || name == "#") { return "env->CallObjectMethod(p, td::jni::IntegerGetValueMethodID)"; } 
else if (name == "Int53" || name == "Int64") { return "env->CallObjectMethod(p, td::jni::LongGetValueMethodID)"; @@ -176,12 +172,16 @@ std::string TD_TL_writer_jni_cpp::gen_type_fetch(const std::string &field_name, return "td::jni::from_jstring_secure(env, (jstring)p)"; } else if (name == "SecureBytes") { return "td::jni::from_bytes_secure(env, (jbyteArray)p)"; + } else if (name == "Int128") { + return "td::jni::from_bits<128>(env, (jbyteArray)p)"; + } else if (name == "Int256") { + return "td::jni::from_bits<256>(env, (jbyteArray)p)"; } } if (name == "Bool") { res = "(env->GetBooleanField(p, " + field_name + "fieldID) != 0)"; - } else if (name == "Int32") { + } else if (name == "Int32" || name == "#") { res = "env->GetIntField(p, " + field_name + "fieldID)"; } else if (name == "Int53" || name == "Int64") { res = "env->GetLongField(p, " + field_name + "fieldID)"; @@ -195,6 +195,10 @@ std::string TD_TL_writer_jni_cpp::gen_type_fetch(const std::string &field_name, res = "td::jni::fetch_string_secure(env, p, " + field_name + "fieldID)"; } else if (name == "SecureBytes") { res = "td::jni::from_bytes_secure(env, (jbyteArray)td::jni::fetch_object(env, p, " + field_name + "fieldID))"; + } else if (name == "Int128") { + res = "td::jni::from_bits<128>(env, (jbyteArray)td::jni::fetch_object(env, p, " + field_name + "fieldID))"; + } else if (name == "Int256") { + res = "td::jni::from_bits<256>(env, (jbyteArray)td::jni::fetch_object(env, p, " + field_name + "fieldID))"; } else if (name == "Vector") { const tl::tl_tree_type *child = static_cast(tree_type->children[0]); res = gen_vector_fetch(field_name, child, vars, parser_type); @@ -206,7 +210,7 @@ std::string TD_TL_writer_jni_cpp::gen_type_fetch(const std::string &field_name, res = "td::jni::fetch_tl_object<" + gen_main_class_name(tree_type->type) + ">(env, td::jni::fetch_object(env, p, " + field_name + "fieldID));"; } - return res_begin + res; + return res; } std::string TD_TL_writer_jni_cpp::gen_field_fetch(int field_num, const tl::arg &a, @@ -215,7 +219,6 @@ std::string TD_TL_writer_jni_cpp::gen_field_fetch(int field_num, const tl::arg & assert(parser_type >= 0); std::string field_name = (parser_type == 0 ? (field_num == 0 ? 
": " : ", ") : "res->") + gen_field_name(a.name); - assert(a.exist_var_num == -1); if (a.type->get_type() == tl::NODE_TYPE_VAR_TYPE) { assert(parser_type == 1); @@ -223,6 +226,7 @@ std::string TD_TL_writer_jni_cpp::gen_field_fetch(int field_num, const tl::arg & assert(a.flags == tl::FLAG_EXCL); assert(a.var_num == -1); + assert(a.exist_var_num == -1); assert(t->var_num >= 0); assert(vars[t->var_num].is_type); @@ -236,18 +240,52 @@ std::string TD_TL_writer_jni_cpp::gen_field_fetch(int field_num, const tl::arg & assert(!(a.flags & tl::FLAG_EXCL)); assert(!(a.flags & tl::FLAG_OPT_VAR)); + std::string res = " "; + if (a.exist_var_num != -1) { + assert(0 <= a.exist_var_num && a.exist_var_num < static_cast(vars.size())); + assert(vars[a.exist_var_num].is_stored); + + res += "if (" + gen_var_name(vars[a.exist_var_num]) + " & " + int_to_string(1 << a.exist_var_bit) + ") { "; + } + if (flat) { // TODO // return gen_field_fetch(const tl::arg &a, std::vector &vars, int num, bool flat); } - assert(a.var_num == -1); + bool store_to_var_num = false; + if (a.var_num >= 0) { + assert(a.type->get_type() == tl::NODE_TYPE_TYPE); + assert(static_cast(a.type)->type->id == tl::ID_VAR_NUM); + assert(0 <= a.var_num && a.var_num < static_cast(vars.size())); + if (!vars[a.var_num].is_stored) { + res += "if ((" + gen_var_name(vars[a.var_num]) + " = "; + store_to_var_num = true; + } else { + assert(false); + } + vars[a.var_num].is_stored = true; + } + + res += field_name + (parser_type == 0 ? "(" : " = "); assert(a.type->get_type() == tl::NODE_TYPE_TYPE); const tl::tl_tree_type *tree_type = static_cast(a.type); + res += gen_type_fetch(field_name, tree_type, vars, parser_type); + if (store_to_var_num) { + res += ") < 0) { return nullptr; }"; + } else { + res += (parser_type == 0 ? 
")" : ";"); + } - assert(parser_type != 0); - return " " + gen_type_fetch(field_name, tree_type, vars, parser_type) + ";\n"; + if (a.exist_var_num >= 0) { + res += " }"; + if (store_to_var_num) { + res += " else { " + gen_var_name(vars[a.var_num]) + " = 0; }"; + } + } + res += "\n"; + return res; } std::string TD_TL_writer_jni_cpp::get_pretty_field_name(std::string field_name) const { @@ -274,7 +312,7 @@ std::string TD_TL_writer_jni_cpp::gen_vector_store(const std::string &field_name assert(false); // TODO } if (vector_type == "std::int32_t" || vector_type == "std::int64_t" || vector_type == "double" || - vector_type == string_type || vector_type == secure_string_type || + vector_type == string_type || vector_type == secure_string_type || vector_type == "td::Bits128" || vector_type == "td::Bits256" || vector_type.compare(0, 11, "std::vector") == 0 || vector_type.compare(0, 10, "object_ptr") == 0) { return "{ " "auto arr_tmp_ = td::jni::store_vector(env, " + @@ -317,8 +355,8 @@ std::string TD_TL_writer_jni_cpp::gen_type_store(const std::string &field_name, } std::string res; - if (name == "Int32" || name == "Int53" || name == "Int64" || name == "Double" || name == "Bool" || name == "String" || - name == "SecureString") { + if (name == "Int32" || name == "Int53" || name == "Int64" || name == "Int128" || name == "Int256" || name == "Double" || name == "Bool" || name == "String" || + name == "SecureString" || name == "#") { if (storer_type == 1) { res = "s.store_field(\"" + get_pretty_field_name(field_name) + "\", " + field_name + ");"; } else if (name == "Bool") { @@ -337,6 +375,16 @@ std::string TD_TL_writer_jni_cpp::gen_type_store(const std::string &field_name, res = "{ jstring nextString = td::jni::to_jstring_secure(env, " + field_name + "); if (nextString) { env->SetObjectField(s, " + field_name + "fieldID, nextString); env->DeleteLocalRef(nextString); } }"; + } else if (name == "Int128") { + res = "{ jbyteArray nextBytes = td::jni::to_bits<128>(env, " + field_name + + "); if (nextBytes) { env->SetObjectField(s, " + field_name + + "fieldID, nextBytes); env->DeleteLocalRef(nextBytes); } }"; + } else if (name == "Int256") { + res = "{ jbyteArray nextBytes = td::jni::to_bits<256>(env, " + field_name + + "); if (nextBytes) { env->SetObjectField(s, " + field_name + + "fieldID, nextBytes); env->DeleteLocalRef(nextBytes); } }"; + } else if (name == "#") { + res = "env->SetIntField(s, " + TD_TL_writer_cpp::get_pretty_field_name(field_name) + "_fieldID, " + field_name + ");"; } else { assert(false); } @@ -378,12 +426,12 @@ std::string TD_TL_writer_jni_cpp::gen_field_store(const tl::arg &a, std::vector< std::string field_name = gen_field_name(a.name); std::string shift = storer_type == 1 ? 
" " : " "; - assert(a.exist_var_num == -1); if (a.type->get_type() == tl::NODE_TYPE_VAR_TYPE) { const tl::tl_tree_var_type *t = static_cast(a.type); assert(a.flags == tl::FLAG_EXCL); assert(a.var_num == -1); + assert(a.exist_var_num == -1); assert(t->var_num >= 0); assert(!vars[t->var_num].is_stored); @@ -397,15 +445,39 @@ std::string TD_TL_writer_jni_cpp::gen_field_store(const tl::arg &a, std::vector< assert(!(a.flags & tl::FLAG_EXCL)); assert(!(a.flags & tl::FLAG_OPT_VAR)); + if (a.exist_var_num >= 0) { + assert(a.exist_var_num < static_cast(vars.size())); + assert(vars[a.exist_var_num].is_stored); + + shift += "if (" + gen_var_name(vars[a.exist_var_num]) + " & " + int_to_string(1 << a.exist_var_bit) + ") { "; + } + if (flat) { // TODO // return gen_field_store(const tl::arg &a, std::vector &vars, bool flat, int storer_type); } - assert(a.var_num == -1); + if (a.var_num >= 0) { + assert(a.type->get_type() == tl::NODE_TYPE_TYPE); + assert(static_cast(a.type)->type->id == tl::ID_VAR_NUM); + assert(a.var_num < static_cast(vars.size())); + if (!vars[a.var_num].is_stored) { + field_name = "(" + gen_var_name(vars[a.var_num]) + " = " + field_name + ")"; + vars[a.var_num].is_stored = true; + } else { + assert(false); // need to check value of stored var + field_name = gen_var_name(vars[a.var_num]); + } + } + assert(a.type->get_type() == tl::NODE_TYPE_TYPE); const tl::tl_tree_type *tree_type = static_cast(a.type); - return shift + gen_type_store(field_name, tree_type, vars, storer_type) + "\n"; + shift += gen_type_store(field_name, tree_type, vars, storer_type); + if (a.exist_var_num >= 0) { + shift += " }"; + } + shift += "\n"; + return shift; } std::string TD_TL_writer_jni_cpp::gen_get_id(const std::string &class_name, std::int32_t id, bool is_proxy) const { @@ -568,11 +640,10 @@ std::string TD_TL_writer_jni_cpp::gen_type_signature(const tl::tl_tree_type *tre const tl::tl_type *t = tree_type->type; const std::string &name = t->name; - assert(name != "#"); assert(name != gen_base_tl_class_name()); if (name == "Bool") { return "Z"; - } else if (name == "Int32") { + } else if (name == "Int32" || name == "#") { return "I"; } else if (name == "Int53" || name == "Int64") { return "J"; @@ -580,7 +651,7 @@ std::string TD_TL_writer_jni_cpp::gen_type_signature(const tl::tl_tree_type *tre return "D"; } else if (name == "String" || name == "SecureString") { return "Ljava/lang/String;"; - } else if (name == "Bytes" || name == "SecureBytes") { + } else if (name == "Bytes" || name == "SecureBytes" || name == "Int128" || name == "Int256") { return "[B"; } else if (name == "Vector") { const tl::tl_tree_type *child = static_cast(tree_type->children[0]); diff --git a/tl/generate/tl_writer_jni_h.cpp b/tl/generate/tl_writer_jni_h.cpp index 5c9896bd..dc2551fd 100644 --- a/tl/generate/tl_writer_jni_h.cpp +++ b/tl/generate/tl_writer_jni_h.cpp @@ -23,7 +23,7 @@ namespace td { bool TD_TL_writer_jni_h::is_built_in_simple_type(const std::string &name) const { - return name == "Bool" || name == "Int32" || name == "Int53" || name == "Int64" || name == "Double" || + return name == "Bool" || name == "Int32" || name == "Int53" || name == "Int64" || name == "Int128" || name == "Int256" || name == "Double" || name == "String" || name == "Bytes" || name == "SecureString" || name == "SecureBytes" || name == "Function" || name == "Object"; } diff --git a/tl/tl/tl_jni_object.cpp b/tl/tl/tl_jni_object.cpp index 7f98ed40..e7e69789 100644 --- a/tl/tl/tl_jni_object.cpp +++ b/tl/tl/tl_jni_object.cpp @@ -30,12 +30,6 @@ namespace jni { 
thread_local bool parse_error; -static jclass BooleanClass; -static jclass IntegerClass; -static jclass LongClass; -static jclass DoubleClass; -static jclass StringClass; -static jclass ObjectClass; jmethodID GetConstructorID; jmethodID BooleanGetValueMethodID; jmethodID IntegerGetValueMethodID; diff --git a/tl/tl/tl_jni_object.h b/tl/tl/tl_jni_object.h index 4ab385f3..e39a063d 100644 --- a/tl/tl/tl_jni_object.h +++ b/tl/tl/tl_jni_object.h @@ -27,12 +27,19 @@ #include "td/utils/Slice.h" #include "td/utils/SharedSlice.h" +#include "common/bitstring.h" namespace td { namespace jni { extern thread_local bool parse_error; +static jclass BooleanClass; +static jclass IntegerClass; +static jclass LongClass; +static jclass DoubleClass; +static jclass StringClass; +static jclass ObjectClass; extern jmethodID GetConstructorID; extern jmethodID BooleanGetValueMethodID; extern jmethodID IntegerGetValueMethodID; @@ -106,6 +113,29 @@ SecureString from_bytes_secure(JNIEnv *env, jbyteArray arr); jbyteArray to_bytes(JNIEnv *env, Slice b); jbyteArray to_bytes_secure(JNIEnv *env, Slice b); +template +td::BitArray from_bits(JNIEnv *env, jbyteArray arr) { + td::BitArray b; + if (arr != nullptr) { + jsize length = env->GetArrayLength(arr); + assert(length * 8 == n); + env->GetByteArrayRegion(arr, 0, length, reinterpret_cast(b.as_slice().begin())); + env->DeleteLocalRef(arr); + } + return b; +} + +template +jbyteArray to_bits(JNIEnv *env, td::BitArray b) { + assert(n % 8 == 0); + jsize length = n / 8; + jbyteArray arr = env->NewByteArray(length); + if (arr != nullptr) { + env->SetByteArrayRegion(arr, 0, length, reinterpret_cast(b.data())); + } + return arr; +} + void init_vars(JNIEnv *env, const char *td_api_java_package); jintArray store_vector(JNIEnv *env, const std::vector &v); @@ -118,6 +148,22 @@ jobjectArray store_vector(JNIEnv *env, const std::vector &v); jobjectArray store_vector(JNIEnv *env, const std::vector &v); +template +jobjectArray store_vector(JNIEnv *env, const std::vector> &v) { + jint length = static_cast(v.size()); + jobjectArray arr = env->NewObjectArray(length, ObjectClass, jobject()); + if (arr != nullptr) { + for (jsize i = 0; i < length; i++) { + jbyteArray bits = to_bits(env, v[i]); + if (bits) { + env->SetObjectArrayElement(arr, i, bits); + env->DeleteLocalRef(bits); + } + } + } + return arr; +} + template jobjectArray store_vector(JNIEnv *env, const std::vector &v) { jint length = static_cast(v.size()); @@ -230,6 +276,26 @@ struct FetchVector { } }; +template +struct FetchVector> { + static std::vector> fetch(JNIEnv *env, jobjectArray arr) { + std::vector> result; + if (arr != nullptr) { + jsize length = env->GetArrayLength(arr); + result.reserve(length); + for (jsize i = 0; i < length; i++) { + jbyteArray bits = (jbyteArray)env->GetObjectArrayElement(arr, i); + result.push_back(jni::from_bits(env, bits)); + if (bits) { + env->DeleteLocalRef(bits); + } + } + env->DeleteLocalRef(arr); + } + return result; + } +}; + template struct FetchVector> { static auto fetch(JNIEnv *env, jobjectArray arr) { diff --git a/tl/tl/tl_json.h b/tl/tl/tl_json.h index fbc7f2c5..66c639b5 100644 --- a/tl/tl/tl_json.h +++ b/tl/tl/tl_json.h @@ -53,7 +53,8 @@ struct JsonBytes { }; inline void to_json(JsonValueScope &jv, const JsonBytes json_bytes) { - jv << JsonString(PSLICE() << base64_encode(json_bytes.bytes)); + auto base64 = base64_encode(json_bytes.bytes); + jv << JsonString(base64); } template struct JsonVectorBytesImpl { diff --git a/ton/ton-types.h b/ton/ton-types.h index 0773c64a..37367258 100644 --- 
a/ton/ton-types.h +++ b/ton/ton-types.h @@ -51,12 +51,6 @@ using ValidatorSessionId = td::Bits256; constexpr WorkchainId masterchainId = -1, basechainId = 0, workchainInvalid = 0x80000000; constexpr ShardId shardIdAll = (1ULL << 63); -constexpr unsigned split_merge_delay = 100; // prepare (delay) split/merge for 100 seconds -constexpr unsigned split_merge_interval = 100; // split/merge is enabled during 60 second interval -constexpr unsigned min_split_merge_interval = 30; // split/merge interval must be at least 30 seconds -constexpr unsigned max_split_merge_delay = - 1000; // end of split/merge interval must be at most 1000 seconds in the future - constexpr int max_shard_pfx_len = 60; enum GlobalCapabilities { diff --git a/tonlib/CMakeLists.txt b/tonlib/CMakeLists.txt index fd7b0ab0..d9984368 100644 --- a/tonlib/CMakeLists.txt +++ b/tonlib/CMakeLists.txt @@ -18,6 +18,7 @@ set(TONLIB_SOURCE tonlib/LastConfig.cpp tonlib/Logging.cpp tonlib/TonlibClient.cpp + tonlib/TonlibClientWrapper.cpp tonlib/utils.cpp tonlib/Client.h @@ -34,6 +35,7 @@ set(TONLIB_SOURCE tonlib/Logging.h tonlib/TonlibCallback.h tonlib/TonlibClient.h + tonlib/TonlibClientWrapper.h tonlib/utils.h tonlib/keys/bip39.cpp @@ -69,6 +71,7 @@ if (TONLIB_ENABLE_JNI AND NOT ANDROID) # jni is available by default on Android endif() message(STATUS "Found JNI: ${JNI_INCLUDE_DIRS} ${JNI_LIBRARIES}") target_include_directories(tonlib PUBLIC ${JAVA_INCLUDE_PATH} ${JAVA_INCLUDE_PATH2}) + target_include_directories(tl_tonlib_api PUBLIC ${JAVA_INCLUDE_PATH} ${JAVA_INCLUDE_PATH2}) target_link_libraries(tonlib PUBLIC ${JAVA_JVM_LIBRARY}) endif() diff --git a/tonlib/tonlib/ExtClient.cpp b/tonlib/tonlib/ExtClient.cpp index fdd1606d..30a29b59 100644 --- a/tonlib/tonlib/ExtClient.cpp +++ b/tonlib/tonlib/ExtClient.cpp @@ -54,7 +54,7 @@ void ExtClient::with_last_block(td::Promise promise) { td::actor::send_closure(client_.last_block_actor_, &LastBlock::get_last_block, std::move(P)); } -void ExtClient::send_raw_query(td::BufferSlice query, ton::ShardIdFull shard, td::Promise promise) { +void ExtClient::send_raw_query(td::BufferSlice query, td::Promise promise) { auto query_id = queries_.create(std::move(promise)); td::Promise P = [query_id, self = this, actor_id = td::actor::actor_id()](td::Result result) { @@ -62,10 +62,10 @@ void ExtClient::send_raw_query(td::BufferSlice query, ton::ShardIdFull shard, td self->queries_.extract(query_id).set_result(std::move(result)); }); }; - if (client_.raw_client_.empty()) { + if (client_.adnl_ext_client_.empty()) { return P.set_error(TonlibError::NoLiteServers()); } - td::actor::send_closure(client_.raw_client_, &ExtClientRaw::send_query, "query", std::move(query), shard, + td::actor::send_closure(client_.adnl_ext_client_, &ton::adnl::AdnlExtClient::send_query, "query", std::move(query), td::Timestamp::in(10.0), std::move(P)); } } // namespace tonlib diff --git a/tonlib/tonlib/ExtClient.h b/tonlib/tonlib/ExtClient.h index 48818622..43627fb8 100644 --- a/tonlib/tonlib/ExtClient.h +++ b/tonlib/tonlib/ExtClient.h @@ -28,7 +28,7 @@ #include "td/utils/Container.h" #include "td/utils/Random.h" -#include "ExtClientRaw.h" +#include "ExtClientLazy.h" #include "TonlibError.h" #include "utils.h" #include "QueryTraits.h" @@ -40,7 +40,7 @@ class LastConfig; struct LastBlockState; struct LastConfigState; struct ExtClientRef { - td::actor::ActorId raw_client_; + td::actor::ActorId adnl_ext_client_; td::actor::ActorId last_block_actor_; td::actor::ActorId last_config_actor_; }; @@ -99,8 +99,8 @@ class ExtClient { } void 
force_change_liteserver() { - if (!client_.raw_client_.empty()) { - td::actor::send_closure(client_.raw_client_, &ExtClientRaw::force_change_liteserver); + if (!client_.adnl_ext_client_.empty()) { + td::actor::send_closure(client_.adnl_ext_client_, &ExtClientLazy::force_change_liteserver); } } diff --git a/tonlib/tonlib/ExtClientLazy.cpp b/tonlib/tonlib/ExtClientLazy.cpp index 625d7eaa..335a0ff9 100644 --- a/tonlib/tonlib/ExtClientLazy.cpp +++ b/tonlib/tonlib/ExtClientLazy.cpp @@ -16,15 +16,15 @@ Copyright 2017-2020 Telegram Systems LLP */ -#include "ExtClientRaw.h" +#include "ExtClientLazy.h" #include "TonlibError.h" #include "td/utils/Random.h" namespace tonlib { -class ExtClientLazyImp : public ExtClientRaw { +class ExtClientLazyImp : public ExtClientLazy { public: ExtClientLazyImp(std::vector> servers, - td::unique_ptr callback) + td::unique_ptr callback) : servers_(std::move(servers)), callback_(std::move(callback)) { CHECK(!servers_.empty()); } @@ -34,7 +34,15 @@ class ExtClientLazyImp : public ExtClientRaw { td::random_shuffle(td::as_mutable_span(servers_), rnd); } - void send_query(std::string name, td::BufferSlice data, ton::ShardIdFull shard, td::Timestamp timeout, + void check_ready(td::Promise promise) override { + before_query(); + if (client_.empty()) { + return promise.set_error(TonlibError::Cancelled()); + } + send_closure(client_, &ton::adnl::AdnlExtClient::check_ready, std::move(promise)); + } + + void send_query(std::string name, td::BufferSlice data, td::Timestamp timeout, td::Promise promise) override { before_query(); if (client_.empty()) { @@ -101,7 +109,7 @@ class ExtClientLazyImp : public ExtClientRaw { bool cur_server_bad_force_ = false; td::actor::ActorOwn client_; - td::unique_ptr callback_; + td::unique_ptr callback_; static constexpr double MAX_NO_QUERIES_TIMEOUT = 100; bool is_closing_{false}; @@ -132,12 +140,12 @@ class ExtClientLazyImp : public ExtClientRaw { } }; -td::actor::ActorOwn ExtClientRaw::create(ton::adnl::AdnlNodeIdFull dst, td::IPAddress dst_addr, - td::unique_ptr callback) { +td::actor::ActorOwn ExtClientLazy::create(ton::adnl::AdnlNodeIdFull dst, td::IPAddress dst_addr, + td::unique_ptr callback) { return create({std::make_pair(dst, dst_addr)}, std::move(callback)); } -td::actor::ActorOwn ExtClientRaw::create( +td::actor::ActorOwn ExtClientLazy::create( std::vector> servers, td::unique_ptr callback) { return td::actor::create_actor("ExtClientLazy", std::move(servers), std::move(callback)); } diff --git a/tonlib/tonlib/ExtClientOutbound.cpp b/tonlib/tonlib/ExtClientOutbound.cpp index e057368d..d2715b8b 100644 --- a/tonlib/tonlib/ExtClientOutbound.cpp +++ b/tonlib/tonlib/ExtClientOutbound.cpp @@ -37,6 +37,9 @@ class ExtClientOutboundImp : public ExtClientOutbound { void force_change_liteserver() override { } + void force_change_liteserver() override { + } + void on_query_result(td::int64 id, td::Result r_data, td::Promise promise) override { auto it = queries_.find(id); if (it == queries_.end()) { diff --git a/tonlib/tonlib/ExtClientOutbound.h b/tonlib/tonlib/ExtClientOutbound.h index 91217868..4721e872 100644 --- a/tonlib/tonlib/ExtClientOutbound.h +++ b/tonlib/tonlib/ExtClientOutbound.h @@ -19,10 +19,10 @@ #pragma once #include "td/actor/actor.h" -#include "ExtClientRaw.h" +#include "ExtClientLazy.h" namespace tonlib { -class ExtClientOutbound : public ExtClientRaw { +class ExtClientOutbound : public ExtClientLazy { public: class Callback { public: diff --git a/tonlib/tonlib/ExtClientRaw.h b/tonlib/tonlib/ExtClientRaw.h index 
0a55bdc1..612995af 100644 --- a/tonlib/tonlib/ExtClientRaw.h +++ b/tonlib/tonlib/ExtClientRaw.h @@ -22,20 +22,18 @@ #include "ton/ton-types.h" namespace tonlib { -class ExtClientRaw : public td::actor::Actor { +class ExtClientLazy : public ton::adnl::AdnlExtClient { public: class Callback { public: virtual ~Callback() = default; }; - virtual void send_query(std::string name, td::BufferSlice data, ton::ShardIdFull shard, td::Timestamp timeout, - td::Promise promise) = 0; virtual void force_change_liteserver() = 0; - static td::actor::ActorOwn create(ton::adnl::AdnlNodeIdFull dst, td::IPAddress dst_addr, - td::unique_ptr callback); - static td::actor::ActorOwn create( + static td::actor::ActorOwn create(ton::adnl::AdnlNodeIdFull dst, td::IPAddress dst_addr, + td::unique_ptr callback); + static td::actor::ActorOwn create( std::vector> servers, td::unique_ptr callback); }; diff --git a/tonlib/tonlib/TonlibClient.cpp b/tonlib/tonlib/TonlibClient.cpp index b4b38697..b28c07a0 100644 --- a/tonlib/tonlib/TonlibClient.cpp +++ b/tonlib/tonlib/TonlibClient.cpp @@ -18,7 +18,7 @@ */ #include "TonlibClient.h" -#include "tonlib/ExtClientRaw.h" +#include "tonlib/ExtClientLazy.h" #include "tonlib/ExtClientOutbound.h" #include "tonlib/LastBlock.h" #include "tonlib/LastConfig.h" @@ -1650,7 +1650,7 @@ void TonlibClient::hangup() { ExtClientRef TonlibClient::get_client_ref() { ExtClientRef ref; - ref.raw_client_ = raw_client_.get(); + ref.adnl_ext_client_ = raw_client_.get(); ref.last_block_actor_ = raw_last_block_.get(); ref.last_config_actor_ = raw_last_config_.get(); @@ -1685,7 +1685,11 @@ void TonlibClient::init_ext_client() { ext_client_outbound_ = client.get(); raw_client_ = std::move(client); } else { - class Callback : public ExtClientRaw::Callback { + std::vector> servers; + for (const auto& s : config_.lite_clients) { + servers.emplace_back(s.adnl_id, s.address); + } + class Callback : public ExtClientLazy::Callback { public: explicit Callback(td::actor::ActorShared<> parent) : parent_(std::move(parent)) { } @@ -1708,6 +1712,7 @@ void TonlibClient::init_ext_client() { } ext_client_outbound_ = {}; ref_cnt_++; + raw_client_ = ExtClientLazy::create(std::move(servers), td::make_unique(td::actor::actor_shared())); } } @@ -2299,24 +2304,46 @@ const MasterConfig& get_default_master_config() { "liteservers": [ ], "validator": { - "@type": "validator.config.global", - "zero_state": { - "workchain": -1, - "shard": -9223372036854775808, - "seqno": 0, - "root_hash": "gj+B8wb/AmlPk1z1AhVI484rhrUpgSr2oSFIh56VoSg=", - "file_hash": "Z+IKwYS54DmmJmesw/nAD5DzWadnOCMzee+kdgSYDOg=" - }, - "init_block" : { - "root_hash": "gj+B8wb/AmlPk1z1AhVI484rhrUpgSr2oSFIh56VoSg=", - "seqno": 0, - "file_hash": "Z+IKwYS54DmmJmesw/nAD5DzWadnOCMzee+kdgSYDOg=", - "workchain": -1, - "shard": -9223372036854775808 - }, - "hardforks": [ - ] - } + "zero_state": { + "file_hash": "Z+IKwYS54DmmJmesw/nAD5DzWadnOCMzee+kdgSYDOg=", + "seqno": 0, + "root_hash": "gj+B8wb/AmlPk1z1AhVI484rhrUpgSr2oSFIh56VoSg=", + "workchain": -1, + "shard": -9223372036854775808 + }, + "@type": "validator.config.global", + "init_block": + { + "file_hash": "xRaxgUwgTXYFb16YnR+Q+VVsczLl6jmYwvzhQ/ncrh4=", + "seqno": 5176527, + "root_hash": "SoPLqMe9Dz26YJPOGDOHApTSe5i0kXFtRmRh/zPMGuI=", + "workchain": -1, + "shard": -9223372036854775808 + }, + "hardforks": [ + { + "file_hash": "jF3RTD+OyOoP+OI9oIjdV6M8EaOh9E+8+c3m5JkPYdg=", + "seqno": 5141579, + "root_hash": "6JSqIYIkW7y8IorxfbQBoXiuY3kXjcoYgQOxTJpjXXA=", + "workchain": -1, + "shard": -9223372036854775808 + }, + { + 
"file_hash": "WrNoMrn5UIVPDV/ug/VPjYatvde8TPvz5v1VYHCLPh8=", + "seqno": 5172980, + "root_hash": "054VCNNtUEwYGoRe1zjH+9b1q21/MeM+3fOo76Vcjes=", + "workchain": -1, + "shard": -9223372036854775808 + }, + { + "file_hash": "xRaxgUwgTXYFb16YnR+Q+VVsczLl6jmYwvzhQ/ncrh4=", + "seqno": 5176527, + "root_hash": "SoPLqMe9Dz26YJPOGDOHApTSe5i0kXFtRmRh/zPMGuI=", + "workchain": -1, + "shard": -9223372036854775808 + } + ] + } })abc"); return res; }(); @@ -2520,7 +2547,18 @@ struct ToRawTransactions { auto body_cell = vm::CellBuilder().append_cellslice(*body).finalize(); auto body_hash = body_cell->get_hash().as_slice().str(); - auto get_data = [body = std::move(body), body_cell, this](td::Slice salt) mutable { + td::Ref init_state_cell; + auto& init_state_cs = message.init.write(); + if (init_state_cs.fetch_ulong(1) == 1) { + if (init_state_cs.fetch_long(1) == 0) { + init_state_cell = vm::CellBuilder().append_cellslice(init_state_cs).finalize(); + } else { + init_state_cell = init_state_cs.fetch_ref(); + } + } + + auto get_data = [body = std::move(body), body_cell = std::move(body_cell), + init_state_cell = std::move(init_state_cell), this](td::Slice salt) mutable { tonlib_api::object_ptr data; if (try_decode_messages_ && body->size() >= 32 && static_cast(body->prefetch_long(32)) <= 1) { auto type = body.write().fetch_long(32); @@ -2550,7 +2588,7 @@ struct ToRawTransactions { } } if (!data) { - data = tonlib_api::make_object(to_bytes(std::move(body_cell)), ""); + data = tonlib_api::make_object(to_bytes(std::move(body_cell)), to_bytes(std::move(init_state_cell))); } return data; }; @@ -2708,7 +2746,7 @@ td::Status TonlibClient::do_request(const tonlib_api::raw_sendMessageReturnHash& td::Promise>&& promise) { TRY_RESULT_PREFIX(body, vm::std_boc_deserialize(request.body_), TonlibError::InvalidBagOfCells("body")); auto hash = body->get_hash().as_slice().str(); - make_request(int_api::SendMessage{std::move(body)}, + make_request(int_api::SendMessage{std::move(body)}, promise.wrap([hash = std::move(hash)](auto res) { return tonlib_api::make_object(std::move(hash)); })); @@ -3687,6 +3725,17 @@ td::Status TonlibClient::do_request(const tonlib_api::smc_load& request, return td::Status::OK(); } +td::Status TonlibClient::do_request(const tonlib_api::smc_forget& request, + td::Promise>&& promise) { + auto it = smcs_.find(request.id_); + if (it == smcs_.end()) { + return TonlibError::InvalidSmcId(); + } + smcs_.erase(it); + promise.set_value(tonlib_api::make_object()); + return td::Status::OK(); +} + td::Status TonlibClient::do_request(const tonlib_api::smc_getCode& request, td::Promise>&& promise) { auto it = smcs_.find(request.id_); @@ -3867,7 +3916,7 @@ td::Status TonlibClient::do_request(const tonlib_api::smc_getLibraries& request, return td::Status::OK(); } - client_.send_query(ton::lite_api::liteServer_getLibraries(std::move(not_cached_hashes)), + client_.send_query(ton::lite_api::liteServer_getLibraries(std::move(not_cached_hashes)), promise.wrap([self=this, result_entries = std::move(result_entries)] (td::Result> r_libraries) mutable { @@ -4609,7 +4658,7 @@ td::Status TonlibClient::do_request(const tonlib_api::getConfigParam& request, std::vector params = { param }; client_.send_query(ton::lite_api::liteServer_getConfigParams(0, std::move(lite_block), std::move(params)), - promise.wrap([block, param](auto r_config) { + promise.wrap([block, param](auto r_config) { auto state = block::check_extract_state_proof(block, r_config->state_proof_.as_slice(), r_config->config_proof_.as_slice()); if (state.is_error()) { 
@@ -4693,7 +4742,7 @@ td::Status TonlibClient::do_request(const tonlib_api::blocks_lookupBlock& reques auto to_tonlib_api(const ton::lite_api::liteServer_transactionId& txid) -> tonlib_api_ptr { - return tonlib_api::make_object( + return tonlib_api::make_object( txid.mode_, txid.account_.as_slice().str(), txid.lt_, txid.hash_.as_slice().str()); } @@ -4817,7 +4866,7 @@ td::Status TonlibClient::do_request(const tonlib_api::blocks_getShardBlockProof& TRY_RESULT(id, to_block_id(*request.id_)); ton::BlockIdExt from; if (request.mode_ & 1) { - TRY_RESULT_ASSIGN(from, to_block_id(*request.id_)); + TRY_RESULT_ASSIGN(from, to_block_id(*request.from_)); } auto actor_id = actor_id_++; actors_[actor_id] = td::actor::create_actor("GetShardBlockProof", client_.get_client(), id, from, diff --git a/tonlib/tonlib/TonlibClient.h b/tonlib/tonlib/TonlibClient.h index 8c076876..9b10b671 100644 --- a/tonlib/tonlib/TonlibClient.h +++ b/tonlib/tonlib/TonlibClient.h @@ -110,7 +110,7 @@ class TonlibClient : public td::actor::Actor { vm::Dictionary libraries{256}; // network - td::actor::ActorOwn raw_client_; + td::actor::ActorOwn raw_client_; td::actor::ActorId ext_client_outbound_; td::actor::ActorOwn raw_last_block_; td::actor::ActorOwn raw_last_config_; @@ -305,6 +305,7 @@ class TonlibClient : public td::actor::Actor { td::Result> get_smc_info(td::int64 id); void finish_load_smc(td::unique_ptr query, td::Promise>&& promise); td::Status do_request(const tonlib_api::smc_load& request, td::Promise>&& promise); + td::Status do_request(const tonlib_api::smc_forget& request, td::Promise>&& promise); td::Status do_request(const tonlib_api::smc_getCode& request, td::Promise>&& promise); td::Status do_request(const tonlib_api::smc_getData& request, diff --git a/rldp-http-proxy/TonlibClient.cpp b/tonlib/tonlib/TonlibClientWrapper.cpp similarity index 66% rename from rldp-http-proxy/TonlibClient.cpp rename to tonlib/tonlib/TonlibClientWrapper.cpp index a8e18b81..8dff1874 100644 --- a/rldp-http-proxy/TonlibClient.cpp +++ b/tonlib/tonlib/TonlibClientWrapper.cpp @@ -23,44 +23,40 @@ exception statement from your version. If you delete this exception statement from all source files in the program, then also delete it here. 
*/ -#include "TonlibClient.h" +#include "TonlibClientWrapper.h" -TonlibClient::TonlibClient(ton::tl_object_ptr options) : options_(std::move(options)) { +namespace tonlib { + +TonlibClientWrapper::TonlibClientWrapper(ton::tl_object_ptr options) + : options_(std::move(options)) { } -void TonlibClient::start_up() { +void TonlibClientWrapper::start_up() { class Cb : public tonlib::TonlibCallback { public: - explicit Cb(td::actor::ActorId self_id) : self_id_(self_id) { + explicit Cb(td::actor::ActorId self_id) : self_id_(self_id) { } void on_result(std::uint64_t id, tonlib_api::object_ptr result) override { - td::actor::send_closure(self_id_, &TonlibClient::receive_request_result, id, std::move(result)); + td::actor::send_closure(self_id_, &TonlibClientWrapper::receive_request_result, id, std::move(result)); } void on_error(std::uint64_t id, tonlib_api::object_ptr error) override { - td::actor::send_closure(self_id_, &TonlibClient::receive_request_result, id, + td::actor::send_closure(self_id_, &TonlibClientWrapper::receive_request_result, id, td::Status::Error(error->code_, std::move(error->message_))); } private: - td::actor::ActorId self_id_; + td::actor::ActorId self_id_; }; tonlib_client_ = td::actor::create_actor("tonlibclient", td::make_unique(actor_id(this))); auto init = tonlib_api::make_object(std::move(options_)); - auto P = td::PromiseCreator::lambda([](td::Result> R) mutable { - R.ensure(); - }); + auto P = td::PromiseCreator::lambda( + [](td::Result> R) mutable { R.ensure(); }); send_request(std::move(init), std::move(P)); } -void TonlibClient::send_request(tonlib_api::object_ptr obj, - td::Promise> promise) { - auto id = next_request_id_++; - CHECK(requests_.emplace(id, std::move(promise)).second); - td::actor::send_closure(tonlib_client_, &tonlib::TonlibClient::request, id, std::move(obj)); -} - -void TonlibClient::receive_request_result(td::uint64 id, td::Result> R) { +void TonlibClientWrapper::receive_request_result(td::uint64 id, + td::Result> R) { if (id == 0) { return; } @@ -69,4 +65,6 @@ void TonlibClient::receive_request_result(td::uint64 id, td::Resultsecond); requests_.erase(it); promise.set_result(std::move(R)); -} \ No newline at end of file +} + +} // namespace tonlib \ No newline at end of file diff --git a/rldp-http-proxy/TonlibClient.h b/tonlib/tonlib/TonlibClientWrapper.h similarity index 68% rename from rldp-http-proxy/TonlibClient.h rename to tonlib/tonlib/TonlibClientWrapper.h index 0b75c6c0..041b25de 100644 --- a/rldp-http-proxy/TonlibClient.h +++ b/tonlib/tonlib/TonlibClientWrapper.h @@ -28,14 +28,26 @@ #include "auto/tl/tonlib_api.hpp" #include "tonlib/tonlib/TonlibClient.h" -class TonlibClient : public td::actor::Actor { +namespace tonlib { + +class TonlibClientWrapper : public td::actor::Actor { public: - explicit TonlibClient(ton::tl_object_ptr options); + explicit TonlibClientWrapper(ton::tl_object_ptr options); void start_up() override; - void send_request(tonlib_api::object_ptr obj, - td::Promise> promise); + template + void send_request(tonlib_api::object_ptr obj, td::Promise promise) { + auto id = next_request_id_++; + auto P = promise.wrap([](tonlib_api::object_ptr x) -> td::Result { + if (x->get_id() != F::ReturnType::element_type::ID) { + return td::Status::Error("Invalid response from tonlib"); + } + return ton::move_tl_object_as(std::move(x)); + }); + CHECK(requests_.emplace(id, std::move(P)).second); + td::actor::send_closure(tonlib_client_, &tonlib::TonlibClient::request, id, std::move(obj)); + } private: void 
receive_request_result(td::uint64 id, td::Result> R); @@ -45,3 +57,5 @@ class TonlibClient : public td::actor::Actor { std::map>> requests_; td::uint64 next_request_id_{1}; }; + +} // namespace tonlib diff --git a/tonlib/tonlib/tonlib-cli.cpp b/tonlib/tonlib/tonlib-cli.cpp index c256bf1e..53ce53c4 100644 --- a/tonlib/tonlib/tonlib-cli.cpp +++ b/tonlib/tonlib/tonlib-cli.cpp @@ -175,7 +175,7 @@ class TonlibCli : public td::actor::Actor { std::map>> query_handlers_; - td::actor::ActorOwn raw_client_; + td::actor::ActorOwn raw_client_; bool is_closing_{false}; td::uint32 ref_cnt_{1}; diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index 25df5729..3d029c93 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -20,4 +20,9 @@ target_link_libraries(pack-viewer tl_api ton_crypto keys validator tddb) target_include_directories(pack-viewer PUBLIC $/..) +add_executable(opcode-timing opcode-timing.cpp ) +target_link_libraries(opcode-timing ton_crypto) +target_include_directories(pack-viewer PUBLIC + $/..) + install(TARGETS generate-random-id RUNTIME DESTINATION bin) diff --git a/utils/opcode-timing.cpp b/utils/opcode-timing.cpp new file mode 100644 index 00000000..dc6ac75f --- /dev/null +++ b/utils/opcode-timing.cpp @@ -0,0 +1,171 @@ +#include +#include + +#include "vm/vm.h" +#include "vm/cp0.h" +#include "vm/dict.h" +#include "fift/utils.h" +#include "common/bigint.hpp" + +#include "td/utils/base64.h" +#include "td/utils/tests.h" +#include "td/utils/ScopeGuard.h" +#include "td/utils/StringBuilder.h" + +td::Ref to_cell(const unsigned char *buff, int bits) { + return vm::CellBuilder().store_bits(buff, bits, 0).finalize(); +} + +long double timingBaseline; + +typedef struct { + long double mean; + long double stddev; +} stats; + +struct runInfo { + long double runtime; + long long gasUsage; + int vmReturnCode; + + runInfo() : runtime(0.0), gasUsage(0), vmReturnCode(0) {} + runInfo(long double runtime, long long gasUsage, int vmReturnCode) : + runtime(runtime), gasUsage(gasUsage), vmReturnCode(vmReturnCode) {} + + runInfo operator+(const runInfo& addend) const { + return {runtime + addend.runtime, gasUsage + addend.gasUsage, vmReturnCode ? 
vmReturnCode : addend.vmReturnCode}; + } + + runInfo& operator+=(const runInfo& addend) { + runtime += addend.runtime; + gasUsage += addend.gasUsage; + if(!vmReturnCode && addend.vmReturnCode) { + vmReturnCode = addend.vmReturnCode; + } + return *this; + } + + bool errored() const { + return vmReturnCode != 0; + } +}; + +typedef struct { + stats runtime; + stats gasUsage; + bool errored; +} runtimeStats; + +runInfo time_run_vm(td::Slice command) { + unsigned char buff[128]; + const int bits = (int)td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), command.begin(), command.end()); + CHECK(bits >= 0); + + const auto cell = to_cell(buff, bits); + + vm::init_op_cp0(); + vm::DictionaryBase::get_empty_dictionary(); + + class Logger : public td::LogInterface { + public: + void append(td::CSlice slice) override { + res.append(slice.data(), slice.size()); + } + std::string res; + }; + static Logger logger; + logger.res = ""; + td::set_log_fatal_error_callback([](td::CSlice message) { td::default_log_interface->append(logger.res); }); + vm::VmLog log{&logger, td::LogOptions::plain()}; + log.log_options.level = 4; + log.log_options.fix_newlines = true; + log.log_mask |= vm::VmLog::DumpStack; + + vm::Stack stack; + try { + vm::GasLimits gas_limit(10000, 10000); + + std::clock_t cStart = std::clock(); + int ret = vm::run_vm_code(vm::load_cell_slice_ref(cell), stack, 0 /*flags*/, nullptr /*data*/, + std::move(log) /*VmLog*/, nullptr, &gas_limit); + std::clock_t cEnd = std::clock(); + const auto time = (1000.0 * static_cast(cEnd - cStart) / CLOCKS_PER_SEC) - timingBaseline; + return {time >= 0 ? time : 0, gas_limit.gas_consumed(), ret}; + } catch (...) { + LOG(FATAL) << "catch unhandled exception"; + return {-1, -1, 1}; + } +} + +runtimeStats averageRuntime(td::Slice command) { + const size_t samples = 5000; + runInfo total; + std::vector values; + values.reserve(samples); + for(size_t i=0; i(samples); + const auto gasMean = static_cast(total.gasUsage) / static_cast(samples); + long double runtimeDiffSum = 0.0; + long double gasDiffSum = 0.0; + bool errored = false; + for(const auto value : values) { + const auto runtime = value.runtime - runtimeMean; + const auto gasUsage = static_cast(value.gasUsage) - gasMean; + runtimeDiffSum += runtime * runtime; + gasDiffSum += gasUsage * gasUsage; + errored = errored || value.errored(); + } + return { + {runtimeMean, sqrt(runtimeDiffSum / static_cast(samples))}, + {gasMean, sqrt(gasDiffSum / static_cast(samples))}, + errored + }; +} + +runtimeStats timeInstruction(const std::string& setupCode, const std::string& toMeasure) { + const auto setupCodeTime = averageRuntime(setupCode); + const auto totalCodeTime = averageRuntime(setupCode + toMeasure); + return { + {totalCodeTime.runtime.mean - setupCodeTime.runtime.mean, totalCodeTime.runtime.stddev}, + {totalCodeTime.gasUsage.mean - setupCodeTime.gasUsage.mean, totalCodeTime.gasUsage.stddev}, + false + }; +} + +int main(int argc, char** argv) { + if(argc != 2 && argc != 3) { + std::cerr << + "This utility compares the timing of VM execution against the gas used.\n" + "It can be used to discover opcodes or opcode sequences that consume an " + "inordinate amount of computational resources relative to their gas cost.\n" + "\n" + "The utility expects two command line arguments, each a hex string: \n" + "The TVM code used to set up the stack and VM state followed by the TVM code to measure.\n" + "For example, to test the DIVMODC opcode:\n" + "\t$ " << argv[0] << " 80FF801C A90E 2>/dev/null\n" + 
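How the figures reported by timeInstruction above are derived, since the double subtraction is easy to misread; the setup numbers here are invented, only the A90E line corresponds to the sample output quoted in the usage text that follows:

// averageRuntime() already subtracts timingBaseline, the mean cost of running an empty code
// string; timeInstruction() then subtracts the setup code's own statistics from the combined run:
//
//   mean(setup)         ~ 0.0040 ms          (invented figure)
//   mean(setup + A90E)  ~ 0.0106 ms          (invented figure)
//   reported runtime    = 0.0106 - 0.0040 ~ 0.0066 ms
//   reported gas        = gas(setup + A90E) - gas(setup) = 26
//
// The reported stddev is the stddev of the combined run, not a propagated error of the difference.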
"\tOPCODE,runtime mean,runtime stddev,gas mean,gas stddev\n" + "\tA90E,0.0066416,0.00233496,26,0\n" + "\n" + "Usage: " << argv[0] << + " [TVM_SETUP_BYTECODE_HEX] TVM_BYTECODE_HEX" << std::endl << std::endl; + return 1; + } + std::cout << "OPCODE,runtime mean,runtime stddev,gas mean,gas stddev" << std::endl; + timingBaseline = averageRuntime("").runtime.mean; + std::string setup, code; + if(argc == 2) { + setup = ""; + code = argv[1]; + } else { + setup = argv[1]; + code = argv[2]; + } + const auto time = timeInstruction(setup, code); + std::cout << code << "," << time.runtime.mean << "," << time.runtime.stddev << "," << + time.gasUsage.mean << "," << time.gasUsage.stddev << std::endl; + return 0; +} diff --git a/validator/db/archive-manager.cpp b/validator/db/archive-manager.cpp index e7614154..05fb6ca6 100644 --- a/validator/db/archive-manager.cpp +++ b/validator/db/archive-manager.cpp @@ -441,7 +441,10 @@ void ArchiveManager::get_block_by_unix_time(AccountIdPrefixFull account_id, Unix td::Promise promise) { auto f = get_file_desc_by_unix_time(account_id, ts, false); if (f) { - auto n = get_next_file_desc(f); + auto n = f; + do { + n = get_next_file_desc(n); + } while (n != nullptr && !n->has_account_prefix(account_id)); td::actor::ActorId aid; if (n) { aid = n->file_actor_id(); @@ -464,7 +467,10 @@ void ArchiveManager::get_block_by_lt(AccountIdPrefixFull account_id, LogicalTime td::Promise promise) { auto f = get_file_desc_by_lt(account_id, lt, false); if (f) { - auto n = get_next_file_desc(f); + auto n = f; + do { + n = get_next_file_desc(n); + } while (n != nullptr && !n->has_account_prefix(account_id)); td::actor::ActorId aid; if (n) { aid = n->file_actor_id(); @@ -1232,6 +1238,16 @@ void ArchiveManager::truncate(BlockSeqno masterchain_seqno, ConstBlockHandle han } } +bool ArchiveManager::FileDescription::has_account_prefix(AccountIdPrefixFull account_id) const { + for (int i = 0; i < 60; i++) { + auto shard = shard_prefix(account_id, i); + if (first_blocks.count(shard)) { + return true; + } + } + return false; +} + } // namespace validator } // namespace ton diff --git a/validator/db/archive-manager.hpp b/validator/db/archive-manager.hpp index 31e0e6b6..79e6a2d7 100644 --- a/validator/db/archive-manager.hpp +++ b/validator/db/archive-manager.hpp @@ -97,6 +97,7 @@ class ArchiveManager : public td::actor::Actor { void clear_actor_id() { file.reset(); } + bool has_account_prefix(AccountIdPrefixFull account_id) const; PackageId id; bool deleted; diff --git a/validator/fabric.h b/validator/fabric.h index 069a0470..d1611bea 100644 --- a/validator/fabric.h +++ b/validator/fabric.h @@ -42,13 +42,15 @@ td::Result create_block_handle(td::BufferSlice data); td::Result create_block_handle(td::Slice data); td::Result create_temp_block_handle(td::BufferSlice data); BlockHandle create_empty_block_handle(BlockIdExt id); -td::Result> create_ext_message(td::BufferSlice data); +td::Result> create_ext_message(td::BufferSlice data, + block::SizeLimitsConfig::ExtMsgLimits limits); td::Result> create_ihr_message(td::BufferSlice data); td::Result>> create_new_shard_block_descriptions(td::BufferSlice data); td::Ref create_signature_set(std::vector sig_set); -void run_check_external_message(td::BufferSlice data, td::actor::ActorId manager, td::Promise promise); +void run_check_external_message(td::BufferSlice data, block::SizeLimitsConfig::ExtMsgLimits limits, + td::actor::ActorId manager, td::Promise> promise); void run_accept_block_query(BlockIdExt id, td::Ref data, std::vector prev, td::Ref validator_set, 
td::Ref signatures, diff --git a/validator/full-node-shard.cpp b/validator/full-node-shard.cpp index d048f611..35a9b851 100644 --- a/validator/full-node-shard.cpp +++ b/validator/full-node-shard.cpp @@ -144,7 +144,14 @@ void FullNodeShardImpl::check_broadcast(PublicKeyHash src, td::BufferSlice broad auto q = B.move_as_ok(); td::actor::send_closure(validator_manager_, &ValidatorManagerInterface::check_external_message, - std::move(q->message_->data_), std::move(promise)); + std::move(q->message_->data_), + [promise = std::move(promise)](td::Result> R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + } else { + promise.set_result(td::Unit()); + } + }); } void FullNodeShardImpl::remove_neighbour(adnl::AdnlNodeIdShort id) { diff --git a/validator/impl/collator-impl.h b/validator/impl/collator-impl.h index daa1a21b..43428f96 100644 --- a/validator/impl/collator-impl.h +++ b/validator/impl/collator-impl.h @@ -72,6 +72,7 @@ class Collator final : public td::actor::Actor { Ref validator_set_; td::actor::ActorId manager; td::Timestamp timeout; + td::Timestamp soft_timeout_, medium_timeout_; td::Promise main_promise; ton::BlockSeqno last_block_seqno{0}; ton::BlockSeqno prev_mc_block_seqno{0}; diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index 966e638a..97023014 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -66,10 +66,12 @@ Collator::Collator(ShardIdFull shard, bool is_hardfork, BlockIdExt min_mastercha , validator_set_(std::move(validator_set)) , manager(manager) , timeout(timeout) + , soft_timeout_(td::Timestamp::at(timeout.at() - 3.0)) + , medium_timeout_(td::Timestamp::at(timeout.at() - 1.5)) , main_promise(std::move(promise)) , perf_timer_("collate", 0.1, [manager](double duration) { - send_closure(manager, &ValidatorManager::add_perf_timer_stat, "collate", duration); - }) { + send_closure(manager, &ValidatorManager::add_perf_timer_stat, "collate", duration); + }) { } void Collator::start_up() { @@ -1592,30 +1594,22 @@ bool Collator::init_lt() { } bool Collator::fetch_config_params() { - auto res = impl_fetch_config_params(std::move(config_), - &old_mparams_, &storage_prices_, &storage_phase_cfg_, - &rand_seed_, &compute_phase_cfg_, &action_phase_cfg_, - &masterchain_create_fee_, &basechain_create_fee_, - workchain() - ); + auto res = impl_fetch_config_params(std::move(config_), &old_mparams_, &storage_prices_, &storage_phase_cfg_, + &rand_seed_, &compute_phase_cfg_, &action_phase_cfg_, &masterchain_create_fee_, + &basechain_create_fee_, workchain()); if (res.is_error()) { - return fatal_error(res.move_as_error()); + return fatal_error(res.move_as_error()); } config_ = res.move_as_ok(); return true; } -td::Result> - Collator::impl_fetch_config_params(std::unique_ptr config, - Ref* old_mparams, - std::vector* storage_prices, - block::StoragePhaseConfig* storage_phase_cfg, - td::BitArray<256>* rand_seed, - block::ComputePhaseConfig* compute_phase_cfg, - block::ActionPhaseConfig* action_phase_cfg, - td::RefInt256* masterchain_create_fee, - td::RefInt256* basechain_create_fee, - WorkchainId wc) { +td::Result> Collator::impl_fetch_config_params( + std::unique_ptr config, Ref* old_mparams, + std::vector* storage_prices, block::StoragePhaseConfig* storage_phase_cfg, + td::BitArray<256>* rand_seed, block::ComputePhaseConfig* compute_phase_cfg, + block::ActionPhaseConfig* action_phase_cfg, td::RefInt256* masterchain_create_fee, + td::RefInt256* basechain_create_fee, WorkchainId wc) { *old_mparams = 
config->get_config_param(9); { auto res = config->get_storage_prices(); @@ -1629,6 +1623,7 @@ td::Result> prng::rand_gen().strong_rand_bytes(rand_seed->data(), 32); LOG(DEBUG) << "block random seed set to " << rand_seed->to_hex(); } + TRY_RESULT(size_limits, config->get_size_limits_config()); { // compute compute_phase_cfg / storage_phase_cfg auto cell = config->get_config_param(wc == ton::masterchainId ? 20 : 21); @@ -1641,6 +1636,7 @@ td::Result> } compute_phase_cfg->block_rand_seed = *rand_seed; compute_phase_cfg->libraries = std::make_unique(config->get_libraries_root(), 256); + compute_phase_cfg->max_vm_data_depth = size_limits.max_vm_data_depth; compute_phase_cfg->global_config = config->get_root_cell(); } { @@ -1662,6 +1658,7 @@ td::Result> (unsigned)rec.first_frac, (unsigned)rec.next_frac}; action_phase_cfg->workchains = &config->get_workchain_list(); action_phase_cfg->bounce_msg_body = (config->has_capability(ton::capBounceMsgBody) ? 256 : 0); + action_phase_cfg->size_limits = size_limits; } { // fetch block_grams_created @@ -1761,6 +1758,9 @@ bool Collator::init_value_create() { } bool Collator::do_collate() { + // After do_collate started it will not be interrupted by timeout + alarm_timestamp() = td::Timestamp::never(); + LOG(DEBUG) << "do_collate() : start"; if (!fetch_config_params()) { return fatal_error("cannot fetch required configuration parameters from masterchain state"); @@ -2278,15 +2278,11 @@ Ref Collator::create_ordinary_transaction(Ref msg_root) { block::Account* acc = acc_res.move_as_ok(); assert(acc); - - auto res = impl_create_ordinary_transaction(msg_root, acc, now_, start_lt, - &storage_phase_cfg_, &compute_phase_cfg_, - &action_phase_cfg_, - external, last_proc_int_msg_.first - ); - if(res.is_error()) { + auto res = impl_create_ordinary_transaction(msg_root, acc, now_, start_lt, &storage_phase_cfg_, &compute_phase_cfg_, + &action_phase_cfg_, external, last_proc_int_msg_.first); + if (res.is_error()) { auto error = res.move_as_error(); - if(error.code() == -701) { + if (error.code() == -701) { // ignorable errors LOG(DEBUG) << error.message(); return {}; @@ -2313,16 +2309,13 @@ Ref Collator::create_ordinary_transaction(Ref msg_root) { // If td::status::error_code == 669 - Fatal Error block can not be produced // if td::status::error_code == 701 - Transaction can not be included into block, but it's ok (external or too early internal) -td::Result> Collator::impl_create_ordinary_transaction(Ref msg_root, - block::Account* acc, - UnixTime utime, LogicalTime lt, - block::StoragePhaseConfig* storage_phase_cfg, - block::ComputePhaseConfig* compute_phase_cfg, - block::ActionPhaseConfig* action_phase_cfg, - bool external, LogicalTime after_lt) { +td::Result> Collator::impl_create_ordinary_transaction( + Ref msg_root, block::Account* acc, UnixTime utime, LogicalTime lt, + block::StoragePhaseConfig* storage_phase_cfg, block::ComputePhaseConfig* compute_phase_cfg, + block::ActionPhaseConfig* action_phase_cfg, bool external, LogicalTime after_lt) { if (acc->last_trans_end_lt_ >= lt && acc->transactions.empty()) { - return td::Status::Error(-669, PSTRING() << "last transaction time in the state of account " << acc->workchain << ":" << acc->addr.to_hex() - << " is too large"); + return td::Status::Error(-669, PSTRING() << "last transaction time in the state of account " << acc->workchain + << ":" << acc->addr.to_hex() << " is too large"); } auto trans_min_lt = lt; if (external) { @@ -2336,51 +2329,58 @@ td::Result> Collator::impl_create_ordinary_t if 
(!trans->unpack_input_msg(ihr_delivered, action_phase_cfg)) { if (external) { // inbound external message was not accepted - return td::Status::Error(-701,"inbound external message rejected by account "s + acc->addr.to_hex() + - " before smart-contract execution"); - } - return td::Status::Error(-669,"cannot unpack input message for a new transaction"); + return td::Status::Error(-701, "inbound external message rejected by account "s + acc->addr.to_hex() + + " before smart-contract execution"); + } + return td::Status::Error(-669, "cannot unpack input message for a new transaction"); } if (trans->bounce_enabled) { if (!trans->prepare_storage_phase(*storage_phase_cfg, true)) { - return td::Status::Error(-669,"cannot create storage phase of a new transaction for smart contract "s + acc->addr.to_hex()); - } + return td::Status::Error( + -669, "cannot create storage phase of a new transaction for smart contract "s + acc->addr.to_hex()); + } if (!external && !trans->prepare_credit_phase()) { - return td::Status::Error(-669,"cannot create credit phase of a new transaction for smart contract "s + acc->addr.to_hex()); - } + return td::Status::Error( + -669, "cannot create credit phase of a new transaction for smart contract "s + acc->addr.to_hex()); + } } else { if (!external && !trans->prepare_credit_phase()) { - return td::Status::Error(-669,"cannot create credit phase of a new transaction for smart contract "s + acc->addr.to_hex()); - } + return td::Status::Error( + -669, "cannot create credit phase of a new transaction for smart contract "s + acc->addr.to_hex()); + } if (!trans->prepare_storage_phase(*storage_phase_cfg, true, true)) { - return td::Status::Error(-669,"cannot create storage phase of a new transaction for smart contract "s + acc->addr.to_hex()); - } + return td::Status::Error( + -669, "cannot create storage phase of a new transaction for smart contract "s + acc->addr.to_hex()); + } } if (!trans->prepare_compute_phase(*compute_phase_cfg)) { - return td::Status::Error(-669,"cannot create compute phase of a new transaction for smart contract "s + acc->addr.to_hex()); + return td::Status::Error( + -669, "cannot create compute phase of a new transaction for smart contract "s + acc->addr.to_hex()); } if (!trans->compute_phase->accepted) { if (external) { // inbound external message was not accepted auto const& cp = *trans->compute_phase; return td::Status::Error( - -701, - PSLICE() << "inbound external message rejected by transaction " << acc->addr.to_hex() << ":\n" << - "exitcode=" << cp.exit_code << ", steps=" << cp.vm_steps << ", gas_used=" << cp.gas_used << - (cp.vm_log.empty() ? "" : "\nVM Log (truncated):\n..." + cp.vm_log)); - } else if (trans->compute_phase->skip_reason == block::ComputePhase::sk_none) { - return td::Status::Error(-669,"new ordinary transaction for smart contract "s + acc->addr.to_hex() + - " has not been accepted by the smart contract (?)"); - } + -701, PSLICE() << "inbound external message rejected by transaction " << acc->addr.to_hex() << ":\n" + << "exitcode=" << cp.exit_code << ", steps=" << cp.vm_steps << ", gas_used=" << cp.gas_used + << (cp.vm_log.empty() ? "" : "\nVM Log (truncated):\n..." 
+ cp.vm_log)); + } else if (trans->compute_phase->skip_reason == block::ComputePhase::sk_none) { + return td::Status::Error(-669, "new ordinary transaction for smart contract "s + acc->addr.to_hex() + + " has not been accepted by the smart contract (?)"); + } } if (trans->compute_phase->success && !trans->prepare_action_phase(*action_phase_cfg)) { - return td::Status::Error(-669,"cannot create action phase of a new transaction for smart contract "s + acc->addr.to_hex()); + return td::Status::Error( + -669, "cannot create action phase of a new transaction for smart contract "s + acc->addr.to_hex()); } - if (trans->bounce_enabled && !trans->compute_phase->success && !trans->prepare_bounce_phase(*action_phase_cfg)) { - return td::Status::Error(-669,"cannot create bounce phase of a new transaction for smart contract "s + acc->addr.to_hex()); + if (trans->bounce_enabled && (!trans->compute_phase->success || trans->action_phase->state_size_too_big) && + !trans->prepare_bounce_phase(*action_phase_cfg)) { + return td::Status::Error( + -669, "cannot create bounce phase of a new transaction for smart contract "s + acc->addr.to_hex()); } if (!trans->serialize()) { - return td::Status::Error(-669,"cannot serialize new transaction for smart contract "s + acc->addr.to_hex()); + return td::Status::Error(-669, "cannot serialize new transaction for smart contract "s + acc->addr.to_hex()); } return std::move(trans); } @@ -2536,6 +2536,11 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R block_full_ = true; return 3; } + if (soft_timeout_.is_in_past(td::Timestamp::now())) { + LOG(WARNING) << "soft timeout reached, stop processing new messages"; + block_full_ = true; + return 3; + } return 1; } @@ -2816,6 +2821,11 @@ bool Collator::process_inbound_internal_messages() { LOG(INFO) << "BLOCK FULL, stop processing inbound internal messages"; break; } + if (soft_timeout_.is_in_past(td::Timestamp::now())) { + block_full_ = true; + LOG(WARNING) << "soft timeout reached, stop processing inbound internal messages"; + break; + } LOG(DEBUG) << "processing inbound message with (lt,hash)=(" << kv->lt << "," << kv->key.to_hex() << ") from neighbor #" << kv->source; if (verbosity > 2) { @@ -2847,6 +2857,10 @@ bool Collator::process_inbound_external_messages() { LOG(INFO) << "BLOCK FULL, stop processing external messages"; break; } + if (medium_timeout_.is_in_past(td::Timestamp::now())) { + LOG(WARNING) << "medium timeout reached, stop processing inbound external messages"; + break; + } auto ext_msg = ext_msg_pair.first; ton::Bits256 hash{ext_msg->get_hash().bits()}; int r = process_external_message(std::move(ext_msg)); @@ -3126,7 +3140,7 @@ static int update_one_shard(block::McShardHash& info, const block::McShardHash* if (info.is_fsm_none() && (info.want_split_ || depth < wc_info->min_split) && depth < wc_info->max_split && depth < 60) { // prepare split - info.set_fsm_split(now + ton::split_merge_delay, ton::split_merge_interval); + info.set_fsm_split(now + wc_info->split_merge_delay, wc_info->split_merge_interval); changed = true; LOG(INFO) << "preparing to split shard " << info.shard().to_str() << " during " << info.fsm_utime() << " .. 
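Taken together, the collator hunks above derive three deadlines from the single hard timeout; summarizing their intent, with the offsets taken from the constructor change:

// timeout                            - hard deadline; once do_collate() starts, the alarm is set to
//                                      never, so block assembly and serialization are not interrupted.
// soft_timeout_   = timeout - 3.0 s  - stop taking new outbound and inbound internal messages
//                                      (block_full_ is set, as if the block limits were hit).
// medium_timeout_ = timeout - 1.5 s  - additionally stop processing inbound external messages.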
" << info.fsm_utime_end(); @@ -3134,7 +3148,7 @@ static int update_one_shard(block::McShardHash& info, const block::McShardHash* sibling && !sibling->before_split_ && sibling->is_fsm_none() && (sibling->want_merge_ || depth > wc_info->max_split)) { // prepare merge - info.set_fsm_merge(now + ton::split_merge_delay, ton::split_merge_interval); + info.set_fsm_merge(now + wc_info->split_merge_delay, wc_info->split_merge_interval); changed = true; LOG(INFO) << "preparing to merge shard " << info.shard().to_str() << " with " << sibling->shard().to_str() << " during " << info.fsm_utime() << " .. " << info.fsm_utime_end(); @@ -3216,7 +3230,7 @@ bool Collator::create_mc_state_extra() { " contains an invalid configuration in its data, IGNORING CHANGES"; ignore_cfg_changes = true; } else { - cfg0 = cfg_dict.lookup_ref(td::BitArray<32>{(long long) 0}); + cfg0 = cfg_dict.lookup_ref(td::BitArray<32>{(long long)0}); } bool changed_cfg = false; if (cfg0.not_null()) { diff --git a/validator/impl/external-message.cpp b/validator/impl/external-message.cpp index 755370ca..ca8e6e48 100644 --- a/validator/impl/external-message.cpp +++ b/validator/impl/external-message.cpp @@ -38,8 +38,9 @@ ExtMessageQ::ExtMessageQ(td::BufferSlice data, td::Ref root, AccountId hash_ = block::compute_file_hash(data_); } -td::Result> ExtMessageQ::create_ext_message(td::BufferSlice data) { - if (data.size() > max_ext_msg_size) { +td::Result> ExtMessageQ::create_ext_message(td::BufferSlice data, + block::SizeLimitsConfig::ExtMsgLimits limits) { + if (data.size() > limits.max_size) { return td::Status::Error("external message too large, rejecting"); } vm::BagOfCells boc; @@ -54,7 +55,7 @@ td::Result> ExtMessageQ::create_ext_message(td::BufferSlice dat if (ext_msg->get_level() != 0) { return td::Status::Error("external message must have zero level"); } - if (ext_msg->get_depth() >= max_ext_msg_depth) { + if (ext_msg->get_depth() >= limits.max_depth) { return td::Status::Error("external message is too deep"); } vm::CellSlice cs{vm::NoVmOrd{}, ext_msg}; @@ -85,21 +86,25 @@ td::Result> ExtMessageQ::create_ext_message(td::BufferSlice dat return Ref{true, std::move(data), std::move(ext_msg), dest_prefix, wc, addr}; } -void ExtMessageQ::run_message(td::BufferSlice data, td::actor::ActorId manager, - td::Promise promise) { - auto R = create_ext_message(std::move(data)); +void ExtMessageQ::run_message(td::BufferSlice data, block::SizeLimitsConfig::ExtMsgLimits limits, + td::actor::ActorId manager, + td::Promise> promise) { + auto R = create_ext_message(std::move(data), limits); if (R.is_error()) { return promise.set_error(R.move_as_error_prefix("failed to parse external message ")); } auto M = R.move_as_ok(); auto root = M->root_cell(); block::gen::CommonMsgInfo::Record_ext_in_msg_info info; - tlb::unpack_cell_inexact(root, info); // checked in create message + tlb::unpack_cell_inexact(root, info); // checked in create message ton::StdSmcAddress addr = M->addr(); ton::WorkchainId wc = M->wc(); - run_fetch_account_state(wc, addr, manager, - [promise = std::move(promise), msg_root = root, wc = wc](td::Result,UnixTime,LogicalTime,std::unique_ptr>> res) mutable { + run_fetch_account_state( + wc, addr, manager, + [promise = std::move(promise), msg_root = root, wc, + M](td::Result, UnixTime, LogicalTime, std::unique_ptr>> + res) mutable { if (res.is_error()) { promise.set_error(td::Status::Error(PSLICE() << "Failed to get account state")); } else { @@ -109,20 +114,19 @@ void ExtMessageQ::run_message(td::BufferSlice data, td::actor::ActorId(tuple); 
auto lt = std::get<2>(tuple); auto config = std::move(std::get<3>(tuple)); - if(!acc.unpack(shard_acc, {}, utime, false)) { + if (!acc.unpack(shard_acc, {}, utime, false)) { promise.set_error(td::Status::Error(PSLICE() << "Failed to unpack account state")); } else { auto status = run_message_on_account(wc, &acc, utime, lt + 1, msg_root, std::move(config)); if (status.is_ok()) { - promise.set_value(td::Unit()); + promise.set_value(std::move(M)); } else { - promise.set_error(td::Status::Error( - PSLICE() << "External message was not accepted\n" << status.message())); + promise.set_error(td::Status::Error(PSLICE() << "External message was not accepted\n" + << status.message())); } } } - } - ); + }); } td::Status ExtMessageQ::run_message_on_account(ton::WorkchainId wc, diff --git a/validator/impl/external-message.hpp b/validator/impl/external-message.hpp index 1d1ec640..d5084761 100644 --- a/validator/impl/external-message.hpp +++ b/validator/impl/external-message.hpp @@ -37,8 +37,6 @@ class ExtMessageQ : public ExtMessage { ton::StdSmcAddress addr_; public: - static constexpr unsigned max_ext_msg_size = 65535; - static constexpr unsigned max_ext_msg_depth = 512; AccountIdPrefixFull shard() const override { return addr_prefix_; } @@ -59,10 +57,13 @@ class ExtMessageQ : public ExtMessage { return addr_; } - ExtMessageQ(td::BufferSlice data, td::Ref root, AccountIdPrefixFull shard, ton::WorkchainId wc, ton::StdSmcAddress addr); - static td::Result> create_ext_message(td::BufferSlice data); - static void run_message(td::BufferSlice data, td::actor::ActorId manager, - td::Promise promise); + ExtMessageQ(td::BufferSlice data, td::Ref root, AccountIdPrefixFull shard, ton::WorkchainId wc, + ton::StdSmcAddress addr); + static td::Result> create_ext_message(td::BufferSlice data, + block::SizeLimitsConfig::ExtMsgLimits limits); + static void run_message(td::BufferSlice data, block::SizeLimitsConfig::ExtMsgLimits limits, + td::actor::ActorId manager, + td::Promise> promise); static td::Status run_message_on_account(ton::WorkchainId wc, block::Account* acc, UnixTime utime, LogicalTime lt, diff --git a/validator/impl/fabric.cpp b/validator/impl/fabric.cpp index 47979af8..5d8b6440 100644 --- a/validator/impl/fabric.cpp +++ b/validator/impl/fabric.cpp @@ -111,13 +111,16 @@ td::Ref create_signature_set(std::vector sig_ return td::Ref{true, std::move(sig_set)}; } -td::Result> create_ext_message(td::BufferSlice data) { - TRY_RESULT(res, ExtMessageQ::create_ext_message(std::move(data))); +td::Result> create_ext_message(td::BufferSlice data, + block::SizeLimitsConfig::ExtMsgLimits limits) { + TRY_RESULT(res, ExtMessageQ::create_ext_message(std::move(data), limits)); return std::move(res); } -void run_check_external_message(td::BufferSlice data, td::actor::ActorId manager, td::Promise promise) { - ExtMessageQ::run_message(std::move(data), std::move(manager), std::move(promise)); +void run_check_external_message(td::BufferSlice data, block::SizeLimitsConfig::ExtMsgLimits limits, + td::actor::ActorId manager, + td::Promise> promise) { + ExtMessageQ::run_message(std::move(data), limits, std::move(manager), std::move(promise)); } td::Result> create_ihr_message(td::BufferSlice data) { diff --git a/validator/impl/liteserver.cpp b/validator/impl/liteserver.cpp index fadc596b..d9d2c333 100644 --- a/validator/impl/liteserver.cpp +++ b/validator/impl/liteserver.cpp @@ -153,6 +153,10 @@ void LiteQuery::start_up() { this->perform_getAccountState(ton::create_block_id(q.id_), static_cast(q.account_->workchain_), 
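The external-message hunks above replace the old hard-coded 65535-byte size and 512 depth limits with per-network values carried in block::SizeLimitsConfig::ExtMsgLimits. A minimal sketch of the same pre-check in isolation, not part of the patch (check_ext_msg_limits is a hypothetical helper; the field names are the ones introduced above):

td::Status check_ext_msg_limits(const td::BufferSlice& serialized, const td::Ref<vm::Cell>& root,
                                const block::SizeLimitsConfig::ExtMsgLimits& limits) {
  if (serialized.size() > limits.max_size) {
    return td::Status::Error("external message too large");
  }
  if (root->get_depth() >= limits.max_depth) {
    return td::Status::Error("external message is too deep");
  }
  return td::Status::OK();
}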
q.account_->id_, 0); }, + [&](lite_api::liteServer_getAccountStatePrunned& q) { + this->perform_getAccountState(ton::create_block_id(q.id_), static_cast(q.account_->workchain_), + q.account_->id_, 0x40000000); + }, [&](lite_api::liteServer_getOneTransaction& q) { this->perform_getOneTransaction(ton::create_block_id(q.id_), static_cast(q.account_->workchain_), q.account_->id_, @@ -228,7 +232,7 @@ void LiteQuery::perform_getMasterchainInfo(int mode) { return; } td::actor::send_closure_later( - manager_, &ton::validator::ValidatorManager::get_top_masterchain_state_block, + manager_, &ton::validator::ValidatorManager::get_last_liteserver_state_block, [Self = actor_id(this), return_state = bool(acc_state_promise_), mode](td::Result, BlockIdExt>> res) { if (res.is_error()) { td::actor::send_closure(Self, &LiteQuery::abort_query, res.move_as_error()); @@ -482,20 +486,13 @@ void LiteQuery::perform_sendMessage(td::BufferSlice data) { auto copy = data.clone(); td::actor::send_closure_later( manager_, &ValidatorManager::check_external_message, std::move(copy), - [Self = actor_id(this), data = std::move(data), manager = manager_](td::Result res) mutable { + [Self = actor_id(this), data = std::move(data), manager = manager_](td::Result> res) mutable { if(res.is_error()) { td::actor::send_closure(Self, &LiteQuery::abort_query, res.move_as_error_prefix("cannot apply external message to current state : "s)); } else { - auto crm = ton::validator::create_ext_message(std::move(data)); - if (crm.is_error()) { - //UNREACHABLE, checks in check_external_message, - td::actor::send_closure(Self, &LiteQuery::abort_query, - crm.move_as_error()); - return; - } LOG(INFO) << "sending an external message to validator manager"; - td::actor::send_closure_later(manager, &ValidatorManager::send_external_message, crm.move_as_ok()); + td::actor::send_closure_later(manager, &ValidatorManager::send_external_message, res.move_as_ok()); auto b = ton::create_serialize_tl_object(1); td::actor::send_closure(Self, &LiteQuery::finish_query, std::move(b)); } @@ -779,9 +776,9 @@ void LiteQuery::perform_getAccountState(BlockIdExt blkid, WorkchainId workchain, set_continuation([&]() -> void { continue_getAccountState(); }); request_mc_block_data_state(blkid); } else { - LOG(INFO) << "sending a get_top_masterchain_state_block query to manager"; + LOG(INFO) << "sending a get_last_liteserver_state_block query to manager"; td::actor::send_closure_later( - manager_, &ton::validator::ValidatorManager::get_top_masterchain_state_block, + manager_, &ton::validator::ValidatorManager::get_last_liteserver_state_block, [Self = actor_id(this)](td::Result, BlockIdExt>> res) -> void { if (res.is_error()) { td::actor::send_closure(Self, &LiteQuery::abort_query, res.move_as_error()); @@ -858,7 +855,7 @@ void LiteQuery::perform_getLibraries(std::vector library_list) { sort( library_list.begin(), library_list.end() ); library_list.erase( unique( library_list.begin(), library_list.end() ), library_list.end() ); td::actor::send_closure_later( - manager_, &ton::validator::ValidatorManager::get_top_masterchain_state_block, + manager_, &ton::validator::ValidatorManager::get_last_liteserver_state_block, [Self = actor_id(this), library_list](td::Result, BlockIdExt>> res) -> void { if (res.is_error()) { td::actor::send_closure(Self, &LiteQuery::abort_query, res.move_as_error()); @@ -1213,6 +1210,19 @@ void LiteQuery::finish_getAccountState(td::BufferSlice shard_proof) { } td::BufferSlice data; if (acc_root.not_null()) { + if (mode_ & 0x40000000) { + 
vm::MerkleProofBuilder mpb{acc_root}; + // account_none$0 = Account; + // account$1 addr:MsgAddressInt storage_stat:StorageInfo storage:AccountStorage = Account; + // account_storage$_ last_trans_lt:uint64 balance:CurrencyCollection state:AccountState = AccountStorage; + // account_active$1 _:StateInit = AccountState; + auto S = mpb.root()->load_cell(); + if (S.is_error()) { + fatal_error(S.move_as_error_prefix("Failed to load account: ")); + return; + } + acc_root = mpb.extract_proof(); + } auto res = vm::std_boc_serialize(std::move(acc_root)); if (res.is_error()) { fatal_error(res.move_as_error()); @@ -1532,7 +1542,7 @@ void LiteQuery::perform_getShardInfo(BlockIdExt blkid, ShardIdFull shard, bool e } void LiteQuery::load_prevKeyBlock(ton::BlockIdExt blkid, td::Promise>> promise) { - td::actor::send_closure_later(manager_, &ton::validator::ValidatorManager::get_top_masterchain_state_block, + td::actor::send_closure_later(manager_, &ton::validator::ValidatorManager::get_last_liteserver_state_block, [Self = actor_id(this), blkid, promise = std::move(promise)]( td::Result, BlockIdExt>> res) mutable { td::actor::send_closure_later(Self, &LiteQuery::continue_loadPrevKeyBlock, blkid, @@ -1955,7 +1965,7 @@ void LiteQuery::perform_getBlockProof(ton::BlockIdExt from, ton::BlockIdExt to, }); } else { td::actor::send_closure_later( - manager_, &ton::validator::ValidatorManager::get_top_masterchain_state_block, + manager_, &ton::validator::ValidatorManager::get_last_liteserver_state_block, [Self = actor_id(this), from, to, mode](td::Result, BlockIdExt>> res) { if (res.is_error()) { td::actor::send_closure(Self, &LiteQuery::abort_query, res.move_as_error()); @@ -1968,7 +1978,7 @@ void LiteQuery::perform_getBlockProof(ton::BlockIdExt from, ton::BlockIdExt to, } } else if (mode & 2) { td::actor::send_closure_later( - manager_, &ton::validator::ValidatorManager::get_top_masterchain_state_block, + manager_, &ton::validator::ValidatorManager::get_last_liteserver_state_block, [Self = actor_id(this), from, mode](td::Result, BlockIdExt>> res) { if (res.is_error()) { td::actor::send_closure(Self, &LiteQuery::abort_query, res.move_as_error()); diff --git a/validator/impl/shard.hpp b/validator/impl/shard.hpp index c726447f..b55cac35 100644 --- a/validator/impl/shard.hpp +++ b/validator/impl/shard.hpp @@ -126,6 +126,10 @@ class MasterchainStateQ : public MasterchainState, public ShardStateQ { ValidatorSessionConfig get_consensus_config() const override { return config_->get_consensus_config(); } + block::SizeLimitsConfig::ExtMsgLimits get_ext_msg_limits() const override { + auto R = config_->get_size_limits_config(); + return R.is_error() ? 
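The mode bit 0x40000000 handled above corresponds to the new liteServer.getAccountStatePrunned query: instead of the full account state, the answer carries a Merkle proof in which only the cells actually loaded while building it are kept, so a client can roughly read the account's top-level fields (status, storage stats, last transaction lt, balance) without downloading the contract code and data. A minimal sketch of the underlying pattern, not part of the patch (prune_to_visited is a hypothetical helper; only the MerkleProofBuilder calls already used above are assumed):

td::Result<td::Ref<vm::Cell>> prune_to_visited(td::Ref<vm::Cell> root) {
  vm::MerkleProofBuilder mpb{std::move(root)};
  // Visiting a cell through mpb.root() marks it for inclusion; every cell that is never
  // loaded is replaced in the proof by a pruned-branch cell carrying the same hash.
  TRY_RESULT(loaded, mpb.root()->load_cell());
  (void)loaded;
  return mpb.extract_proof();
}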
block::SizeLimitsConfig::ExtMsgLimits() : R.ok_ref().ext_msg_limits; + } BlockIdExt last_key_block_id() const override; BlockIdExt next_key_block_id(BlockSeqno seqno) const override; BlockIdExt prev_key_block_id(BlockSeqno seqno) const override; diff --git a/validator/impl/validate-query.cpp b/validator/impl/validate-query.cpp index 15249fe5..d152a5da 100644 --- a/validator/impl/validate-query.cpp +++ b/validator/impl/validate-query.cpp @@ -67,8 +67,8 @@ ValidateQuery::ValidateQuery(ShardIdFull shard, BlockIdExt min_masterchain_block , shard_pfx_(shard_.shard) , shard_pfx_len_(ton::shard_prefix_length(shard_)) , perf_timer_("validateblock", 0.1, [manager](double duration) { - send_closure(manager, &ValidatorManager::add_perf_timer_stat, "validateblock", duration); - }) { + send_closure(manager, &ValidatorManager::add_perf_timer_stat, "validateblock", duration); + }) { } void ValidateQuery::alarm() { @@ -737,6 +737,8 @@ bool ValidateQuery::try_unpack_mc_state() { return fatal_error(limits.move_as_error()); } block_limits_ = limits.move_as_ok(); + block_limits_->start_lt = start_lt_; + block_limit_status_ = std::make_unique(*block_limits_); if (!fetch_config_params()) { return false; } @@ -765,6 +767,14 @@ bool ValidateQuery::fetch_config_params() { // recover (not generate) rand seed from block header CHECK(!rand_seed_.is_zero()); } + block::SizeLimitsConfig size_limits; + { + auto res = config_->get_size_limits_config(); + if (res.is_error()) { + return fatal_error(res.move_as_error()); + } + size_limits = res.move_as_ok(); + } { // compute compute_phase_cfg / storage_phase_cfg auto cell = config_->get_config_param(is_masterchain() ? 20 : 21); @@ -777,6 +787,7 @@ bool ValidateQuery::fetch_config_params() { } compute_phase_cfg_.block_rand_seed = rand_seed_; compute_phase_cfg_.libraries = std::make_unique(config_->get_libraries_root(), 256); + compute_phase_cfg_.max_vm_data_depth = size_limits.max_vm_data_depth; compute_phase_cfg_.global_config = config_->get_root_cell(); } { @@ -798,6 +809,7 @@ bool ValidateQuery::fetch_config_params() { (unsigned)rec.first_frac, (unsigned)rec.next_frac}; action_phase_cfg_.workchains = &config_->get_workchain_list(); action_phase_cfg_.bounce_msg_body = (config_->has_capability(ton::capBounceMsgBody) ? 
256 : 0); + action_phase_cfg_.size_limits = size_limits; } { // fetch block_grams_created @@ -998,6 +1010,16 @@ bool ValidateQuery::compute_prev_state() { return compute_prev_state_from_collated_data(); } CHECK(prev_states.size() == 1u + after_merge_); + // Extend validator timeout if previous block is too old + UnixTime prev_ts = prev_states[0]->get_unix_time(); + if (after_merge_) { + prev_ts = std::max(prev_ts, prev_states[1]->get_unix_time()); + } + td::Timestamp new_timeout = td::Timestamp::in(std::min(60.0, (td::Clocks::system() - (double)prev_ts) / 2)); + if (timeout < new_timeout) { + alarm_timestamp() = timeout = new_timeout; + } + prev_state_root_ = prev_states[0]->root_cell(); CHECK(prev_state_root_.not_null()); if (after_merge_) { @@ -1690,8 +1712,8 @@ bool ValidateQuery::check_one_shard(const block::McShardHash& info, const block: (sibling->want_merge_ || depth > wc_info->max_split); if (!fsm_inherited && !info.is_fsm_none()) { if (info.fsm_utime() < now_ || info.fsm_utime_end() <= info.fsm_utime() || - info.fsm_utime_end() < info.fsm_utime() + ton::min_split_merge_interval || - info.fsm_utime_end() > now_ + ton::max_split_merge_delay) { + info.fsm_utime_end() < info.fsm_utime() + wc_info->min_split_merge_interval || + info.fsm_utime_end() > now_ + wc_info->max_split_merge_delay) { return reject_query(PSTRING() << "incorrect future split/merge interval " << info.fsm_utime() << " .. " << info.fsm_utime_end() << " set for shard " << shard.to_str() << " in new shard configuration (it is " << now_ << " now)"); @@ -4183,6 +4205,9 @@ std::unique_ptr ValidateQuery::unpack_account(td::ConstBitPtr ad bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalTime lt, Ref trans_root, bool is_first, bool is_last) { + if (!check_timeout()) { + return false; + } LOG(DEBUG) << "checking transaction " << lt << " of account " << account.addr.to_hex(); const StdSmcAddress& addr = account.addr; block::gen::Transaction::Record trans; @@ -4388,7 +4413,8 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT } } if (is_first && is_masterchain() && account.is_special && account.tick && - (tag != block::gen::TransactionDescr::trans_tick_tock || (td_cs.prefetch_ulong(4) & 1)) && account.orig_status == block::Account::acc_active) { + (tag != block::gen::TransactionDescr::trans_tick_tock || (td_cs.prefetch_ulong(4) & 1)) && + account.orig_status == block::Account::acc_active) { return reject_query(PSTRING() << "transaction " << lt << " of account " << addr.to_hex() << " is the first transaction for this special tick account in this block, but the " "transaction is not a tick transaction"); @@ -4417,6 +4443,13 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT int trans_type = block::Transaction::tr_none; switch (tag) { case block::gen::TransactionDescr::trans_ord: { + if (!block_limit_status_->fits(block::ParamLimits::cl_medium)) { + return reject_query(PSTRING() << "cannod add ordinary transaction because hard block limits are exceeded: " + << "gas_used=" << block_limit_status_->gas_used + << "(limit=" << block_limits_->gas.hard() << "), " + << "lt_delta=" << block_limit_status_->cur_lt - block_limits_->start_lt + << "(limit=" << block_limits_->lt_delta.hard() << ")"); + } trans_type = block::Transaction::tr_ord; if (in_msg_root.is_null()) { return reject_query(PSTRING() << "ordinary transaction " << lt << " of account " << addr.to_hex() @@ -4554,7 +4587,8 @@ bool ValidateQuery::check_one_transaction(block::Account& 
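Two illustrative cases for the timeout extension added in compute_prev_state() above (the block ages are only examples):

// previous block is 40 s old   ->  new_timeout = now + min(60, 40 / 2)  = now + 20 s
// previous block is 10 min old ->  new_timeout = now + min(60, 600 / 2) = now + 60 s (capped)
// The alarm is moved only if new_timeout is later than the deadline the validator already had,
// so a recent previous block never shortens the existing timeout.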
account, ton::LogicalT return reject_query(PSTRING() << "cannot re-create action phase of transaction " << lt << " for smart contract " << addr.to_hex()); } - if (trs->bounce_enabled && !trs->compute_phase->success && !trs->prepare_bounce_phase(action_phase_cfg_)) { + if (trs->bounce_enabled && (!trs->compute_phase->success || trs->action_phase->state_size_too_big) && + !trs->prepare_bounce_phase(action_phase_cfg_)) { return reject_query(PSTRING() << "cannot re-create bounce phase of transaction " << lt << " for smart contract " << addr.to_hex()); } @@ -4562,7 +4596,7 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT return reject_query(PSTRING() << "cannot re-create the serialization of transaction " << lt << " for smart contract " << addr.to_hex()); } - if (block_limit_status_ && !trs->update_limits(*block_limit_status_)) { + if (!trs->update_limits(*block_limit_status_, false)) { return fatal_error(PSTRING() << "cannot update block limit status to include transaction " << lt << " of account " << addr.to_hex()); } @@ -5037,7 +5071,7 @@ bool ValidateQuery::check_config_update(Ref old_conf_params, Ref< return reject_query("no important parameters have been changed, but the block is marked as a key block"); } vm::Dictionary dict1{ocfg_root, 32}; - auto param0 = dict1.lookup_ref(td::BitArray<32>{(long long) 0}); + auto param0 = dict1.lookup_ref(td::BitArray<32>{(long long)0}); if (param0.is_null()) { if (cfg_acc_changed) { return reject_query("new state of old configuration smart contract "s + old_cfg_addr.to_hex() + diff --git a/validator/impl/validate-query.hpp b/validator/impl/validate-query.hpp index a0167dab..59eabcbd 100644 --- a/validator/impl/validate-query.hpp +++ b/validator/impl/validate-query.hpp @@ -371,6 +371,14 @@ class ValidateQuery : public td::actor::Actor { bool check_mc_block_extra(); Ref get_virt_state_root(td::Bits256 block_root_hash); + + bool check_timeout() { + if (timeout && timeout.is_in_past()) { + abort_query(td::Status::Error(ErrorCode::timeout, "timeout")); + return false; + } + return true; + } }; } // namespace validator diff --git a/validator/interfaces/shard.h b/validator/interfaces/shard.h index 59211a85..ac355d7d 100644 --- a/validator/interfaces/shard.h +++ b/validator/interfaces/shard.h @@ -87,6 +87,7 @@ class MasterchainState : virtual public ShardState { virtual td::Status prepare() { return td::Status::OK(); } + virtual block::SizeLimitsConfig::ExtMsgLimits get_ext_msg_limits() const = 0; }; } // namespace validator diff --git a/validator/interfaces/validator-manager.h b/validator/interfaces/validator-manager.h index 38de7467..bcc5fd53 100644 --- a/validator/interfaces/validator-manager.h +++ b/validator/interfaces/validator-manager.h @@ -160,7 +160,8 @@ class ValidatorManager : public ValidatorManagerInterface { virtual void update_last_known_key_block(BlockHandle handle, bool send_request) = 0; virtual void update_gc_block_handle(BlockHandle handle, td::Promise promise) = 0; - virtual void update_shard_client_block_handle(BlockHandle handle, td::Promise promise) = 0; + virtual void update_shard_client_block_handle(BlockHandle handle, td::Ref state, + td::Promise promise) = 0; virtual void truncate(BlockSeqno seqno, ConstBlockHandle handle, td::Promise promise) = 0; diff --git a/validator/manager-disk.cpp b/validator/manager-disk.cpp index 26da9e16..5a314e2f 100644 --- a/validator/manager-disk.cpp +++ b/validator/manager-disk.cpp @@ -260,7 +260,10 @@ void ValidatorManagerImpl::get_key_block_proof_link(BlockIdExt 
block_id, td::Pro } void ValidatorManagerImpl::new_external_message(td::BufferSlice data) { - auto R = create_ext_message(std::move(data)); + if (last_masterchain_state_.is_null()) { + return; + } + auto R = create_ext_message(std::move(data), last_masterchain_state_->get_ext_msg_limits()); if (R.is_ok()) { ext_messages_.emplace_back(R.move_as_ok()); } @@ -872,6 +875,11 @@ void ValidatorManagerImpl::get_top_masterchain_state_block( std::pair, BlockIdExt>{last_masterchain_state_, last_masterchain_block_id_}); } +void ValidatorManagerImpl::get_last_liteserver_state_block( + td::Promise, BlockIdExt>> promise) { + return get_top_masterchain_state_block(std::move(promise)); +} + void ValidatorManagerImpl::send_get_block_request(BlockIdExt id, td::uint32 priority, td::Promise promise) { UNREACHABLE(); diff --git a/validator/manager-disk.hpp b/validator/manager-disk.hpp index e0aad62e..d41a6e34 100644 --- a/validator/manager-disk.hpp +++ b/validator/manager-disk.hpp @@ -124,7 +124,7 @@ class ValidatorManagerImpl : public ValidatorManager { //void get_block_description(BlockIdExt block_id, td::Promise promise) override; void new_external_message(td::BufferSlice data) override; - void check_external_message(td::BufferSlice data, td::Promise promise) override { + void check_external_message(td::BufferSlice data, td::Promise> promise) override { UNREACHABLE(); } void new_ihr_message(td::BufferSlice data) override; @@ -228,6 +228,7 @@ class ValidatorManagerImpl : public ValidatorManager { void get_top_masterchain_state(td::Promise> promise) override; void get_top_masterchain_block(td::Promise promise) override; void get_top_masterchain_state_block(td::Promise, BlockIdExt>> promise) override; + void get_last_liteserver_state_block(td::Promise, BlockIdExt>> promise) override; void send_get_block_request(BlockIdExt id, td::uint32 priority, td::Promise promise) override; void send_get_zero_state_request(BlockIdExt id, td::uint32 priority, td::Promise promise) override; @@ -359,7 +360,8 @@ class ValidatorManagerImpl : public ValidatorManager { } void update_last_known_key_block(BlockHandle handle, bool send_request) override { } - void update_shard_client_block_handle(BlockHandle handle, td::Promise promise) override { + void update_shard_client_block_handle(BlockHandle handle, td::Ref state, + td::Promise promise) override { } void prepare_stats(td::Promise>> promise) override { diff --git a/validator/manager-hardfork.cpp b/validator/manager-hardfork.cpp index d9718fa7..80a64d25 100644 --- a/validator/manager-hardfork.cpp +++ b/validator/manager-hardfork.cpp @@ -151,7 +151,7 @@ void ValidatorManagerImpl::get_key_block_proof_link(BlockIdExt block_id, td::Pro } void ValidatorManagerImpl::new_external_message(td::BufferSlice data) { - auto R = create_ext_message(std::move(data)); + auto R = create_ext_message(std::move(data), block::SizeLimitsConfig::ExtMsgLimits()); if (R.is_ok()) { ext_messages_.emplace_back(R.move_as_ok()); } diff --git a/validator/manager-hardfork.hpp b/validator/manager-hardfork.hpp index e8fa327b..0a9e8a2b 100644 --- a/validator/manager-hardfork.hpp +++ b/validator/manager-hardfork.hpp @@ -144,7 +144,7 @@ class ValidatorManagerImpl : public ValidatorManager { void get_key_block_proof_link(BlockIdExt block_id, td::Promise promise) override; void new_external_message(td::BufferSlice data) override; - void check_external_message(td::BufferSlice data, td::Promise promise) override { + void check_external_message(td::BufferSlice data, td::Promise> promise) override { UNREACHABLE(); } void 
new_ihr_message(td::BufferSlice data) override; @@ -284,6 +284,9 @@ class ValidatorManagerImpl : public ValidatorManager { void get_top_masterchain_state_block(td::Promise, BlockIdExt>> promise) override { UNREACHABLE(); } + void get_last_liteserver_state_block(td::Promise, BlockIdExt>> promise) override { + UNREACHABLE(); + } void send_get_block_request(BlockIdExt id, td::uint32 priority, td::Promise promise) override { UNREACHABLE(); @@ -419,7 +422,8 @@ class ValidatorManagerImpl : public ValidatorManager { } void update_last_known_key_block(BlockHandle handle, bool send_request) override { } - void update_shard_client_block_handle(BlockHandle handle, td::Promise promise) override { + void update_shard_client_block_handle(BlockHandle handle, td::Ref state, + td::Promise promise) override { } void prepare_stats(td::Promise>> promise) override { diff --git a/validator/manager-init.cpp b/validator/manager-init.cpp index 8935664e..3c40f1f6 100644 --- a/validator/manager-init.cpp +++ b/validator/manager-init.cpp @@ -354,7 +354,8 @@ void ValidatorManagerMasterchainStarter::got_init_block_handle(BlockHandle handl void ValidatorManagerMasterchainStarter::got_init_block_state(td::Ref state) { state_ = std::move(state); - CHECK(state_->get_block_id() == opts_->init_block_id() || state_->ancestor_is_valid(opts_->init_block_id())); + CHECK(state_->get_block_id() == opts_->init_block_id() || state_->ancestor_is_valid(opts_->init_block_id()) || + state_->get_block_id().seqno() < opts_->get_last_fork_masterchain_seqno()); //finish(); auto P = td::PromiseCreator::lambda( diff --git a/validator/manager.cpp b/validator/manager.cpp index f56d6901..3015e882 100644 --- a/validator/manager.cpp +++ b/validator/manager.cpp @@ -376,12 +376,16 @@ void ValidatorManagerImpl::new_external_message(td::BufferSlice data) { if (!is_collator()) { return; } + if (last_masterchain_state_.is_null()) { + VLOG(VALIDATOR_NOTICE) << "dropping ext message: validator is not ready"; + return; + } if ((double)ext_messages_.size() > max_mempool_num()) { return; } - auto R = create_ext_message(std::move(data)); + auto R = create_ext_message(std::move(data), last_masterchain_state_->get_ext_msg_limits()); if (R.is_error()) { - VLOG(VALIDATOR_NOTICE) << "dropping bad external message: " << R.move_as_error(); + VLOG(VALIDATOR_NOTICE) << "dropping bad ext message: " << R.move_as_error(); return; } add_external_message(R.move_as_ok()); @@ -400,8 +404,14 @@ void ValidatorManagerImpl::add_external_message(td::Ref msg) { } } } -void ValidatorManagerImpl::check_external_message(td::BufferSlice data, td::Promise promise) { - run_check_external_message(std::move(data), actor_id(this), std::move(promise)); +void ValidatorManagerImpl::check_external_message(td::BufferSlice data, td::Promise> promise) { + auto state = do_get_last_liteserver_state(); + if (state.is_null()) { + promise.set_error(td::Status::Error(ErrorCode::notready, "not ready")); + return; + } + run_check_external_message(std::move(data), state->get_ext_msg_limits(), actor_id(this), + std::move(promise)); } void ValidatorManagerImpl::new_ihr_message(td::BufferSlice data) { @@ -1397,6 +1407,16 @@ void ValidatorManagerImpl::get_top_masterchain_state(td::Promise ValidatorManagerImpl::do_get_last_liteserver_state() { + if (last_masterchain_state_.is_null()) { + return {}; + } + if (last_liteserver_state_.is_null() || last_liteserver_state_->get_unix_time() < td::Clocks::system() - 30) { + last_liteserver_state_ = last_masterchain_state_; + } + return last_liteserver_state_; +} + void 
ValidatorManagerImpl::get_top_masterchain_block(td::Promise promise) { if (!last_masterchain_block_id_.is_valid()) { promise.set_error(td::Status::Error(ton::ErrorCode::notready, "not started")); @@ -1415,6 +1435,16 @@ void ValidatorManagerImpl::get_top_masterchain_state_block( } } +void ValidatorManagerImpl::get_last_liteserver_state_block( + td::Promise, BlockIdExt>> promise) { + auto state = do_get_last_liteserver_state(); + if (state.is_null()) { + promise.set_error(td::Status::Error(ton::ErrorCode::notready, "not started")); + } else { + promise.set_result(std::pair, BlockIdExt>{state, state->get_block_id()}); + } +} + void ValidatorManagerImpl::send_get_block_request(BlockIdExt id, td::uint32 priority, td::Promise promise) { callback_->download_block(id, priority, td::Timestamp::in(10.0), std::move(promise)); @@ -2369,9 +2399,13 @@ void ValidatorManagerImpl::advance_gc(BlockHandle handle, td::Ref promise) { +void ValidatorManagerImpl::update_shard_client_block_handle(BlockHandle handle, td::Ref state, + td::Promise promise) { shard_client_handle_ = std::move(handle); auto seqno = shard_client_handle_->id().seqno(); + if (last_liteserver_state_.is_null() || last_liteserver_state_->get_block_id().seqno() < seqno) { + last_liteserver_state_ = std::move(state); + } shard_client_update(seqno); promise.set_value(td::Unit()); } diff --git a/validator/manager.hpp b/validator/manager.hpp index 6fca1178..835f7de0 100644 --- a/validator/manager.hpp +++ b/validator/manager.hpp @@ -255,6 +255,9 @@ class ValidatorManagerImpl : public ValidatorManager { BlockHandle last_key_block_handle_; BlockHandle last_known_key_block_handle_; BlockHandle shard_client_handle_; + td::Ref last_liteserver_state_; + + td::Ref do_get_last_liteserver_state(); BlockHandle gc_masterchain_handle_; td::Ref gc_masterchain_state_; @@ -279,7 +282,8 @@ class ValidatorManagerImpl : public ValidatorManager { void advance_gc(BlockHandle handle, td::Ref state); void try_advance_gc_masterchain_block(); void update_gc_block_handle(BlockHandle handle, td::Promise promise) override; - void update_shard_client_block_handle(BlockHandle handle, td::Promise promise) override; + void update_shard_client_block_handle(BlockHandle handle, td::Ref state, + td::Promise promise) override; bool out_of_sync(); void applied_hardfork(); @@ -344,7 +348,7 @@ class ValidatorManagerImpl : public ValidatorManager { void new_external_message(td::BufferSlice data) override; void add_external_message(td::Ref message); - void check_external_message(td::BufferSlice data, td::Promise promise) override; + void check_external_message(td::BufferSlice data, td::Promise> promise) override; void new_ihr_message(td::BufferSlice data) override; void new_shard_block(BlockIdExt block_id, CatchainSeqno cc_seqno, td::BufferSlice data) override; @@ -442,6 +446,7 @@ class ValidatorManagerImpl : public ValidatorManager { void get_top_masterchain_state(td::Promise> promise) override; void get_top_masterchain_block(td::Promise promise) override; void get_top_masterchain_state_block(td::Promise, BlockIdExt>> promise) override; + void get_last_liteserver_state_block(td::Promise, BlockIdExt>> promise) override; void send_get_block_request(BlockIdExt id, td::uint32 priority, td::Promise promise) override; void send_get_zero_state_request(BlockIdExt id, td::uint32 priority, td::Promise promise) override; diff --git a/validator/shard-client.cpp b/validator/shard-client.cpp index 5c25b783..303e6333 100644 --- a/validator/shard-client.cpp +++ b/validator/shard-client.cpp @@ -107,9 
+107,6 @@ void ShardClient::start_up_init_mode() { void ShardClient::applied_all_shards() { LOG(DEBUG) << "shardclient: " << masterchain_block_handle_->id() << " finished"; - - masterchain_state_.clear(); - auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](td::Result R) { R.ensure(); td::actor::send_closure(SelfId, &ShardClient::saved_to_db); @@ -121,7 +118,8 @@ void ShardClient::applied_all_shards() { void ShardClient::saved_to_db() { CHECK(masterchain_block_handle_); td::actor::send_closure(manager_, &ValidatorManager::update_shard_client_block_handle, masterchain_block_handle_, - [](td::Unit) {}); + std::move(masterchain_state_), [](td::Unit) {}); + masterchain_state_.clear(); if (promise_) { promise_.set_value(td::Unit()); } diff --git a/validator/state-serializer.cpp b/validator/state-serializer.cpp index 1c8d677c..c668720b 100644 --- a/validator/state-serializer.cpp +++ b/validator/state-serializer.cpp @@ -81,13 +81,22 @@ void AsyncStateSerializer::alarm() { } void AsyncStateSerializer::request_masterchain_state() { - auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](td::Result> R) { + auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), manager = manager_](td::Result> R) { if (R.is_error()) { td::actor::send_closure(SelfId, &AsyncStateSerializer::fail_handler, R.move_as_error_prefix("failed to get masterchain state: ")); } else { - td::actor::send_closure(SelfId, &AsyncStateSerializer::got_masterchain_state, - td::Ref(R.move_as_ok())); + td::actor::send_closure(manager, &ValidatorManager::get_cell_db_reader, + [SelfId, state = td::Ref(R.move_as_ok())]( + td::Result> R) mutable { + if (R.is_error()) { + td::actor::send_closure(SelfId, &AsyncStateSerializer::fail_handler, + R.move_as_error_prefix("failed to get cell db reader: ")); + } else { + td::actor::send_closure(SelfId, &AsyncStateSerializer::got_masterchain_state, + std::move(state), R.move_as_ok()); + } + }); } }); td::actor::send_closure(manager_, &ValidatorManager::get_shard_state_from_db, masterchain_handle_, std::move(P)); @@ -136,26 +145,13 @@ void AsyncStateSerializer::next_iteration() { td::actor::send_closure(manager_, &ValidatorManager::get_shard_state_from_db, masterchain_handle_, std::move(P)); return; } - if (!cell_db_reader_) { - running_ = true; - auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](td::Result> R) { - if (R.is_error()) { - td::actor::send_closure(SelfId, &AsyncStateSerializer::fail_handler, - R.move_as_error_prefix("failed to get cell db reader: ")); - } else { - td::actor::send_closure(SelfId, &AsyncStateSerializer::got_cell_db_reader, R.move_as_ok()); - } - }); - td::actor::send_closure(manager_, &ValidatorManager::get_cell_db_reader, std::move(P)); - return; - } if (!have_masterchain_state_) { LOG(INFO) << "started serializing persistent state for " << masterchain_handle_->id().id; // block next attempts immediately, but send actual request later running_ = true; - delay_action( - [SelfId = actor_id(this)]() { td::actor::send_closure(SelfId, &AsyncStateSerializer::request_masterchain_state); }, - td::Timestamp::in(td::Random::fast(0, 3600))); + delay_action([SelfId = actor_id( + this)]() { td::actor::send_closure(SelfId, &AsyncStateSerializer::request_masterchain_state); }, + td::Timestamp::in(td::Random::fast(0, 3600))); return; } while (next_idx_ < shards_.size()) { @@ -169,7 +165,6 @@ void AsyncStateSerializer::next_iteration() { LOG(INFO) << "finished serializing persistent state for " << masterchain_handle_->id().id; last_key_block_ts_ = 
masterchain_handle_->unix_time(); last_key_block_id_ = masterchain_handle_->id(); - cell_db_reader_ = nullptr; } if (!saved_to_db_) { running_ = true; @@ -218,13 +213,6 @@ void AsyncStateSerializer::store_persistent_state_description(td::Ref cell_db_reader) { - cell_db_reader_ = std::move(cell_db_reader); - running_ = false; - attempt_ = 0; - next_iteration(); -} - void AsyncStateSerializer::got_masterchain_handle(BlockHandle handle) { CHECK(!masterchain_handle_); masterchain_handle_ = std::move(handle); @@ -233,7 +221,8 @@ void AsyncStateSerializer::got_masterchain_handle(BlockHandle handle) { next_iteration(); } -void AsyncStateSerializer::got_masterchain_state(td::Ref state) { +void AsyncStateSerializer::got_masterchain_state(td::Ref state, + std::shared_ptr cell_db_reader) { LOG(INFO) << "serializing masterchain state " << masterchain_handle_->id().id; have_masterchain_state_ = true; CHECK(next_idx_ == 0); @@ -246,7 +235,7 @@ void AsyncStateSerializer::got_masterchain_state(td::Ref state } } - auto write_data = [hash = state->root_cell()->get_hash(), cell_db_reader = cell_db_reader_] (td::FileFd& fd) { + auto write_data = [hash = state->root_cell()->get_hash(), cell_db_reader](td::FileFd& fd) { return vm::std_boc_serialize_to_file_large(cell_db_reader, hash, fd, 31); }; auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](td::Result R) { @@ -265,20 +254,32 @@ void AsyncStateSerializer::stored_masterchain_state() { } void AsyncStateSerializer::got_shard_handle(BlockHandle handle) { - auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), handle](td::Result> R) { - if (R.is_error()) { - td::actor::send_closure(SelfId, &AsyncStateSerializer::fail_handler, R.move_as_error()); - } else { - td::actor::send_closure(SelfId, &AsyncStateSerializer::got_shard_state, handle, R.move_as_ok()); - } - }); + auto P = td::PromiseCreator::lambda( + [SelfId = actor_id(this), handle, manager = manager_](td::Result> R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &AsyncStateSerializer::fail_handler, R.move_as_error()); + } else { + td::actor::send_closure( + manager, &ValidatorManager::get_cell_db_reader, + [SelfId, state = R.move_as_ok(), handle](td::Result> R) mutable { + if (R.is_error()) { + td::actor::send_closure(SelfId, &AsyncStateSerializer::fail_handler, + R.move_as_error_prefix("failed to get cell db reader: ")); + } else { + td::actor::send_closure(SelfId, &AsyncStateSerializer::got_shard_state, handle, std::move(state), + R.move_as_ok()); + } + }); + } + }); td::actor::send_closure(manager_, &ValidatorManager::get_shard_state_from_db, handle, std::move(P)); } -void AsyncStateSerializer::got_shard_state(BlockHandle handle, td::Ref state) { +void AsyncStateSerializer::got_shard_state(BlockHandle handle, td::Ref state, + std::shared_ptr cell_db_reader) { LOG(INFO) << "serializing shard state " << handle->id().id; - auto write_data = [hash = state->root_cell()->get_hash(), cell_db_reader = cell_db_reader_] (td::FileFd& fd) { + auto write_data = [hash = state->root_cell()->get_hash(), cell_db_reader](td::FileFd& fd) { return vm::std_boc_serialize_to_file_large(cell_db_reader, hash, fd, 31); }; auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), handle](td::Result R) { diff --git a/validator/state-serializer.hpp b/validator/state-serializer.hpp index 1472c3b4..060c3f96 100644 --- a/validator/state-serializer.hpp +++ b/validator/state-serializer.hpp @@ -42,7 +42,6 @@ class AsyncStateSerializer : public td::actor::Actor { td::uint32 next_idx_ = 0; - std::shared_ptr 
cell_db_reader_ = nullptr; BlockHandle masterchain_handle_; bool stored_persistent_state_description_ = false; bool have_masterchain_state_ = false; @@ -72,12 +71,11 @@ class AsyncStateSerializer : public td::actor::Actor { void next_iteration(); void got_top_masterchain_handle(BlockIdExt block_id); void store_persistent_state_description(td::Ref state); - void got_cell_db_reader(std::shared_ptr cell_db_reader); void got_masterchain_handle(BlockHandle handle_); - void got_masterchain_state(td::Ref state); + void got_masterchain_state(td::Ref state, std::shared_ptr cell_db_reader); void stored_masterchain_state(); void got_shard_handle(BlockHandle handle); - void got_shard_state(BlockHandle handle, td::Ref state); + void got_shard_state(BlockHandle handle, td::Ref state, std::shared_ptr cell_db_reader); void get_masterchain_seqno(td::Promise promise) { promise.set_result(last_block_id_.id.seqno); diff --git a/validator/validator-group.cpp b/validator/validator-group.cpp index 21517a20..601fc526 100644 --- a/validator/validator-group.cpp +++ b/validator/validator-group.cpp @@ -36,13 +36,45 @@ void ValidatorGroup::generate_block_candidate(td::uint32 round_id, td::Promiseresult) { + promise.set_result(cached_collated_block_->result.value().clone()); + } else { + cached_collated_block_->promises.push_back(std::move(promise)); + } + return; + } + cached_collated_block_ = std::make_shared(); + cached_collated_block_->promises.push_back(std::move(promise)); + td::Promise P = [SelfId = actor_id(this), + cache = cached_collated_block_](td::Result R) { + td::actor::send_closure(SelfId, &ValidatorGroup::generated_block_candidate, std::move(cache), std::move(R)); + }; if (lite_mode_) { - send_collate_query(round_id, td::Timestamp::in(10.0), std::move(promise)); + send_collate_query(round_id, td::Timestamp::in(10.0), std::move(P)); return; } run_collate_query(shard_, min_masterchain_block_id_, prev_block_ids_, Ed25519_PublicKey{local_id_full_.ed25519_value().raw()}, validator_set_, manager_, - td::Timestamp::in(20.0), std::move(promise)); + td::Timestamp::in(10.0), std::move(P)); +} + +void ValidatorGroup::generated_block_candidate(std::shared_ptr cache, + td::Result R) { + if (R.is_error()) { + for (auto &p : cache->promises) { + p.set_error(R.error().clone()); + } + if (cache == cached_collated_block_) { + cached_collated_block_ = nullptr; + } + } else { + cache->result = R.move_as_ok(); + for (auto &p : cache->promises) { + p.set_value(cache->result.value().clone()); + } + } + cache->promises.clear(); } void ValidatorGroup::validate_block_candidate(td::uint32 round_id, BlockCandidate block, @@ -101,18 +133,12 @@ void ValidatorGroup::validate_block_candidate(td::uint32 round_id, BlockCandidat return; } VLOG(VALIDATOR_DEBUG) << "validating block candidate " << next_block_id; + block.id = next_block_id; run_validate_query(shard_, min_masterchain_block_id_, prev_block_ids_, std::move(block), validator_set_, manager_, - td::Timestamp::in(10.0), std::move(P), + td::Timestamp::in(15.0), std::move(P), collator_config_.full_collated_data ? 
ValidateMode::full_collated_data : 0); } -void ValidatorGroup::update_approve_cache(td::uint32 round_id, CacheKey key, UnixTime value) { - if (approved_candidates_cache_round_ != round_id) { - return; - } - approved_candidates_cache_[key] = value; -} - void ValidatorGroup::accept_block_candidate(td::uint32 round_id, PublicKeyHash src, td::BufferSlice block_data, RootHash root_hash, FileHash file_hash, std::vector signatures, @@ -139,6 +165,7 @@ void ValidatorGroup::accept_block_candidate(td::uint32 round_id, PublicKeyHash s accept_block_query(next_block_id, std::move(block), std::move(prev_block_ids_), std::move(sig_set), std::move(approve_sig_set), src == local_id_, std::move(promise)); prev_block_ids_ = std::vector{next_block_id}; + cached_collated_block_ = nullptr; } void ValidatorGroup::accept_block_query(BlockIdExt block_id, td::Ref block, std::vector prev, @@ -306,6 +333,7 @@ void ValidatorGroup::create_session() { void ValidatorGroup::start(std::vector prev, BlockIdExt min_masterchain_block_id) { prev_block_ids_ = prev; min_masterchain_block_id_ = min_masterchain_block_id; + cached_collated_block_ = nullptr; started_ = true; if (init_) { diff --git a/validator/validator-group.hpp b/validator/validator-group.hpp index fdeeae07..c7964ae5 100644 --- a/validator/validator-group.hpp +++ b/validator/validator-group.hpp @@ -128,6 +128,14 @@ class ValidatorGroup : public td::actor::Actor { bool lite_mode_ = false; td::uint32 last_known_round_id_ = 0; + struct CachedCollatedBlock { + td::optional result; + std::vector> promises; + }; + std::shared_ptr cached_collated_block_; + + void generated_block_candidate(std::shared_ptr cache, td::Result R); + typedef std::tuple CacheKey; std::map approved_candidates_cache_; td::uint32 approved_candidates_cache_round_ = 0; diff --git a/validator/validator.h b/validator/validator.h index 0a5a22b5..343de082 100644 --- a/validator/validator.h +++ b/validator/validator.h @@ -36,6 +36,7 @@ #include "interfaces/shard.h" #include "catchain/catchain-types.h" #include "interfaces/out-msg-queue-proof.h" +#include "interfaces/external-message.h" namespace ton { @@ -166,6 +167,8 @@ class ValidatorManagerInterface : public td::actor::Actor { virtual void get_top_masterchain_block(td::Promise promise) = 0; virtual void get_top_masterchain_state_block( td::Promise, BlockIdExt>> promise) = 0; + virtual void get_last_liteserver_state_block( + td::Promise, BlockIdExt>> promise) = 0; virtual void get_block_data(BlockHandle handle, td::Promise promise) = 0; virtual void check_zero_state_exists(BlockIdExt block_id, td::Promise promise) = 0; @@ -187,7 +190,7 @@ class ValidatorManagerInterface : public td::actor::Actor { virtual void write_handle(BlockHandle handle, td::Promise promise) = 0; virtual void new_external_message(td::BufferSlice data) = 0; - virtual void check_external_message(td::BufferSlice data, td::Promise promise) = 0; + virtual void check_external_message(td::BufferSlice data, td::Promise> promise) = 0; virtual void new_ihr_message(td::BufferSlice data) = 0; virtual void new_shard_block(BlockIdExt block_id, CatchainSeqno cc_seqno, td::BufferSlice data) = 0;
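
The hunks above bundle several independent mechanisms; the sketches that follow model the main ones in plain C++ with standard-library types only, so any name that is not visible in the diff (helper structs, field names, stub callbacks) is a hypothetical stand-in rather than the real TON API. First, compute_prev_state() now stretches the validation deadline when the previous block is old: the extra time is half the age of the previous state (the newer of the two states after a merge), capped at 60 seconds, and the deadline only ever moves forward. A minimal sketch of that rule:

#include <algorithm>
#include <chrono>

// Simplified model of the deadline extension in compute_prev_state():
// allow up to half of the time elapsed since the previous block, capped at 60 s.
std::chrono::steady_clock::time_point extend_deadline(
    std::chrono::steady_clock::time_point current_deadline,
    double seconds_since_prev_block) {
  using namespace std::chrono;
  double extra = std::min(60.0, seconds_since_prev_block / 2.0);
  auto proposed = steady_clock::now() +
                  duration_cast<steady_clock::duration>(duration<double>(extra));
  // Only ever move the deadline forward, mirroring `if (timeout < new_timeout)`.
  return std::max(current_deadline, proposed);
}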
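
ValidateQuery also gains a check_timeout() helper, and check_one_transaction() calls it before replaying each transaction, so a query that has outlived its (possibly extended) deadline aborts early instead of grinding through the rest of the block. A simplified model, with a plain flag standing in for abort_query(ErrorCode::timeout, "timeout"):

#include <chrono>
#include <vector>

struct Validator {
  std::chrono::steady_clock::time_point deadline;
  bool aborted = false;

  // Analogue of ValidateQuery::check_timeout(): give up once the deadline passes.
  bool check_timeout() {
    if (std::chrono::steady_clock::now() >= deadline) {
      aborted = true;  // the real code aborts the actor with ErrorCode::timeout
      return false;
    }
    return true;
  }

  bool check_one_transaction(int tx /* placeholder for the real transaction */) {
    if (!check_timeout()) {
      return false;  // bail out early instead of validating every remaining transaction
    }
    (void)tx;  // ... replay and compare the transaction here ...
    return true;
  }

  bool check_all(const std::vector<int>& txs) {
    for (int tx : txs) {
      if (!check_one_transaction(tx)) {
        return false;
      }
    }
    return true;
  }
};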
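
For ordinary transactions, validation now rejects the block up front when the hard block limits are already exceeded, reporting the gas used and the logical-time delta against their limits. The sketch below reproduces that gate with illustrative stand-ins for block::ParamLimits and the block limit status; the real fits(cl_medium) check also covers size classes not modelled here:

#include <cstdint>
#include <string>

// Illustrative stand-ins for block::ParamLimits / block::BlockLimitStatus.
struct HardLimits {
  std::uint64_t gas_hard;
  std::uint64_t lt_delta_hard;
};
struct LimitStatus {
  std::uint64_t gas_used;
  std::uint64_t cur_lt;
  std::uint64_t start_lt;
};

// Mirrors the `!block_limit_status_->fits(cl_medium)` rejection: if the block is
// already over its hard limits, no further ordinary transaction may be added.
bool fits_hard_limits(const LimitStatus& st, const HardLimits& lim, std::string& err) {
  std::uint64_t lt_delta = st.cur_lt - st.start_lt;
  if (st.gas_used > lim.gas_hard || lt_delta > lim.lt_delta_hard) {
    err = "cannot add ordinary transaction: gas_used=" + std::to_string(st.gas_used) +
          " (limit=" + std::to_string(lim.gas_hard) + "), lt_delta=" + std::to_string(lt_delta) +
          " (limit=" + std::to_string(lim.lt_delta_hard) + ")";
    return false;
  }
  return true;
}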
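
External-message ingestion now requires a masterchain state: new_external_message() drops messages while the validator is not ready, and otherwise passes the state's ExtMsgLimits into create_ext_message(); check_external_message() does the same against the cached liteserver state. The sketch below assumes the limits carry a maximum serialized size; the max_size field is an assumption for illustration, the real struct is block::SizeLimitsConfig::ExtMsgLimits:

#include <cstddef>
#include <memory>
#include <string>
#include <vector>

// Hypothetical shape of the limits carried by the masterchain state.
struct ExtMsgLimits {
  std::size_t max_size = 65536;  // assumed field name and default, for illustration only
};

struct MasterchainState {
  ExtMsgLimits ext_msg_limits;
};

struct Mempool {
  std::shared_ptr<const MasterchainState> last_state;  // null until the node is synced
  std::vector<std::string> messages;

  // Analogue of ValidatorManagerImpl::new_external_message().
  void new_external_message(std::string data) {
    if (!last_state) {
      return;  // "dropping ext message: validator is not ready"
    }
    if (data.size() > last_state->ext_msg_limits.max_size) {
      return;  // create_ext_message() would reject it against the configured limits
    }
    messages.push_back(std::move(data));
  }
};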
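
Liteserver queries are now answered from a slightly stale cached state: do_get_last_liteserver_state() refreshes the cache from the newest masterchain state whenever the cached one is missing or more than 30 seconds old, and update_shard_client_block_handle() bumps it when the shard client applies a newer block, which is why that method now receives the state. A standalone model of the refresh rule:

#include <chrono>
#include <memory>

struct State {
  std::chrono::system_clock::time_point unix_time;  // block timestamp
  unsigned seqno = 0;
};

struct Manager {
  std::shared_ptr<const State> last_masterchain_state;  // newest fully applied mc state
  std::shared_ptr<const State> last_liteserver_state;   // what liteserver queries see

  // Analogue of do_get_last_liteserver_state(): fall back to the freshest state
  // when the cached one is missing or older than 30 seconds.
  std::shared_ptr<const State> get_last_liteserver_state() {
    if (!last_masterchain_state) {
      return nullptr;
    }
    auto now = std::chrono::system_clock::now();
    if (!last_liteserver_state ||
        now - last_liteserver_state->unix_time > std::chrono::seconds(30)) {
      last_liteserver_state = last_masterchain_state;
    }
    return last_liteserver_state;
  }

  // Analogue of update_shard_client_block_handle(): advance the cache when the
  // shard client has applied a newer masterchain block.
  void on_shard_client_advanced(std::shared_ptr<const State> state) {
    if (!last_liteserver_state || last_liteserver_state->seqno < state->seqno) {
      last_liteserver_state = std::move(state);
    }
  }
};

The cache trades a bounded amount of staleness for not forcing every liteserver query onto the very latest state while the shard client may still be catching up.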
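
AsyncStateSerializer no longer holds a single cell_db_reader_ for the entire serialization run; each masterchain or shard state is now serialized with a reader fetched together with that state. A minimal sketch of the chained lookups, with stub functions standing in for the actor round-trips to the validator manager:

#include <functional>
#include <memory>

struct ShardState {};
struct CellDbReader {};  // stand-in for vm::CellDbReader

using StateCb  = std::function<void(std::shared_ptr<ShardState>)>;
using ReaderCb = std::function<void(std::shared_ptr<CellDbReader>)>;

// Stubs standing in for get_shard_state_from_db / get_cell_db_reader.
void get_state_from_db(StateCb cb) { cb(std::make_shared<ShardState>()); }
void get_cell_db_reader(ReaderCb cb) { cb(std::make_shared<CellDbReader>()); }

// Analogue of got_shard_handle()/got_shard_state(): request the reader per state
// instead of caching one reader across the whole serialization run.
void serialize_one_state(std::function<void(std::shared_ptr<ShardState>,
                                            std::shared_ptr<CellDbReader>)> write) {
  get_state_from_db([write](std::shared_ptr<ShardState> state) {
    get_cell_db_reader([write, state](std::shared_ptr<CellDbReader> reader) {
      write(state, reader);  // e.g. std_boc_serialize_to_file_large(reader, hash, fd, 31)
    });
  });
}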
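
Finally, ValidatorGroup caches the collated block candidate per round: the first generate_block_candidate() call starts collation, later calls either reuse the cached result or queue behind the in-flight request, and the cache is cleared on failure, on accept_block_candidate(), and when a new round starts. A simplified single-threaded model using std::function callbacks in place of td::Promise:

#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>

using Candidate = std::string;  // stand-in for BlockCandidate
using Callback  = std::function<void(std::optional<Candidate>)>;

struct CachedCollatedBlock {
  std::optional<Candidate> result;
  std::vector<Callback> waiters;
};

struct ValidatorGroup {
  std::shared_ptr<CachedCollatedBlock> cache;

  // First caller triggers collation; everyone else shares the result.
  void generate_block_candidate(Callback cb, std::function<void(Callback)> run_collate) {
    if (cache) {
      if (cache->result) {
        cb(cache->result);                        // already collated in this round
      } else {
        cache->waiters.push_back(std::move(cb));  // collation already in flight
      }
      return;
    }
    cache = std::make_shared<CachedCollatedBlock>();
    cache->waiters.push_back(std::move(cb));
    auto c = cache;
    run_collate([this, c](std::optional<Candidate> r) { generated(c, std::move(r)); });
  }

  void generated(std::shared_ptr<CachedCollatedBlock> c, std::optional<Candidate> r) {
    if (!r && c == cache) {
      cache = nullptr;  // failure: let the next call retry collation
    } else {
      c->result = r;    // success: reuse for later callers in this round
    }
    for (auto& w : c->waiters) {
      w(r);
    }
    c->waiters.clear();
  }

  void on_candidate_accepted_or_new_round() {
    cache = nullptr;  // mirrors `cached_collated_block_ = nullptr`
  }
};

The point of the cache is that consensus may ask the same node to produce a candidate for the same round more than once; collating once and fanning the result out keeps the collator from doing the work repeatedly under the now-tighter 10-second collation timeout.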