mirror of https://github.com/ton-blockchain/ton synced 2025-02-13 11:42:18 +00:00

Merge branch 'testnet' into block-generation

# Conflicts:
#	adnl/adnl-query.cpp
#	crypto/block/block.tlb
#	crypto/block/mc-config.h
#	lite-client/lite-client.cpp
#	overlay/overlay-manager.h
#	overlay/overlay-peers.cpp
#	overlay/overlay.cpp
#	overlay/overlay.h
#	overlay/overlay.hpp
#	overlay/overlays.h
#	rldp-http-proxy/DNSResolver.cpp
#	rldp-http-proxy/rldp-http-proxy.cpp
#	tl/generate/scheme/ton_api.tl
#	tl/generate/scheme/ton_api.tlo
#	tl/generate/scheme/tonlib_api.tlo
#	ton/ton-types.h
#	tonlib/tonlib/ExtClient.cpp
#	tonlib/tonlib/ExtClient.h
#	tonlib/tonlib/ExtClientLazy.cpp
#	tonlib/tonlib/ExtClientOutbound.h
#	tonlib/tonlib/ExtClientRaw.h
#	tonlib/tonlib/TonlibClient.cpp
#	tonlib/tonlib/TonlibClient.h
#	tonlib/tonlib/tonlib-cli.cpp
#	validator/impl/collator.cpp
#	validator/impl/validate-query.cpp
#	validator/impl/validate-query.hpp
#	validator/manager.cpp
#	validator/state-serializer.cpp
#	validator/state-serializer.hpp
#	validator/validator-group.cpp
#	validator/validator-group.hpp
#	validator/validator.h
SpyCheese 2022-12-22 23:37:35 +03:00
commit d652f7d706
200 changed files with 13492 additions and 2997 deletions

.github/script/amd64-18.04.Dockerfile vendored Normal file

@ -0,0 +1,18 @@
FROM ubuntu:18.04
RUN apt update
RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata
RUN apt install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git curl libreadline-dev ccache libmicrohttpd-dev ninja-build
WORKDIR /
RUN git clone --recurse-submodules https://github.com/ton-blockchain/ton.git
WORKDIR /ton
RUN mkdir /ton/build
WORKDIR /ton/build
ENV CC clang
ENV CXX clang++
ENV CCACHE_DISABLE 1
RUN cmake -GNinja -DCMAKE_BUILD_TYPE=Release ..
RUN ninja tonlibjson blockchain-explorer fift func validator-engine validator-engine-console create-state generate-random-id create-hardfork dht-server lite-client

.github/script/amd64-20.04.Dockerfile vendored Normal file

@ -0,0 +1,18 @@
FROM ubuntu:20.04
RUN apt update
RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata
RUN apt install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git curl libreadline-dev ccache libmicrohttpd-dev ninja-build
WORKDIR /
RUN git clone --recurse-submodules https://github.com/ton-blockchain/ton.git
WORKDIR /ton
RUN mkdir /ton/build
WORKDIR /ton/build
ENV CC clang
ENV CXX clang++
ENV CCACHE_DISABLE 1
RUN cmake -GNinja -DCMAKE_BUILD_TYPE=Release ..
RUN ninja tonlibjson blockchain-explorer fift func validator-engine validator-engine-console create-state generate-random-id create-hardfork dht-server lite-client

.github/script/amd64-22.04.Dockerfile vendored Normal file

@ -0,0 +1,18 @@
FROM ubuntu:22.04
RUN apt update
RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata
RUN apt install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git curl libreadline-dev ccache libmicrohttpd-dev ninja-build
WORKDIR /
RUN git clone --recurse-submodules https://github.com/ton-blockchain/ton.git
WORKDIR /ton
RUN mkdir /ton/build
WORKDIR /ton/build
ENV CC clang
ENV CXX clang++
ENV CCACHE_DISABLE 1
RUN cmake -GNinja -DCMAKE_BUILD_TYPE=Release ..
RUN ninja tonlibjson blockchain-explorer fift func validator-engine validator-engine-console create-state generate-random-id create-hardfork dht-server lite-client

.github/script/arm64-18.04.Dockerfile vendored Normal file

@ -0,0 +1,18 @@
FROM ubuntu:18.04
RUN apt update
RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata
RUN apt install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git curl libreadline-dev ccache libmicrohttpd-dev ninja-build
WORKDIR /
RUN git clone --recurse-submodules https://github.com/ton-blockchain/ton.git
WORKDIR /ton
RUN mkdir /ton/build
WORKDIR /ton/build
ENV CC clang
ENV CXX clang++
ENV CCACHE_DISABLE 1
RUN cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DTON_ARCH= ..
RUN ninja tonlibjson blockchain-explorer fift func validator-engine validator-engine-console create-state generate-random-id dht-server lite-client

.github/script/arm64-20.04.Dockerfile vendored Normal file

@ -0,0 +1,18 @@
FROM ubuntu:20.04
RUN apt update
RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata
RUN apt install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git curl libreadline-dev ccache libmicrohttpd-dev ninja-build
WORKDIR /
RUN git clone --recurse-submodules https://github.com/ton-blockchain/ton.git
WORKDIR /ton
RUN mkdir /ton/build
WORKDIR /ton/build
ENV CC clang
ENV CXX clang++
ENV CCACHE_DISABLE 1
RUN cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DTON_ARCH= ..
RUN ninja tonlibjson blockchain-explorer fift func validator-engine validator-engine-console create-state generate-random-id dht-server lite-client

.github/script/arm64-22.04.Dockerfile vendored Normal file

@ -0,0 +1,18 @@
FROM ubuntu:22.04
RUN apt update
RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata
RUN apt install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git curl libreadline-dev ccache libmicrohttpd-dev ninja-build
WORKDIR /
RUN git clone --recurse-submodules https://github.com/ton-blockchain/ton.git
WORKDIR /ton
RUN mkdir /ton/build
WORKDIR /ton/build
ENV CC clang
ENV CXX clang++
ENV CCACHE_DISABLE 1
RUN cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DTON_ARCH= ..
RUN ninja tonlibjson blockchain-explorer fift func validator-engine validator-engine-console create-state generate-random-id dht-server lite-client

.github/workflows/create-release.yml vendored Normal file

@ -0,0 +1,167 @@
name: Create release
on: [workflow_dispatch]
permissions: write-all
jobs:
create-release:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- name: Show all artifacts
run: |
mkdir artifacts
ls -lart artifacts
- name: Download Ubuntu x86-64 artifacts
uses: dawidd6/action-download-artifact@v2
with:
workflow: ubuntu-compile.yml
path: artifacts
workflow_conclusion: success
skip_unpack: true
- name: Download Ubuntu arm64 artifacts
uses: dawidd6/action-download-artifact@v2
with:
workflow: docker-compile-ubuntu.yml
path: artifacts
workflow_conclusion: success
skip_unpack: true
- name: Download MacOS 11.7 artifacts
uses: dawidd6/action-download-artifact@v2
with:
workflow: macos-11.7-compile.yml
path: artifacts
workflow_conclusion: success
skip_unpack: true
- name: Download MacOS 12.6 artifacts
uses: dawidd6/action-download-artifact@v2
with:
workflow: macos-12.6-compile.yml
path: artifacts
workflow_conclusion: success
skip_unpack: true
- name: Download Windows artifacts
uses: dawidd6/action-download-artifact@v2
with:
workflow: win-2019-compile.yml
path: artifacts
workflow_conclusion: success
skip_unpack: true
- name: Show all artifacts
run: |
tree artifacts
# create release
- name: Read Changelog.md and use it as a body of new release
id: read_release
shell: bash
run: |
r=$(cat Changelog.md)
r="${r//'%'/'%25'}"
r="${r//$'\n'/'%0A'}"
r="${r//$'\r'/'%0D'}"
echo "::set-output name=CHANGELOG_BODY::$r"
- name: Get current date
id: date
run: echo "::set-output name=date::$(date +'%Y.%m')"
- name: Get registration token
id: getRegToken
run: |
curl -X POST -H \"Accept: application/vnd.github+json\" -H 'Authorization: token ${{ secrets.GITHUB_TOKEN }}' https://api.github.com/repos/neodix42/HardTestDevelopment/actions/runners/registration-token
- name: Create release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: v${{ steps.date.outputs.date }}
release_name: v${{ steps.date.outputs.date }}
body: |
${{ steps.read_release.outputs.CHANGELOG_BODY }}
draft: false
prerelease: false
- name: Upload Windows 2019 artifacts
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: artifacts/ton-win-binaries.zip
asset_name: ton-windows-2019-x86-64.zip
tag: v${{ steps.date.outputs.date }}
- name: Upload MacOS 11.7 x86-64 artifacts
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: artifacts/ton-macos-11.7.zip
asset_name: ton-macos-11.7-x86-64.zip
tag: v${{ steps.date.outputs.date }}
- name: Upload MacOS 12.6 x86-64 artifacts
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: artifacts/ton-macos-12.6.zip
asset_name: ton-macos-12.6-x86-64.zip
tag: v${{ steps.date.outputs.date }}
- name: Upload Ubuntu 18.04 x86-64 artifacts
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: artifacts/ton-binaries-ubuntu-18.04.zip
asset_name: ton-ubuntu-18.04-x86-64.zip
tag: v${{ steps.date.outputs.date }}
- name: Upload Ubuntu 20.04 x86-64 artifacts
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: artifacts/ton-binaries-ubuntu-20.04.zip
asset_name: ton-ubuntu-20.04-x86-64.zip
tag: v${{ steps.date.outputs.date }}
- name: Upload Ubuntu 22.04 x86-64 artifacts
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: artifacts/ton-binaries-ubuntu-22.04.zip
asset_name: ton-ubuntu-22.04-x86-64.zip
tag: v${{ steps.date.outputs.date }}
- name: Upload Ubuntu 18.04 arm64 artifacts
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: artifacts/ton-ubuntu-18.04-arm64.zip
asset_name: ton-ubuntu-18.04-arm64.zip
tag: v${{ steps.date.outputs.date }}
- name: Upload Ubuntu 20.04 arm64 artifacts
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: artifacts/ton-ubuntu-20.04-arm64.zip
asset_name: ton-ubuntu-20.04-arm64.zip
tag: v${{ steps.date.outputs.date }}
- name: Upload Ubuntu 22.04 arm64 artifacts
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: artifacts/ton-ubuntu-22.04-arm64.zip
asset_name: ton-ubuntu-22.04-arm64.zip
tag: v${{ steps.date.outputs.date }}


@ -0,0 +1,47 @@
name: Docker Ubuntu Compile arm64
on: [push,workflow_dispatch,workflow_call]
jobs:
build:
strategy:
fail-fast: false
max-parallel: 3
matrix:
arch: [arm64]
ver: [22.04, 18.04, 20.04 ]
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Build with docker buildx
run: |
mkdir build-${{matrix.ver}}-${{matrix.arch}}
docker buildx build --platform=linux/${{matrix.arch}} --progress=plain --load . -t build-${{matrix.ver}}-${{matrix.arch}} -f .github/script/${{matrix.arch}}-${{matrix.ver}}.Dockerfile
container_id=$(docker create --platform=linux/${{matrix.arch}} build-${{matrix.ver}}-${{matrix.arch}})
docker cp $container_id:/ton/build/dht-server/dht-server build-${{matrix.ver}}-${{matrix.arch}}/
docker cp -a $container_id:/ton/build/validator-engine/validator-engine build-${{matrix.ver}}-${{matrix.arch}}/
docker cp -a $container_id:/ton/build/validator-engine-console/validator-engine-console build-${{matrix.ver}}-${{matrix.arch}}/
docker cp -a $container_id:/ton/build/crypto/fift build-${{matrix.ver}}-${{matrix.arch}}/
docker cp -a $container_id:/ton/build/crypto/func build-${{matrix.ver}}-${{matrix.arch}}/
docker cp -a $container_id:/ton/build/crypto/create-state build-${{matrix.ver}}-${{matrix.arch}}/
docker cp -a $container_id:/ton/build/blockchain-explorer/blockchain-explorer build-${{matrix.ver}}-${{matrix.arch}}/
docker cp -a $container_id:/ton/build/lite-client/lite-client build-${{matrix.ver}}-${{matrix.arch}}/
docker cp -a $container_id:/ton/build/utils/generate-random-id build-${{matrix.ver}}-${{matrix.arch}}/
docker cp -a $container_id:/ton/build/tonlib/libtonlibjson.so.0.5 build-${{matrix.ver}}-${{matrix.arch}}/tonlibjson.so
docker cp -a $container_id:/ton/crypto/smartcont build-${{matrix.ver}}-${{matrix.arch}}/
docker cp -a $container_id:/ton/crypto/fift/lib build-${{matrix.ver}}-${{matrix.arch}}/
- name: Upload artifacts
uses: actions/upload-artifact@v1
with:
name: ton-ubuntu-${{matrix.ver}}-${{matrix.arch}}
path: build-${{matrix.ver}}-${{matrix.arch}}


@ -1,4 +1,4 @@
name: Docker Ubuntu 18.04 image
name: Docker Ubuntu 20.04 image
on:
workflow_dispatch:
@ -10,10 +10,10 @@ env:
jobs:
build-and-push:
runs-on: ubuntu-18.04
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v1


@ -1,41 +0,0 @@
name: C/C++ CI macOS-10.15 Compile
on:
workflow_dispatch:
push:
jobs:
build:
runs-on: macos-10.15
steps:
- name: Check out repository
uses: actions/checkout@v2
with:
submodules: 'recursive'
- name: Compile OpenSSL
run: |
git clone https://github.com/openssl/openssl openssl_1_1_1
cd openssl_1_1_1
git checkout OpenSSL_1_1_1-stable
./Configure --prefix=/usr/local/macos darwin64-x86_64-cc -static -mmacosx-version-min=10.15
make build_libs -j4
- name: Build all
run: |
rootPath=`pwd`
mkdir build
cd build
cmake -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=10.15 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release ..
make -j4
- name: Find & copy binaries
run: |
rsync -r --exclude 'CMakeFiles' --exclude 'Makefile' --exclude '*.a' --exclude '*.cmake' --exclude 'third-party' --exclude 'test-*' --exclude '*.cc' --exclude '*.json' --exclude '*.txt' build/* artifacts/
- name: Upload artifacts
uses: actions/upload-artifact@master
with:
name: ton-macos-binaries
path: artifacts


@ -1,60 +0,0 @@
name: macOS-10.15 tonlib-java
on:
workflow_dispatch:
push:
branches:
- 'wallets'
jobs:
build:
runs-on: macos-10.15
steps:
- name: Check out repository
uses: actions/checkout@v2
with:
submodules: 'recursive'
- name: Compile OpenSSL
run: |
git clone https://github.com/openssl/openssl openssl_1_1_1
cd openssl_1_1_1
git checkout OpenSSL_1_1_1-stable
./Configure --prefix=/usr/local/macos darwin64-x86_64-cc -static -mmacosx-version-min=10.15
make build_libs -j4
- name: Configure & Build
run: |
rootPath=`pwd`
export CC=$(which clang)
export CXX=$(which clang++)
export CCACHE_DISABLE=1
export JAVA_AWT_LIBRARY=NotNeeded
export JAVA_JVM_LIBRARY=NotNeeded
export JAVA_INCLUDE_PATH=${JAVA_HOME}/include
export JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include
export JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/darwin
cd example/android/
mkdir build
cd build
cmake -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=10.15 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DTON_ONLY_TONLIB=ON ..
cmake --build . --target prepare_cross_compiling
cmake --build . --target native-lib --config Release
- name: find & copy binaries
run: |
mkdir -p artifacts/tonlib-java
cp example/android/src/drinkless/org/ton/TonApi.java artifacts/tonlib-java/
cp example/android/build/libnative-lib.dylib artifacts/tonlib-java/
- name: Upload artifacts
uses: actions/upload-artifact@master
with:
name: tonlib-macos-java
path: artifacts


@ -0,0 +1,59 @@
name: MacOS 11.7 Big Sur x86-64 Compile
on: [push,workflow_dispatch,workflow_call]
jobs:
build:
runs-on: macos-11
steps:
- name: Check out repository
uses: actions/checkout@v3
with:
submodules: 'recursive'
- name: Compile OpenSSL
run: |
git clone https://github.com/openssl/openssl openssl_1_1_1
cd openssl_1_1_1
git checkout OpenSSL_1_1_1-stable
./Configure --prefix=/usr/local/macos darwin64-x86_64-cc -static -mmacosx-version-min=11.7
make build_libs -j4
- name: Build all
run: |
export NONINTERACTIVE=1
brew install ninja
rootPath=`pwd`
mkdir build
cd build
cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=11.7 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release ..
ninja fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc
- name: Find & copy binaries
run: |
mkdir artifacts
cp build/crypto/fift artifacts/
cp build/crypto/func artifacts/
cp build/crypto/create-state artifacts/
cp build/crypto/tlbc artifacts/
cp build/validator-engine-console/validator-engine-console artifacts/
cp build/tonlib/tonlib-cli artifacts/
cp build/tonlib/libtonlibjson.0.5.dylib artifacts/
cp build/http/http-proxy artifacts/
cp build/rldp-http-proxy/rldp-http-proxy artifacts/
cp build/dht-server/dht-server artifacts/
cp build/lite-client/lite-client artifacts/
cp build/validator-engine/validator-engine artifacts/
cp build/utils/generate-random-id artifacts/
cp build/utils/json2tlo artifacts/
cp build/adnl/adnl-proxy artifacts/
rsync -r crypto/smartcont artifacts/
rsync -r crypto/fift/lib artifacts/
ls -laRt artifacts
- name: Upload artifacts
uses: actions/upload-artifact@master
with:
name: ton-macos-11.7
path: artifacts


@ -0,0 +1,59 @@
name: MacOS 12.6 Monterey x86-64 Compile
on: [push,workflow_dispatch,workflow_call]
jobs:
build:
runs-on: macos-12
steps:
- name: Check out repository
uses: actions/checkout@v3
with:
submodules: 'recursive'
- name: Compile OpenSSL
run: |
git clone https://github.com/openssl/openssl openssl_1_1_1
cd openssl_1_1_1
git checkout OpenSSL_1_1_1-stable
./Configure --prefix=/usr/local/macos darwin64-x86_64-cc -static -mmacosx-version-min=12.6
make build_libs -j4
- name: Build all
run: |
export NONINTERACTIVE=1
brew install ninja
rootPath=`pwd`
mkdir build
cd build
cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=12.6 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release ..
ninja fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc
- name: Find & copy binaries
run: |
mkdir artifacts
cp build/crypto/fift artifacts/
cp build/crypto/func artifacts/
cp build/crypto/create-state artifacts/
cp build/crypto/tlbc artifacts/
cp build/validator-engine-console/validator-engine-console artifacts/
cp build/tonlib/tonlib-cli artifacts/
cp build/tonlib/libtonlibjson.0.5.dylib artifacts/
cp build/http/http-proxy artifacts/
cp build/rldp-http-proxy/rldp-http-proxy artifacts/
cp build/dht-server/dht-server artifacts/
cp build/lite-client/lite-client artifacts/
cp build/validator-engine/validator-engine artifacts/
cp build/utils/generate-random-id artifacts/
cp build/utils/json2tlo artifacts/
cp build/adnl/adnl-proxy artifacts/
rsync -r crypto/smartcont artifacts/
rsync -r crypto/fift/lib artifacts/
ls -laRt artifacts
- name: Upload artifacts
uses: actions/upload-artifact@master
with:
name: ton-macos-12.6
path: artifacts


@ -0,0 +1,56 @@
name: Tonlib Android JNI
on: [push,workflow_dispatch,workflow_call]
jobs:
build:
runs-on: ubuntu-22.04
steps:
- name: Check out repository
uses: actions/checkout@v3
with:
submodules: 'recursive'
- name: Install libraries
run: |
sudo apt update
sudo apt install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev ninja-build
- name: Configure & Build
run: |
wget https://dl.google.com/android/repository/android-ndk-r25b-linux.zip
unzip android-ndk-r25b-linux.zip
export JAVA_AWT_LIBRARY=NotNeeded
export JAVA_JVM_LIBRARY=NotNeeded
export JAVA_INCLUDE_PATH=${JAVA_HOME}/include
export JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include
export JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/linux
export ANDROID_NDK_ROOT=$(pwd)/android-ndk-r25b
export OPENSSL_DIR=$(pwd)/example/android/third_party/crypto
rm -rf example/android/src/drinkless/org/ton/TonApi.java
cd example/android/
cmake -GNinja -DTON_ONLY_TONLIB=ON .
ninja prepare_cross_compiling
rm CMakeCache.txt
./build-all.sh
../../android-ndk-r25b/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip libs/x86/libnative-lib.so
../../android-ndk-r25b/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip libs/x86_64/libnative-lib.so
../../android-ndk-r25b/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip libs/armeabi-v7a/libnative-lib.so
../../android-ndk-r25b/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip libs/arm64-v8a/libnative-lib.so
find . -name "*.debug" -type f -delete
- name: Find & copy binaries
run: |
mkdir -p artifacts/tonlib-android-jni
cp example/android/src/drinkless/org/ton/TonApi.java artifacts/tonlib-android-jni/
cp -R example/android/libs/* artifacts/tonlib-android-jni/
- name: Upload artifacts
uses: actions/upload-artifact@master
with:
name: Tonlib JNI libraries for Android
path: artifacts


@ -1,6 +1,6 @@
name: C/C++ CI Ubuntu 18.04 Compile
name: Ubuntu 18.04 Compile
on: [push,workflow_dispatch]
on: [push,workflow_dispatch,workflow_call]
jobs:
build:
@ -9,7 +9,7 @@ jobs:
steps:
- name: Check out repository
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
submodules: 'recursive'
@ -32,9 +32,11 @@ jobs:
run: |
mkdir artifacts
cp build/crypto/fift build/crypto/tlbc build/crypto/func build/crypto/create-state build/validator-engine-console/validator-engine-console build/tonlib/tonlib-cli build/tonlib/libtonlibjson.so.0.5 build/http/http-proxy build/rldp-http-proxy/rldp-http-proxy build/dht-server/dht-server build/lite-client/lite-client build/validator-engine/validator-engine build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy artifacts
cp -R crypto/smartcont artifacts/
cp -R crypto/fift/lib artifacts/
- name: Upload artifacts
uses: actions/upload-artifact@master
with:
name: ton-binaries
name: ton-ubuntu-binaries
path: artifacts


@ -1,8 +1,6 @@
name: Ubuntu 18.04 TON ccpcheck
name: TON ccpcheck
on:
workflow_dispatch:
push:
on: [push,workflow_dispatch,workflow_call]
jobs:
build:
@ -11,7 +9,7 @@ jobs:
steps:
- name: Check out repository
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
submodules: 'recursive'


@ -1,50 +0,0 @@
name: Ubuntu 18.04 tonlib-java
on:
workflow_dispatch:
push:
branches:
- 'wallets'
jobs:
build:
runs-on: ubuntu-18.04
steps:
- name: Check out repository
uses: actions/checkout@v2
with:
submodules: 'recursive'
- name: Install libraries
run: |
sudo apt update
sudo apt install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev ninja-build
- name: Configure & Build
run: |
export JAVA_AWT_LIBRARY=NotNeeded
export JAVA_JVM_LIBRARY=NotNeeded
export JAVA_INCLUDE_PATH=${JAVA_HOME}/include
export JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include
export JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/linux
cd example/android/
mkdir build
cd build
cmake -DTON_ONLY_TONLIB=ON ..
cmake --build . --target prepare_cross_compiling
cmake --build . --target native-lib
- name: find & copy binaries
run: |
mkdir -p artifacts/tonlib-java
cp example/android/src/drinkless/org/ton/TonApi.java artifacts/tonlib-java/
cp example/android/build/libnative-lib.so artifacts/tonlib-java/
- name: Upload artifacts
uses: actions/upload-artifact@master
with:
name: tonlib-ubuntu-java
path: artifacts


@ -1,6 +1,6 @@
name: Ubuntu Compile
name: Ubuntu Compile x86-64
on: [push,workflow_dispatch]
on: [push,workflow_dispatch,workflow_call]
jobs:
build:
@ -12,7 +12,7 @@ jobs:
steps:
- name: Check out repository
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
submodules: 'recursive'
@ -29,10 +29,12 @@ jobs:
cd build-${{ matrix.os }}
cmake -GNinja -DCMAKE_BUILD_TYPE=Release ..
ninja fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork
- name: find & copy binaries
- name: Find & copy binaries
run: |
mkdir artifacts-${{ matrix.os }}
cp build-${{ matrix.os }}/crypto/fift build-${{ matrix.os }}/crypto/tlbc build-${{ matrix.os }}/crypto/func build-${{ matrix.os }}/crypto/create-state build-${{ matrix.os }}/validator-engine-console/validator-engine-console build-${{ matrix.os }}/tonlib/tonlib-cli build-${{ matrix.os }}/tonlib/libtonlibjson.so.0.5 build-${{ matrix.os }}/http/http-proxy build-${{ matrix.os }}/rldp-http-proxy/rldp-http-proxy build-${{ matrix.os }}/dht-server/dht-server build-${{ matrix.os }}/lite-client/lite-client build-${{ matrix.os }}/validator-engine/validator-engine build-${{ matrix.os }}/utils/generate-random-id build-${{ matrix.os }}/utils/json2tlo build-${{ matrix.os }}/adnl/adnl-proxy artifacts-${{ matrix.os }}
cp -R crypto/smartcont artifacts-${{ matrix.os }}
cp -R crypto/fift/lib artifacts-${{ matrix.os }}
- name: Upload artifacts
uses: actions/upload-artifact@master
with:

.github/workflows/win-2019-compile.yml vendored Normal file

@ -0,0 +1,88 @@
name: Windows Server 2019 x64 Compile
on: [push,workflow_dispatch,workflow_call]
defaults:
run:
shell: cmd
jobs:
build:
runs-on: windows-2019
steps:
- name: Get Current OS version
run: |
systeminfo | findstr /B /C:"OS Name" /C:"OS Version"
- name: Check out current repository
uses: actions/checkout@v3
with:
submodules: 'recursive'
- name: Check out zlib repository
uses: actions/checkout@v3
with:
repository: desktop-app/zlib
path: zlib
- name: Setup msbuild.exe
uses: microsoft/setup-msbuild@v1.0.2
- name: Compile zlib Win64
run: |
cd zlib\contrib\vstudio\vc14
msbuild zlibstat.vcxproj /p:Configuration=ReleaseWithoutAsm /p:platform=x64 -p:PlatformToolset=v142
- name: Install pre-compiled OpenSSL Win64
run: |
curl -Lo openssl-1.1.1o.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-1.1.1o.zip
jar xf openssl-1.1.1o.zip
- name: Install pre-compiled libmicrohttpd Win64
run: |
curl -Lo libmicrohttpd-latest-w32-bin.zip https://ftpmirror.gnu.org/libmicrohttpd/libmicrohttpd-latest-w32-bin.zip
unzip libmicrohttpd-latest-w32-bin.zip
- name: Install pre-compiled Readline Win64
run: |
curl -Lo readline-5.0-1-lib.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/readline-5.0-1-lib.zip
unzip readline-5.0-1-lib.zip
- name: Compile
run: |
set root=%cd%
echo %root%
mkdir build
cd build
cmake -DREADLINE_INCLUDE_DIR=%root%\readline-5.0-1-lib\include\readline -DREADLINE_LIBRARY=%root%\readline-5.0-1-lib\lib\readline.lib -DZLIB_FOUND=1 -DMHD_FOUND=1 -DMHD_LIBRARY=%root%\libmicrohttpd-0.9.75-w32-bin\x86_64\VS2019\Release-static\libmicrohttpd.lib -DMHD_INCLUDE_DIR=%root%\libmicrohttpd-0.9.75-w32-bin\x86_64\VS2019\Release-static -DZLIB_INCLUDE_DIR=%root%\zlib -DZLIB_LIBRARY=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=%root%/openssl-1.1/x64/include -DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-1.1/x64/lib/libcrypto.lib -DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj /W0" ..
cmake --build . --target fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork --config Release
- name: Show executables
run: |
cd build
del Release\test-*
dir *.exe /a-D /S /B
dir *.dll /a-D /S /B
- name: Check if validator-engine.exe exists
run: |
set root=%cd%
copy %root%\build\validator-engine\Release\validator-engine.exe test
- name: Find & copy binaries
run: |
mkdir artifacts
mkdir artifacts\smartcont
mkdir artifacts\lib
for %%I in (build\crypto\Release\fift.exe build\crypto\Release\tlbc.exe build\crypto\Release\func.exe build\crypto\Release\create-state.exe build\validator-engine-console\Release\validator-engine-console.exe build\tonlib\Release\tonlib-cli.exe build\tonlib\Release\tonlibjson.dll build\http\Release\http-proxy.exe build\rldp-http-proxy\Release\rldp-http-proxy.exe build\dht-server\Release\dht-server.exe build\lite-client\Release\lite-client.exe build\validator-engine\Release\validator-engine.exe build\utils\Release\generate-random-id.exe build\utils\Release\json2tlo.exe build\adnl\Release\adnl-proxy.exe) do copy %%I artifacts\
xcopy /e /k /h /i crypto\smartcont artifacts\smartcont
xcopy /e /k /h /i crypto\fift\lib artifacts\lib
- name: Upload artifacts
uses: actions/upload-artifact@master
with:
name: ton-win-binaries
path: artifacts


@ -1,80 +0,0 @@
name: Windows Server 2019 x64 Compile
on:
workflow_dispatch:
push:
defaults:
run:
shell: cmd
jobs:
build:
runs-on: windows-2019
steps:
- name: Get Current OS version
run: |
systeminfo | findstr /B /C:"OS Name" /C:"OS Version"
- name: Check out current repository
uses: actions/checkout@v2
with:
submodules: 'recursive'
- name: Check out zlib repository
uses: actions/checkout@v2
with:
repository: desktop-app/zlib
path: zlib
- name: Setup msbuild.exe
uses: microsoft/setup-msbuild@v1.0.2
- name: Compile zlib Win64
run: |
cd zlib\contrib\vstudio\vc14
msbuild zlibstat.vcxproj /p:Configuration=ReleaseWithoutAsm /p:platform=x64 -p:PlatformToolset=v142
- name: Install pre-compiled OpenSSL Win64
run: |
curl -Lo openssl-1.1.1o.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-1.1.1o.zip
jar xf openssl-1.1.1o.zip
- name: Configure
run: |
set root=%cd%
echo %root%
mkdir build
cd build
cmake -DZLIB_FOUND=1 -DZLIB_INCLUDE_DIR=%root%\zlib -DZLIB_LIBRARY=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=%root%/openssl-1.1/x64/include -DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-1.1/x64/lib/libcrypto.lib -DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj /W0" ..
- name: Build
run: |
cd build
cmake --build . --target fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork --config Release
- name: Show executables
run: |
cd build
del Release\test-*
dir *.exe /a-D /S /B
dir *.dll /a-D /S /B
- name: Check if validator-engine.exe exists
run: |
set root=%cd%
copy %root%\build\validator-engine\Release\validator-engine.exe test
- name: Find & copy binaries
run: |
mkdir artifacts
for /f %%a in ('dir *.exe /b /a /s') do copy /Y %%a artifacts
copy build\tonlib\Release\tonlibjson.dll artifacts
- name: Upload artifacts
uses: actions/upload-artifact@master
with:
name: ton-win64-binaries
path: artifacts


@ -1,71 +0,0 @@
name: Windows 2019 tonlib-java
on:
workflow_dispatch:
push:
branches:
- 'wallets'
defaults:
run:
shell: cmd
jobs:
build:
runs-on: windows-2019
steps:
- name: Get Current OS version
run: |
systeminfo | findstr /B /C:"OS Name" /C:"OS Version"
- name: Check out current repository
uses: actions/checkout@v2
with:
submodules: 'recursive'
- name: Check out zlib repository
uses: actions/checkout@v2
with:
repository: desktop-app/zlib
path: zlib
- name: Setup msbuild.exe
uses: microsoft/setup-msbuild@v1.0.2
- name: Compile zlib Win64
run: |
cd zlib\contrib\vstudio\vc14
msbuild zlibstat.vcxproj /p:Configuration=ReleaseWithoutAsm /p:platform=x64 -p:PlatformToolset=v142
- name: Install precompiled OpenSSL Win64
run: |
curl -Lo openssl-1.1.1o.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-1.1.1o.zip
jar xf openssl-1.1.1o.zip
- name: Configure & Build
run: |
set JAVA_AWT_LIBRARY=NotNeeded
set JAVA_JVM_LIBRARY=NotNeeded
set JAVA_INCLUDE_PATH=${JAVA_HOME}/include
set JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include
set JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/win32
set root=%cd%
echo %root%
cd example/android
mkdir build
cd build
cmake -DZLIB_FOUND=1 -DZLIB_INCLUDE_DIR=%root%\zlib -DZLIB_LIBRARY=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=%root%/openssl-1.1/x64/include -DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-1.1/x64/lib/libcrypto.lib -DTON_ONLY_TONLIB=ON -DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj /W0" ..
cmake --build . --target native-lib --config Release
- name: Find & copy binaries
run: |
mkdir tonlib-java
cp example/android/build/Release/native-lib.dll tonlib-java/
- name: Upload artifacts
uses: actions/upload-artifact@master
with:
name: tonlib-win64-java
path: tonlib-java


@ -290,8 +290,9 @@ if (MSVC)
endif()
if (NOT MSVC)
add_cxx_compiler_flag("-Wall")
add_cxx_compiler_flag("-Wextra")
endif()
add_cxx_compiler_flag("-Wextra")
add_cxx_compiler_flag("-Wimplicit-fallthrough=2")
add_cxx_compiler_flag("-Wpointer-arith")
add_cxx_compiler_flag("-Wcast-qual")


@ -26,3 +26,15 @@ Besides the work of the core team, this update is based on the efforts of @tvoro
The update coincided with a persistent state serialization event, which led to a deterioration of block production speed (an issue substantially mitigated in the update itself). This phenomenon was aggravated by the fact that after the update some validators lost the ability to participate in block creation. The latter was caused by a threshold-based hardcoded protocol version bump, where the threshold was set in such a manner (based on a block height above 9M) that it eluded detection in private-net tests. The update was temporarily paused and resumed after persistent state serialization ended and the issues with block creation were resolved.
Besides the work of the core team, this update is based on the efforts of @awesome-doge (help with the abseil-cpp upgrade), @rec00rsiff (reported issues with exotic endianness and implemented network stats) and third-party security auditors.
## 10.2022 Update
* Added extended block creation and general performance stats gathering
* Forbade reporting data on blocks not committed to the masterchain via liteserver (LS) queries
* Improved debugging in TVM
* FunC 0.3.0: multi-line asms, bitwise operations on constants; duplicate identical definitions of constants and asms are now allowed
* New tonlib methods: sendMessageReturnHash, getTransactionsV2, getMasterchainBlockSignatures, getShardBlockProof, getLibraries (see the sketch after this list).
* Fixed bugs related to invalid TVM output (c4, c5, libraries) and non-validated network data; avoided excessively deep recursion when loading libraries
* Fixed multiple undefined behavior issues
* Added build of FunC and Fift to WASM
Besides the work of the core team, this update is based on the efforts of @tvorogme (debug improvements), @AlexeyFSL (WASM builds) and third-party security auditors.
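As a rough illustration of the new tonlib surface listed above, the following minimal C++ sketch calls one of the new methods through the exported JSON interface (the tonlib_client_json_* functions ship with tonlibjson); the exact @type string and the base64 "body" field are assumptions inferred from the method name in the changelog, not taken from this diff, and the required "init" request with a liteserver config is omitted:

#include "tonlib/tonlib_client_json.h"   // header shipped with tonlibjson
#include <cstdio>

int main() {
  void *client = tonlib_client_json_create();
  // Hypothetical request: the @type and "body" (base64 BOC of an external message)
  // are assumptions based on the sendMessageReturnHash entry above.
  const char *request =
      "{\"@type\":\"raw.sendMessageReturnHash\",\"body\":\"<base64-serialized-boc>\"}";
  tonlib_client_json_send(client, request);
  const char *response = tonlib_client_json_receive(client, 10.0);  // wait up to 10 s
  if (response != nullptr) {
    std::printf("%s\n", response);  // on success, expected to include the message hash
  }
  tonlib_client_json_destroy(client);
  return 0;
}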


@ -1,3 +1,12 @@
<div align="center">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://ton.org/download/ton_logo_dark_background.svg">
<img alt="TON logo" src="https://ton.org/download/ton_logo_light_background.svg">
</picture>
<hr/>
</div>
[![TON Overflow Group][ton-overflow-badge]][ton-overflow-url]
[![Stack Overflow Group][stack-overflow-badge]][stack-overflow-url]
[![Telegram Foundation Group][telegram-foundation-badge]][telegram-foundation-url]
[![Telegram Community Group][telegram-community-badge]][telegram-community-url]
@ -11,11 +20,11 @@
[twitter-url]: https://twitter.com/ton_blockchain
[stack-overflow-badge]: https://img.shields.io/badge/-Stack%20Overflow-FE7A16?style=flat&logo=stack-overflow&logoColor=white
[stack-overflow-url]: https://stackoverflow.com/questions/tagged/ton
[ton-overflow-badge]: https://img.shields.io/badge/-TON%20Overflow-FE7A16?style=flat&logo=stack-overflow&logoColor=white
[ton-overflow-url]: https://answers.ton.org
# TON
Main TON monorepo, which includes the code of the node/validator, lite-client, tonlib, FunC compiler, etc.
## Updates flow:


@ -185,7 +185,8 @@ td::Ref<AdnlAddressImpl> AdnlAddressImpl::create(const tl_object_ptr<ton_api::ad
*const_cast<ton_api::adnl_Address *>(addr.get()),
td::overloaded([&](const ton_api::adnl_address_udp &obj) { res = td::make_ref<AdnlAddressUdp>(obj); },
[&](const ton_api::adnl_address_udp6 &obj) { res = td::make_ref<AdnlAddressUdp6>(obj); },
[&](const ton_api::adnl_address_tunnel &obj) { res = td::make_ref<AdnlAddressTunnel>(obj); }));
[&](const ton_api::adnl_address_tunnel &obj) { res = td::make_ref<AdnlAddressTunnel>(obj); },
[&](const ton_api::adnl_address_reverse &obj) { res = td::make_ref<AdnlAddressReverse>(); }));
return res;
}
@ -202,7 +203,12 @@ AdnlAddressList::AdnlAddressList(const tl_object_ptr<ton_api::adnl_addressList>
version_ = static_cast<td::uint32>(addrs->version_);
std::vector<td::Ref<AdnlAddressImpl>> vec;
for (auto &addr : addrs->addrs_) {
vec.push_back(AdnlAddressImpl::create(addr));
auto obj = AdnlAddressImpl::create(addr);
if (obj->is_reverse()) {
has_reverse_ = true;
} else {
vec.push_back(std::move(obj));
}
}
addrs_ = std::move(vec);
reinit_date_ = addrs->reinit_date_;
@ -215,6 +221,9 @@ tl_object_ptr<ton_api::adnl_addressList> AdnlAddressList::tl() const {
for (auto &v : addrs_) {
addrs.emplace_back(v->tl());
}
if (has_reverse_) {
addrs.push_back(create_tl_object<ton_api::adnl_address_reverse>());
}
return create_tl_object<ton_api::adnl_addressList>(std::move(addrs), version_, reinit_date_, priority_, expire_at_);
}


@ -39,6 +39,9 @@ class AdnlAddressImpl : public td::CntObject {
virtual td::actor::ActorOwn<AdnlNetworkConnection> create_connection(
td::actor::ActorId<AdnlNetworkManager> network_manager, td::actor::ActorId<Adnl> adnl,
std::unique_ptr<AdnlNetworkConnection::Callback> callback) const = 0;
virtual bool is_reverse() const {
return false;
}
static td::Ref<AdnlAddressImpl> create(const tl_object_ptr<ton_api::adnl_Address> &addr);
};
@ -54,6 +57,7 @@ class AdnlAddressList {
td::int32 priority_;
td::int32 expire_at_;
std::vector<AdnlAddress> addrs_;
bool has_reverse_{false};
public:
static constexpr td::uint32 max_serialized_size() {
@ -102,6 +106,13 @@ class AdnlAddressList {
static td::Result<AdnlAddressList> create(const tl_object_ptr<ton_api::adnl_addressList> &addr_list);
td::Status add_udp_address(td::IPAddress addr);
void set_reverse(bool x = true) {
has_reverse_ = x;
}
bool has_reverse() const {
return has_reverse_;
}
};
} // namespace adnl
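Below is a rough usage sketch for the new reverse-address flag; only set_reverse()/has_reverse() come from the header above, and the function name and surrounding wiring are illustrative.

// Assumes the header above is included.
void advertise_reverse(ton::adnl::AdnlAddressList &addr_list) {
  addr_list.set_reverse();  // tl() appends an adnl.address.reverse entry when serializing the list
  if (addr_list.has_reverse()) {
    // AdnlLocalId::publish_address_list() (changed further below in this diff)
    // additionally registers the reverse connection in DHT for such an address list.
  }
}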


@ -116,6 +116,31 @@ class AdnlAddressTunnel : public AdnlAddressImpl {
std::unique_ptr<AdnlNetworkConnection::Callback> callback) const override;
};
class AdnlAddressReverse : public AdnlAddressImpl {
public:
AdnlAddressReverse *make_copy() const override {
return new AdnlAddressReverse();
}
bool is_public() const override {
return true;
}
td::uint32 serialized_size() const override {
return 4;
}
tl_object_ptr<ton_api::adnl_Address> tl() const override {
return create_tl_object<ton_api::adnl_address_reverse>();
}
td::actor::ActorOwn<AdnlNetworkConnection> create_connection(
td::actor::ActorId<AdnlNetworkManager> network_manager, td::actor::ActorId<Adnl> adnl,
std::unique_ptr<AdnlNetworkConnection::Callback> callback) const override {
LOG(ERROR) << "Cannot create connection for AdnlAddressReverse";
return {};
}
bool is_reverse() const override {
return true;
}
};
} // namespace adnl
} // namespace ton


@ -121,7 +121,7 @@ void AdnlLocalId::update_address_list(AdnlAddressList addr_list) {
}
void AdnlLocalId::publish_address_list() {
if (dht_node_.empty() || addr_list_.empty() || addr_list_.size() == 0) {
if (dht_node_.empty() || addr_list_.empty() || (addr_list_.size() == 0 && !addr_list_.has_reverse())) {
VLOG(ADNL_NOTICE) << this << ": skipping public addr list, because localid (or dht node) not fully initialized";
return;
}
@ -175,6 +175,17 @@ void AdnlLocalId::publish_address_list() {
td::actor::send_closure(keyring_, &keyring::Keyring::sign_message, short_id_.pubkey_hash(), std::move(B),
std::move(P));
if (addr_list_.has_reverse()) {
td::actor::send_closure(
dht_node_, &dht::Dht::register_reverse_connection, id_, [print_id = print_id()](td::Result<td::Unit> R) {
if (R.is_error()) {
VLOG(ADNL_NOTICE) << print_id << ": failed to register reverse connection in DHT: " << R.move_as_error();
} else {
VLOG(ADNL_INFO) << print_id << ": registered reverse connection";
}
});
}
}
AdnlLocalId::AdnlLocalId(AdnlNodeIdFull id, AdnlAddressList addr_list, td::uint32 mode,


@ -113,6 +113,7 @@ void AdnlPeerPairImpl::discover() {
}
void AdnlPeerPairImpl::receive_packet_checked(AdnlPacket packet) {
request_reverse_ping_after_ = td::Timestamp::in(15.0);
auto d = Adnl::adnl_start_time();
if (packet.dst_reinit_date() > d) {
VLOG(ADNL_WARNING) << this << ": dropping IN message: too new our reinit date " << packet.dst_reinit_date();
@ -669,10 +670,15 @@ td::Result<std::pair<td::actor::ActorId<AdnlNetworkConnection>, bool>> AdnlPeerP
}
if (conns_.size() == 0 && priority_conns_.size() == 0) {
return td::Status::Error(ErrorCode::notready, PSTRING()
<< "empty network information: version=" << addr_list_.version()
<< " reinit_date=" << addr_list_.reinit_date()
<< " real_reinit_date=" << reinit_date_);
if (has_reverse_addr_) {
request_reverse_ping();
return td::Status::Error(ErrorCode::notready, "waiting for reverse ping");
} else {
return td::Status::Error(ErrorCode::notready, PSTRING()
<< "empty network information: version=" << addr_list_.version()
<< " reinit_date=" << addr_list_.reinit_date()
<< " real_reinit_date=" << reinit_date_);
}
}
for (auto &conn : priority_conns_) {
@ -720,11 +726,18 @@ void AdnlPeerPairImpl::update_addr_list(AdnlAddressList addr_list) {
VLOG(ADNL_INFO) << this << ": updating addr list to version " << addr_list.version() << " size=" << addr_list.size();
const auto addrs = addr_list.addrs();
has_reverse_addr_ = addr_list.has_reverse();
if (has_reverse_addr_ && addrs.empty()) {
return;
}
std::vector<Conn> conns;
auto &old_conns = priority ? priority_conns_ : conns_;
size_t idx = 0;
for (const auto &addr : addrs) {
if (addr->is_reverse()) {
continue;
}
if ((mode_ & static_cast<td::uint32>(AdnlLocalIdMode::direct_only)) && !addr->is_public()) {
continue;
}
@ -746,7 +759,7 @@ void AdnlPeerPairImpl::get_conn_ip_str(td::Promise<td::string> promise) {
promise.set_value("undefined");
return;
}
for (auto &conn : priority_conns_) {
if (conn.ready()) {
td::actor::send_closure(conn.conn, &AdnlNetworkConnection::get_ip_str, std::move(promise));
@ -759,7 +772,7 @@ void AdnlPeerPairImpl::get_conn_ip_str(td::Promise<td::string> promise) {
return;
}
}
promise.set_value("undefined");
}
@ -827,7 +840,7 @@ void AdnlPeerImpl::receive_packet(AdnlNodeIdShort dst, td::uint32 dst_mode, td::
}
}
td::actor::send_closure(it->second.get(), &AdnlPeerPair::receive_packet_checked, std::move(packet));
td::actor::send_closure(it->second.get(), &AdnlPeerPair::receive_packet, std::move(packet));
}
void AdnlPeerImpl::send_messages(AdnlNodeIdShort src, td::uint32 src_mode, td::actor::ActorId<AdnlLocalId> src_actor,
@ -884,7 +897,7 @@ void AdnlPeerImpl::get_conn_ip_str(AdnlNodeIdShort l_id, td::Promise<td::string>
if (it == peer_pairs_.end()) {
promise.set_value("undefined");
return;
}
}
td::actor::send_closure(it->second, &AdnlPeerPair::get_conn_ip_str, std::move(promise));
}
@ -960,6 +973,36 @@ void AdnlPeerPairImpl::update_peer_id(AdnlNodeIdFull id) {
CHECK(!peer_id_.empty());
}
void AdnlPeerPairImpl::request_reverse_ping() {
if (request_reverse_ping_active_ || !request_reverse_ping_after_.is_in_past()) {
return;
}
VLOG(ADNL_INFO) << this << ": requesting reverse ping";
request_reverse_ping_after_ = td::Timestamp::in(15.0);
request_reverse_ping_active_ = true;
td::actor::send_closure(
local_actor_, &AdnlLocalId::get_self_node,
[SelfId = actor_id(this), peer = peer_id_short_, dht = dht_node_](td::Result<AdnlNode> R) {
if (R.is_error()) {
td::actor::send_closure(SelfId, &AdnlPeerPairImpl::request_reverse_ping_result, R.move_as_error());
return;
}
td::actor::send_closure(
dht, &dht::Dht::request_reverse_ping, R.move_as_ok(), peer, [SelfId](td::Result<td::Unit> R) {
td::actor::send_closure(SelfId, &AdnlPeerPairImpl::request_reverse_ping_result, std::move(R));
});
});
}
void AdnlPeerPairImpl::request_reverse_ping_result(td::Result<td::Unit> R) {
request_reverse_ping_active_ = false;
if (R.is_ok()) {
VLOG(ADNL_INFO) << this << ": reverse ping requested";
} else {
VLOG(ADNL_INFO) << this << ": failed to request reverse ping: " << R.move_as_error();
}
}
} // namespace adnl
} // namespace ton


@ -154,6 +154,9 @@ class AdnlPeerPairImpl : public AdnlPeerPair {
}
}
void request_reverse_ping();
void request_reverse_ping_result(td::Result<td::Unit> R);
struct Conn {
class ConnCallback : public AdnlNetworkConnection::Callback {
public:
@ -251,6 +254,10 @@ class AdnlPeerPairImpl : public AdnlPeerPair {
td::Timestamp next_dht_query_at_ = td::Timestamp::never();
td::Timestamp next_db_update_at_ = td::Timestamp::never();
td::Timestamp retry_send_at_ = td::Timestamp::never();
bool has_reverse_addr_ = false;
td::Timestamp request_reverse_ping_after_ = td::Timestamp::now();
bool request_reverse_ping_active_ = false;
};
class AdnlPeerImpl : public AdnlPeer {


@ -275,7 +275,12 @@ target_include_directories(ton_crypto PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_S
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>)
target_link_libraries(ton_crypto PUBLIC ${OPENSSL_CRYPTO_LIBRARY} tdutils tddb_utils)
if (NOT WIN32)
target_link_libraries(ton_crypto PUBLIC dl z)
find_library(DL dl)
if (DL)
target_link_libraries(ton_crypto PUBLIC dl z)
else()
target_link_libraries(ton_crypto PUBLIC z)
endif()
endif()
target_include_directories(ton_crypto SYSTEM PUBLIC $<BUILD_INTERFACE:${OPENSSL_INCLUDE_DIR}>)


@ -113,7 +113,10 @@ var_uint$_ {n:#} len:(#< n) value:(uint (len * 8))
= VarUInteger n;
var_int$_ {n:#} len:(#< n) value:(int (len * 8))
= VarInteger n;
nanograms$_ amount:(VarUInteger 16) = Grams;
nanograms$_ amount:(VarUInteger 16) = Grams;
_ grams:Grams = Coins;
//
extra_currencies$_ dict:(HashmapE 32 (VarUInteger 32))
= ExtraCurrencyCollection;
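A quick worked example of the Grams/Coins encoding above: Grams is a VarUInteger 16, i.e. a 4-bit length prefix len (len < 16) followed by an unsigned value stored in len * 8 bits. For instance, 1 TON = 1 000 000 000 nanograms = 0x3B9ACA00 is encoded as len = 4 (4 bits) followed by the 32-bit value 0x3B9ACA00, 36 bits in total; a zero amount is just len = 0, i.e. 4 bits.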
@ -611,15 +614,29 @@ wfmt_ext#0 min_addr_len:(## 12) max_addr_len:(## 12) addr_len_step:(## 12)
workchain_type_id:(## 32) { workchain_type_id >= 1 }
= WorkchainFormat 0;
workchain#a6 enabled_since:uint32 actual_min_split:(## 8)
min_split:(## 8) max_split:(## 8) { actual_min_split <= min_split }
wc_split_merge_timings#0
split_merge_delay:uint32 split_merge_interval:uint32
min_split_merge_interval:uint32 max_split_merge_delay:uint32
= WcSplitMergeTimings;
//workchain#a5 enabled_since:uint32 min_split:(## 8) max_split:(## 8)
// { min_split <= max_split } { max_split <= 60 }
workchain#a6 enabled_since:uint32 actual_min_split:(## 8)
min_split:(## 8) max_split:(## 8) { actual_min_split <= min_split }
basic:(## 1) active:Bool accept_msgs:Bool flags:(## 13) { flags = 0 }
zerostate_root_hash:bits256 zerostate_file_hash:bits256
version:uint32 format:(WorkchainFormat basic)
= WorkchainDescr;
workchain_v2#a7 enabled_since:uint32 actual_min_split:(## 8)
min_split:(## 8) max_split:(## 8) { actual_min_split <= min_split }
basic:(## 1) active:Bool accept_msgs:Bool flags:(## 13) { flags = 0 }
zerostate_root_hash:bits256 zerostate_file_hash:bits256
version:uint32 format:(WorkchainFormat basic)
split_merge_timings:WcSplitMergeTimings
= WorkchainDescr;
_ workchains:(HashmapE 32 WorkchainDescr) = ConfigParam 12;
complaint_prices#1a deposit:Grams bit_price:Grams cell_price:Grams = ComplaintPricing;
@ -739,11 +756,32 @@ collator_info#0 full_node_id:(Maybe uint256) = CollatorInfo;
colator_config#a0 full_collated_data:Bool collator_nodes:(HashmapE 352 CollatorInfo) = CollatorConfig;
_ CollatorConfig = ConfigParam 41;
size_limits_config#01 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells:uint32 max_vm_data_depth:uint16
max_ext_msg_size:uint32 max_ext_msg_depth:uint16 = SizeLimitsConfig;
size_limits_config_v2#02 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells:uint32 max_vm_data_depth:uint16
max_ext_msg_size:uint32 max_ext_msg_depth:uint16 max_acc_state_cells:uint32 max_acc_state_bits:uint32 = SizeLimitsConfig;
_ SizeLimitsConfig = ConfigParam 43;
oracle_bridge_params#_ bridge_address:bits256 oracle_mutlisig_address:bits256 oracles:(HashmapE 256 uint256) external_chain_address:bits256 = OracleBridgeParams;
_ OracleBridgeParams = ConfigParam 71; // Ethereum bridge
_ OracleBridgeParams = ConfigParam 72; // Binance Smart Chain bridge
_ OracleBridgeParams = ConfigParam 73; // Polygon bridge
// Note that chains in which bridge, minter and jetton-wallet operate are fixated
jetton_bridge_prices#_ bridge_burn_fee:Coins bridge_mint_fee:Coins
wallet_min_tons_for_storage:Coins
wallet_gas_consumption:Coins
minter_min_tons_for_storage:Coins
discover_gas_consumption:Coins = JettonBridgePrices;
jetton_bridge_params_v0#00 bridge_address:bits256 oracles_address:bits256 oracles:(HashmapE 256 uint256) state_flags:uint8 burn_bridge_fee:Coins = JettonBridgeParams;
jetton_bridge_params_v1#01 bridge_address:bits256 oracles_address:bits256 oracles:(HashmapE 256 uint256) state_flags:uint8 prices:^JettonBridgePrices external_chain_address:bits256 = JettonBridgeParams;
_ JettonBridgeParams = ConfigParam 79; // ETH->TON token bridge
_ JettonBridgeParams = ConfigParam 80; // BNB->TON token bridge
_ JettonBridgeParams = ConfigParam 81; // Polygon->TON token bridge
//
// PROOFS
//
@ -870,4 +908,3 @@ chan_op_cmd#912838d1 msg:ChanSignedMsg = ChanOp;
chan_data$_ config:^ChanConfig state:^ChanState = ChanData;


@ -1913,6 +1913,38 @@ std::vector<ton::ValidatorDescr> Config::compute_total_validator_set(int next) c
return res.move_as_ok()->export_validator_set();
}
td::Result<SizeLimitsConfig> Config::get_size_limits_config() const {
SizeLimitsConfig limits;
td::Ref<vm::Cell> param = get_config_param(43);
if (param.is_null()) {
return limits;
}
auto unpack_v1 = [&](auto& rec) {
limits.max_msg_bits = rec.max_msg_bits;
limits.max_msg_cells = rec.max_msg_cells;
limits.max_library_cells = rec.max_library_cells;
limits.max_vm_data_depth = static_cast<td::uint16>(rec.max_vm_data_depth);
limits.ext_msg_limits.max_size = rec.max_ext_msg_size;
limits.ext_msg_limits.max_depth = static_cast<td::uint16>(rec.max_ext_msg_depth);
};
auto unpack_v2 = [&](auto& rec) {
unpack_v1(rec);
limits.max_acc_state_bits = rec.max_acc_state_bits;
limits.max_acc_state_cells = rec.max_acc_state_cells;
};
gen::SizeLimitsConfig::Record_size_limits_config rec_v1;
gen::SizeLimitsConfig::Record_size_limits_config_v2 rec_v2;
if (tlb::unpack_cell(param, rec_v1)) {
unpack_v1(rec_v1);
} else if (tlb::unpack_cell(param, rec_v2)) {
unpack_v2(rec_v2);
} else {
return td::Status::Error("configuration parameter 43 is invalid");
}
return limits;
}
td::Result<std::pair<ton::UnixTime, ton::UnixTime>> Config::unpack_validator_set_start_stop(Ref<vm::Cell> vset_root) {
if (vset_root.is_null()) {
return td::Status::Error("validator set absent");
@ -1942,31 +1974,58 @@ bool WorkchainInfo::unpack(ton::WorkchainId wc, vm::CellSlice& cs) {
if (wc == ton::workchainInvalid) {
return false;
}
block::gen::WorkchainDescr::Record info;
if (!tlb::unpack(cs, info)) {
return false;
}
enabled_since = info.enabled_since;
actual_min_split = info.actual_min_split;
min_split = info.min_split;
max_split = info.max_split;
basic = info.basic;
active = info.active;
accept_msgs = info.accept_msgs;
flags = info.flags;
zerostate_root_hash = info.zerostate_root_hash;
zerostate_file_hash = info.zerostate_file_hash;
version = info.version;
if (basic) {
min_addr_len = max_addr_len = addr_len_step = 256;
} else {
block::gen::WorkchainFormat::Record_wfmt_ext ext;
if (!tlb::type_unpack(cs, block::gen::WorkchainFormat{basic}, ext)) {
auto unpack_v1 = [this](auto& info) {
enabled_since = info.enabled_since;
actual_min_split = info.actual_min_split;
min_split = info.min_split;
max_split = info.max_split;
basic = info.basic;
active = info.active;
accept_msgs = info.accept_msgs;
flags = info.flags;
zerostate_root_hash = info.zerostate_root_hash;
zerostate_file_hash = info.zerostate_file_hash;
version = info.version;
if (basic) {
min_addr_len = max_addr_len = addr_len_step = 256;
} else {
block::gen::WorkchainFormat::Record_wfmt_ext ext;
if (!tlb::csr_type_unpack(info.format, block::gen::WorkchainFormat{basic}, ext)) {
return false;
}
min_addr_len = ext.min_addr_len;
max_addr_len = ext.max_addr_len;
addr_len_step = ext.addr_len_step;
}
return true;
};
auto unpack_v2 = [&, this](auto& info) {
if (!unpack_v1(info)) {
return false;
}
min_addr_len = ext.min_addr_len;
max_addr_len = ext.max_addr_len;
addr_len_step = ext.addr_len_step;
block::gen::WcSplitMergeTimings::Record rec;
if (!tlb::csr_unpack(info.split_merge_timings, rec)) {
return false;
}
split_merge_delay = rec.split_merge_delay;
split_merge_interval = rec.split_merge_interval;
min_split_merge_interval = rec.min_split_merge_interval;
max_split_merge_delay = rec.max_split_merge_delay;
return true;
};
block::gen::WorkchainDescr::Record_workchain info_v1;
block::gen::WorkchainDescr::Record_workchain_v2 info_v2;
vm::CellSlice cs0 = cs;
if (tlb::unpack(cs, info_v1)) {
if (!unpack_v1(info_v1)) {
return false;
}
} else if (tlb::unpack(cs = cs0, info_v2)) {
if (!unpack_v2(info_v2)) {
return false;
}
} else {
return false;
}
workchain = wc;
LOG(DEBUG) << "unpacked info for workchain " << wc << ": basic=" << basic << ", active=" << active


@ -376,6 +376,21 @@ struct MsgPrices {
td::RefInt256 get_next_part(td::RefInt256 total) const;
};
struct SizeLimitsConfig {
// Default values are used when not present in global config
struct ExtMsgLimits {
td::uint32 max_size = 65535;
td::uint16 max_depth = 512;
};
td::uint32 max_msg_bits = 1 << 21;
td::uint32 max_msg_cells = 1 << 13;
td::uint32 max_library_cells = 1000;
td::uint16 max_vm_data_depth = 512;
ExtMsgLimits ext_msg_limits;
td::uint32 max_acc_state_cells = 1 << 16;
td::uint32 max_acc_state_bits = (1 << 16) * 1023;
};
struct CatchainValidatorsConfig {
td::uint32 mc_cc_lifetime, shard_cc_lifetime, shard_val_lifetime, shard_val_num;
bool shuffle_mc_val;
@ -402,6 +417,13 @@ struct WorkchainInfo : public td::CntObject {
ton::RootHash zerostate_root_hash;
ton::FileHash zerostate_file_hash;
int min_addr_len, max_addr_len, addr_len_step;
// Default values are used when split_merge_timings is not set in config
unsigned split_merge_delay = 100; // prepare (delay) split/merge for 100 seconds
unsigned split_merge_interval = 100; // split/merge is enabled during a 100 second interval
unsigned min_split_merge_interval = 30; // split/merge interval must be at least 30 seconds
unsigned max_split_merge_delay = 1000; // end of split/merge interval must be at most 1000 seconds in the future
bool is_valid() const {
return workchain != ton::workchainInvalid;
}
@ -605,6 +627,7 @@ class Config {
ton::CatchainSeqno cc_seqno) const;
std::vector<ton::ValidatorDescr> compute_total_validator_set(int next) const;
CollatorConfig get_collator_config(bool need_collator_nodes) const;
td::Result<SizeLimitsConfig> get_size_limits_config() const;
static std::vector<ton::ValidatorDescr> do_compute_validator_set(const block::CatchainValidatorsConfig& ccv_conf,
ton::ShardIdFull shard,
const block::ValidatorSet& vset, ton::UnixTime time,
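For context, a minimal caller sketch for the new size-limits accessor declared above; the include path and the way the block::Config instance is obtained are assumptions, while get_size_limits_config() and the SizeLimitsConfig fields come from this diff:

#include "block/mc-config.h"   // assumed include path
#include "td/utils/logging.h"

// Sketch only: read ConfigParam 43 through the accessor added in this commit.
td::Status print_size_limits(const block::Config &config) {
  TRY_RESULT(limits, config.get_size_limits_config());  // defaults apply if param 43 is absent
  LOG(INFO) << "max_msg_bits=" << limits.max_msg_bits
            << " max_msg_cells=" << limits.max_msg_cells
            << " max_acc_state_cells=" << limits.max_acc_state_cells
            << " max_acc_state_bits=" << limits.max_acc_state_bits;
  return td::Status::OK();
}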


@ -24,6 +24,7 @@
#include "td/utils/uint128.h"
#include "ton/ton-shard.h"
#include "vm/vm.h"
#include "td/utils/Timer.h"
namespace {
class StringLoggerTail : public td::LogInterface {
@ -345,7 +346,7 @@ bool Account::unpack(Ref<vm::CellSlice> shard_account, Ref<vm::CellSlice> extra,
block::gen::AccountStorage::Record storage;
if (!(tlb::unpack_exact(acc_cs, acc) && (my_addr = acc.addr).not_null() && unpack_address(acc.addr.write()) &&
compute_my_addr() && unpack_storage_info(acc.storage_stat.write()) &&
tlb::csr_unpack(std::move(acc.storage), storage) &&
tlb::csr_unpack(this->storage = std::move(acc.storage), storage) &&
std::max(storage.last_trans_lt, 1ULL) > acc_info.last_trans_lt && balance.unpack(std::move(storage.balance)))) {
return false;
}
@ -459,7 +460,6 @@ bool Account::deactivate() {
return true;
}
bool Account::belongs_to_shard(ton::ShardIdFull shard) const {
return workchain == shard.workchain && ton::shard_is_ancestor(shard.shard, addr);
}
@ -593,7 +593,7 @@ bool Transaction::unpack_input_msg(bool ihr_delivered, const ActionPhaseConfig*
sstat.bits -= cs.size(); // bits in the root cells are free
sstat.cells--; // the root cell itself is not counted as a cell
LOG(DEBUG) << "storage paid for a message: " << sstat.cells << " cells, " << sstat.bits << " bits";
if (sstat.bits > max_msg_bits || sstat.cells > max_msg_cells) {
if (sstat.bits > cfg->size_limits.max_msg_bits || sstat.cells > cfg->size_limits.max_msg_cells) {
LOG(DEBUG) << "inbound external message too large, invalid";
return false;
}
@ -1043,12 +1043,15 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) {
vm_log.log_options = td::LogOptions(VERBOSITY_NAME(DEBUG), true, false);
}
vm::VmState vm{new_code, std::move(stack), gas, 1, new_data, vm_log, compute_vm_libraries(cfg)};
vm.set_max_data_depth(cfg.max_vm_data_depth);
vm.set_c7(prepare_vm_c7(cfg)); // tuple with SmartContractInfo
// vm.incr_stack_trace(1); // enable stack dump after each step
LOG(DEBUG) << "starting VM";
cp.vm_init_state_hash = vm.get_state_hash();
td::Timer timer;
cp.exit_code = ~vm.run();
double elapsed = timer.elapsed();
LOG(DEBUG) << "VM terminated with exit code " << cp.exit_code;
cp.out_of_gas = (cp.exit_code == ~(int)vm::Excno::out_of_gas);
cp.vm_final_state_hash = vm.get_final_state_hash(cp.exit_code);
@ -1064,7 +1067,8 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) {
}
LOG(INFO) << "steps: " << vm.get_steps_count() << " gas: used=" << gas.gas_consumed() << ", max=" << gas.gas_max
<< ", limit=" << gas.gas_limit << ", credit=" << gas.gas_credit;
LOG(INFO) << "out_of_gas=" << cp.out_of_gas << ", accepted=" << cp.accepted << ", success=" << cp.success;
LOG(INFO) << "out_of_gas=" << cp.out_of_gas << ", accepted=" << cp.accepted << ", success=" << cp.success
<< ", time=" << elapsed << "s";
if (logger != nullptr) {
cp.vm_log = logger->get_log();
}
@ -1121,6 +1125,25 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) {
ap.total_action_fees = td::zero_refint();
ap.reserved_balance.set_zero();
td::Ref<vm::Cell> old_code = new_code, old_data = new_data, old_library = new_library;
auto enforce_state_size_limits = [&]() {
if (account.is_special) {
return true;
}
if (!check_state_size_limit(cfg)) {
// Rollback changes to state, fail action phase
LOG(INFO) << "Account state size exceeded limits";
new_storage_stat.clear();
new_code = old_code;
new_data = old_data;
new_library = old_library;
ap.result_code = 50;
ap.state_size_too_big = true;
return false;
}
return true;
};
int n = 0;
while (true) {
ap.action_list.push_back(list);
@ -1196,9 +1219,21 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) {
ap.no_funds = true;
}
LOG(DEBUG) << "invalid action " << ap.result_arg << " in action list: error code " << ap.result_code;
// This is required here because changes to libraries are applied even if the action phase fails
enforce_state_size_limits();
return true;
}
}
end_lt = ap.end_lt;
if (ap.new_code.not_null()) {
new_code = ap.new_code;
}
new_data = compute_phase->new_data; // tentative persistent data update applied
if (!enforce_state_size_limits()) {
return true;
}
ap.result_arg = 0;
ap.result_code = 0;
CHECK(ap.remaining_balance.grams->sgn() >= 0);
@ -1212,12 +1247,7 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) {
was_deleted = true;
}
ap.success = true;
end_lt = ap.end_lt;
out_msgs = std::move(ap.out_msgs);
if (ap.new_code.not_null()) {
new_code = ap.new_code;
}
new_data = compute_phase->new_data; // tentative persistent data update applied
total_fees +=
ap.total_action_fees; // NB: forwarding fees are not accounted here (they are not collected by the validators in this transaction)
balance = ap.remaining_balance;
@ -1272,6 +1302,11 @@ int Transaction::try_action_change_library(vm::CellSlice& cs, ActionPhase& ap, c
// library code not found
return 41;
}
vm::CellStorageStat sstat;
sstat.compute_used_storage(lib_ref);
if (sstat.cells > cfg.size_limits.max_library_cells) {
return 43;
}
vm::CellBuilder cb;
CHECK(cb.store_bool_bool(rec.mode >> 1) && cb.store_ref_bool(std::move(lib_ref)));
CHECK(dict.set_builder(hash, cb));
@ -1546,7 +1581,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
sstat.add_used_storage(info.value->prefetch_ref());
}
LOG(DEBUG) << "storage paid for a message: " << sstat.cells << " cells, " << sstat.bits << " bits";
if (sstat.bits > max_msg_bits || sstat.cells > max_msg_cells) {
if (sstat.bits > cfg.size_limits.max_msg_bits || sstat.cells > cfg.size_limits.max_msg_cells) {
LOG(DEBUG) << "message too large, invalid";
return skip_invalid ? 0 : 40;
}
@ -1801,6 +1836,35 @@ int Transaction::try_action_reserve_currency(vm::CellSlice& cs, ActionPhase& ap,
return 0;
}
bool Transaction::check_state_size_limit(const ActionPhaseConfig& cfg) {
auto cell_equal = [](const td::Ref<vm::Cell>& a, const td::Ref<vm::Cell>& b) -> bool {
if (a.is_null()) {
return b.is_null();
}
if (b.is_null()) {
return false;
}
return a->get_hash() == b->get_hash();
};
if (cell_equal(account.code, new_code) && cell_equal(account.data, new_data) &&
cell_equal(account.library, new_library)) {
return true;
}
// new_storage_stat is used here because these stats will be reused in compute_state()
new_storage_stat.limit_cells = cfg.size_limits.max_acc_state_cells;
new_storage_stat.limit_bits = cfg.size_limits.max_acc_state_bits;
new_storage_stat.add_used_storage(new_code);
new_storage_stat.add_used_storage(new_data);
new_storage_stat.add_used_storage(new_library);
if (acc_status == Account::acc_active) {
new_storage_stat.clear_limit();
} else {
new_storage_stat.clear();
}
return new_storage_stat.cells <= cfg.size_limits.max_acc_state_cells &&
new_storage_stat.bits <= cfg.size_limits.max_acc_state_bits;
}
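// Hedged sketch (not part of this commit) of what the limit fields above now do: they map to
// the new CellStorageStat::limit_cells / limit_bits members (see the CellStorageStat changes
// further below), and add_used_storage() returns false as soon as either limit is crossed,
// so an oversized state is detected without walking the whole cell DAG:
//   vm::CellStorageStat stat;
//   stat.limit_cells = cfg.size_limits.max_acc_state_cells;
//   stat.limit_bits = cfg.size_limits.max_acc_state_bits;
//   bool within_limits = stat.add_used_storage(new_code) && stat.add_used_storage(new_data)
//                        && stat.add_used_storage(new_library);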
bool Transaction::prepare_bounce_phase(const ActionPhaseConfig& cfg) {
if (in_msg.is_null() || !bounce_enabled) {
return false;
@ -1924,6 +1988,32 @@ bool Account::store_acc_status(vm::CellBuilder& cb, int acc_status) const {
return cb.store_long_bool(v, 2);
}
static td::optional<vm::CellStorageStat> try_update_storage_stat(const vm::CellStorageStat& old_stat,
td::Ref<vm::CellSlice> old_cs,
td::Ref<vm::Cell> new_cell) {
if (old_stat.cells == 0 || old_cs.is_null()) {
return {};
}
vm::CellSlice new_cs = vm::CellSlice(vm::NoVm(), new_cell);
if (old_cs->size_refs() != new_cs.size_refs()) {
return {};
}
for (unsigned i = 0; i < old_cs->size_refs(); ++i) {
if (old_cs->prefetch_ref(i)->get_hash() != new_cs.prefetch_ref(i)->get_hash()) {
return {};
}
}
if (old_stat.bits < old_cs->size()) {
return {};
}
vm::CellStorageStat new_stat;
new_stat.cells = old_stat.cells;
new_stat.bits = old_stat.bits - old_cs->size() + new_cs.size();
new_stat.public_cells = old_stat.public_cells;
return new_stat;
}
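// Worked example of the fast path above (illustrative numbers): if the old account state had
// cells = 10 and bits = 1200 in total, the old AccountStorage root slice carried 120 data bits,
// and the new root keeps the same references but now carries 128 data bits (e.g. only the
// balance changed), the updated stat is simply cells = 10, bits = 1200 - 120 + 128 = 1208,
// with no DAG traversal at all.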
bool Transaction::compute_state() {
if (new_total_state.not_null()) {
return true;
@ -1985,6 +2075,7 @@ bool Transaction::compute_state() {
// code:(Maybe ^Cell) data:(Maybe ^Cell) library:(HashmapE 256 SimpleLib)
}
auto storage = cb.finalize();
new_storage = td::Ref<vm::CellSlice>(true, vm::NoVm(), storage);
if (si_pos) {
auto cs_ref = load_cell_slice_ref(storage);
CHECK(cs_ref.unique_write().skip_ext(si_pos));
@ -1993,7 +2084,16 @@ bool Transaction::compute_state() {
new_inner_state.clear();
}
vm::CellStorageStat& stats = new_storage_stat;
CHECK(stats.compute_used_storage(Ref<vm::Cell>(storage)));
auto new_stats = try_update_storage_stat(account.storage_stat, account.storage, storage);
if (new_stats) {
stats = new_stats.unwrap();
} else {
td::Timer timer;
CHECK(stats.add_used_storage(Ref<vm::Cell>(storage)));
if (timer.elapsed() > 0.1) {
LOG(INFO) << "Compute used storage took " << timer.elapsed() << "s";
}
}
CHECK(cb.store_long_bool(1, 1) // account$1
&& cb.append_cellslice_bool(account.my_addr) // addr:MsgAddressInt
&& block::store_UInt7(cb, stats.cells) // storage_used$_ cells:(VarUInteger 7)
@ -2265,9 +2365,15 @@ bool Transaction::would_fit(unsigned cls, const block::BlockLimitStatus& blimst)
return blimst.would_fit(cls, end_lt, gas_used(), &extra);
}
bool Transaction::update_limits(block::BlockLimitStatus& blimst) const {
return blimst.update_lt(end_lt) && blimst.update_gas(gas_used()) && blimst.add_proof(new_total_state) &&
blimst.add_cell(root) && blimst.add_transaction() && blimst.add_account(is_first);
bool Transaction::update_limits(block::BlockLimitStatus& blimst, bool with_size) const {
if (!(blimst.update_lt(end_lt) && blimst.update_gas(gas_used()))) {
return false;
}
if (with_size) {
return blimst.add_proof(new_total_state) && blimst.add_cell(root) && blimst.add_transaction() &&
blimst.add_account(is_first);
}
return true;
}
/*
@ -2296,6 +2402,7 @@ Ref<vm::Cell> Transaction::commit(Account& acc) {
acc.last_trans_hash_ = root->get_hash().bits();
acc.last_paid = last_paid;
acc.storage_stat = new_storage_stat;
acc.storage = new_storage;
acc.balance = std::move(balance);
acc.due_payment = std::move(due_payment);
acc.total_state = std::move(new_total_state);


@ -107,6 +107,7 @@ struct ComputePhaseConfig {
Ref<vm::Cell> global_config;
td::BitArray<256> block_rand_seed;
bool with_vm_log{false};
td::uint16 max_vm_data_depth = 512;
ComputePhaseConfig(td::uint64 _gas_price = 0, td::uint64 _gas_limit = 0, td::uint64 _gas_credit = 0)
: gas_price(_gas_price), gas_limit(_gas_limit), special_gas_limit(_gas_limit), gas_credit(_gas_credit) {
compute_threshold();
@ -143,6 +144,7 @@ struct ActionPhaseConfig {
int bounce_msg_body{0}; // usually 0 or 256 bits
MsgPrices fwd_std;
MsgPrices fwd_mc; // from/to masterchain
SizeLimitsConfig size_limits;
const WorkchainSet* workchains{nullptr};
const MsgPrices& fetch_msg_prices(bool is_masterchain) const {
return is_masterchain ? fwd_mc : fwd_std;
@ -182,6 +184,7 @@ struct ActionPhase {
bool code_changed{false};
bool action_list_invalid{false};
bool acc_delete_req{false};
bool state_size_too_big{false};
enum { acst_unchanged = 0, acst_frozen = 2, acst_deleted = 3 };
int acc_status_change{acst_unchanged};
td::RefInt256 total_fwd_fees; // all fees debited from the account
@ -235,6 +238,7 @@ struct Account {
td::RefInt256 due_payment;
Ref<vm::Cell> orig_total_state; // ^Account
Ref<vm::Cell> total_state; // ^Account
Ref<vm::CellSlice> storage; // AccountStorage
Ref<vm::CellSlice> inner_state; // StateInit
ton::Bits256 state_hash; // hash of StateInit for frozen accounts
Ref<vm::Cell> code, data, library, orig_library;
@ -283,7 +287,6 @@ struct Account {
};
struct Transaction {
static constexpr unsigned max_msg_bits = (1 << 21), max_msg_cells = (1 << 13);
enum {
tr_none,
tr_ord,
@ -323,6 +326,7 @@ struct Transaction {
ton::UnixTime last_paid;
Ref<vm::Cell> root;
Ref<vm::Cell> new_total_state;
Ref<vm::CellSlice> new_storage;
Ref<vm::CellSlice> new_inner_state;
Ref<vm::Cell> new_code, new_data, new_library;
Ref<vm::Cell> in_msg, in_msg_state;
@ -348,6 +352,7 @@ struct Transaction {
std::vector<Ref<vm::Cell>> compute_vm_libraries(const ComputePhaseConfig& cfg);
bool prepare_compute_phase(const ComputePhaseConfig& cfg);
bool prepare_action_phase(const ActionPhaseConfig& cfg);
bool check_state_size_limit(const ActionPhaseConfig& cfg);
bool prepare_bounce_phase(const ActionPhaseConfig& cfg);
bool compute_state();
bool serialize();
@ -359,7 +364,7 @@ struct Transaction {
const vm::NewCellStorageStat& store_stat, const vm::CellUsageTree* usage_tree) const;
bool update_block_storage_profile(vm::NewCellStorageStat& store_stat, const vm::CellUsageTree* usage_tree) const;
bool would_fit(unsigned cls, const block::BlockLimitStatus& blk_lim_st) const;
bool update_limits(block::BlockLimitStatus& blk_lim_st) const;
bool update_limits(block::BlockLimitStatus& blk_lim_st, bool with_size = true) const;
Ref<vm::Cell> commit(Account& _account); // _account should point to the same account
LtCellRef extract_out_msg(unsigned i);


@ -520,6 +520,14 @@ bool Op::compute_used_vars(const CodeBlob& code, bool edit) {
} while (changes <= edit);
return set_var_info(std::move(new_var_info));
}
case _TryCatch: {
code.compute_used_code_vars(block0, next_var_info, edit);
code.compute_used_code_vars(block1, next_var_info, edit);
VarDescrList merge_info = block0->var_info + block1->var_info + next_var_info;
merge_info -= left;
merge_info.clear_last();
return set_var_info(std::move(merge_info));
}
default:
std::cerr << "fatal: unknown operation <??" << cl << "> in compute_used_vars()\n";
throw src::ParseError{where, "unknown operation"};
@ -645,6 +653,10 @@ bool prune_unreachable(std::unique_ptr<Op>& ops) {
reach = true;
break;
}
case Op::_TryCatch: {
reach = prune_unreachable(op.block0) | prune_unreachable(op.block1);
break;
}
default:
std::cerr << "fatal: unknown operation <??" << op.cl << ">\n";
throw src::ParseError{op.where, "unknown operation in prune_unreachable()"};
@ -825,6 +837,12 @@ VarDescrList Op::fwd_analyze(VarDescrList values) {
values = block0->fwd_analyze(values);
break;
}
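// Note on the try/catch case below: the catch body is analyzed from the same incoming values
// as the try body, because the handler continuation receives the pre-try values of the
// variables it captures and c4/c5/c7 are restored on entry (see the code generator); the
// result of the whole construct is the join `val1 | val2` of the two branches.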
case _TryCatch: {
VarDescrList val1 = block0->fwd_analyze(values);
VarDescrList val2 = block1->fwd_analyze(std::move(values));
values = val1 | val2;
break;
}
default:
std::cerr << "fatal: unknown operation <??" << cl << ">\n";
throw src::ParseError{where, "unknown operation in fwd_analyze()"};
@ -866,6 +884,7 @@ bool Op::mark_noreturn() {
case _Return:
return set_noreturn(true);
case _If:
case _TryCatch:
return set_noreturn((block0->mark_noreturn() & (block1 && block1->mark_noreturn())) | next->mark_noreturn());
case _Again:
block0->mark_noreturn();


@ -13,7 +13,7 @@ def getenv(name, default=None):
print("Environemnt variable", name, "is not set", file=sys.stderr)
exit(1)
VAR_CNT = 5
VAR_CNT = 10
TMP_DIR = tempfile.mkdtemp()
FUNC_EXECUTABLE = getenv("FUNC_EXECUTABLE", "func")
FIFT_EXECUTABLE = getenv("FIFT_EXECUTABLE", "fift")
@ -31,6 +31,15 @@ class State:
self.x = x
self.vs = [0] * VAR_CNT
def copy(self):
s = State(self.x)
s.vs = self.vs.copy()
return s
def copy_from(self, s):
self.x = s.x
self.vs = s.vs.copy()
class Code:
pass
@ -136,6 +145,37 @@ class CodeRepeat(Code):
print(" " * (indent + 1) + "%s += 1;" % var, file=f)
print(" " * indent + "} until (%s >= %d);" % (var, self.n), file=f)
class CodeThrow(Code):
def __init__(self):
pass
def execute(self, state):
return "EXCEPTION"
def write(self, f, indent=0):
print(" " * indent + "throw(42);", file=f)
class CodeTryCatch(Code):
def __init__(self, c1, c2):
self.c1 = c1
self.c2 = c2
def execute(self, state):
state0 = state.copy()
res = self.c1.execute(state)
if res == "EXCEPTION":
state.copy_from(state0)
return self.c2.execute(state)
else:
return res
def write(self, f, indent=0):
print(" " * indent + "try {", file=f)
self.c1.write(f, indent + 1)
print(" " * indent + "} catch (_, _) {", file=f)
self.c2.write(f, indent + 1)
print(" " * indent + "}", file=f)
def write_function(f, name, body, inline=False, inline_ref=False, method_id=None):
print("_ %s(int x)" % name, file=f, end="")
if inline:
@ -147,31 +187,37 @@ def write_function(f, name, body, inline=False, inline_ref=False, method_id=None
print(" {", file=f)
for i in range(VAR_CNT):
print(" int v%d = 0;" % i, file=f)
body.write(f, 1);
body.write(f, 1)
print("}", file=f)
def gen_code(xl, xr, with_return, loop_depth=0):
def gen_code(xl, xr, with_return, loop_depth=0, try_catch_depth=0, can_throw=False):
if try_catch_depth < 3 and random.randint(0, 5) == 0:
c1 = gen_code(xl, xr, with_return, loop_depth, try_catch_depth + 1, random.randint(0, 1) == 0)
c2 = gen_code(xl, xr, with_return, loop_depth, try_catch_depth + 1, can_throw)
return CodeTryCatch(c1, c2)
code = []
for _ in range(random.randint(0, 2)):
if random.randint(0, 3) == 0 and loop_depth < 3:
c = gen_code(xl, xr, False, loop_depth + 1)
c = gen_code(xl, xr, False, loop_depth + 1, try_catch_depth, can_throw)
code.append(CodeRepeat(random.randint(0, 3), c, random.randint(0, 2)))
elif xr - xl > 1:
xmid = random.randrange(xl + 1, xr)
ret = random.choice((0, 0, 0, 0, 0, 1, 2))
c1 = gen_code(xl, xmid, ret == 1, loop_depth)
c1 = gen_code(xl, xmid, ret == 1, loop_depth, try_catch_depth, can_throw)
if random.randrange(5) == 0:
c2 = CodeEmpty()
else:
c2 = gen_code(xmid, xr, ret == 2, loop_depth)
c2 = gen_code(xmid, xr, ret == 2, loop_depth, try_catch_depth, can_throw)
code.append(CodeIfRange(xl, xmid, c1, c2))
if xr - xl == 1 and can_throw and random.randint(0, 5) == 0:
code.append(CodeThrow())
if with_return:
if xr - xl == 1:
code.append(CodeReturn(random.randrange(10**9)))
else:
xmid = random.randrange(xl + 1, xr)
c1 = gen_code(xl, xmid, True, loop_depth)
c2 = gen_code(xmid, xr, True, loop_depth)
c1 = gen_code(xl, xmid, True, loop_depth, try_catch_depth, can_throw)
c2 = gen_code(xmid, xr, True, loop_depth, try_catch_depth, can_throw)
code.append(CodeIfRange(xl, xmid, c1, c2))
for _ in range(random.randint(0, 3)):
pos = random.randint(0, len(code))
@ -203,6 +249,7 @@ def runvm(compiled_fif, xl, xr):
output.append(list(map(int, s.split())))
return output
cnt_ok = 0
cnt_fail = 0
for test_id in range(0, 1000000):


@ -229,16 +229,25 @@ int emulate_xor(int a, int b) {
}
int emulate_not(int a) {
if ((a & VarDescr::ConstZero) == VarDescr::ConstZero) {
return VarDescr::ConstTrue;
}
if ((a & VarDescr::ConstTrue) == VarDescr::ConstTrue) {
return VarDescr::ConstZero;
}
int a2 = a;
int f = VarDescr::_Even | VarDescr::_Odd;
if ((a & f) && (~a & f)) {
a ^= f;
if ((a2 & f) && (~a2 & f)) {
a2 ^= f;
}
f = VarDescr::_Pos | VarDescr::_Neg;
if ((a & f) && (~a & f)) {
a ^= f;
a2 &= ~(VarDescr::_Zero | VarDescr::_NonZero | VarDescr::_Bit | VarDescr::_Pos | VarDescr::_Neg);
if ((a & VarDescr::_Neg) && (a & VarDescr::_NonZero)) {
a2 |= VarDescr::_Pos;
}
a &= ~(VarDescr::_Zero | VarDescr::_NonZero | VarDescr::_Bit);
return a;
if (a & VarDescr::_Pos) {
a2 |= VarDescr::_Neg;
}
return a2;
}
int emulate_lshift(int a, int b) {
@ -977,6 +986,38 @@ AsmOp compile_cond_throw(std::vector<VarDescr>& res, std::vector<VarDescr>& args
}
}
AsmOp compile_throw_arg(std::vector<VarDescr>& res, std::vector<VarDescr>& args) {
assert(res.empty() && args.size() == 2);
VarDescr &x = args[1];
if (x.is_int_const() && x.int_const->unsigned_fits_bits(11)) {
x.unused();
return exec_arg_op("THROWARG", x.int_const, 1, 0);
} else {
return exec_op("THROWARGANY", 2, 0);
}
}
AsmOp compile_cond_throw_arg(std::vector<VarDescr>& res, std::vector<VarDescr>& args, bool mode) {
assert(res.empty() && args.size() == 3);
VarDescr &x = args[1], &y = args[2];
std::string suff = (mode ? "IF" : "IFNOT");
bool skip_cond = false;
if (y.always_true() || y.always_false()) {
y.unused();
skip_cond = true;
if (y.always_true() != mode) {
x.unused();
return AsmOp::Nop();
}
}
if (x.is_int_const() && x.int_const->unsigned_fits_bits(11)) {
x.unused();
return skip_cond ? exec_arg_op("THROWARG", x.int_const, 1, 0) : exec_arg_op("THROWARG"s + suff, x.int_const, 2, 0);
} else {
return skip_cond ? exec_op("THROWARGANY", 2, 0) : exec_op("THROWARGANY"s + suff, 3, 0);
}
}
AsmOp compile_bool_const(std::vector<VarDescr>& res, std::vector<VarDescr>& args, bool val) {
assert(res.size() == 1 && args.empty());
VarDescr& r = res[0];
@ -1102,6 +1143,8 @@ void define_builtins() {
auto fetch_slice_op = TypeExpr::new_map(SliceInt, TypeExpr::new_tensor({Slice, Slice}));
auto prefetch_slice_op = TypeExpr::new_map(SliceInt, Slice);
//auto arith_null_op = TypeExpr::new_map(TypeExpr::new_unit(), Int);
auto throw_arg_op = TypeExpr::new_forall({X}, TypeExpr::new_map(TypeExpr::new_tensor({X, Int}), Unit));
auto cond_throw_arg_op = TypeExpr::new_forall({X}, TypeExpr::new_map(TypeExpr::new_tensor({X, Int, Int}), Unit));
define_builtin_func("_+_", arith_bin_op, compile_add);
define_builtin_func("_-_", arith_bin_op, compile_sub);
define_builtin_func("-_", arith_un_op, compile_negate);
@ -1161,6 +1204,9 @@ void define_builtins() {
define_builtin_func("throw", impure_un_op, compile_throw, true);
define_builtin_func("throw_if", impure_bin_op, std::bind(compile_cond_throw, _1, _2, true), true);
define_builtin_func("throw_unless", impure_bin_op, std::bind(compile_cond_throw, _1, _2, false), true);
define_builtin_func("throw_arg", throw_arg_op, compile_throw_arg, true);
define_builtin_func("throw_arg_if", cond_throw_arg_op, std::bind(compile_cond_throw_arg, _1, _2, true), true);
define_builtin_func("throw_arg_unless", cond_throw_arg_op, std::bind(compile_cond_throw_arg, _1, _2, false), true);
define_builtin_func("load_int", fetch_int_op, std::bind(compile_fetch_int, _1, _2, true, true), {}, {1, 0});
define_builtin_func("load_uint", fetch_int_op, std::bind(compile_fetch_int, _1, _2, true, false), {}, {1, 0});
define_builtin_func("preload_int", prefetch_int_op, std::bind(compile_fetch_int, _1, _2, false, true));


@ -782,6 +782,77 @@ bool Op::generate_code_step(Stack& stack) {
return false;
}
}
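// Summary of the try/catch code generation added below: the current c4, c5 and c7 are pushed
// and stored into the handler continuation's savelist (so the handler sees them as they were
// when the try block was entered), the variables live in the catch block are passed to that
// continuation with SETCONTVARARGS in chunks of at most 255 entries, the try body is wrapped
// into its own continuation with the current c1 composed in via COMPOSALT, and the pair is
// finally executed with the TVM TRY primitive.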
case _TryCatch: {
if (block0->is_empty() && block1->is_empty()) {
return true;
}
if (block0->noreturn() || block1->noreturn()) {
stack.o.retalt_ = true;
}
Stack catch_stack{stack.o};
std::vector<var_idx_t> catch_vars;
std::vector<bool> catch_last;
for (const VarDescr& var : block1->var_info.list) {
if (stack.find(var.idx) >= 0) {
catch_vars.push_back(var.idx);
catch_last.push_back(!block0->var_info[var.idx]);
}
}
const size_t block_size = 255;
for (size_t begin = catch_vars.size(), end = begin; end > 0; end = begin) {
begin = end >= block_size ? end - block_size : 0;
for (size_t i = begin; i < end; ++i) {
catch_stack.push_new_var(catch_vars[i]);
}
}
catch_stack.push_new_var(left[0]);
catch_stack.push_new_var(left[1]);
stack.rearrange_top(catch_vars, catch_last);
stack.opt_show();
stack.o << "c4 PUSH";
stack.o << "c5 PUSH";
stack.o << "c7 PUSH";
stack.o << "<{";
stack.o.indent();
if (block1->noreturn()) {
catch_stack.mode |= Stack::_NeedRetAlt;
}
block1->generate_code_all(catch_stack);
catch_stack.drop_vars_except(next->var_info);
catch_stack.opt_show();
stack.o.undent();
stack.o << "}>CONT";
stack.o << "c7 SETCONT";
stack.o << "c5 SETCONT";
stack.o << "c4 SETCONT";
for (size_t begin = catch_vars.size(), end = begin; end > 0; end = begin) {
begin = end >= block_size ? end - block_size : 0;
stack.o << std::to_string(end - begin) + " PUSHINT";
stack.o << "-1 PUSHINT";
stack.o << "SETCONTVARARGS";
}
stack.s.erase(stack.s.end() - catch_vars.size(), stack.s.end());
stack.modified();
stack.o << "<{";
stack.o.indent();
if (block0->noreturn()) {
stack.mode |= Stack::_NeedRetAlt;
}
block0->generate_code_all(stack);
if (block0->noreturn()) {
stack.s = std::move(catch_stack.s);
} else if (!block1->noreturn()) {
stack.merge_state(catch_stack);
}
stack.opt_show();
stack.o.undent();
stack.o << "}>CONT";
stack.o << "c1 PUSH";
stack.o << "COMPOSALT";
stack.o << "SWAP";
stack.o << "TRY";
return true;
}
default:
std::cerr << "fatal: unknown operation <??" << cl << ">\n";
throw src::ParseError{where, "unknown operation in generate_code()"};


@ -53,6 +53,8 @@ enum Keyword {
_Do,
_While,
_Until,
_Try,
_Catch,
_If,
_Ifnot,
_Then,
@ -537,6 +539,7 @@ struct Op {
_Until,
_Repeat,
_Again,
_TryCatch,
_SliceConst
};
int cl;
@ -1559,6 +1562,9 @@ struct Stack {
int find_outside(var_idx_t var, int from, int to) const;
void forget_const();
void validate(int i) const {
if (i > 255) {
throw src::Fatal{"Too deep stack"};
}
assert(i >= 0 && i < depth() && "invalid stack reference");
}
void modified() {
@ -1593,6 +1599,7 @@ struct Stack {
void apply_wrappers() {
if (o.retalt_) {
o.insert(0, "SAMEALTSAVE");
o.insert(0, "c2 SAVE");
if (mode & _InlineFunc) {
o.indent_all();
o.insert(0, "CONT:<{");


@ -97,6 +97,8 @@ void define_keywords() {
.add_keyword("do", Kw::_Do)
.add_keyword("while", Kw::_While)
.add_keyword("until", Kw::_Until)
.add_keyword("try", Kw::_Try)
.add_keyword("catch", Kw::_Catch)
.add_keyword("if", Kw::_If)
.add_keyword("ifnot", Kw::_Ifnot)
.add_keyword("then", Kw::_Then)


@ -1102,6 +1102,36 @@ blk_fl::val parse_do_stmt(Lexer& lex, CodeBlob& code) {
return res & ~blk_fl::empty;
}
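// Parses `try <block> catch (<expr>) <block>`. The catch argument must be an lvalue pair whose
// second component is an int (the thrown exception code), matching tvm_error_type below; in
// FunC source this typically looks like:  try { ... } catch (x, n) { ... }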
blk_fl::val parse_try_catch_stmt(Lexer& lex, CodeBlob& code) {
lex.expect(_Try);
Op& try_catch_op = code.emplace_back(lex.cur().loc, Op::_TryCatch);
code.push_set_cur(try_catch_op.block0);
blk_fl::val res0 = parse_block_stmt(lex, code);
code.close_pop_cur(lex.cur().loc);
lex.expect(_Catch);
code.push_set_cur(try_catch_op.block1);
sym::open_scope(lex);
Expr* expr = parse_expr(lex, code, true);
expr->chk_lvalue(lex.cur());
TypeExpr* tvm_error_type = TypeExpr::new_tensor(TypeExpr::new_var(), TypeExpr::new_atomic(_Int));
try {
unify(expr->e_type, tvm_error_type);
} catch (UnifyError& ue) {
std::ostringstream os;
os << "`catch` arguments have incorrect type " << expr->e_type << ": " << ue;
lex.cur().error(os.str());
}
expr->predefine_vars();
expr->define_new_vars(code);
try_catch_op.left = expr->pre_compile(code);
assert(try_catch_op.left.size() == 2);
blk_fl::val res1 = parse_block_stmt(lex, code);
sym::close_scope(lex);
code.close_pop_cur(lex.cur().loc);
blk_fl::combine_parallel(res0, res1);
return res0;
}
blk_fl::val parse_if_stmt(Lexer& lex, CodeBlob& code, int first_lex = _If) {
SrcLocation loc{lex.cur().loc};
lex.expect(first_lex);
@ -1165,6 +1195,8 @@ blk_fl::val parse_stmt(Lexer& lex, CodeBlob& code) {
return parse_do_stmt(lex, code);
case _While:
return parse_while_stmt(lex, code);
case _Try:
return parse_try_catch_stmt(lex, code);
default: {
auto expr = parse_expr(lex, code);
expr->chk_rvalue(lex.cur());

113
crypto/func/test/tc1.fc Normal file

@ -0,0 +1,113 @@
() test1() impure {
int i = 3;
repeat (3) {
try {
int j = i;
i *= 2;
throw_unless(500, j <= 10);
} catch (x, e) {
i -= 2;
}
i += i + 1;
}
throw_unless(501, i == 43);
}
int divide_by_ten(int num) {
try {
throw_unless(500, num < 10);
} catch (x, e) {
return divide_by_ten(num - 10) + 1;
}
return 0;
}
() test2() impure {
int n = divide_by_ten(37);
throw_unless(502, n == 3);
}
(int, int) swap_int(int a, int b) {
try {
a = a * b;
b = a / b;
a = a / b;
return (a, b);
} catch (x, e) {
throw_unless(500, b == 0);
}
return (0, a);
}
() test3() impure {
int a = 0;
int b = 57;
try {
(a, b) = swap_int(a, b);
} catch (x, e) {
throw_unless(500, a == 0);
a = b;
b = 0;
}
throw_unless(503, (a == 57) & (b == 0));
}
int get_x(int x, int y) {
try {
} catch (x, e) {
return -1;
}
return x;
}
int get_y(int x, int y) {
try {
return -1;
} catch (x, e) {
}
return y;
}
() test4() impure {
throw_unless(504, get_x(3, 4) == 3);
throw_unless(504, get_y(3, 4) == -1);
}
(int, int, int, int, int) foo(int a, int b, int c, int d, int e) {
try {
throw(11);
} catch (x, y) {
a += 1;
b += 2;
c += 3;
d += 4;
e += 5;
}
return (a, b, c, d, e);
}
() test5() impure {
var (a, b, c, d, e) = foo(10, 20, 30, 40, 50);
throw_unless(505, (a == 11) & (b == 22) & (c == 33) & (d == 44) & (e == 55));
}
() test6() impure {
int a = 0;
int b = 0;
int c = 0;
try {
b = 3;
} catch (x, y) {
b = 12;
}
throw_unless(506, (a == 0) & (b == 3) & (c == 0));
}
() main() {
test1();
test2();
test3();
test4();
test5();
test6();
}

84
crypto/func/test/tc2.fc Normal file

@ -0,0 +1,84 @@
forall X -> int cast_to_int(X x) asm "NOP";
forall X -> builder cast_to_builder(X x) asm "NOP";
_ test1_body() {
int a = 3;
builder b = begin_cell();
int c = 1;
try {
c = 3;
throw_arg(b, 100);
} catch (x, y) {
return (a + c + y, cast_to_builder(x));
}
return (0, null());
}
() test1() impure {
var (x, y) = test1_body();
throw_unless(101, x == 104);
throw_unless(102, y.builder_refs() == y.builder_bits());
}
_ test2_body(int a, int b, int c) {
try {
try {
try {
try {
throw_arg_if(1, 201, a + b + c == 3);
throw_arg_if(2, 201, a == 3);
throw_arg_unless(1, 202, b == 4);
return 1;
} catch (y, x) {
int y = y.cast_to_int();
throw_arg_unless(y, x, x == 202);
throw_arg(y + 1, 200);
}
} catch (y, x) {
int y = y.cast_to_int();
throw_arg_if(y, x, x == 200);
throw_arg_if(y + 2, x, y < 2);
throw_arg_if(y + 3, 203, a + b + c == 4);
throw_arg_unless(y + 4, 204, b == 4);
return 3;
}
} catch (y, x) {
int y = y.cast_to_int();
try {
throw_arg_if(y, x, x == 200);
throw_arg_if(y + 1, 200, x == 201);
throw_arg_if(x - 203, 200, x == 202);
throw_arg_if(y, 200, x == 203);
throw_arg_if(a + 4, 205, a + b + c == 5);
throw_arg(7, 200);
} catch (v, u) {
int v = v.cast_to_int();
throw_arg_unless(v, u, u == 205);
if (c == 0) {
return b + 4;
}
throw_arg(v + 1, 200);
}
}
} catch (y, x) {
throw_unless(x, x == 200);
return y.cast_to_int();
}
return null();
}
() test2() impure {
throw_unless(201, test2_body(0, 4, 0) == 1);
throw_unless(202, test2_body(0, 5, 0) == 2);
throw_unless(203, test2_body(3, 4, 0) == 3);
throw_unless(204, test2_body(3, 0, 0) == 4);
throw_unless(205, test2_body(3, 1, 0) == 5);
throw_unless(206, test2_body(3, 2, 0) == 6);
throw_unless(207, test2_body(3, 1, 2) == 7);
throw_unless(208, test2_body(3, 1, 1) == 8);
}
() main() {
test1();
test2();
}


@ -1,132 +1,525 @@
;; Standard library for funC
;;
{-
# Tuple manipulation primitives
The names and the types are mostly self-explanatory.
See [polymorphism with forall](https://ton.org/docs/#/func/functions?id=polymorphism-with-forall)
for more info on the polymorphic functions.
Note that currently values of atomic type `tuple` can't be cast to composite tuple type (e.g. `[int, cell]`)
and vice versa.
-}
{-
# Lisp-style lists
Lists can be represented as nested 2-element tuples.
Empty list is conventionally represented as TVM `null` value (it can be obtained by calling [null()]).
For example, tuple `(1, (2, (3, null)))` represents list `[1, 2, 3]`. Elements of a list can be of different types.
-}
;;; Adds an element to the beginning of lisp-style list.
forall X -> tuple cons(X head, tuple tail) asm "CONS";
;;; Extracts the head and the tail of lisp-style list.
forall X -> (X, tuple) uncons(tuple list) asm "UNCONS";
;;; Extracts the tail and the head of lisp-style list.
forall X -> (tuple, X) list_next(tuple list) asm( -> 1 0) "UNCONS";
;;; Returns the head of lisp-style list.
forall X -> X car(tuple list) asm "CAR";
;;; Returns the tail of lisp-style list.
tuple cdr(tuple list) asm "CDR";
;;; Creates tuple with zero elements.
tuple empty_tuple() asm "NIL";
;;; Appends a value `x` to a `Tuple t = (x1, ..., xn)`, but only if the resulting `Tuple t' = (x1, ..., xn, x)`
;;; is of length at most 255. Otherwise throws a type check exception.
forall X -> tuple tpush(tuple t, X value) asm "TPUSH";
forall X -> (tuple, ()) ~tpush(tuple t, X value) asm "TPUSH";
;;; Creates a tuple of length one with given argument as element.
forall X -> [X] single(X x) asm "SINGLE";
;;; Unpacks a tuple of length one
forall X -> X unsingle([X] t) asm "UNSINGLE";
;;; Creates a tuple of length two with given arguments as elements.
forall X, Y -> [X, Y] pair(X x, Y y) asm "PAIR";
;;; Unpacks a tuple of length two
forall X, Y -> (X, Y) unpair([X, Y] t) asm "UNPAIR";
;;; Creates a tuple of length three with given arguments as elements.
forall X, Y, Z -> [X, Y, Z] triple(X x, Y y, Z z) asm "TRIPLE";
;;; Unpacks a tuple of length three
forall X, Y, Z -> (X, Y, Z) untriple([X, Y, Z] t) asm "UNTRIPLE";
;;; Creates a tuple of length four with given arguments as elements.
forall X, Y, Z, W -> [X, Y, Z, W] tuple4(X x, Y y, Z z, W w) asm "4 TUPLE";
;;; Unpacks a tuple of length four
forall X, Y, Z, W -> (X, Y, Z, W) untuple4([X, Y, Z, W] t) asm "4 UNTUPLE";
;;; Returns the first element of a tuple (with unknown element types).
forall X -> X first(tuple t) asm "FIRST";
;;; Returns the second element of a tuple (with unknown element types).
forall X -> X second(tuple t) asm "SECOND";
;;; Returns the third element of a tuple (with unknown element types).
forall X -> X third(tuple t) asm "THIRD";
;;; Returns the fourth element of a tuple (with unknown element types).
forall X -> X fourth(tuple t) asm "3 INDEX";
;;; Returns the first element of a pair tuple.
forall X, Y -> X pair_first([X, Y] p) asm "FIRST";
;;; Returns the second element of a pair tuple.
forall X, Y -> Y pair_second([X, Y] p) asm "SECOND";
;;; Returns the first element of a triple tuple.
forall X, Y, Z -> X triple_first([X, Y, Z] p) asm "FIRST";
;;; Returns the second element of a triple tuple.
forall X, Y, Z -> Y triple_second([X, Y, Z] p) asm "SECOND";
;;; Returns the third element of a triple tuple.
forall X, Y, Z -> Z triple_third([X, Y, Z] p) asm "THIRD";
;;; Push null element (casted to given type)
;;; By the TVM type `Null` FunC represents absence of a value of some atomic type.
;;; So `null` can actually have any atomic type.
forall X -> X null() asm "PUSHNULL";
;;; Moves a variable [x] to the top of the stack
forall X -> (X, ()) ~impure_touch(X x) impure asm "NOP";
;;; Returns the current Unix time as an Integer
int now() asm "NOW";
;;; Returns the internal address of the current smart contract as a Slice with a `MsgAddressInt`.
;;; If necessary, it can be parsed further using primitives such as [parse_std_addr].
slice my_address() asm "MYADDR";
;;; Returns the balance of the smart contract as a tuple consisting of an int
;;; (balance in nanotoncoins) and a `cell`
;;; (a dictionary with 32-bit keys representing the balance of "extra currencies")
;;; at the start of Computation Phase.
;;; Note that RAW primitives such as [send_raw_message] do not update this field.
[int, cell] get_balance() asm "BALANCE";
;;; Returns the logical time of the current transaction.
int cur_lt() asm "LTIME";
;;; Returns the starting logical time of the current block.
int block_lt() asm "BLOCKLT";
;;; Computes the representation hash of a `cell` [c] and returns it as a 256-bit unsigned integer `x`.
;;; Useful for signing and checking signatures of arbitrary entities represented by a tree of cells.
int cell_hash(cell c) asm "HASHCU";
;;; Computes the hash of a `slice s` and returns it as a 256-bit unsigned integer `x`.
;;; The result is the same as if an ordinary cell containing only data and references from `s` had been created
;;; and its hash computed by [cell_hash].
int slice_hash(slice s) asm "HASHSU";
;;; Computes sha256 of the data bits of `slice` [s]. If the bit length of `s` is not divisible by eight,
;;; throws a cell underflow exception. The hash value is returned as a 256-bit unsigned integer `x`.
int string_hash(slice s) asm "SHA256U";
{-
# Signature checks
-}
;;; Checks the Ed25519-`signature` of a `hash` (a 256-bit unsigned integer, usually computed as the hash of some data)
;;; using [public_key] (also represented by a 256-bit unsigned integer).
;;; The signature must contain at least 512 data bits; only the first 512 bits are used.
;;; The result is `1` if the signature is valid, `0` otherwise.
;;; Note that `CHKSIGNU` creates a 256-bit slice with the hash and calls `CHKSIGNS`.
;;; That is, if [hash] is computed as the hash of some data, these data are hashed twice,
;;; the second hashing occurring inside `CHKSIGNS`.
int check_signature(int hash, slice signature, int public_key) asm "CHKSIGNU";
;;; Checks whether [signature] is a valid Ed25519-signature of the data portion of `slice data` using `public_key`,
;;; similarly to [check_signature].
;;; If the bit length of [data] is not divisible by eight, throws a cell underflow exception.
;;; The verification of Ed25519 signatures is the standard one,
;;; with sha256 used to reduce [data] to the 256-bit number that is actually signed.
int check_data_signature(slice data, slice signature, int public_key) asm "CHKSIGNS";
{---
# Computation of boc size
The primitives below may be useful for computing storage fees of user-provided data.
-}
;;; Recursively computes the count of distinct cells `x`, data bits `y`, and cell references `z`
;;; in the DAG rooted at `cell` [c], effectively returning the total storage used by this DAG taking into account
;;; the identification of equal cells.
;;; The values of `x`, `y`, and `z` are computed by a depth-first traversal of this DAG,
;;; with a hash table of visited cell hashes used to prevent visits of already-visited cells.
;;; The total count of visited cells `x` cannot exceed non-negative [max_cells];
;;; otherwise the computation is aborted before visiting the `(max_cells + 1)`-st cell and
;;; a cell overflow exception (`8`) is thrown. If [c] is `null`, returns `x = y = z = 0`.
(int, int, int) compute_data_size(cell c, int max_cells) impure asm "CDATASIZE";
;;; Similar to [compute_data_size], but accepting a `slice` [s] instead of a `cell`.
;;; The returned value of `x` does not take into account the cell that contains the `slice` [s] itself;
;;; however, the data bits and the cell references of [s] are accounted for in `y` and `z`.
(int, int, int) slice_compute_data_size(slice s, int max_cells) impure asm "SDATASIZE";
;;; A quiet version of [compute_data_size]: on failure, returns `(null, null, null, 0)`
;;; instead of throwing an exception; on success, returns `(x, y, z, -1)`.
(int, int, int, int) compute_data_size?(cell c, int max_cells) asm "CDATASIZEQ NULLSWAPIFNOT2 NULLSWAPIFNOT";
;;; A quiet version of [slice_compute_data_size].
(int, int, int, int) slice_compute_data_size?(cell c, int max_cells) asm "SDATASIZEQ NULLSWAPIFNOT2 NULLSWAPIFNOT";
;;; Throws an exception with exit_code excno if cond is not 0 (commented out since it is implemented by the compiler)
;; () throw_if(int excno, int cond) impure asm "THROWARGIF";
{--
# Debug primitives
Only works for local TVM execution with debug level verbosity
-}
;;; Dumps the stack (at most the top 255 values) and shows the total stack depth.
() dump_stack() impure asm "DUMPSTK";
{-
# Persistent storage save and load
-}
;;; Returns the persistent contract storage cell. It can be parsed or modified with slice and builder primitives later.
cell get_data() asm "c4 PUSH";
;;; Sets `cell` [c] as persistent contract data. You can update persistent contract storage with this primitive.
() set_data(cell c) impure asm "c4 POP";
{-
# Continuation primitives
-}
;;; Usually `c3` has a continuation initialized by the whole code of the contract. It is used for function calls.
;;; The primitive returns the current value of `c3`.
cont get_c3() impure asm "c3 PUSH";
;;; Updates the current value of `c3`. Usually, it is used for updating smart contract code in run-time.
;;; Note that after execution of this primitive the current code
;;; (and the stack of recursive function calls) won't change,
;;; but any other function call will use a function from the new code.
() set_c3(cont c) impure asm "c3 POP";
;;; Transforms a `slice` [s] into a simple ordinary continuation `c`, with `c.code = s` and an empty stack and savelist.
cont bless(slice s) impure asm "BLESS";
() accept_message() impure asm "ACCEPT";
() set_gas_limit(int limit) impure asm "SETGASLIMIT";
() commit() impure asm "COMMIT";
() buy_gas(int gram) impure asm "BUYGAS";
{---
# Gas related primitives
-}
;;; Sets current gas limit `gl` to its maximal allowed value `gm`, and resets the gas credit `gc` to zero,
;;; decreasing the value of `gr` by `gc` in the process.
;;; In other words, the current smart contract agrees to buy some gas to finish the current transaction.
;;; This action is required to process external messages, which bring no value (hence no gas) with themselves.
;;;
;;; For more details check [accept_message effects](https://ton.org/docs/#/smart-contracts/accept).
() accept_message() impure asm "ACCEPT";
;;; Sets current gas limit `gl` to the minimum of limit and `gm`, and resets the gas credit `gc` to zero.
;;; If the gas consumed so far (including the present instruction) exceeds the resulting value of `gl`,
;;; an (unhandled) out of gas exception is thrown before setting new gas limits.
;;; Notice that [set_gas_limit] with an argument `limit ≥ 2^63 - 1` is equivalent to [accept_message].
() set_gas_limit(int limit) impure asm "SETGASLIMIT";
;;; Commits the current state of registers `c4` (“persistent data”) and `c5` (“actions”)
;;; so that the current execution is considered “successful” with the saved values even if an exception
;;; in Computation Phase is thrown later.
() commit() impure asm "COMMIT";
;;; Not implemented
;;() buy_gas(int gram) impure asm "BUYGAS";
;;; Computes the amount of gas that can be bought for `amount` nanoTONs,
;;; and sets `gl` accordingly in the same way as [set_gas_limit].
() buy_gas(int amount) impure asm "BUYGAS";
;;; Computes the minimum of two integers [x] and [y].
int min(int x, int y) asm "MIN";
;;; Computes the maximum of two integers [x] and [y].
int max(int x, int y) asm "MAX";
;;; Sorts two integers.
(int, int) minmax(int x, int y) asm "MINMAX";
;;; Computes the absolute value of an integer [x].
int abs(int x) asm "ABS";
{-
# Slice primitives
It is said that a primitive _loads_ some data,
if it returns the data and the remainder of the slice
(so it can also be used as [modifying method](https://ton.org/docs/#/func/statements?id=modifying-methods)).
It is said that a primitive _preloads_ some data, if it returns only the data
(it can be used as [non-modifying method](https://ton.org/docs/#/func/statements?id=non-modifying-methods)).
Unless otherwise stated, loading and preloading primitives read the data from a prefix of the slice.
-}
;;; Converts a `cell` [c] into a `slice`. Notice that [c] must be either an ordinary cell,
;;; or an exotic cell (see [TVM.pdf](https://ton-blockchain.github.io/docs/tvm.pdf), 3.1.2)
;;; which is automatically loaded to yield an ordinary cell `c'`, converted into a `slice` afterwards.
slice begin_parse(cell c) asm "CTOS";
;;; Checks if [s] is empty. If not, throws an exception.
() end_parse(slice s) impure asm "ENDS";
;;; Loads the first reference from the slice.
(slice, cell) load_ref(slice s) asm( -> 1 0) "LDREF";
;;; Preloads the first reference from the slice.
cell preload_ref(slice s) asm "PLDREF";
{- The functions below are commented out because they are implemented at the compiler level for optimisation -}
;;; Loads a signed [len]-bit integer from a slice [s].
;; (slice, int) ~load_int(slice s, int len) asm(s len -> 1 0) "LDIX";
;;; Loads an unsigned [len]-bit integer from a slice [s].
;; (slice, int) ~load_uint(slice s, int len) asm( -> 1 0) "LDUX";
;;; Preloads a signed [len]-bit integer from a slice [s].
;; int preload_int(slice s, int len) asm "PLDIX";
;;; Preloads an unsigned [len]-bit integer from a slice [s].
;; int preload_uint(slice s, int len) asm "PLDUX";
;;; Loads the first `0 ≤ len ≤ 1023` bits from slice [s] into a separate `slice s''`.
;; (slice, slice) load_bits(slice s, int len) asm(s len -> 1 0) "LDSLICEX";
;;; Preloads the first `0 ≤ len ≤ 1023` bits from slice [s] into a separate `slice s''`.
;; slice preload_bits(slice s, int len) asm "PLDSLICEX";
;;; Loads serialized amount of TonCoins (any unsigned integer up to `2^128 - 1`).
(slice, int) load_grams(slice s) asm( -> 1 0) "LDGRAMS";
(slice, int) load_coins(slice s) asm( -> 1 0) "LDGRAMS";
;;; Returns all but the first `0 ≤ len ≤ 1023` bits of `slice` [s].
slice skip_bits(slice s, int len) asm "SDSKIPFIRST";
(slice, ()) ~skip_bits(slice s, int len) asm "SDSKIPFIRST";
;;; Returns the first `0 ≤ len ≤ 1023` bits of `slice` [s].
slice first_bits(slice s, int len) asm "SDCUTFIRST";
;;; Returns all but the last `0 ≤ len ≤ 1023` bits of `slice` [s].
slice skip_last_bits(slice s, int len) asm "SDSKIPLAST";
(slice, ()) ~skip_last_bits(slice s, int len) asm "SDSKIPLAST";
;;; Returns the last `0 ≤ len ≤ 1023` bits of `slice` [s].
slice slice_last(slice s, int len) asm "SDCUTLAST";
;;; Loads a dictionary `D` (HashMapE) from `slice` [s].
;;; (returns `null` if `nothing` constructor is used).
(slice, cell) load_dict(slice s) asm( -> 1 0) "LDDICT";
;;; Preloads a dictionary `D` from `slice` [s].
cell preload_dict(slice s) asm "PLDDICT";
;;; Loads a dictionary as [load_dict], but returns only the remainder of the slice.
slice skip_dict(slice s) asm "SKIPDICT";
;;; Loads (Maybe ^Cell) from `slice` [s].
;;; In other words, loads 1 bit; if it is true,
;;; loads the first ref and returns it together with the slice remainder,
;;; otherwise returns `null` and the slice remainder.
(slice, cell) load_maybe_ref(slice s) asm( -> 1 0) "LDOPTREF";
cell preload_maybe_ref(slice s) asm "PLDOPTREF";
builder store_maybe_ref(builder b, cell c) asm(c b) "STOPTREF";
;;; Preloads (Maybe ^Cell) from `slice` [s].
cell preload_maybe_ref(slice s) asm "PLDOPTREF";
;;; Returns the depth of `cell` [c].
;;; If [c] has no references, then return `0`;
;;; otherwise the returned value is one plus the maximum of depths of cells referred to from [c].
;;; If [c] is a `null` instead of a cell, returns zero.
int cell_depth(cell c) asm "CDEPTH";
{-
# Slice size primitives
-}
;;; Returns the number of references in `slice` [s].
int slice_refs(slice s) asm "SREFS";
;;; Returns the number of data bits in `slice` [s].
int slice_bits(slice s) asm "SBITS";
;;; Returns both the number of data bits and the number of references in `slice` [s].
(int, int) slice_bits_refs(slice s) asm "SBITREFS";
;;; Checks whether a `slice` [s] is empty (i.e., contains no bits of data and no cell references).
int slice_empty?(slice s) asm "SEMPTY";
;;; Checks whether `slice` [s] has no bits of data.
int slice_data_empty?(slice s) asm "SDEMPTY";
;;; Checks whether `slice` [s] has no references.
int slice_refs_empty?(slice s) asm "SREMPTY";
;;; Returns the depth of `slice` [s].
;;; If [s] has no references, then returns `0`;
;;; otherwise the returned value is one plus the maximum of depths of cells referred to from [s].
int slice_depth(slice s) asm "SDEPTH";
{-
# Builder size primitives
-}
;;; Returns the number of cell references already stored in `builder` [b]
int builder_refs(builder b) asm "BREFS";
;;; Returns the number of data bits already stored in `builder` [b].
int builder_bits(builder b) asm "BBITS";
;;; Returns the depth of `builder` [b].
;;; If no cell references are stored in [b], then returns 0;
;;; otherwise the returned value is one plus the maximum of depths of cells referred to from [b].
int builder_depth(builder b) asm "BDEPTH";
{-
# Builder primitives
It is said that a primitive _stores_ a value `x` into a builder `b`
if it returns a modified version of the builder `b'` with the value `x` stored at the end of it.
It can be used as [non-modifying method](https://ton.org/docs/#/func/statements?id=non-modifying-methods).
All the primitives below first check whether there is enough space in the `builder`,
and only then check the range of the value being serialized.
-}
;;; Creates a new empty `builder`.
builder begin_cell() asm "NEWC";
;;; Converts a `builder` into an ordinary `cell`.
cell end_cell(builder b) asm "ENDC";
;;; Stores a reference to `cell` [c] into `builder` [b].
builder store_ref(builder b, cell c) asm(c b) "STREF";
;;; Stores an unsigned [len]-bit integer `x` into `b` for `0 ≤ len ≤ 256`.
;; builder store_uint(builder b, int x, int len) asm(x b len) "STUX";
;;; Stores a signed [len]-bit integer `x` into `b` for` 0 ≤ len ≤ 257`.
;; builder store_int(builder b, int x, int len) asm(x b len) "STIX";
;;; Stores `slice` [s] into `builder` [b]
builder store_slice(builder b, slice s) asm "STSLICER";
;;; Stores (serializes) an integer [x] in the range `0..2^128 - 1` into `builder` [b].
;;; The serialization of [x] consists of a 4-bit unsigned big-endian integer `l`,
;;; which is the smallest integer `l ≥ 0`, such that `x < 2^(8l)`,
;;; followed by an `8l`-bit unsigned big-endian representation of [x].
;;; If [x] does not belong to the supported range, a range check exception is thrown.
;;;
;;; Store amounts of TonCoins to the builder as VarUInteger 16
builder store_grams(builder b, int x) asm "STGRAMS";
builder store_coins(builder b, int x) asm "STGRAMS";
;;; Stores dictionary `D` represented by `cell` [c] or `null` into `builder` [b].
;;; In other words, stores a `1`-bit and a reference to [c] if [c] is not `null` and `0`-bit otherwise.
builder store_dict(builder b, cell c) asm(c b) "STDICT";
;;; Stores (Maybe ^Cell) to builder:
;;; if cell is null store 1 zero bit
;;; otherwise store 1 true bit and ref to cell
builder store_maybe_ref(builder b, cell c) asm(c b) "STOPTREF";
{-
# Address manipulation primitives
The address manipulation primitives listed below serialize and deserialize values according to the following TL-B scheme:
```TL-B
addr_none$00 = MsgAddressExt;
addr_extern$01 len:(## 8) external_address:(bits len)
= MsgAddressExt;
anycast_info$_ depth:(#<= 30) { depth >= 1 }
rewrite_pfx:(bits depth) = Anycast;
addr_std$10 anycast:(Maybe Anycast)
workchain_id:int8 address:bits256 = MsgAddressInt;
addr_var$11 anycast:(Maybe Anycast) addr_len:(## 9)
workchain_id:int32 address:(bits addr_len) = MsgAddressInt;
_ _:MsgAddressInt = MsgAddress;
_ _:MsgAddressExt = MsgAddress;
int_msg_info$0 ihr_disabled:Bool bounce:Bool bounced:Bool
src:MsgAddress dest:MsgAddressInt
value:CurrencyCollection ihr_fee:Grams fwd_fee:Grams
created_lt:uint64 created_at:uint32 = CommonMsgInfoRelaxed;
ext_out_msg_info$11 src:MsgAddress dest:MsgAddressExt
created_lt:uint64 created_at:uint32 = CommonMsgInfoRelaxed;
```
A deserialized `MsgAddress` is represented by a tuple `t` as follows:
- `addr_none` is represented by `t = (0)`,
i.e., a tuple containing exactly one integer equal to zero.
- `addr_extern` is represented by `t = (1, s)`,
where slice `s` contains the field `external_address`. In other words,
`t` is a pair (a tuple consisting of two entries), containing an integer equal to one and slice `s`.
- `addr_std` is represented by `t = (2, u, x, s)`,
where `u` is either a `null` (if `anycast` is absent) or a slice `s'` containing `rewrite_pfx` (if anycast is present).
Next, integer `x` is the `workchain_id`, and slice `s` contains the address.
- `addr_var` is represented by `t = (3, u, x, s)`,
where `u`, `x`, and `s` have the same meaning as for `addr_std`.
-}
;;; Loads from slice [s] the only prefix that is a valid `MsgAddress`,
;;; and returns both this prefix `s'` and the remainder `s''` of [s] as slices.
(slice, slice) load_msg_addr(slice s) asm( -> 1 0) "LDMSGADDR";
;;; Decomposes slice [s] containing a valid `MsgAddress` into a `tuple t` with separate fields of this `MsgAddress`.
;;; If [s] is not a valid `MsgAddress`, a cell deserialization exception is thrown.
tuple parse_addr(slice s) asm "PARSEMSGADDR";
;;; Parses slice [s] containing a valid `MsgAddressInt` (usually a `msg_addr_std`),
;;; applies rewriting from the anycast (if present) to the same-length prefix of the address,
;;; and returns both the workchain and the 256-bit address as integers.
;;; If the address is not 256-bit, or if [s] is not a valid serialization of `MsgAddressInt`,
;;; throws a cell deserialization exception.
(int, int) parse_std_addr(slice s) asm "REWRITESTDADDR";
;;; A variant of [parse_std_addr] that returns the (rewritten) address as a slice [s],
;;; even if it is not exactly 256 bit long (represented by a `msg_addr_var`).
(int, slice) parse_var_addr(slice s) asm "REWRITEVARADDR";
{-
# Dictionary primitives
-}
;;; Sets the value associated with [key_len]-bit key signed index in dictionary [dict] to [value] (cell),
;;; and returns the resulting dictionary.
cell idict_set_ref(cell dict, int key_len, int index, cell value) asm(value index dict key_len) "DICTISETREF";
(cell, ()) ~idict_set_ref(cell dict, int key_len, int index, cell value) asm(value index dict key_len) "DICTISETREF";
;;; Sets the value associated with [key_len]-bit key unsigned index in dictionary [dict] to [value] (cell),
;;; and returns the resulting dictionary.
cell udict_set_ref(cell dict, int key_len, int index, cell value) asm(value index dict key_len) "DICTUSETREF";
(cell, ()) ~udict_set_ref(cell dict, int key_len, int index, cell value) asm(value index dict key_len) "DICTUSETREF";
cell idict_get_ref(cell dict, int key_len, int index) asm(index dict key_len) "DICTIGETOPTREF";
(cell, int) idict_get_ref?(cell dict, int key_len, int index) asm(index dict key_len) "DICTIGETREF";
(cell, int) udict_get_ref?(cell dict, int key_len, int index) asm(index dict key_len) "DICTUGETREF";
(cell, int) idict_get_ref?(cell dict, int key_len, int index) asm(index dict key_len) "DICTIGETREF" "NULLSWAPIFNOT";
(cell, int) udict_get_ref?(cell dict, int key_len, int index) asm(index dict key_len) "DICTUGETREF" "NULLSWAPIFNOT";
(cell, cell) idict_set_get_ref(cell dict, int key_len, int index, cell value) asm(value index dict key_len) "DICTISETGETOPTREF";
(cell, cell) udict_set_get_ref(cell dict, int key_len, int index, cell value) asm(value index dict key_len) "DICTUSETGETOPTREF";
(cell, int) idict_delete?(cell dict, int key_len, int index) asm(index dict key_len) "DICTIDEL";
@ -185,24 +578,47 @@ cell dict_set_builder(cell dict, int key_len, slice index, builder value) asm(va
(int, slice, int) idict_get_nexteq?(cell dict, int key_len, int pivot) asm(pivot dict key_len -> 1 0 2) "DICTIGETNEXTEQ" "NULLSWAPIFNOT2";
(int, slice, int) idict_get_prev?(cell dict, int key_len, int pivot) asm(pivot dict key_len -> 1 0 2) "DICTIGETPREV" "NULLSWAPIFNOT2";
(int, slice, int) idict_get_preveq?(cell dict, int key_len, int pivot) asm(pivot dict key_len -> 1 0 2) "DICTIGETPREVEQ" "NULLSWAPIFNOT2";
;;; Creates an empty dictionary, which is actually a null value. Equivalent to PUSHNULL
cell new_dict() asm "NEWDICT";
;;; Checks whether a dictionary is empty. Equivalent to cell_null?.
int dict_empty?(cell c) asm "DICTEMPTY";
{- Prefix dictionary primitives -}
(slice, slice, slice, int) pfxdict_get?(cell dict, int key_len, slice key) asm(key dict key_len) "PFXDICTGETQ" "NULLSWAPIFNOT2";
(cell, int) pfxdict_set?(cell dict, int key_len, slice key, slice value) asm(value key dict key_len) "PFXDICTSET";
(cell, int) pfxdict_delete?(cell dict, int key_len, slice key) asm(key dict key_len) "PFXDICTDEL";
;;; Returns the value of the global configuration parameter with integer index `i` as a `cell` or `null` value.
cell config_param(int x) asm "CONFIGOPTPARAM";
;;; Checks whether c is a null. Note that FunC also has a polymorphic null? built-in.
int cell_null?(cell c) asm "ISNULL";
;;; Creates an output action which would reserve exactly amount nanotoncoins (if mode = 0), at most amount nanotoncoins (if mode = 2), or all but amount nanotoncoins (if mode = 1 or mode = 3), from the remaining balance of the account. It is roughly equivalent to creating an outbound message carrying amount nanotoncoins (or b - amount nanotoncoins, where b is the remaining balance) to oneself, so that the subsequent output actions would not be able to spend more money than the remainder. Bit +2 in mode means that the external action does not fail if the specified amount cannot be reserved; instead, all remaining balance is reserved. Bit +8 in mode means `amount <- -amount` before performing any further actions. Bit +4 in mode means that amount is increased by the original balance of the current account (before the compute phase), including all extra currencies, before performing any other checks and actions. Currently, amount must be a non-negative integer, and mode must be in the range 0..15.
() raw_reserve(int amount, int mode) impure asm "RAWRESERVE";
;;; Similar to raw_reserve, but also accepts a dictionary extra_amount (represented by a cell or null) with extra currencies. In this way currencies other than TonCoin can be reserved.
() raw_reserve_extra(int amount, cell extra_amount, int mode) impure asm "RAWRESERVEX";
;;; Sends a raw message contained in msg, which should contain a correctly serialized object Message X, with the only exception that the source address is allowed to have dummy value addr_none (to be automatically replaced with the current smart contract address), and ihr_fee, fwd_fee, created_lt and created_at fields can have arbitrary values (to be rewritten with correct values during the action phase of the current transaction). Integer parameter mode contains the flags. Currently mode = 0 is used for ordinary messages; mode = 128 is used for messages that are to carry all the remaining balance of the current smart contract (instead of the value originally indicated in the message); mode = 64 is used for messages that carry all the remaining value of the inbound message in addition to the value initially indicated in the new message (if bit 0 is not set, the gas fees are deducted from this amount); mode' = mode + 1 means that the sender wants to pay transfer fees separately; mode' = mode + 2 means that any errors arising while processing this message during the action phase should be ignored. Finally, mode' = mode + 32 means that the current account must be destroyed if its resulting balance is zero. This flag is usually employed together with +128.
() send_raw_message(cell msg, int mode) impure asm "SENDRAWMSG";
;;; Creates an output action that would change this smart contract code to that given by cell new_code. Notice that this change will take effect only after the successful termination of the current run of the smart contract
() set_code(cell new_code) impure asm "SETCODE";
;;; Generates a new pseudo-random unsigned 256-bit integer x. The algorithm is as follows: if r is the old value of the random seed, considered as a 32-byte array (by constructing the big-endian representation of an unsigned 256-bit integer), then its sha512(r) is computed; the first 32 bytes of this hash are stored as the new value r' of the random seed, and the remaining 32 bytes are returned as the next random value x.
int random() impure asm "RANDU256";
;;; Generates a new pseudo-random integer z in the range 0..range-1 (or range..-1, if range < 0). More precisely, an unsigned random value x is generated as in random; then z := x * range / 2^256 is computed.
int rand(int range) impure asm "RAND";
;;; Returns the current random seed as an unsigned 256-bit Integer.
int get_seed() impure asm "RANDSEED";
int set_seed() impure asm "SETRAND";
;;; Sets the random seed to unsigned 256-bit seed.
() set_seed(int) impure asm "SETRAND";
;;; Mixes unsigned 256-bit integer x into the random seed r by setting the random seed to sha256 of the concatenation of two 32-byte strings: the first with the big-endian representation of the old seed r, and the second with the big-endian representation of x.
() randomize(int x) impure asm "ADDRAND";
;;; Equivalent to randomize(cur_lt());.
() randomize_lt() impure asm "LTIME" "ADDRAND";
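;; Usage sketch (not part of stdlib.fc; helper name is hypothetical): draw a pseudo-random
;; value in 0..n-1 after mixing the current logical time into the seed.
int roll(int n) impure {
  randomize_lt();    ;; make the seed depend on cur_lt()
  return rand(n);    ;; z := x * n / 2^256 for a fresh random x
}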
;;; Checks whether the data parts of two slices coincide.
int equal_slice_bits (slice a, slice b) asm "SDEQ";
;;; Concatenates two builders
builder store_builder(builder to, builder from) asm "STBR";
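;; Usage sketches (not part of stdlib.fc; helper names are hypothetical): compare two addresses
;; by their data bits, and append one builder to another while serializing.
int same_address?(slice a, slice b) {
  return equal_slice_bits(a, b);            ;; -1 (true) iff the data parts coincide; refs are ignored
}

builder append_payload(builder body, builder payload) {
  return store_builder(body, payload);      ;; concatenates payload onto body
}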

View file

@ -1039,9 +1039,15 @@ bool CellStorageStat::add_used_storage(Ref<vm::CellSlice> cs_ref, bool kill_dup,
bool CellStorageStat::add_used_storage(const CellSlice& cs, bool kill_dup, unsigned skip_count_root) {
if (!(skip_count_root & 1)) {
++cells;
if (cells > limit_cells) {
return false;
}
}
if (!(skip_count_root & 2)) {
bits += cs.size();
if (bits > limit_bits) {
return false;
}
}
for (unsigned i = 0; i < cs.size_refs(); i++) {
if (!add_used_storage(cs.prefetch_ref(i), kill_dup)) {
@ -1054,9 +1060,15 @@ bool CellStorageStat::add_used_storage(const CellSlice& cs, bool kill_dup, unsig
bool CellStorageStat::add_used_storage(CellSlice&& cs, bool kill_dup, unsigned skip_count_root) {
if (!(skip_count_root & 1)) {
++cells;
if (cells > limit_cells) {
return false;
}
}
if (!(skip_count_root & 2)) {
bits += cs.size();
if (bits > limit_bits) {
return false;
}
}
while (cs.size_refs()) {
if (!add_used_storage(cs.fetch_ref(), kill_dup)) {

View file

@ -117,8 +117,13 @@ struct CellStorageStat {
}
void clear() {
cells = bits = public_cells = 0;
clear_limit();
clear_seen();
}
void clear_limit() {
limit_cells = std::numeric_limits<unsigned long long>::max();
limit_bits = std::numeric_limits<unsigned long long>::max();
}
bool compute_used_storage(Ref<vm::CellSlice> cs_ref, bool kill_dup = true, unsigned skip_count_root = 0);
bool compute_used_storage(const CellSlice& cs, bool kill_dup = true, unsigned skip_count_root = 0);
bool compute_used_storage(CellSlice&& cs, bool kill_dup = true, unsigned skip_count_root = 0);
@ -128,6 +133,9 @@ struct CellStorageStat {
bool add_used_storage(const CellSlice& cs, bool kill_dup = true, unsigned skip_count_root = 0);
bool add_used_storage(CellSlice&& cs, bool kill_dup = true, unsigned skip_count_root = 0);
bool add_used_storage(Ref<vm::Cell> cell, bool kill_dup = true, unsigned skip_count_root = 0);
unsigned long long limit_cells = std::numeric_limits<unsigned long long>::max();
unsigned long long limit_bits = std::numeric_limits<unsigned long long>::max();
};
struct VmStorageStat {

View file

@ -293,7 +293,9 @@ class DynamicBagOfCellsDbImpl : public DynamicBagOfCellsDb, private ExtCellCreat
return db_->load_cell(hash);
}
TRY_RESULT(load_result, cell_loader_->load(hash, true, *this));
CHECK(load_result.status == CellLoader::LoadResult::Ok);
if (load_result.status != CellLoader::LoadResult::Ok) {
return td::Status::Error("cell not found");
}
return std::move(load_result.cell());
}

View file

@ -97,6 +97,7 @@ class VmState final : public VmStateInterface {
int stack_trace{0}, debug_off{0};
bool chksig_always_succeed{false};
td::ConstBitPtr missing_library{0};
td::uint16 max_data_depth = 512; // Default value
public:
enum {
@ -108,8 +109,7 @@ class VmState final : public VmStateInterface {
implicit_jmpref_gas_price = 10,
implicit_ret_gas_price = 5,
free_stack_depth = 32,
stack_entry_gas_price = 1,
max_data_depth = 512
stack_entry_gas_price = 1
};
VmState();
VmState(Ref<CellSlice> _code);
@ -325,6 +325,9 @@ class VmState final : public VmStateInterface {
td::ConstBitPtr get_missing_library() const {
return missing_library;
}
void set_max_data_depth(td::uint16 depth) {
max_data_depth = depth;
}
private:
void init_cregs(bool same_c3 = false, bool push_0 = true);

View file

@ -30,3 +30,8 @@ target_include_directories(dht PUBLIC
)
target_link_libraries(dht PRIVATE tdutils tdactor adnl tl_api)
add_executable(dht-ping-servers utils/dht-ping-servers.cpp)
target_link_libraries(dht-ping-servers PRIVATE tdutils tdactor adnl dht terminal)
add_executable(dht-resolve utils/dht-resolve.cpp)
target_link_libraries(dht-resolve PRIVATE tdutils tdactor adnl dht terminal)

View file

@ -66,38 +66,66 @@ td::uint32 DhtBucket::active_cnt() {
}
td::Status DhtBucket::add_full_node(DhtKeyId id, DhtNode newnode, td::actor::ActorId<adnl::Adnl> adnl,
adnl::AdnlNodeIdShort self_id) {
adnl::AdnlNodeIdShort self_id, td::int32 our_network_id, bool set_active) {
for (auto &node : active_nodes_) {
if (node && node->get_key() == id) {
return node->update_value(std::move(newnode), adnl, self_id);
if (set_active) {
return node->receive_ping(std::move(newnode), adnl, self_id);
} else {
return node->update_value(std::move(newnode), adnl, self_id);
}
}
}
for (auto &node : backup_nodes_) {
for (size_t i = 0; i < backup_nodes_.size(); ++i) {
auto &node = backup_nodes_[i];
if (node && node->get_key() == id) {
return node->update_value(std::move(newnode), adnl, self_id);
if (set_active) {
TRY_STATUS(node->receive_ping(std::move(newnode), adnl, self_id));
if (node->is_ready()) {
promote_node(i);
}
return td::Status::OK();
} else {
return node->update_value(std::move(newnode), adnl, self_id);
}
}
}
TRY_RESULT_PREFIX(N, DhtRemoteNode::create(std::move(newnode), max_missed_pings_), "failed to add new node: ");
for (auto &node : backup_nodes_) {
if (node == nullptr) {
node = std::move(N);
return td::Status::OK();
TRY_RESULT_PREFIX(N, DhtRemoteNode::create(std::move(newnode), max_missed_pings_, our_network_id),
"failed to add new node: ");
if (set_active) {
for (auto &node : active_nodes_) {
if (node == nullptr) {
node = std::move(N);
node->receive_ping();
return td::Status::OK();
}
}
}
for (auto &node : backup_nodes_) {
CHECK(node);
if (node->ready_from() == 0 && node->failed_from() + 60 < td::Time::now_cached()) {
node = std::move(N);
return td::Status::OK();
}
size_t idx = select_backup_node_to_drop();
if (idx < backup_nodes_.size()) {
backup_nodes_[idx] = std::move(N);
}
return td::Status::OK();
}
size_t DhtBucket::select_backup_node_to_drop() const {
size_t result = backup_nodes_.size();
for (size_t idx = 0; idx < backup_nodes_.size(); ++idx) {
const auto &node = backup_nodes_[idx];
if (node == nullptr) {
return idx;
}
if (node->ready_from() == 0 && node->failed_from() + 60 < td::Time::now_cached()) {
if (result == backup_nodes_.size() || node->failed_from() < backup_nodes_[result]->failed_from()) {
result = idx;
}
}
}
return result;
}
void DhtBucket::receive_ping(DhtKeyId id, DhtNode result, td::actor::ActorId<adnl::Adnl> adnl,
adnl::AdnlNodeIdShort self_id) {
for (auto &node : active_nodes_) {
@ -119,17 +147,9 @@ void DhtBucket::receive_ping(DhtKeyId id, DhtNode result, td::actor::ActorId<adn
}
void DhtBucket::demote_node(size_t idx) {
for (auto &node : backup_nodes_) {
if (node == nullptr) {
node = std::move(active_nodes_[idx]);
return;
}
}
for (auto &node : backup_nodes_) {
if (node->ready_from() == 0 && node->failed_from() + 60 < td::Time::now_cached()) {
node = std::move(active_nodes_[idx]);
return;
}
size_t new_idx = select_backup_node_to_drop();
if (new_idx < backup_nodes_.size()) {
backup_nodes_[new_idx] = std::move(active_nodes_[idx]);
}
active_nodes_[idx] = nullptr;
}
@ -150,7 +170,7 @@ void DhtBucket::check(bool client_only, td::actor::ActorId<adnl::Adnl> adnl, td:
size_t have_space = 0;
for (size_t i = 0; i < active_nodes_.size(); i++) {
auto &node = active_nodes_[i];
if (node && td::Time::now_cached() - node->last_ping_at() > ping_timeout_) {
if (node && td::Time::now_cached() - node->last_ping_at() > node->ping_interval()) {
node->send_ping(client_only, adnl, dht, src);
if (node->ready_from() == 0) {
demote_node(i);
@ -162,7 +182,7 @@ void DhtBucket::check(bool client_only, td::actor::ActorId<adnl::Adnl> adnl, td:
}
for (size_t i = 0; i < backup_nodes_.size(); i++) {
auto &node = backup_nodes_[i];
if (node && td::Time::now_cached() - node->last_ping_at() > ping_timeout_) {
if (node && td::Time::now_cached() - node->last_ping_at() > node->ping_interval()) {
node->send_ping(client_only, adnl, dht, src);
}
if (node && have_space > 0 && node->is_ready()) {
@ -200,6 +220,9 @@ DhtNodesList DhtBucket::export_nodes() const {
list.push_back(node->get_node());
}
}
if (list.size() > k_) {
list.list().resize(k_);
}
return list;
}

View file

@ -31,7 +31,6 @@ class DhtMember;
class DhtBucket {
private:
double ping_timeout_ = 60;
td::uint32 max_missed_pings_ = 3;
std::vector<std::unique_ptr<DhtRemoteNode>> active_nodes_;
@ -43,6 +42,7 @@ class DhtBucket {
// const DhtMember::PrintId &print_id);
void demote_node(size_t idx);
void promote_node(size_t idx);
size_t select_backup_node_to_drop() const;
public:
DhtBucket(td::uint32 k) : k_(k) {
@ -51,7 +51,7 @@ class DhtBucket {
}
td::uint32 active_cnt();
td::Status add_full_node(DhtKeyId id, DhtNode node, td::actor::ActorId<adnl::Adnl> adnl,
adnl::AdnlNodeIdShort self_id);
adnl::AdnlNodeIdShort self_id, td::int32 our_network_id, bool set_active = false);
void check(bool client_only, td::actor::ActorId<adnl::Adnl> adnl, td::actor::ActorId<DhtMember> node,
adnl::AdnlNodeIdShort src);
void receive_ping(DhtKeyId id, DhtNode result, td::actor::ActorId<adnl::Adnl> adnl, adnl::AdnlNodeIdShort self_id);

View file

@ -46,6 +46,7 @@ class DhtMemberImpl : public DhtMember {
DhtKeyId key_;
td::uint32 k_;
td::uint32 a_;
td::int32 network_id_{-1};
td::uint32 max_cache_time_ = 60;
td::uint32 max_cache_size_ = 100;
@ -66,6 +67,15 @@ class DhtMemberImpl : public DhtMember {
DhtKeyId last_republish_key_ = DhtKeyId::zero();
DhtKeyId last_check_key_ = DhtKeyId::zero();
adnl::AdnlNodeIdShort last_check_reverse_conn_ = adnl::AdnlNodeIdShort::zero();
struct ReverseConnection {
adnl::AdnlNodeIdShort dht_node_;
DhtKeyId key_id_;
td::Timestamp ttl_;
};
std::map<adnl::AdnlNodeIdShort, ReverseConnection> reverse_connections_;
std::set<adnl::AdnlNodeIdShort> our_reverse_connections_;
class Callback : public adnl::Adnl::Callback {
public:
@ -122,17 +132,33 @@ class DhtMemberImpl : public DhtMember {
void process_query(adnl::AdnlNodeIdShort src, ton_api::dht_store &query, td::Promise<td::BufferSlice> promise);
void process_query(adnl::AdnlNodeIdShort src, ton_api::dht_getSignedAddressList &query,
td::Promise<td::BufferSlice> promise);
void process_query(adnl::AdnlNodeIdShort src, ton_api::dht_registerReverseConnection &query,
td::Promise<td::BufferSlice> promise);
void process_query(adnl::AdnlNodeIdShort src, ton_api::dht_requestReversePing &query,
td::Promise<td::BufferSlice> promise);
public:
DhtMemberImpl(adnl::AdnlNodeIdShort id, std::string db_root, td::actor::ActorId<keyring::Keyring> keyring,
td::actor::ActorId<adnl::Adnl> adnl, td::uint32 k, td::uint32 a = 3, bool client_only = false)
: id_(id), key_{id_}, k_(k), a_(a), db_root_(db_root), keyring_(keyring), adnl_(adnl), client_only_(client_only) {
td::actor::ActorId<adnl::Adnl> adnl, td::int32 network_id, td::uint32 k, td::uint32 a = 3,
bool client_only = false)
: id_(id)
, key_{id_}
, k_(k)
, a_(a)
, network_id_(network_id)
, db_root_(db_root)
, keyring_(keyring)
, adnl_(adnl)
, client_only_(client_only) {
for (size_t i = 0; i < 256; i++) {
buckets_.emplace_back(k_);
}
}
void add_full_node(DhtKeyId id, DhtNode node) override;
void add_full_node(DhtKeyId id, DhtNode node) override {
add_full_node_impl(id, std::move(node));
}
void add_full_node_impl(DhtKeyId id, DhtNode node, bool set_active = false);
adnl::AdnlNodeIdShort get_id() const override {
return id_;
@ -143,6 +169,12 @@ class DhtMemberImpl : public DhtMember {
void set_value(DhtValue key_value, td::Promise<td::Unit> result) override;
td::uint32 distance(DhtKeyId key_id, td::uint32 max_value);
void register_reverse_connection(adnl::AdnlNodeIdFull client, td::Promise<td::Unit> promise) override;
void request_reverse_ping(adnl::AdnlNode target, adnl::AdnlNodeIdShort client,
td::Promise<td::Unit> promise) override;
void request_reverse_ping_cont(adnl::AdnlNode target, td::BufferSlice signature, adnl::AdnlNodeIdShort client,
td::Promise<td::Unit> promise);
td::Status store_in(DhtValue value) override;
void send_store(DhtValue value, td::Promise<td::Unit> promise);

View file

@ -23,31 +23,46 @@ namespace ton {
namespace dht {
td::Status DhtNode::update(tl_object_ptr<ton_api::dht_node> obj) {
td::Status DhtNode::update(tl_object_ptr<ton_api::dht_node> obj, td::int32 our_network_id) {
if (version_ && obj->version_ <= version_) {
return td::Status::Error(ErrorCode::notready, "too old version");
}
auto signature = std::move(obj->signature_);
auto B = serialize_tl_object(obj, true);
td::BufferSlice signature;
td::int32 network_id = -1;
if (obj->signature_.size() == 64) {
signature = std::move(obj->signature_);
} else if (obj->signature_.size() == 64 + 4) {
signature = td::BufferSlice{obj->signature_.as_slice().remove_prefix(4)};
network_id = *(td::uint32 *)obj->signature_.as_slice().remove_suffix(64).data();
} else {
return td::Status::Error(ErrorCode::notready, "invalid length of signature");
}
if (network_id != our_network_id && network_id != -1 && our_network_id != -1) {
// Remove (network_id != -1 && our_network_id != -1) after network update
return td::Status::Error(ErrorCode::notready, PSTRING() << "wrong network id (expected " << our_network_id
<< ", found " << network_id << ")");
}
TRY_RESULT(pub, adnl::AdnlNodeIdFull::create(obj->id_));
TRY_RESULT(addr_list, adnl::AdnlAddressList::create(std::move(obj->addr_list_)));
if (!addr_list.public_only()) {
return td::Status::Error(ErrorCode::notready, "dht node must have only public addresses");
}
if (!addr_list.size()) {
return td::Status::Error(ErrorCode::notready, "dht node must have >0 addresses");
}
DhtNode new_node{std::move(pub), std::move(addr_list), obj->version_, network_id, std::move(signature)};
TRY_STATUS(new_node.check_signature());
TRY_RESULT(E, pub.pubkey().create_encryptor());
TRY_STATUS(E->check_signature(B.as_slice(), signature.as_slice()));
id_ = pub;
addr_list_ = addr_list;
version_ = obj->version_;
signature_ = td::SharedSlice(signature.as_slice());
*this = std::move(new_node);
return td::Status::OK();
}
td::Status DhtNode::check_signature() const {
TRY_RESULT(enc, id_.pubkey().create_encryptor());
auto node2 = clone();
node2.signature_ = {};
TRY_STATUS_PREFIX(enc->check_signature(serialize_tl_object(node2.tl(), true).as_slice(), signature_.as_slice()),
"bad node signature: ");
return td::Status::OK();
}

View file

@ -22,6 +22,8 @@
#include "adnl/adnl-address-list.hpp"
#include "dht-types.h"
#include "auto/tl/ton_api.hpp"
#include "td/utils/overloaded.h"
namespace ton {
@ -32,26 +34,26 @@ class DhtNode {
adnl::AdnlNodeIdFull id_;
adnl::AdnlAddressList addr_list_;
td::int32 version_{0};
td::int32 network_id_{-1};
td::SharedSlice signature_;
public:
DhtNode() {
DhtNode() = default;
DhtNode(adnl::AdnlNodeIdFull id, adnl::AdnlAddressList addr_list, td::int32 version, td::int32 network_id, td::BufferSlice signature)
: id_(id), addr_list_(addr_list), version_(version), network_id_(network_id), signature_(signature.as_slice()) {
}
DhtNode(adnl::AdnlNodeIdFull id, adnl::AdnlAddressList addr_list, td::int32 version, td::BufferSlice signature)
: id_(id), addr_list_(addr_list), version_(version), signature_(signature.as_slice()) {
DhtNode(adnl::AdnlNodeIdFull id, adnl::AdnlAddressList addr_list, td::int32 version, td::int32 network_id, td::SharedSlice signature)
: id_(id), addr_list_(addr_list), version_(version), network_id_(network_id), signature_(std::move(signature)) {
}
DhtNode(adnl::AdnlNodeIdFull id, adnl::AdnlAddressList addr_list, td::int32 version, td::SharedSlice signature)
: id_(id), addr_list_(addr_list), version_(version), signature_(std::move(signature)) {
}
static td::Result<DhtNode> create(tl_object_ptr<ton_api::dht_node> obj) {
static td::Result<DhtNode> create(tl_object_ptr<ton_api::dht_node> obj, td::int32 our_network_id) {
if (obj->version_ == 0) {
return td::Status::Error(ErrorCode::protoviolation, "zero version");
}
DhtNode n;
TRY_STATUS(n.update(std::move(obj)));
TRY_STATUS(n.update(std::move(obj), our_network_id));
return std::move(n);
}
td::Status update(tl_object_ptr<ton_api::dht_node> obj);
td::Status update(tl_object_ptr<ton_api::dht_node> obj, td::int32 our_network_id);
DhtKeyId get_key() const {
CHECK(!id_.empty());
return DhtKeyId{id_.compute_short_id()};
@ -68,20 +70,30 @@ class DhtNode {
}
tl_object_ptr<ton_api::dht_node> tl() const {
return create_tl_object<ton_api::dht_node>(id_.tl(), addr_list_.tl(), version_, signature_.clone_as_buffer_slice());
td::BufferSlice signature_ext;
if (network_id_ == -1) {
signature_ext = signature_.clone_as_buffer_slice();
} else {
signature_ext = td::BufferSlice{4 + signature_.size()};
td::MutableSlice s = signature_ext.as_slice();
s.copy_from(td::Slice(reinterpret_cast<const td::uint8 *>(&network_id_), 4));
s.remove_prefix(4);
s.copy_from(signature_.as_slice());
}
return create_tl_object<ton_api::dht_node>(id_.tl(), addr_list_.tl(), version_, std::move(signature_ext));
}
DhtNode clone() const {
return DhtNode{id_, addr_list_, version_, signature_.clone()};
return DhtNode{id_, addr_list_, version_, network_id_, signature_.clone()};
}
td::Status check_signature() const;
};
class DhtNodesList {
public:
DhtNodesList() {
}
DhtNodesList(tl_object_ptr<ton_api::dht_nodes> R) {
DhtNodesList() = default;
DhtNodesList(tl_object_ptr<ton_api::dht_nodes> R, td::int32 our_network_id) {
for (auto &n : R->nodes_) {
auto N = DhtNode::create(std::move(n));
auto N = DhtNode::create(std::move(n), our_network_id);
if (N.is_ok()) {
list_.emplace_back(N.move_as_ok());
} else {

View file

@ -20,14 +20,11 @@
#include "td/utils/tl_storers.h"
#include "td/utils/crypto.h"
#include "td/utils/tl_parsers.h"
#include "td/utils/Random.h"
#include "td/utils/overloaded.h"
#include "td/utils/format.h"
#include "keys/encryptor.h"
#include "auto/tl/ton_api.hpp"
#include "dht-query.hpp"
@ -123,7 +120,7 @@ void DhtQueryFindNodes::on_result(td::Result<td::BufferSlice> R, adnl::AdnlNodeI
VLOG(DHT_WARNING) << this << ": incorrect result on dht.findNodes query from " << dst << ": "
<< Res.move_as_error();
} else {
add_nodes(DhtNodesList{Res.move_as_ok()});
add_nodes(DhtNodesList{Res.move_as_ok(), our_network_id()});
}
finish_query();
}
@ -149,6 +146,23 @@ void DhtQueryFindValue::send_one_query(adnl::AdnlNodeIdShort id) {
td::Timestamp::in(2.0 + td::Random::fast(0, 20) * 0.1), std::move(B));
}
void DhtQueryFindValue::send_one_query_nodes(adnl::AdnlNodeIdShort id) {
auto P = create_serialize_tl_object<ton_api::dht_findNode>(get_key().tl(), get_k());
td::BufferSlice B;
if (client_only_) {
B = std::move(P);
} else {
B = create_serialize_tl_object_suffix<ton_api::dht_query>(P.as_slice(), self_.tl());
}
auto Pr = td::PromiseCreator::lambda([SelfId = actor_id(this), dst = id](td::Result<td::BufferSlice> R) {
td::actor::send_closure(SelfId, &DhtQueryFindValue::on_result_nodes, std::move(R), dst);
});
td::actor::send_closure(adnl_, &adnl::Adnl::send_query, get_src(), id, "dht findValue", std::move(Pr),
td::Timestamp::in(2.0 + td::Random::fast(0, 20) * 0.1), std::move(B));
}
void DhtQueryFindValue::on_result(td::Result<td::BufferSlice> R, adnl::AdnlNodeIdShort dst) {
if (R.is_error()) {
VLOG(DHT_INFO) << this << ": failed find value query " << get_src() << "->" << dst << ": " << R.move_as_error();
@ -164,44 +178,72 @@ void DhtQueryFindValue::on_result(td::Result<td::BufferSlice> R, adnl::AdnlNodeI
}
bool need_stop = false;
bool send_get_nodes = false;
auto A = Res.move_as_ok();
ton_api::downcast_call(
*A.get(), td::overloaded(
[&](ton_api::dht_valueFound &v) {
auto valueR = DhtValue::create(std::move(v.value_), true);
if (valueR.is_error()) {
VLOG(DHT_WARNING) << this << ": received incorrect dht answer on find value query from " << dst
<< ": " << valueR.move_as_error();
return;
}
auto value = valueR.move_as_ok();
if (value.key_id() != key_) {
VLOG(DHT_WARNING) << this << ": received value for bad key on find value query from " << dst;
return;
}
promise_.set_value(std::move(value));
need_stop = true;
},
[&](ton_api::dht_valueNotFound &v) { add_nodes(DhtNodesList{std::move(v.nodes_)}); }));
*A, td::overloaded(
[&](ton_api::dht_valueFound &v) {
auto valueR = DhtValue::create(std::move(v.value_), true);
if (valueR.is_error()) {
VLOG(DHT_WARNING) << this << ": received incorrect dht answer on find value query from " << dst
<< ": " << valueR.move_as_error();
return;
}
auto value = valueR.move_as_ok();
if (value.key_id() != key_) {
VLOG(DHT_WARNING) << this << ": received value for bad key on find value query from " << dst;
return;
}
if (!value.check_is_acceptable()) {
send_get_nodes = true;
return;
}
promise_.set_value(std::move(value));
need_stop = true;
},
[&](ton_api::dht_valueNotFound &v) {
add_nodes(DhtNodesList{std::move(v.nodes_), our_network_id()});
}));
if (need_stop) {
stop();
} else if (send_get_nodes) {
send_one_query_nodes(dst);
} else {
finish_query();
}
}
void DhtQueryFindValue::on_result_nodes(td::Result<td::BufferSlice> R, adnl::AdnlNodeIdShort dst) {
if (R.is_error()) {
VLOG(DHT_INFO) << this << ": failed find nodes query " << get_src() << "->" << dst << ": " << R.move_as_error();
finish_query();
return;
}
auto Res = fetch_tl_object<ton_api::dht_nodes>(R.move_as_ok(), true);
if (Res.is_error()) {
VLOG(DHT_WARNING) << this << ": dropping incorrect answer on dht.findNodes query from " << dst << ": "
<< Res.move_as_error();
finish_query();
return;
}
auto r = Res.move_as_ok();
add_nodes(DhtNodesList{create_tl_object<ton_api::dht_nodes>(std::move(r->nodes_)), our_network_id()});
finish_query();
}
void DhtQueryFindValue::finish(DhtNodesList list) {
promise_.set_error(td::Status::Error(ErrorCode::notready, "dht key not found"));
}
DhtQueryStore::DhtQueryStore(DhtValue key_value, DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src,
DhtNodesList list, td::uint32 k, td::uint32 a, DhtNode self, bool client_only,
td::actor::ActorId<DhtMember> node, td::actor::ActorId<adnl::Adnl> adnl,
DhtNodesList list, td::uint32 k, td::uint32 a, td::int32 our_network_id, DhtNode self,
bool client_only, td::actor::ActorId<DhtMember> node, td::actor::ActorId<adnl::Adnl> adnl,
td::Promise<td::Unit> promise)
: print_id_(print_id)
, k_(k)
, a_(a)
, our_network_id_(our_network_id)
, promise_(std::move(promise))
, value_(std::move(key_value))
, list_(std::move(list))
@ -219,7 +261,8 @@ void DhtQueryStore::start_up() {
auto key = value_.key_id();
auto A = td::actor::create_actor<DhtQueryFindNodes>("FindNodesQuery", key, print_id_, src_, std::move(list_), k_, a_,
self_.clone(), client_only_, node_, adnl_, std::move(P));
our_network_id_, self_.clone(), client_only_, node_, adnl_,
std::move(P));
A.release();
}
@ -279,6 +322,133 @@ void DhtQueryStore::store_ready(td::Result<td::BufferSlice> R) {
}
}
DhtQueryRegisterReverseConnection::DhtQueryRegisterReverseConnection(
DhtKeyId key_id, adnl::AdnlNodeIdFull client, td::uint32 ttl, td::BufferSlice signature,
DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src, DhtNodesList list, td::uint32 k, td::uint32 a,
td::int32 our_network_id, DhtNode self, bool client_only, td::actor::ActorId<DhtMember> node,
td::actor::ActorId<adnl::Adnl> adnl, td::Promise<td::Unit> promise)
: print_id_(print_id)
, k_(k)
, a_(a)
, our_network_id_(our_network_id)
, promise_(std::move(promise))
, key_id_(key_id)
, list_(std::move(list))
, self_(std::move(self))
, client_only_(client_only) {
node_ = node;
adnl_ = adnl;
src_ = src;
query_ = create_serialize_tl_object<ton_api::dht_registerReverseConnection>(client.tl(), ttl, std::move(signature));
}
void DhtQueryRegisterReverseConnection::start_up() {
auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](td::Result<DhtNodesList> res) {
td::actor::send_closure(SelfId, &DhtQueryRegisterReverseConnection::send_queries, std::move(res));
});
auto A = td::actor::create_actor<DhtQueryFindNodes>("FindNodesQuery", key_id_, print_id_, src_, std::move(list_), k_,
a_, our_network_id_, self_.clone(), client_only_, node_, adnl_,
std::move(P));
A.release();
}
void DhtQueryRegisterReverseConnection::send_queries(td::Result<DhtNodesList> R) {
if (R.is_error()) {
auto S = R.move_as_error();
VLOG(DHT_NOTICE) << this << ": failed to get nearest nodes to " << key_id_ << ": " << S;
promise_.set_error(std::move(S));
stop();
return;
}
auto list = R.move_as_ok();
remaining_ = static_cast<td::uint32>(list.size());
if (remaining_ == 0) {
VLOG(DHT_NOTICE) << this << ": failed to get nearest nodes to " << key_id_ << ": no nodes";
promise_.set_error(td::Status::Error("no dht nodes"));
stop();
return;
}
for (auto &node : list.list()) {
auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](td::Result<td::BufferSlice> R) {
td::actor::send_closure(SelfId, &DhtQueryRegisterReverseConnection::ready, std::move(R));
});
td::actor::send_closure(adnl_, &adnl::Adnl::send_query, src_, node.adnl_id().compute_short_id(), "dht regrevcon",
std::move(P), td::Timestamp::in(2.0 + td::Random::fast(0, 20) * 0.1), query_.clone());
}
}
void DhtQueryRegisterReverseConnection::ready(td::Result<td::BufferSlice> R) {
if (R.is_error()) {
fail_++;
VLOG(DHT_INFO) << this << ": failed register reverse connection query: " << R.move_as_error();
} else {
auto R2 = fetch_tl_object<ton_api::dht_stored>(R.move_as_ok(), true);
if (R2.is_error()) {
fail_++;
VLOG(DHT_WARNING) << this << ": can not parse answer (expected dht.stored): " << R2.move_as_error();
} else {
success_++;
}
}
CHECK(remaining_ > 0);
remaining_--;
if (remaining_ == 0) {
if (success_ > 0) {
promise_.set_value(td::Unit());
} else {
promise_.set_result(td::Status::Error("failed to make actual query"));
}
stop();
}
}
void DhtQueryRequestReversePing::send_one_query(adnl::AdnlNodeIdShort id) {
td::BufferSlice B;
if (client_only_) {
B = query_.clone();
} else {
B = create_serialize_tl_object_suffix<ton_api::dht_query>(query_.as_slice(), self_.tl());
}
auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), dst = id](td::Result<td::BufferSlice> R) {
td::actor::send_closure(SelfId, &DhtQueryRequestReversePing::on_result, std::move(R), dst);
});
td::actor::send_closure(adnl_, &adnl::Adnl::send_query, get_src(), id, "dht requestReversePing", std::move(P),
td::Timestamp::in(2.0 + td::Random::fast(0, 20) * 0.1), std::move(B));
}
void DhtQueryRequestReversePing::on_result(td::Result<td::BufferSlice> R, adnl::AdnlNodeIdShort dst) {
if (R.is_error()) {
VLOG(DHT_INFO) << this << ": failed reverse ping query " << get_src() << "->" << dst << ": " << R.move_as_error();
finish_query();
return;
}
auto Res = fetch_tl_object<ton_api::dht_ReversePingResult>(R.move_as_ok(), true);
if (Res.is_error()) {
VLOG(DHT_WARNING) << this << ": dropping incorrect answer on dht.requestReversePing query from " << dst << ": "
<< Res.move_as_error();
finish_query();
return;
}
auto A = Res.move_as_ok();
ton_api::downcast_call(*A, td::overloaded(
[&](ton_api::dht_reversePingOk &v) {
promise_.set_value(td::Unit());
stop();
},
[&](ton_api::dht_clientNotFound &v) {
add_nodes(DhtNodesList{std::move(v.nodes_), our_network_id()});
finish_query();
}));
}
void DhtQueryRequestReversePing::finish(DhtNodesList list) {
promise_.set_error(td::Status::Error(ErrorCode::notready, "dht key not found"));
}
} // namespace dht
} // namespace ton

View file

@ -45,7 +45,7 @@ class DhtQuery : public td::actor::Actor {
public:
DhtQuery(DhtKeyId key, DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src, DhtNodesList list, td::uint32 k,
td::uint32 a, DhtNode self, bool client_only, td::actor::ActorId<DhtMember> node,
td::uint32 a, td::int32 our_network_id, DhtNode self, bool client_only, td::actor::ActorId<DhtMember> node,
td::actor::ActorId<adnl::Adnl> adnl)
: key_(key)
, self_(std::move(self))
@ -54,6 +54,7 @@ class DhtQuery : public td::actor::Actor {
, src_(src)
, k_(k)
, a_(a)
, our_network_id_(our_network_id)
, node_(node)
, adnl_(adnl) {
add_nodes(std::move(list));
@ -77,6 +78,9 @@ class DhtQuery : public td::actor::Actor {
td::uint32 get_k() const {
return k_;
}
td::int32 our_network_id() const {
return our_network_id_;
}
void start_up() override {
send_queries();
}
@ -91,6 +95,7 @@ class DhtQuery : public td::actor::Actor {
std::set<DhtKeyId> pending_ids_;
td::uint32 k_;
td::uint32 a_;
td::int32 our_network_id_;
td::actor::ActorId<DhtMember> node_;
td::uint32 active_queries_ = 0;
@ -104,9 +109,10 @@ class DhtQueryFindNodes : public DhtQuery {
public:
DhtQueryFindNodes(DhtKeyId key, DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src, DhtNodesList list,
td::uint32 k, td::uint32 a, DhtNode self, bool client_only, td::actor::ActorId<DhtMember> node,
td::actor::ActorId<adnl::Adnl> adnl, td::Promise<DhtNodesList> promise)
: DhtQuery(key, print_id, src, std::move(list), k, a, std::move(self), client_only, node, adnl)
td::uint32 k, td::uint32 a, td::int32 our_network_id, DhtNode self, bool client_only,
td::actor::ActorId<DhtMember> node, td::actor::ActorId<adnl::Adnl> adnl,
td::Promise<DhtNodesList> promise)
: DhtQuery(key, print_id, src, std::move(list), k, a, our_network_id, std::move(self), client_only, node, adnl)
, promise_(std::move(promise)) {
}
void send_one_query(adnl::AdnlNodeIdShort id) override;
@ -123,13 +129,16 @@ class DhtQueryFindValue : public DhtQuery {
public:
DhtQueryFindValue(DhtKeyId key, DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src, DhtNodesList list,
td::uint32 k, td::uint32 a, DhtNode self, bool client_only, td::actor::ActorId<DhtMember> node,
td::actor::ActorId<adnl::Adnl> adnl, td::Promise<DhtValue> promise)
: DhtQuery(key, print_id, src, std::move(list), k, a, std::move(self), client_only, node, adnl)
td::uint32 k, td::uint32 a, td::int32 our_network_id, DhtNode self, bool client_only,
td::actor::ActorId<DhtMember> node, td::actor::ActorId<adnl::Adnl> adnl,
td::Promise<DhtValue> promise)
: DhtQuery(key, print_id, src, std::move(list), k, a, our_network_id, std::move(self), client_only, node, adnl)
, promise_(std::move(promise)) {
}
void send_one_query(adnl::AdnlNodeIdShort id) override;
void send_one_query_nodes(adnl::AdnlNodeIdShort id);
void on_result(td::Result<td::BufferSlice> R, adnl::AdnlNodeIdShort dst);
void on_result_nodes(td::Result<td::BufferSlice> R, adnl::AdnlNodeIdShort dst);
void finish(DhtNodesList list) override;
std::string get_name() const override {
return "find value";
@ -141,6 +150,7 @@ class DhtQueryStore : public td::actor::Actor {
DhtMember::PrintId print_id_;
td::uint32 k_;
td::uint32 a_;
td::int32 our_network_id_;
td::Promise<td::Unit> promise_;
td::actor::ActorId<DhtMember> node_;
td::actor::ActorId<adnl::Adnl> adnl_;
@ -155,7 +165,7 @@ class DhtQueryStore : public td::actor::Actor {
public:
DhtQueryStore(DhtValue key_value, DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src, DhtNodesList list,
td::uint32 k, td::uint32 a, DhtNode self, bool client_only, td::actor::ActorId<DhtMember> node,
td::uint32 k, td::uint32 a, td::int32 our_network_id, DhtNode self, bool client_only, td::actor::ActorId<DhtMember> node,
td::actor::ActorId<adnl::Adnl> adnl, td::Promise<td::Unit> promise);
void send_stores(td::Result<DhtNodesList> res);
void store_ready(td::Result<td::BufferSlice> res);
@ -165,6 +175,64 @@ class DhtQueryStore : public td::actor::Actor {
}
};
class DhtQueryRegisterReverseConnection : public td::actor::Actor {
private:
DhtMember::PrintId print_id_;
td::uint32 k_;
td::uint32 a_;
td::int32 our_network_id_;
td::Promise<td::Unit> promise_;
td::actor::ActorId<DhtMember> node_;
td::actor::ActorId<adnl::Adnl> adnl_;
adnl::AdnlNodeIdShort src_;
DhtKeyId key_id_;
td::BufferSlice query_;
td::uint32 success_ = 0;
td::uint32 fail_ = 0;
td::uint32 remaining_;
DhtNodesList list_;
DhtNode self_;
bool client_only_;
public:
DhtQueryRegisterReverseConnection(DhtKeyId key_id, adnl::AdnlNodeIdFull client, td::uint32 ttl,
td::BufferSlice signature, DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src,
DhtNodesList list, td::uint32 k, td::uint32 a, td::int32 our_network_id,
DhtNode self, bool client_only, td::actor::ActorId<DhtMember> node,
td::actor::ActorId<adnl::Adnl> adnl, td::Promise<td::Unit> promise);
void send_queries(td::Result<DhtNodesList> R);
void ready(td::Result<td::BufferSlice> R);
void start_up() override;
DhtMember::PrintId print_id() const {
return print_id_;
}
};
class DhtQueryRequestReversePing : public DhtQuery {
private:
td::Promise<td::Unit> promise_;
td::BufferSlice query_;
public:
DhtQueryRequestReversePing(adnl::AdnlNodeIdShort client, adnl::AdnlNode target, td::BufferSlice signature,
DhtMember::PrintId print_id, adnl::AdnlNodeIdShort src, DhtNodesList list, td::uint32 k,
td::uint32 a, td::int32 our_network_id, DhtNode self, bool client_only,
td::actor::ActorId<DhtMember> node, td::actor::ActorId<adnl::Adnl> adnl,
td::Promise<td::Unit> promise)
: DhtQuery(DhtMember::get_reverse_connection_key(client).compute_key_id(), print_id, src, std::move(list), k, a,
our_network_id, std::move(self), client_only, node, adnl)
, promise_(std::move(promise))
, query_(create_serialize_tl_object<ton_api::dht_requestReversePing>(target.tl(), std::move(signature),
client.bits256_value(), k)) {
}
void send_one_query(adnl::AdnlNodeIdShort id) override;
void on_result(td::Result<td::BufferSlice> R, adnl::AdnlNodeIdShort dst);
void finish(DhtNodesList list) override;
std::string get_name() const override {
return "request remote ping";
}
};
inline td::StringBuilder &operator<<(td::StringBuilder &sb, const DhtQuery &dht) {
sb << dht.print_id();
return sb;

View file

@ -20,13 +20,10 @@
#include "td/utils/tl_storers.h"
#include "td/utils/crypto.h"
#include "td/utils/tl_parsers.h"
#include "td/utils/Random.h"
#include "td/utils/format.h"
#include "keys/encryptor.h"
#include "auto/tl/ton_api.hpp"
#include "dht-remote-node.hpp"
@ -35,28 +32,43 @@ namespace ton {
namespace dht {
static const double PING_INTERVAL_DEFAULT = 60.0;
static const double PING_INTERVAL_MULTIPLIER = 1.1;
static const double PING_INTERVAL_MAX = 3600.0 * 4;
DhtRemoteNode::DhtRemoteNode(DhtNode node, td::uint32 max_missed_pings, td::int32 our_network_id)
: node_(std::move(node))
, max_missed_pings_(max_missed_pings)
, our_network_id_(our_network_id)
, ping_interval_(PING_INTERVAL_DEFAULT) {
failed_from_ = td::Time::now_cached();
id_ = node_.get_key();
}
td::Status DhtRemoteNode::receive_ping(DhtNode node, td::actor::ActorId<adnl::Adnl> adnl,
adnl::AdnlNodeIdShort self_id) {
TRY_STATUS(update_value(std::move(node), adnl, self_id));
receive_ping();
return td::Status::OK();
}
void DhtRemoteNode::receive_ping() {
missed_pings_ = 0;
ping_interval_ = PING_INTERVAL_DEFAULT;
if (ready_from_ == 0) {
ready_from_ = td::Time::now_cached();
}
return td::Status::OK();
}
td::Status DhtRemoteNode::update_value(DhtNode node, td::actor::ActorId<adnl::Adnl> adnl,
adnl::AdnlNodeIdShort self_id) {
CHECK(node.adnl_id() == node_.adnl_id());
if (node.adnl_id() != node_.adnl_id()) {
return td::Status::Error("Wrong adnl id");
}
if (node.version() <= node_.version()) {
return td::Status::OK();
}
TRY_RESULT(enc, node.adnl_id().pubkey().create_encryptor());
auto tl = node.tl();
auto sig = std::move(tl->signature_);
TRY_STATUS_PREFIX(enc->check_signature(serialize_tl_object(tl, true).as_slice(), sig.as_slice()),
"bad node signature: ");
TRY_STATUS(node.check_signature());
node_ = std::move(node);
td::actor::send_closure(adnl, &adnl::Adnl::add_peer, self_id, node_.adnl_id(), node_.addr_list());
@ -66,22 +78,25 @@ td::Status DhtRemoteNode::update_value(DhtNode node, td::actor::ActorId<adnl::Ad
void DhtRemoteNode::send_ping(bool client_only, td::actor::ActorId<adnl::Adnl> adnl, td::actor::ActorId<DhtMember> node,
adnl::AdnlNodeIdShort src) {
missed_pings_++;
if (missed_pings_ > max_missed_pings_ && ready_from_ > 0) {
ready_from_ = 0;
failed_from_ = td::Time::now_cached();
if (missed_pings_ > max_missed_pings_) {
ping_interval_ = std::min(ping_interval_ * PING_INTERVAL_MULTIPLIER, PING_INTERVAL_MAX);
if (ready_from_ > 0) {
ready_from_ = 0;
failed_from_ = td::Time::now_cached();
}
}
last_ping_at_ = td::Time::now_cached();
td::actor::send_closure(adnl, &adnl::Adnl::add_peer, src, node_.adnl_id(), node_.addr_list());
auto P = td::PromiseCreator::lambda([key = id_, id = node_.adnl_id().compute_short_id(), client_only, node, src,
adnl](td::Result<DhtNode> R) mutable {
auto P = td::PromiseCreator::lambda([key = id_, id = node_.adnl_id().compute_short_id(), client_only, node, src, adnl,
our_network_id = our_network_id_](td::Result<DhtNode> R) mutable {
if (R.is_error()) {
LOG(ERROR) << "[dht]: failed to get self node";
return;
}
auto P = td::PromiseCreator::lambda([key, node, adnl](td::Result<td::BufferSlice> R) {
auto P = td::PromiseCreator::lambda([key, node, adnl, our_network_id](td::Result<td::BufferSlice> R) {
if (R.is_error()) {
VLOG(DHT_INFO) << "[dht]: received error for query to " << key << ": " << R.move_as_error();
return;
@ -89,7 +104,7 @@ void DhtRemoteNode::send_ping(bool client_only, td::actor::ActorId<adnl::Adnl> a
auto F = fetch_tl_object<ton_api::dht_node>(R.move_as_ok(), true);
if (F.is_ok()) {
auto N = DhtNode::create(F.move_as_ok());
auto N = DhtNode::create(F.move_as_ok(), our_network_id);
if (N.is_ok()) {
td::actor::send_closure(node, &DhtMember::receive_ping, key, N.move_as_ok());
} else {
@ -123,7 +138,8 @@ adnl::AdnlNodeIdFull DhtRemoteNode::get_full_id() const {
return node_.adnl_id();
}
td::Result<std::unique_ptr<DhtRemoteNode>> DhtRemoteNode::create(DhtNode node, td::uint32 max_missed_pings) {
td::Result<std::unique_ptr<DhtRemoteNode>> DhtRemoteNode::create(DhtNode node, td::uint32 max_missed_pings,
td::int32 our_network_id) {
TRY_RESULT(enc, node.adnl_id().pubkey().create_encryptor());
auto tl = node.tl();
auto sig = std::move(tl->signature_);
@ -131,7 +147,7 @@ td::Result<std::unique_ptr<DhtRemoteNode>> DhtRemoteNode::create(DhtNode node, t
TRY_STATUS_PREFIX(enc->check_signature(serialize_tl_object(tl, true).as_slice(), sig.as_slice()),
"bad node signature: ");
return std::make_unique<DhtRemoteNode>(std::move(node), max_missed_pings);
return std::make_unique<DhtRemoteNode>(std::move(node), max_missed_pings, our_network_id);
}
} // namespace dht

View file

@ -40,19 +40,18 @@ class DhtRemoteNode {
DhtNode node_;
td::uint32 max_missed_pings_;
td::int32 our_network_id_;
td::uint32 missed_pings_ = 0;
double last_ping_at_ = 0;
double ready_from_ = 0;
double failed_from_ = 0;
double ping_interval_;
td::int32 version_;
public:
DhtRemoteNode(DhtNode node, td::uint32 max_missed_pings)
: node_(std::move(node)), max_missed_pings_(max_missed_pings) {
failed_from_ = td::Time::now_cached();
id_ = node_.get_key();
}
static td::Result<std::unique_ptr<DhtRemoteNode>> create(DhtNode node, td::uint32 max_missed_pings);
DhtRemoteNode(DhtNode node, td::uint32 max_missed_pings, td::int32 our_network_id);
static td::Result<std::unique_ptr<DhtRemoteNode>> create(DhtNode node, td::uint32 max_missed_pings,
td::int32 our_network_id);
DhtNode get_node() const {
return node_.clone();
}
@ -76,9 +75,13 @@ class DhtRemoteNode {
double last_ping_at() const {
return last_ping_at_;
}
double ping_interval() const {
return ping_interval_;
}
void send_ping(bool client_only, td::actor::ActorId<adnl::Adnl> adnl, td::actor::ActorId<DhtMember> node,
adnl::AdnlNodeIdShort src);
td::Status receive_ping(DhtNode node, td::actor::ActorId<adnl::Adnl> adnl, adnl::AdnlNodeIdShort self_id);
void receive_ping();
td::Status update_value(DhtNode node, td::actor::ActorId<adnl::Adnl> adnl, adnl::AdnlNodeIdShort self_id);
};

View file

@ -209,6 +209,10 @@ td::Status DhtValue::check() const {
return key_.update_rule()->check_value(*this);
}
bool DhtValue::check_is_acceptable() const {
return key_.update_rule()->check_is_acceptable(*this);
}
DhtKeyId DhtValue::key_id() const {
return key_.key().compute_key_id();
}
@ -360,6 +364,21 @@ td::Status DhtUpdateRuleOverlayNodes::update_value(DhtValue &value, DhtValue &&n
return td::Status::OK();
}
bool DhtUpdateRuleOverlayNodes::check_is_acceptable(const ton::dht::DhtValue &value) {
auto F = fetch_tl_object<ton_api::overlay_nodes>(value.value().clone_as_buffer_slice(), true);
if (F.is_error()) {
return false;
}
auto L = F.move_as_ok();
auto now = td::Clocks::system();
for (auto &node : L->nodes_) {
if (node->version_ + 600 > now) {
return true;
}
}
return false;
}
tl_object_ptr<ton_api::dht_UpdateRule> DhtUpdateRuleOverlayNodes::tl() const {
return create_tl_object<ton_api::dht_updateRule_overlayNodes>();
}

View file

@ -119,6 +119,9 @@ class DhtUpdateRule {
virtual td::Status check_value(const DhtValue &value) = 0;
virtual td::Status update_value(DhtValue &value, DhtValue &&new_value) = 0;
virtual bool need_republish() const = 0;
virtual bool check_is_acceptable(const DhtValue &value) {
return true;
}
virtual tl_object_ptr<ton_api::dht_UpdateRule> tl() const = 0;
static td::Result<std::shared_ptr<DhtUpdateRule>> create(tl_object_ptr<ton_api::dht_UpdateRule> obj);
};
@ -210,6 +213,7 @@ class DhtValue {
void update_signature(td::BufferSlice signature);
void update_signature(td::SharedSlice signature);
td::Status check() const;
bool check_is_acceptable() const;
DhtKeyId key_id() const;
@ -249,6 +253,7 @@ class DhtUpdateRuleOverlayNodes : public DhtUpdateRule {
bool need_republish() const override {
return false;
}
bool check_is_acceptable(const DhtValue &value) override;
tl_object_ptr<ton_api::dht_UpdateRule> tl() const override;
static td::Result<std::shared_ptr<DhtUpdateRule>> create();
};

View file

@ -20,7 +20,6 @@
#include "td/utils/tl_storers.h"
#include "td/utils/crypto.h"
#include "td/utils/tl_parsers.h"
#include "td/utils/Random.h"
#include "td/utils/base64.h"
@ -28,9 +27,6 @@
#include "td/db/RocksDb.h"
#include "keys/encryptor.h"
#include "adnl/utils.hpp"
#include "auto/tl/ton_api.hpp"
#include "dht.h"
@ -44,10 +40,9 @@ namespace dht {
td::actor::ActorOwn<DhtMember> DhtMember::create(adnl::AdnlNodeIdShort id, std::string db_root,
td::actor::ActorId<keyring::Keyring> keyring,
td::actor::ActorId<adnl::Adnl> adnl, td::uint32 k, td::uint32 a,
bool client_only) {
return td::actor::ActorOwn<DhtMember>(
td::actor::create_actor<DhtMemberImpl>("dht", id, db_root, keyring, adnl, k, a, client_only));
td::actor::ActorId<adnl::Adnl> adnl, td::int32 network_id,
td::uint32 k, td::uint32 a, bool client_only) {
return td::actor::create_actor<DhtMemberImpl>("dht", id, db_root, keyring, adnl, network_id, k, a, client_only);
}
td::Result<td::actor::ActorOwn<Dht>> Dht::create(adnl::AdnlNodeIdShort id, std::string db_root,
@ -57,7 +52,7 @@ td::Result<td::actor::ActorOwn<Dht>> Dht::create(adnl::AdnlNodeIdShort id, std::
CHECK(conf->get_k() > 0);
CHECK(conf->get_a() > 0);
auto D = DhtMember::create(id, db_root, keyring, adnl, conf->get_k(), conf->get_a());
auto D = DhtMember::create(id, db_root, keyring, adnl, conf->get_network_id(), conf->get_k(), conf->get_a());
auto &nodes = conf->nodes();
for (auto &node : nodes.list()) {
@ -74,7 +69,7 @@ td::Result<td::actor::ActorOwn<Dht>> Dht::create_client(adnl::AdnlNodeIdShort id
CHECK(conf->get_k() > 0);
CHECK(conf->get_a() > 0);
auto D = DhtMember::create(id, db_root, keyring, adnl, conf->get_k(), conf->get_a(), true);
auto D = DhtMember::create(id, db_root, keyring, adnl, conf->get_network_id(), conf->get_k(), conf->get_a(), true);
auto &nodes = conf->nodes();
for (auto &node : nodes.list()) {
@ -90,8 +85,11 @@ void DhtMemberImpl::start_up() {
ton_api::dht_findValue::ID,
ton_api::dht_store::ID,
ton_api::dht_ping::ID,
ton_api::dht_registerReverseConnection::ID,
ton_api::dht_requestReversePing::ID,
ton_api::dht_query::ID,
ton_api::dht_message::ID};
ton_api::dht_message::ID,
ton_api::dht_requestReversePingCont::ID};
for (auto it : methods) {
td::actor::send_closure(adnl_, &adnl::Adnl::subscribe, id_, adnl::Adnl::int_to_bytestring(it),
@ -112,12 +110,12 @@ void DhtMemberImpl::start_up() {
V.ensure();
auto nodes = std::move(V.move_as_ok()->nodes_);
auto s = nodes->nodes_.size();
DhtNodesList list{std::move(nodes)};
CHECK(list.size() == s);
DhtNodesList list{std::move(nodes), network_id_};
CHECK(list.size() <= s); // Some nodes can be dropped due to a wrong network id
auto &B = buckets_[bit];
for (auto &node : list.list()) {
auto key = node.get_key();
B.add_full_node(key, std::move(node), adnl_, id_);
B.add_full_node(key, std::move(node), adnl_, id_, network_id_);
}
}
}
@ -131,8 +129,11 @@ void DhtMemberImpl::tear_down() {
ton_api::dht_findValue::ID,
ton_api::dht_store::ID,
ton_api::dht_ping::ID,
ton_api::dht_registerReverseConnection::ID,
ton_api::dht_requestReversePing::ID,
ton_api::dht_query::ID,
ton_api::dht_message::ID};
ton_api::dht_message::ID,
ton_api::dht_requestReversePingCont::ID};
for (auto it : methods) {
td::actor::send_closure(adnl_, &adnl::Adnl::unsubscribe, id_, adnl::Adnl::int_to_bytestring(it));
@ -299,6 +300,61 @@ void DhtMemberImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::dht_getSig
get_self_node(std::move(P));
}
static td::BufferSlice register_reverse_connection_to_sign(adnl::AdnlNodeIdShort client, adnl::AdnlNodeIdShort dht_id,
td::uint32 ttl) {
td::BufferSlice result(32 + 32 + 4);
td::MutableSlice s = result.as_slice();
s.copy_from(client.as_slice());
s.remove_prefix(32);
s.copy_from(dht_id.as_slice());
s.remove_prefix(32);
s.copy_from(std::string(reinterpret_cast<char *>(&ttl), 4));
return result;
}
void DhtMemberImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::dht_registerReverseConnection &query,
td::Promise<td::BufferSlice> promise) {
td::uint32 ttl = query.ttl_, now = (td::uint32)td::Clocks::system();
if (ttl <= now) {
return;
}
PublicKey pub{query.node_};
adnl::AdnlNodeIdShort client_id{pub.compute_short_id()};
td::BufferSlice to_sign = register_reverse_connection_to_sign(client_id, src, ttl);
TRY_RESULT_PROMISE(promise, encryptor, pub.create_encryptor());
TRY_STATUS_PROMISE(promise, encryptor->check_signature(to_sign, query.signature_));
DhtKeyId key_id = get_reverse_connection_key(client_id).compute_key_id();
reverse_connections_[client_id] = ReverseConnection{src, key_id, td::Timestamp::at_unix(std::min(ttl, now + 300))};
promise.set_value(create_serialize_tl_object<ton_api::dht_stored>());
}
void DhtMemberImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::dht_requestReversePing &query,
td::Promise<td::BufferSlice> promise) {
adnl::AdnlNodeIdShort client{query.client_};
auto it = reverse_connections_.find(client);
if (it != reverse_connections_.end()) {
if (it->second.ttl_.is_in_past()) {
reverse_connections_.erase(it);
} else {
PublicKey pub{query.target_->id_};
TRY_RESULT_PROMISE(promise, encryptor, pub.create_encryptor());
TRY_STATUS_PROMISE(promise,
encryptor->check_signature(serialize_tl_object(query.target_, true), query.signature_));
td::actor::send_closure(adnl_, &adnl::Adnl::send_message, id_, it->second.dht_node_,
create_serialize_tl_object<ton_api::dht_requestReversePingCont>(
std::move(query.target_), std::move(query.signature_), query.client_));
promise.set_result(create_serialize_tl_object<ton_api::dht_reversePingOk>());
return;
}
}
auto k = static_cast<td::uint32>(query.k_);
if (k > max_k()) {
k = max_k();
}
auto R = get_nearest_nodes(get_reverse_connection_key(client).compute_key_id(), k);
promise.set_value(create_serialize_tl_object<ton_api::dht_clientNotFound>(R.tl()));
}
void DhtMemberImpl::receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice data,
td::Promise<td::BufferSlice> promise) {
if (client_only_) {
@ -307,11 +363,15 @@ void DhtMemberImpl::receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice dat
{
auto R = fetch_tl_prefix<ton_api::dht_query>(data, true);
if (R.is_ok()) {
auto N = DhtNode::create(std::move(R.move_as_ok()->node_));
auto N = DhtNode::create(std::move(R.move_as_ok()->node_), network_id_);
if (N.is_ok()) {
auto node = N.move_as_ok();
auto key = node.get_key();
add_full_node(key, std::move(node));
if (node.adnl_id().compute_short_id() == src) {
auto key = node.get_key();
add_full_node_impl(key, std::move(node), true);
} else {
VLOG(DHT_WARNING) << this << ": dropping bad node: unexpected adnl id";
}
} else {
VLOG(DHT_WARNING) << this << ": dropping bad node " << N.move_as_error();
}
@ -335,10 +395,10 @@ void DhtMemberImpl::receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice dat
VLOG(DHT_EXTRA_DEBUG) << this << ": query to DHT from " << src << ": " << ton_api::to_string(Q);
ton_api::downcast_call(*Q.get(), [&](auto &object) { this->process_query(src, object, std::move(promise)); });
ton_api::downcast_call(*Q, [&](auto &object) { this->process_query(src, object, std::move(promise)); });
}
void DhtMemberImpl::add_full_node(DhtKeyId key, DhtNode node) {
void DhtMemberImpl::add_full_node_impl(DhtKeyId key, DhtNode node, bool set_active) {
VLOG(DHT_EXTRA_DEBUG) << this << ": adding full node " << key;
auto eid = key ^ key_;
@ -350,7 +410,7 @@ void DhtMemberImpl::add_full_node(DhtKeyId key, DhtNode node) {
#endif
if (bit < 256) {
CHECK(key.get_bit(bit) != key_.get_bit(bit));
buckets_[bit].add_full_node(key, std::move(node), adnl_, id_);
buckets_[bit].add_full_node(key, std::move(node), adnl_, id_, network_id_, set_active);
} else {
CHECK(key == key_);
}
@ -369,6 +429,27 @@ void DhtMemberImpl::receive_ping(DhtKeyId key, DhtNode result) {
}
void DhtMemberImpl::receive_message(adnl::AdnlNodeIdShort src, td::BufferSlice data) {
auto F = fetch_tl_object<ton_api::dht_requestReversePingCont>(data, true);
if (F.is_ok()) {
auto S = [&]() -> td::Status {
auto f = F.move_as_ok();
adnl::AdnlNodeIdShort client{f->client_};
if (!our_reverse_connections_.count(client)) {
return td::Status::Error(PSTRING() << ": unknown id for reverse ping: " << client);
}
TRY_RESULT_PREFIX(node, adnl::AdnlNode::create(f->target_), "failed to parse node: ");
TRY_RESULT_PREFIX(encryptor, node.pub_id().pubkey().create_encryptor(), "failed to create encryptor: ");
TRY_STATUS_PREFIX(encryptor->check_signature(serialize_tl_object(f->target_, true), f->signature_),
"invalid signature: ");
VLOG(DHT_INFO) << this << ": sending reverse ping to " << node.compute_short_id();
td::actor::send_closure(adnl_, &adnl::Adnl::add_peer, client, node.pub_id(), node.addr_list());
td::actor::send_closure(adnl_, &adnl::Adnl::send_message, client, node.compute_short_id(), td::BufferSlice());
return td::Status::OK();
}();
if (S.is_error()) {
VLOG(DHT_INFO) << this << ": " << S;
}
}
}
void DhtMemberImpl::set_value(DhtValue value, td::Promise<td::Unit> promise) {
@ -385,10 +466,11 @@ void DhtMemberImpl::set_value(DhtValue value, td::Promise<td::Unit> promise) {
void DhtMemberImpl::get_value_in(DhtKeyId key, td::Promise<DhtValue> result) {
auto P = td::PromiseCreator::lambda([key, promise = std::move(result), SelfId = actor_id(this), print_id = print_id(),
adnl = adnl_, list = get_nearest_nodes(key, k_), k = k_, a = a_, id = id_,
adnl = adnl_, list = get_nearest_nodes(key, k_), k = k_, a = a_,
network_id = network_id_, id = id_,
client_only = client_only_](td::Result<DhtNode> R) mutable {
R.ensure();
td::actor::create_actor<DhtQueryFindValue>("FindValueQuery", key, print_id, id, std::move(list), k, a,
td::actor::create_actor<DhtQueryFindValue>("FindValueQuery", key, print_id, id, std::move(list), k, a, network_id,
R.move_as_ok(), client_only, SelfId, adnl, std::move(promise))
.release();
});
@ -396,6 +478,70 @@ void DhtMemberImpl::get_value_in(DhtKeyId key, td::Promise<DhtValue> result) {
get_self_node(std::move(P));
}
void DhtMemberImpl::register_reverse_connection(adnl::AdnlNodeIdFull client, td::Promise<td::Unit> promise) {
auto client_short = client.compute_short_id();
td::uint32 ttl = (td::uint32)td::Clocks::system() + 300;
our_reverse_connections_.insert(client_short);
auto key_id = get_reverse_connection_key(client_short).compute_key_id();
td::actor::send_closure(keyring_, &keyring::Keyring::sign_message, client_short.pubkey_hash(),
register_reverse_connection_to_sign(client_short, id_, ttl),
[=, print_id = print_id(), list = get_nearest_nodes(key_id, k_), SelfId = actor_id(this),
promise = std::move(promise)](td::Result<td::BufferSlice> R) mutable {
TRY_RESULT_PROMISE_PREFIX(promise, signature, std::move(R), "Failed to sign: ");
td::actor::send_closure(SelfId, &DhtMemberImpl::get_self_node,
[=, list = std::move(list), signature = std::move(signature),
promise = std::move(promise)](td::Result<DhtNode> R) mutable {
R.ensure();
td::actor::create_actor<DhtQueryRegisterReverseConnection>(
"RegisterReverseQuery", key_id, std::move(client), ttl,
std::move(signature), print_id, id_, std::move(list), k_, a_,
network_id_, R.move_as_ok(), client_only_, SelfId, adnl_,
std::move(promise))
.release();
});
});
}
void DhtMemberImpl::request_reverse_ping(adnl::AdnlNode target, adnl::AdnlNodeIdShort client,
td::Promise<td::Unit> promise) {
auto pubkey_hash = target.compute_short_id().pubkey_hash();
td::BufferSlice to_sign = serialize_tl_object(target.tl(), true);
td::actor::send_closure(keyring_, &keyring::Keyring::sign_message, pubkey_hash, std::move(to_sign),
[SelfId = actor_id(this), promise = std::move(promise), target = std::move(target),
client](td::Result<td::BufferSlice> R) mutable {
TRY_RESULT_PROMISE(promise, signature, std::move(R));
td::actor::send_closure(SelfId, &DhtMemberImpl::request_reverse_ping_cont,
std::move(target), std::move(signature), client,
std::move(promise));
});
}
void DhtMemberImpl::request_reverse_ping_cont(adnl::AdnlNode target, td::BufferSlice signature,
adnl::AdnlNodeIdShort client, td::Promise<td::Unit> promise) {
auto it = reverse_connections_.find(client);
if (it != reverse_connections_.end()) {
if (it->second.ttl_.is_in_past()) {
reverse_connections_.erase(it);
} else {
td::actor::send_closure(adnl_, &adnl::Adnl::send_message, id_, it->second.dht_node_,
create_serialize_tl_object<ton_api::dht_requestReversePingCont>(
target.tl(), std::move(signature), client.bits256_value()));
promise.set_result(td::Unit());
return;
}
}
auto key_id = get_reverse_connection_key(client).compute_key_id();
get_self_node([=, target = std::move(target), signature = std::move(signature), promise = std::move(promise),
SelfId = actor_id(this), print_id = print_id(), list = get_nearest_nodes(key_id, k_),
client_only = client_only_](td::Result<DhtNode> R) mutable {
R.ensure();
td::actor::create_actor<DhtQueryRequestReversePing>(
"RequestReversePing", client, std::move(target), std::move(signature), print_id, id_, std::move(list), k_, a_,
network_id_, R.move_as_ok(), client_only, SelfId, adnl_, std::move(promise))
.release();
});
}
void DhtMemberImpl::check() {
VLOG(DHT_INFO) << this << ": ping=" << ping_queries_ << " fnode=" << find_node_queries_
<< " fvalue=" << find_value_queries_ << " store=" << store_queries_
@ -454,6 +600,16 @@ void DhtMemberImpl::check() {
}
}
}
if (reverse_connections_.size() > 0) {
auto it = reverse_connections_.upper_bound(last_check_reverse_conn_);
if (it == reverse_connections_.end()) {
it = reverse_connections_.begin();
}
last_check_reverse_conn_ = it->first;
if (it->second.ttl_.is_in_past()) {
reverse_connections_.erase(it);
}
}
if (republish_att_.is_in_past()) {
auto it = our_values_.lower_bound(last_republish_key_);
@ -496,9 +652,10 @@ void DhtMemberImpl::check() {
DhtKeyId key{x};
auto P = td::PromiseCreator::lambda([key, promise = std::move(promise), SelfId = actor_id(this),
print_id = print_id(), adnl = adnl_, list = get_nearest_nodes(key, k_), k = k_,
a = a_, id = id_, client_only = client_only_](td::Result<DhtNode> R) mutable {
a = a_, network_id = network_id_, id = id_,
client_only = client_only_](td::Result<DhtNode> R) mutable {
R.ensure();
td::actor::create_actor<DhtQueryFindNodes>("FindNodesQuery", key, print_id, id, std::move(list), k, a,
td::actor::create_actor<DhtQueryFindNodes>("FindNodesQuery", key, print_id, id, std::move(list), k, a, network_id,
R.move_as_ok(), client_only, SelfId, adnl, std::move(promise))
.release();
});
@ -519,63 +676,71 @@ void DhtMemberImpl::send_store(DhtValue value, td::Promise<td::Unit> promise) {
value.check().ensure();
auto key_id = value.key_id();
auto P =
td::PromiseCreator::lambda([value = std::move(value), print_id = print_id(), id = id_, client_only = client_only_,
list = get_nearest_nodes(key_id, k_), k = k_, a = a_, SelfId = actor_id(this),
adnl = adnl_, promise = std::move(promise)](td::Result<DhtNode> R) mutable {
R.ensure();
td::actor::create_actor<DhtQueryStore>("StoreQuery", std::move(value), print_id, id, std::move(list), k, a,
R.move_as_ok(), client_only, SelfId, adnl, std::move(promise))
.release();
});
auto P = td::PromiseCreator::lambda([value = std::move(value), print_id = print_id(), id = id_,
client_only = client_only_, list = get_nearest_nodes(key_id, k_), k = k_, a = a_,
network_id = network_id_, SelfId = actor_id(this), adnl = adnl_,
promise = std::move(promise)](td::Result<DhtNode> R) mutable {
R.ensure();
td::actor::create_actor<DhtQueryStore>("StoreQuery", std::move(value), print_id, id, std::move(list), k, a,
network_id, R.move_as_ok(), client_only, SelfId, adnl, std::move(promise))
.release();
});
get_self_node(std::move(P));
}
void DhtMemberImpl::get_self_node(td::Promise<DhtNode> promise) {
auto P =
td::PromiseCreator::lambda([promise = std::move(promise), print_id = print_id(), id = id_, keyring = keyring_,
client_only = client_only_](td::Result<adnl::AdnlNode> R) mutable {
R.ensure();
auto node = R.move_as_ok();
auto version = static_cast<td::int32>(td::Clocks::system());
auto B = create_serialize_tl_object<ton_api::dht_node>(node.pub_id().tl(), node.addr_list().tl(), version,
td::BufferSlice());
if (!client_only) {
CHECK(node.addr_list().size() > 0);
}
auto P = td::PromiseCreator::lambda(
[promise = std::move(promise), node = std::move(node), version](td::Result<td::BufferSlice> R) mutable {
R.ensure();
DhtNode n{node.pub_id(), node.addr_list(), version, R.move_as_ok()};
promise.set_result(std::move(n));
});
td::actor::send_closure(keyring, &keyring::Keyring::sign_message, id.pubkey_hash(), std::move(B), std::move(P));
});
auto P = td::PromiseCreator::lambda([promise = std::move(promise), print_id = print_id(), id = id_,
keyring = keyring_, client_only = client_only_,
network_id = network_id_](td::Result<adnl::AdnlNode> R) mutable {
R.ensure();
auto node = R.move_as_ok();
auto version = static_cast<td::int32>(td::Clocks::system());
td::BufferSlice B = serialize_tl_object(
DhtNode{node.pub_id(), node.addr_list(), version, network_id, td::BufferSlice{}}.tl(), true);
if (!client_only) {
CHECK(node.addr_list().size() > 0);
}
auto P = td::PromiseCreator::lambda([promise = std::move(promise), node = std::move(node), version,
network_id](td::Result<td::BufferSlice> R) mutable {
R.ensure();
DhtNode n{node.pub_id(), node.addr_list(), version, network_id, R.move_as_ok()};
promise.set_result(std::move(n));
});
td::actor::send_closure(keyring, &keyring::Keyring::sign_message, id.pubkey_hash(), std::move(B), std::move(P));
});
td::actor::send_closure(adnl_, &adnl::Adnl::get_self_node, id_, std::move(P));
}
td::Result<std::shared_ptr<DhtGlobalConfig>> Dht::create_global_config(tl_object_ptr<ton_api::dht_config_global> conf) {
td::uint32 k;
if (conf->k_ == 0) {
td::Result<std::shared_ptr<DhtGlobalConfig>> Dht::create_global_config(tl_object_ptr<ton_api::dht_config_Global> conf) {
td::uint32 k = 0, a = 0;
td::int32 network_id = -1;
tl_object_ptr<ton_api::dht_nodes> static_nodes;
ton_api::downcast_call(*conf, td::overloaded(
[&](ton_api::dht_config_global &f) {
k = f.k_;
a = f.a_;
network_id = -1;
static_nodes = std::move(f.static_nodes_);
},
[&](ton_api::dht_config_global_v2 &f) {
k = f.k_;
a = f.a_;
network_id = f.network_id_;
static_nodes = std::move(f.static_nodes_);
}));
if (k == 0) {
k = DhtMember::default_k();
} else if (conf->k_ > 0 && static_cast<td::uint32>(conf->k_) <= DhtMember::max_k()) {
k = conf->k_;
} else {
return td::Status::Error(ErrorCode::protoviolation, PSTRING() << "bad value k=" << conf->k_);
} else if (k > DhtMember::max_k()) {
return td::Status::Error(ErrorCode::protoviolation, PSTRING() << "bad value k=" << k);
}
td::uint32 a;
if (conf->a_ == 0) {
if (a == 0) {
a = DhtMember::default_a();
} else if (conf->a_ > 0 && static_cast<td::uint32>(conf->a_) <= DhtMember::max_a()) {
a = conf->a_;
} else {
return td::Status::Error(ErrorCode::protoviolation, PSTRING() << "bad value a=" << conf->a_);
} else if (a > DhtMember::max_a()) {
return td::Status::Error(ErrorCode::protoviolation, PSTRING() << "bad value a=" << a);
}
DhtNodesList l{std::move(conf->static_nodes_)};
return std::make_shared<DhtGlobalConfig>(k, a, std::move(l));
DhtNodesList l{std::move(static_nodes), network_id};
return std::make_shared<DhtGlobalConfig>(k, a, network_id, std::move(l));
}
} // namespace dht

View file

@ -47,13 +47,17 @@ class Dht : public td::actor::Actor {
td::actor::ActorId<keyring::Keyring> keyring,
td::actor::ActorId<adnl::Adnl> adnl);
static td::Result<std::shared_ptr<DhtGlobalConfig>> create_global_config(
tl_object_ptr<ton_api::dht_config_global> conf);
tl_object_ptr<ton_api::dht_config_Global> conf);
virtual adnl::AdnlNodeIdShort get_id() const = 0;
virtual void set_value(DhtValue key_value, td::Promise<td::Unit> result) = 0;
virtual void get_value(DhtKey key, td::Promise<DhtValue> result) = 0;
virtual void register_reverse_connection(adnl::AdnlNodeIdFull client, td::Promise<td::Unit> promise) = 0;
virtual void request_reverse_ping(adnl::AdnlNode target, adnl::AdnlNodeIdShort client,
td::Promise<td::Unit> promise) = 0;
virtual void dump(td::StringBuilder &sb) const = 0;
virtual ~Dht() = default;
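For orientation, here is a hedged sketch (not part of this diff) of how the two new virtual methods above, register_reverse_connection and request_reverse_ping, might be invoked from application code; the helper function and its arguments are illustrative.

```cpp
#include "dht/dht.h"

// Hypothetical helper, not from this diff.
void reverse_connection_example(td::actor::ActorId<ton::dht::Dht> dht,
                                ton::adnl::AdnlNodeIdFull local_adnl_id,
                                ton::adnl::AdnlNode self_node,
                                ton::adnl::AdnlNodeIdShort remote_client) {
  // Announce that local_adnl_id accepts reverse connections; the value is stored
  // under the key built by DhtMember::get_reverse_connection_key (dht.hpp below).
  td::actor::send_closure(dht, &ton::dht::Dht::register_reverse_connection, local_adnl_id,
                          [](td::Result<td::Unit> R) { R.ensure(); });

  // Ask the DHT to forward a reverse-ping request to remote_client so that it
  // pings self_node back, establishing the connection in the reverse direction.
  td::actor::send_closure(dht, &ton::dht::Dht::request_reverse_ping, self_node, remote_client,
                          [](td::Result<td::Unit> R) { R.ensure(); });
}
```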

View file

@ -52,15 +52,20 @@ class DhtGlobalConfig {
auto get_a() const {
return a_;
}
auto get_network_id() const {
return network_id_;
}
const auto &nodes() const {
return static_nodes_;
}
DhtGlobalConfig(td::uint32 k, td::uint32 a, DhtNodesList nodes) : k_(k), a_(a), static_nodes_(std::move(nodes)) {
DhtGlobalConfig(td::uint32 k, td::uint32 a, td::int32 network_id, DhtNodesList nodes)
: k_(k), a_(a), network_id_(network_id), static_nodes_(std::move(nodes)) {
}
private:
td::uint32 k_;
td::uint32 a_;
td::int32 network_id_;
DhtNodesList static_nodes_;
};
@ -85,8 +90,8 @@ class DhtMember : public Dht {
static td::actor::ActorOwn<DhtMember> create(adnl::AdnlNodeIdShort id, std::string db_root,
td::actor::ActorId<keyring::Keyring> keyring,
td::actor::ActorId<adnl::Adnl> adnl, td::uint32 k = 10, td::uint32 a = 3,
bool client_only = false);
td::actor::ActorId<adnl::Adnl> adnl, td::int32 network_id,
td::uint32 k = 10, td::uint32 a = 3, bool client_only = false);
//virtual void update_addr_list(tl_object_ptr<ton_api::adnl_addressList> addr_list) = 0;
//virtual void add_node(adnl::AdnlNodeIdShort id) = 0;
@ -101,6 +106,10 @@ class DhtMember : public Dht {
virtual void get_self_node(td::Promise<DhtNode> promise) = 0;
virtual PrintId print_id() const = 0;
static DhtKey get_reverse_connection_key(adnl::AdnlNodeIdShort node) {
return DhtKey{node.pubkey_hash(), "address", 0};
}
};
inline td::StringBuilder &operator<<(td::StringBuilder &sb, const DhtMember::PrintId &id) {

View file

@ -1,303 +0,0 @@
/*
This file is part of TON Blockchain source code.
TON Blockchain is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
TON Blockchain is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TON Blockchain. If not, see <http://www.gnu.org/licenses/>.
In addition, as a special exception, the copyright holders give permission
to link the code of portions of this program with the OpenSSL library.
You must obey the GNU General Public License in all respects for all
of the code used other than OpenSSL. If you modify file(s) with this
exception, you may extend this exception to your version of the file(s),
but you are not obligated to do so. If you do not wish to do so, delete this
exception statement from your version. If you delete this exception statement
from all source files in the program, then also delete it here.
Copyright 2017-2020 Telegram Systems LLP
*/
#include "adnl/adnl-network-manager.h"
#include "adnl/adnl-peer-table.h"
#include "adnl/utils.hpp"
#include "keys/encryptor.h"
#include "td/utils/Time.h"
#include "td/utils/format.h"
#include "td/utils/OptionParser.h"
#include "td/utils/filesystem.h"
#include "dht/dht.h"
#include "auto/tl/ton_api_json.h"
#include <iostream>
#include <sstream>
template <std::size_t size>
std::ostream &operator<<(std::ostream &stream, const td::UInt<size> &x) {
for (size_t i = 0; i < size / 8; i++) {
stream << td::format::hex_digit((x.raw[i] >> 4) & 15) << td::format::hex_digit(x.raw[i] & 15);
}
return stream;
}
class adnl::AdnlNode : public td::actor::Actor {
private:
std::vector<td::UInt256> ping_ids_;
td::actor::ActorOwn<ton::adnl::AdnlNetworkManager> network_manager_;
td::actor::ActorOwn<ton::adnl::AdnlPeerTable> peer_table_;
td::actor::ActorOwn<ton::DhtNode> dht_node_;
td::UInt256 local_id_;
bool local_id_set_ = false;
std::string host_ = "127.0.0.1";
td::uint32 ip_ = 0x7f000001;
td::uint16 port_ = 2380;
std::string local_config_ = "ton-local.config";
std::string global_config_ = "ton-global.config";
void receive_message(td::UInt256 src, td::UInt256 dst, td::BufferSlice data) {
std::cout << "MESSAGE FROM " << src << " to " << dst << " of size " << std::to_string(data.size()) << "\n";
}
void receive_query(td::UInt256 src, td::UInt256 dst, td::uint64 query_id, td::BufferSlice data) {
std::cout << "QUERY " << std::to_string(query_id) << " FROM " << src << " to " << dst << " of size "
<< std::to_string(data.size()) << "\n";
td::actor::send_closure(peer_table_, &ton::adnl::AdnlPeerTable::answer_query, dst, src, query_id,
ton::create_tl_object<ton::ton_api::testObject>());
}
std::unique_ptr<ton::adnl::AdnlPeerTable::Callback> make_callback() {
class Callback : public ton::adnl::AdnlPeerTable::Callback {
public:
void receive_message(td::UInt256 src, td::UInt256 dst, td::BufferSlice data) override {
td::actor::send_closure(id_, &adnl::AdnlNode::receive_message, src, dst, std::move(data));
}
void receive_query(td::UInt256 src, td::UInt256 dst, td::uint64 query_id, td::BufferSlice data) override {
td::actor::send_closure(id_, &adnl::AdnlNode::receive_query, src, dst, query_id, std::move(data));
}
Callback(td::actor::ActorId<adnl::AdnlNode> id) : id_(std::move(id)) {
}
private:
td::actor::ActorId<adnl::AdnlNode> id_;
};
return std::make_unique<Callback>(td::actor::actor_id(this));
}
public:
void set_local_config(std::string str) {
local_config_ = str;
}
void set_global_config(std::string str) {
global_config_ = str;
}
void start_up() override {
alarm_timestamp() = td::Timestamp::in(1);
}
adnl::AdnlNode() {
network_manager_ = ton::adnl::AdnlNetworkManager::create();
peer_table_ = ton::adnl::AdnlPeerTable::create();
td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::register_peer_table, peer_table_.get());
td::actor::send_closure(peer_table_, &ton::adnl::AdnlPeerTable::register_network_manager, network_manager_.get());
}
void listen_udp(td::uint16 port) {
td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::add_listening_udp_port, "0.0.0.0", port);
port_ = port;
}
void run() {
auto L = td::read_file(local_config_);
if (L.is_error()) {
LOG(FATAL) << "can not read local config: " << L.move_as_error();
}
auto L2 = td::json_decode(L.move_as_ok().as_slice());
if (L2.is_error()) {
LOG(FATAL) << "can not parse local config: " << L2.move_as_error();
}
auto lc_j = L2.move_as_ok();
if (lc_j.type() != td::JsonValue::Type::Object) {
LOG(FATAL) << "can not parse local config: expected json object";
}
ton::ton_api::config_local lc;
auto rl = ton::ton_api::from_json(lc, lc_j.get_object());
if (rl.is_error()) {
LOG(FATAL) << "can not interpret local config: " << rl.move_as_error();
}
auto G = td::read_file(global_config_);
if (G.is_error()) {
LOG(FATAL) << "can not read global config: " << G.move_as_error();
}
auto G2 = td::json_decode(G.move_as_ok().as_slice());
if (G2.is_error()) {
LOG(FATAL) << "can not parse global config: " << G2.move_as_error();
}
auto gc_j = G2.move_as_ok();
if (gc_j.type() != td::JsonValue::Type::Object) {
LOG(FATAL) << "can not parse global config: expected json object";
}
ton::ton_api::config_global gc;
auto rg = ton::ton_api::from_json(gc, gc_j.get_object());
if (rg.is_error()) {
LOG(FATAL) << "can not interpret local config: " << rg.move_as_error();
}
if (gc.adnl_) {
auto it = gc.adnl_->static_nodes_.begin();
while (it != gc.adnl_->static_nodes_.end()) {
auto R = ton::adnl_validate_full_id(std::move((*it)->id_));
if (R.is_error()) {
LOG(FATAL) << "can not apply global config: " << R.move_as_error();
}
auto R2 = ton::adnl_validate_addr_list(std::move((*it)->addr_list_));
if (R2.is_error()) {
LOG(FATAL) << "can not apply global config: " << R2.move_as_error();
}
td::actor::send_closure(peer_table_, &ton::adnl::AdnlPeerTable::add_peer, R.move_as_ok(), R2.move_as_ok());
it++;
}
}
if (!gc.dht_) {
LOG(FATAL) << "global config does not contain dht section";
}
if (lc.dht_.size() != 1) {
LOG(FATAL) << "local config must contain exactly one dht section";
}
auto R = ton::DhtNode::create_from_json(std::move(gc.dht_), std::move(lc.dht_[0]), peer_table_.get());
if (R.is_error()) {
LOG(FATAL) << "fail creating dht node: " << R.move_as_error();
}
dht_node_ = R.move_as_ok();
}
/*
void set_host(td::IPAddress ip, std::string host) {
ip_ = ip.get_ipv4();
host_ = host;
}
void send_pings_to(td::UInt256 id) {
std::cout << "send pings to " << id << "\n";
ping_ids_.push_back(id);
}
void add_local_id(ton::tl_object_ptr<ton::ton_api::adnl_id_Pk> pk_) {
auto pub_ = ton::get_public_key(pk_);
local_id_ = ton::adnl_short_id(pub_);
std::cout << "local_id = '" << local_id_ << "'\n";
auto x = ton::create_tl_object<ton::ton_api::adnl_address_udp>(ip_, port_);
auto v = std::vector<ton::tl_object_ptr<ton::ton_api::adnl_Address>>();
v.push_back(ton::move_tl_object_as<ton::ton_api::adnl_Address>(x));
auto y =
ton::create_tl_object<ton::ton_api::adnl_addressList>(std::move(v), static_cast<td::int32>(td::Time::now()));
LOG(INFO) << "local_addr_list: " << ton::ton_api::to_string(y);
td::actor::send_closure(peer_table_, &ton::adnl::AdnlPeerTable::add_id, ton::clone_tl_object(pk_),
ton::clone_tl_object(y));
td::actor::send_closure(peer_table_, &ton::adnl::AdnlPeerTable::subscribe_custom, local_id_, "TEST", make_callback());
local_id_set_ = true;
dht_node_ = ton::DhtNode::create(std::move(pk_), peer_table_.get());
td::actor::send_closure(dht_node_, &ton::DhtNode::update_addr_list, std::move(y));
}
void add_static_dht_node(ton::tl_object_ptr<ton::ton_api::adnl_id_Full> id,
ton::tl_object_ptr<ton::ton_api::adnl_addressList> addr_list,
td::BufferSlice signature) {
auto Id = ton::adnl_short_id(id);
td::actor::send_closure(
dht_node_, &ton::DhtNode::add_full_node, Id,
ton::create_tl_object<ton::ton_api::dht_node>(std::move(id), std::move(addr_list), signature.as_slice().str()));
}
void add_foreign(ton::tl_object_ptr<ton::ton_api::adnl_id_Full> id,
ton::tl_object_ptr<ton::ton_api::adnl_addressList> addr_list) {
std::cout << ton::adnl_short_id(id) << "\n";
td::actor::send_closure(peer_table_, &ton::adnl::AdnlPeerTable::add_peer, std::move(id), std::move(addr_list));
}
void alarm() override {
std::cout << "alarm\n";
if (local_id_set_) {
for (auto it = ping_ids_.begin(); it != ping_ids_.end(); it++) {
auto P = td::PromiseCreator::lambda([](td::Result<ton::tl_object_ptr<ton::ton_api::adnl_Message>> result) {
if (result.is_error()) {
std::cout << "received error " << result.move_as_error().to_string() << "\n";
} else {
auto message = result.move_as_ok();
std::cout << "received answer to query\n";
}
});
td::actor::send_closure(peer_table_, &ton::adnl::AdnlPeerTable::send_query, local_id_, *it, std::move(P),
td::Timestamp::in(5),
ton::move_tl_object_as<ton::ton_api::adnl_Message>(
ton::create_tl_object<ton::ton_api::adnl_message_custom>("TEST")));
}
}
}
*/
};
td::Result<td::UInt256> get_uint256(std::string str) {
if (str.size() != 64) {
return td::Status::Error("uint256 must have 64 bytes");
}
td::UInt256 res;
for (size_t i = 0; i < 32; i++) {
res.raw[i] = static_cast<td::uint8>(td::hex_to_int(str[2 * i]) * 16 + td::hex_to_int(str[2 * i + 1]));
}
return res;
}
int main(int argc, char *argv[]) {
td::actor::ActorOwn<adnl::AdnlNode> x;
td::OptionParser p;
p.set_description("test basic adnl functionality");
p.add_option('h', "help", "prints_help", [&]() {
char b[10240];
td::StringBuilder sb({b, 10000});
sb << p;
std::cout << sb.as_cslice().c_str();
std::exit(2);
return td::Status::OK();
});
p.add_option('p', "port", "sets udp port", [&](td::Slice port) {
td::actor::send_closure(x, &adnl::AdnlNode::listen_udp, static_cast<td::uint16>(std::stoi(port.str())));
return td::Status::OK();
});
p.add_option('C', "global-config", "file to read global config", [&](td::Slice fname) {
td::actor::send_closure(x, &adnl::AdnlNode::set_global_config, fname.str());
return td::Status::OK();
});
p.add_option('c', "local-config", "file to read local config", [&](td::Slice fname) {
td::actor::send_closure(x, &adnl::AdnlNode::set_local_config, fname.str());
return td::Status::OK();
});
td::actor::Scheduler scheduler({2});
scheduler.run_in_context([&] {
x = td::actor::create_actor<adnl::AdnlNode>(td::actor::ActorInfoCreator::Options().with_name("A").with_poll());
});
scheduler.run_in_context([&] { p.run(argc, argv).ensure(); });
scheduler.run_in_context([&] { td::actor::send_closure(x, &adnl::AdnlNode::run); });
scheduler.run();
return 0;
}

View file

@ -0,0 +1,214 @@
/*
This file is part of TON Blockchain source code.
TON Blockchain is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
TON Blockchain is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TON Blockchain. If not, see <http://www.gnu.org/licenses/>.
In addition, as a special exception, the copyright holders give permission
to link the code of portions of this program with the OpenSSL library.
You must obey the GNU General Public License in all respects for all
of the code used other than OpenSSL. If you modify file(s) with this
exception, you may extend this exception to your version of the file(s),
but you are not obligated to do so. If you do not wish to do so, delete this
exception statement from your version. If you delete this exception statement
from all source files in the program, then also delete it here.
Copyright 2017-2020 Telegram Systems LLP
*/
#include "adnl/adnl-network-manager.h"
#include "adnl/adnl.h"
#include "adnl/utils.hpp"
#include "keys/encryptor.h"
#include "td/utils/Time.h"
#include "td/utils/format.h"
#include "td/utils/OptionParser.h"
#include "td/utils/filesystem.h"
#include "dht/dht.hpp"
#include "auto/tl/ton_api_json.h"
#include "common/delay.h"
#include "td/utils/Random.h"
#include "terminal/terminal.h"
#include <iostream>
class AdnlNode : public td::actor::Actor {
private:
td::actor::ActorOwn<ton::adnl::AdnlNetworkManager> network_manager_;
td::actor::ActorOwn<ton::adnl::Adnl> adnl_;
td::actor::ActorOwn<ton::keyring::Keyring> keyring_;
ton::adnl::AdnlNodeIdShort local_id_;
std::string host_ = "127.0.0.1";
td::uint16 port_ = 2380;
std::string global_config_ = "ton-global.config";
struct NodeInfo {
ton::adnl::AdnlNodeIdShort id;
td::uint32 sent = 0, received = 0;
double sum_time = 0.0;
explicit NodeInfo(ton::adnl::AdnlNodeIdShort id) : id(id) {
}
};
std::vector<NodeInfo> nodes_;
td::uint32 pings_remaining_ = 4;
td::uint32 pending_ = 1;
public:
void set_global_config(std::string str) {
global_config_ = str;
}
void listen_udp(td::uint16 port) {
port_ = port;
}
AdnlNode() {
}
void run() {
network_manager_ = ton::adnl::AdnlNetworkManager::create(port_);
keyring_ = ton::keyring::Keyring::create("");
adnl_ = ton::adnl::Adnl::create("", keyring_.get());
td::actor::send_closure(adnl_, &ton::adnl::Adnl::register_network_manager, network_manager_.get());
td::IPAddress addr;
addr.init_host_port(host_, port_).ensure();
ton::adnl::AdnlCategoryMask mask;
mask[0] = true;
td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::add_self_addr, addr, mask, 0);
auto pk = ton::privkeys::Ed25519::random();
td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, pk, true, [](td::Result<td::Unit>) {});
ton::adnl::AdnlNodeIdFull local_id_full(pk.pub());
ton::adnl::AdnlAddressList addr_list;
addr_list.set_version(static_cast<td::int32>(td::Clocks::system()));
addr_list.set_reinit_date(ton::adnl::Adnl::adnl_start_time());
td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, local_id_full, std::move(addr_list), (td::uint8)0);
local_id_ = local_id_full.compute_short_id();
auto r_dht = get_dht_config();
if (r_dht.is_error()) {
LOG(FATAL) << "Cannot get dht config: " << r_dht.move_as_error();
}
auto dht = r_dht.move_as_ok();
ton::adnl::AdnlNodesList static_nodes;
for (const auto &node : dht->nodes().list()) {
LOG(INFO) << "Node #" << nodes_.size() << " : " << node.adnl_id().compute_short_id();
nodes_.emplace_back(node.adnl_id().compute_short_id());
static_nodes.push(ton::adnl::AdnlNode(node.adnl_id(), node.addr_list()));
}
td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_static_nodes_from_config, std::move(static_nodes));
ton::delay_action([SelfId = actor_id(this)]() { td::actor::send_closure(SelfId, &AdnlNode::send_pings); },
td::Timestamp::in(1.0));
}
td::Result<std::shared_ptr<ton::dht::DhtGlobalConfig>> get_dht_config() {
TRY_RESULT_PREFIX(conf_data, td::read_file(global_config_), "failed to read: ");
TRY_RESULT_PREFIX(conf_json, td::json_decode(conf_data.as_slice()), "failed to parse json: ");
ton::ton_api::config_global conf;
TRY_STATUS_PREFIX(ton::ton_api::from_json(conf, conf_json.get_object()), "json does not fit TL scheme: ");
if (!conf.dht_) {
return td::Status::Error(ton::ErrorCode::error, "does not contain [dht] section");
}
TRY_RESULT_PREFIX(dht, ton::dht::Dht::create_global_config(std::move(conf.dht_)), "bad [dht] section: ");
return std::move(dht);
}
void send_pings() {
CHECK(pings_remaining_);
--pings_remaining_;
for (size_t i = 0; i < nodes_.size(); ++i) {
auto id = nodes_[i].id;
LOG(INFO) << "Sending ping to " << id;
++pending_;
td::actor::send_closure(
adnl_, &ton::adnl::Adnl::send_query, local_id_, id, "ping",
[SelfId = actor_id(this), i, timer = td::Timer()](td::Result<td::BufferSlice> R) {
td::actor::send_closure(SelfId, &AdnlNode::on_pong, i, timer.elapsed(), R.is_ok());
}, td::Timestamp::in(5.0),
ton::create_serialize_tl_object<ton::ton_api::dht_ping>(td::Random::fast_uint64()));
}
if (pings_remaining_ == 0) {
--pending_;
try_finish();
} else {
ton::delay_action([SelfId = actor_id(this)]() { td::actor::send_closure(SelfId, &AdnlNode::send_pings); },
td::Timestamp::in(1.0));
}
}
void on_pong(size_t i, double time, bool success) {
auto &node = nodes_[i];
++node.sent;
if (success) {
++node.received;
node.sum_time += time;
LOG(INFO) << "Pong from " << node.id << " in " << time << "s";
} else {
LOG(INFO) << "Pong from " << node.id << " : timeout";
}
--pending_;
try_finish();
}
void try_finish() {
if (pending_) {
return;
}
td::TerminalIO::out() << "Pinged " << nodes_.size() << " nodes:\n";
for (const auto& node : nodes_) {
td::TerminalIO::out() << node.id << " : " << node.received << "/" << node.sent;
if (node.received > 0) {
td::TerminalIO::out() << " (avg. time = " << node.sum_time / node.received << ")";
}
td::TerminalIO::out() << "\n";
}
std::exit(0);
}
};
int main(int argc, char *argv[]) {
td::actor::ActorOwn<AdnlNode> x;
td::OptionParser p;
p.set_description("ping dht servers from config");
p.add_option('h', "help", "print help", [&]() {
char b[10240];
td::StringBuilder sb(td::MutableSlice{b, 10000});
sb << p;
std::cout << sb.as_cslice().c_str();
std::exit(2);
});
p.add_option('p', "port", "set udp port", [&](td::Slice port) {
td::actor::send_closure(x, &AdnlNode::listen_udp, static_cast<td::uint16>(std::stoi(port.str())));
});
p.add_option('C', "global-config", "file to read global config from",
[&](td::Slice fname) { td::actor::send_closure(x, &AdnlNode::set_global_config, fname.str()); });
p.add_option('v', "verbosity", "set verbosity", [&](td::Slice arg) {
int v = VERBOSITY_NAME(FATAL) + (td::to_integer<int>(arg));
SET_VERBOSITY_LEVEL(v);
});
td::actor::Scheduler scheduler({2});
scheduler.run_in_context([&] { x = td::actor::create_actor<AdnlNode>("AdnlNode"); });
scheduler.run_in_context([&] { p.run(argc, argv).ensure(); });
scheduler.run_in_context([&] { td::actor::send_closure(x, &AdnlNode::run); });
scheduler.run();
return 0;
}

219
dht/utils/dht-resolve.cpp Normal file
View file

@ -0,0 +1,219 @@
/*
This file is part of TON Blockchain source code.
TON Blockchain is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
TON Blockchain is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TON Blockchain. If not, see <http://www.gnu.org/licenses/>.
In addition, as a special exception, the copyright holders give permission
to link the code of portions of this program with the OpenSSL library.
You must obey the GNU General Public License in all respects for all
of the code used other than OpenSSL. If you modify file(s) with this
exception, you may extend this exception to your version of the file(s),
but you are not obligated to do so. If you do not wish to do so, delete this
exception statement from your version. If you delete this exception statement
from all source files in the program, then also delete it here.
Copyright 2017-2020 Telegram Systems LLP
*/
#include "adnl/adnl-network-manager.h"
#include "adnl/adnl.h"
#include "adnl/utils.hpp"
#include "keys/encryptor.h"
#include "td/utils/Time.h"
#include "td/utils/format.h"
#include "td/utils/OptionParser.h"
#include "td/utils/filesystem.h"
#include "dht/dht.hpp"
#include "auto/tl/ton_api_json.h"
#include "common/delay.h"
#include "td/utils/Random.h"
#include "terminal/terminal.h"
#include "common/util.h"
#include <iostream>
class Resolver : public td::actor::Actor {
private:
td::actor::ActorOwn<ton::adnl::AdnlNetworkManager> network_manager_;
td::actor::ActorOwn<ton::adnl::Adnl> adnl_;
td::actor::ActorOwn<ton::keyring::Keyring> keyring_;
ton::adnl::AdnlNodeIdShort local_id_;
td::actor::ActorOwn<ton::dht::Dht> dht_;
std::string global_config_;
int server_idx_;
std::string host_ = "127.0.0.1";
td::uint16 port_;
ton::dht::DhtKey key_;
double timeout_;
public:
Resolver(std::string global_config, int server_idx, td::uint16 port, ton::dht::DhtKey key, double timeout)
: global_config_(global_config), server_idx_(server_idx), port_(port), key_(std::move(key)), timeout_(timeout) {
}
void run() {
network_manager_ = ton::adnl::AdnlNetworkManager::create(port_);
keyring_ = ton::keyring::Keyring::create("");
adnl_ = ton::adnl::Adnl::create("", keyring_.get());
td::actor::send_closure(adnl_, &ton::adnl::Adnl::register_network_manager, network_manager_.get());
td::IPAddress addr;
addr.init_host_port(host_, port_).ensure();
ton::adnl::AdnlCategoryMask mask;
mask[0] = true;
td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::add_self_addr, addr, mask, 0);
auto pk = ton::privkeys::Ed25519::random();
td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, pk, true, [](td::Result<td::Unit>) {});
ton::adnl::AdnlNodeIdFull local_id_full(pk.pub());
ton::adnl::AdnlAddressList addr_list;
addr_list.set_version(static_cast<td::int32>(td::Clocks::system()));
addr_list.set_reinit_date(ton::adnl::Adnl::adnl_start_time());
td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, local_id_full, std::move(addr_list), (td::uint8)0);
local_id_ = local_id_full.compute_short_id();
auto dht_config = get_dht_config();
if (dht_config.is_error()) {
LOG(FATAL) << "Failed to load dht config: " << dht_config.move_as_error();
}
auto D = ton::dht::Dht::create_client(local_id_, "", dht_config.move_as_ok(), keyring_.get(), adnl_.get());
if (D.is_error()) {
LOG(FATAL) << "Failed to init dht client: " << D.move_as_error();
}
dht_ = D.move_as_ok();
LOG(INFO) << "Get value " << key_.public_key_hash() << " " << key_.name() << " " << key_.idx();
send_query();
alarm_timestamp() = td::Timestamp::in(timeout_);
}
void send_query() {
td::actor::send_closure(dht_, &ton::dht::Dht::get_value, key_,
[SelfId = actor_id(this)](td::Result<ton::dht::DhtValue> R) {
td::actor::send_closure(SelfId, &Resolver::got_result, std::move(R));
});
}
void got_result(td::Result<ton::dht::DhtValue> R) {
if (R.is_error()) {
LOG(WARNING) << "Failed to get value, retrying: " << R.move_as_error();
ton::delay_action([SelfId = actor_id(this)]() { td::actor::send_closure(SelfId, &Resolver::send_query); },
td::Timestamp::in(0.25));
return;
}
auto r = R.move_as_ok();
LOG(INFO) << "Got result";
td::TerminalIO::out() << "KEY: " << td::base64_encode(ton::serialize_tl_object(r.key().public_key().tl(), true))
<< "\n";
td::TerminalIO::out() << "VALUE: " << td::base64_encode(r.value().as_slice()) << "\n";
std::exit(0);
}
void alarm() override {
LOG(FATAL) << "Failed to get value: timeout";
}
td::Result<std::shared_ptr<ton::dht::DhtGlobalConfig>> get_dht_config() {
TRY_RESULT_PREFIX(conf_data, td::read_file(global_config_), "failed to read: ");
TRY_RESULT_PREFIX(conf_json, td::json_decode(conf_data.as_slice()), "failed to parse json: ");
ton::ton_api::config_global conf;
TRY_STATUS_PREFIX(ton::ton_api::from_json(conf, conf_json.get_object()), "json does not fit TL scheme: ");
if (!conf.dht_) {
return td::Status::Error(ton::ErrorCode::error, "does not contain [dht] section");
}
ton::ton_api::dht_nodes* static_nodes = nullptr;
ton::ton_api::downcast_call(*conf.dht_, [&](auto &f) { static_nodes = f.static_nodes_.get(); });
auto &nodes = static_nodes->nodes_;
if (server_idx_ >= 0) {
CHECK(server_idx_ < (int)nodes.size());
LOG(INFO) << "Using server #" << server_idx_;
std::swap(nodes[0], nodes[server_idx_]);
nodes.resize(1);
} else {
LOG(INFO) << "Using all " << nodes.size() << " servers";
}
TRY_RESULT_PREFIX(dht, ton::dht::Dht::create_global_config(std::move(conf.dht_)), "bad [dht] section: ");
return std::move(dht);
}
};
td::Result<td::Bits256> parse_bits256(td::Slice s) {
td::BufferSlice str = td::base64_decode(s, true);
if (str.size() != 32) {
return td::Status::Error("Invalid bits256");
}
return td::Bits256(td::BitPtr((unsigned char *)str.data()));
}
int main(int argc, char *argv[]) {
td::actor::ActorOwn<Resolver> x;
td::optional<std::string> global_config;
int server_idx = -1;
td::uint16 port = 2380;
td::optional<td::Bits256> key_id;
td::optional<std::string> key_name;
td::uint32 key_idx = 0;
double timeout = 5.0;
td::OptionParser p;
p.set_description("find value in dht by the given key (key-id, key-name, ket-idx)");
p.add_option('h', "help", "print help", [&]() {
char b[10240];
td::StringBuilder sb(td::MutableSlice{b, 10000});
sb << p;
std::cout << sb.as_cslice().c_str();
std::exit(2);
});
p.add_option('C', "global-config", "global config", [&](td::Slice arg) { global_config = arg.str(); });
p.add_checked_option('s', "server-idx", "index of dht server from global config (default: all)", [&](td::Slice arg) {
TRY_RESULT_ASSIGN(server_idx, td::to_integer_safe<int>(arg));
return td::Status::OK();
});
p.add_checked_option('p', "port", "set udp port", [&](td::Slice arg) {
TRY_RESULT_ASSIGN(port, td::to_integer_safe<td::uint16>(arg));
return td::Status::OK();
});
p.add_option('v', "verbosity", "set verbosity", [&](td::Slice arg) {
int v = VERBOSITY_NAME(FATAL) + (td::to_integer<int>(arg));
SET_VERBOSITY_LEVEL(v);
});
p.add_checked_option('k', "key-id", "set key id (256-bit, base64)", [&](td::Slice arg) {
TRY_RESULT_ASSIGN(key_id, parse_bits256(arg));
return td::Status::OK();
});
p.add_option('n', "key-name", "set key name", [&](td::Slice arg) { key_name = arg.str(); });
p.add_checked_option('i', "key-idx", "set key idx (default: 0)", [&](td::Slice arg) {
TRY_RESULT_ASSIGN(key_idx, td::to_integer_safe<td::uint32>(arg));
return td::Status::OK();
});
p.add_option('t', "timeout", "set timeout (default: 5s)", [&](td::Slice arg) { timeout = td::to_double(arg); });
td::actor::Scheduler scheduler({2});
scheduler.run_in_context([&] { p.run(argc, argv).ensure(); });
scheduler.run_in_context([&] {
LOG_IF(FATAL, !global_config) << "global config is not set";
LOG_IF(FATAL, !key_id) << "key-id is not set";
LOG_IF(FATAL, !key_name) << "key-name is not set";
x = td::actor::create_actor<Resolver>(
"Resolver", global_config.value(), server_idx, port,
ton::dht::DhtKey{ton::PublicKeyHash(key_id.value()), key_name.value(), key_idx}, timeout);
});
scheduler.run_in_context([&] { td::actor::send_closure(x, &Resolver::run); });
scheduler.run();
return 0;
}

View file

@ -1,21 +1,22 @@
FROM ubuntu:20.04 as builder
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake clang-6.0 openssl libssl-dev zlib1g-dev gperf wget git && \
DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake clang-6.0 openssl libssl-dev zlib1g-dev gperf wget git ninja-build && \
rm -rf /var/lib/apt/lists/*
ENV CC clang-6.0
ENV CXX clang++-6.0
ENV CCACHE_DISABLE 1
WORKDIR /
RUN git clone --recursive https://github.com/ton-blockchain/ton
WORKDIR /ton
RUN mkdir build && \
cd build && \
cmake .. -DCMAKE_BUILD_TYPE=Release && \
make -j 4
cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. && \
ninja tonlibjson fift func validator-engine validator-engine-console generate-random-id dht-server lite-client
FROM ubuntu:20.04
RUN apt-get update && \
apt-get install -y openssl wget&& \
apt-get install -y openssl wget libatomic1 && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/ton-work/db && \
mkdir -p /var/ton-work/db/static

View file

@ -2,11 +2,11 @@
Dockerfile for The Open Network Node
#### Install
```docker pull ghcr.io/newton-blockchain/ton```
```docker pull ghcr.io/ton-blockchain/ton:latest```
#### Create volume
```docker volume create ton-db```
#### Run
```docker run -d --name ton-node --mount source=ton-db,target=/var/ton-work/db --network host -e "PUBLIC_IP=<YOUR_PUBLIC_IP>" -e "CONSOLE_PORT=<TCP-PORT1>" -e "LITESERVER=true" -e "LITE_PORT=<TCP-PORT2>" -it ghcr.io/newton-blockchain/ton```
```docker run -d --name ton-node --mount source=ton-db,target=/var/ton-work/db --network host -e "PUBLIC_IP=<YOUR_PUBLIC_IP>" -e "CONSOLE_PORT=<TCP-PORT1>" -e "LITESERVER=true" -e "LITE_PORT=<TCP-PORT2>" -it ghcr.io/ton-blockchain/ton```
If you don't need a liteserver, remove -e "LITESERVER=true" from the run command.

View file

@ -7,8 +7,14 @@ cmake_minimum_required(VERSION 3.4.1 FATAL_ERROR)
option(TONLIB_ENABLE_JNI "Enable JNI-compatible TonLib API" ON)
string(APPEND CMAKE_CXX_FLAGS " -std=c++14 -Wall -Wextra -Wno-unused-parameter -Wno-deprecated-declarations -Wconversion -Wno-sign-conversion -fno-omit-frame-pointer -ffunction-sections -fdata-sections")
string(APPEND CMAKE_SHARED_LINKER_FLAGS " -Wl,--gc-sections -Wl,--exclude-libs,ALL")
if (${CMAKE_SYSTEM_NAME} MATCHES "Windows")
string(APPEND CMAKE_CXX_FLAGS " -std=c++14 -fno-omit-frame-pointer -ffunction-sections -fdata-sections")
else()
string(APPEND CMAKE_CXX_FLAGS " -std=c++14 -Wall -Wextra -Wno-unused-parameter -Wno-deprecated-declarations -Wconversion -Wno-sign-conversion -fno-omit-frame-pointer -ffunction-sections -fdata-sections")
endif()
if (NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
string(APPEND CMAKE_SHARED_LINKER_FLAGS " -Wl,--gc-sections -Wl,--exclude-libs,ALL")
endif()
# Creates and names a library, sets it as either STATIC
# or SHARED, and provides the relative paths to its source code.
@ -37,9 +43,15 @@ target_link_libraries(native-lib tonlib)
set(TONLIB_API_JAVA_PACKAGE "drinkless/org/ton")
target_compile_definitions(native-lib PRIVATE PACKAGE_NAME="${TONLIB_API_JAVA_PACKAGE}")
add_custom_command(TARGET native-lib POST_BUILD
COMMAND ${CMAKE_COMMAND} -E rename $<TARGET_FILE:native-lib> $<TARGET_FILE:native-lib>.debug
COMMAND ${CMAKE_STRIP} --strip-debug --strip-unneeded $<TARGET_FILE:native-lib>.debug -o $<TARGET_FILE:native-lib>)
if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
add_custom_command(TARGET native-lib POST_BUILD
COMMAND ${CMAKE_COMMAND} -E rename $<TARGET_FILE:native-lib> $<TARGET_FILE:native-lib>.debug
COMMAND ${CMAKE_STRIP} -S $<TARGET_FILE:native-lib>.debug -o $<TARGET_FILE:native-lib>)
elseif (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
add_custom_command(TARGET native-lib POST_BUILD
COMMAND ${CMAKE_COMMAND} -E rename $<TARGET_FILE:native-lib> $<TARGET_FILE:native-lib>.debug
COMMAND ${CMAKE_STRIP} --strip-debug --strip-unneeded $<TARGET_FILE:native-lib>.debug -o $<TARGET_FILE:native-lib>)
endif()
if (NOT CMAKE_CROSSCOMPILING)
set(TONLIB_API_JAVA_PATH ${CMAKE_CURRENT_SOURCE_DIR}/src/)

48
example/android/README.md Normal file
View file

@ -0,0 +1,48 @@
# Generation of Tonlib libraries for Android OS
**Tl;dr** Download the latest Tonlib libraries for Android from the TON releases page, or check the artifacts of the [Android JNI GitHub action](https://github.com/ton-blockchain/ton/actions/workflows/tonlib-android-jni.yml).
## Compile Tonlib for Android manually
Prerequisites: Java installed and the JAVA_HOME environment variable set.
```bash
git clone --recursive https://github.com/ton-blockchain/ton.git
cd ton
wget https://dl.google.com/android/repository/android-ndk-r25b-linux.zip
unzip android-ndk-r25b-linux.zip
export JAVA_AWT_LIBRARY=NotNeeded
export JAVA_JVM_LIBRARY=NotNeeded
export JAVA_INCLUDE_PATH=${JAVA_HOME}/include
export JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include
export JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/linux
export ANDROID_NDK_ROOT=$(pwd)/android-ndk-r25b
export OPENSSL_DIR=$(pwd)/example/android/third_party/crypto
rm -rf example/android/src/drinkless/org/ton/TonApi.java
cd example/android/
cmake -GNinja -DTON_ONLY_TONLIB=ON .
ninja prepare_cross_compiling
rm CMakeCache.txt
./build-all.sh
```
# Generation of Tonlib libraries for iOS in Xcode
1. Clone repository https://github.com/labraburn/tonlib-xcframework
2. Open repository directory in Terminal
3. Run command:
```bash
swift run builder --output ./build --clean
```
4. Run command:
```bash
echo ./build/TON.xcframework/* | xargs -n 1 cp -R ./Resources/Headers
```
5. Import **OpenSSL.xcframework** and **TON.xcframework** in Xcode in the section _"Frameworks, Libraries, and Embedded Content"_
6. Now you can start using the Tonlib client by importing it in C or Objective-C source files:
```objective-c
#import <tonlib/tonlib_client_json.h>
```
# Generation of Tonlib libraries for Desktop applications
For desktop applications you can use Tonlib built in the ordinary way. If you use Java, you can load the library via JNA.
The latest Tonlib library can be found among the other TON artifacts either on the TON releases page or inside the [appropriate GitHub action](https://github.com/ton-blockchain/ton/actions/).
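Whatever the host language, the library is driven through the C JSON interface declared in tonlib_client_json.h (the header shown in the Xcode section above). The following is a minimal C++ sketch of that flow, not taken from this repository: it assumes tonlibjson is linked in, the header is on the include path, and the setLogVerbosityLevel/getLogVerbosityLevel requests are present in your build's tonlib_api scheme.
```cpp
#include <tonlib/tonlib_client_json.h>

#include <cstdio>

int main() {
  void *client = tonlib_client_json_create();

  // Synchronous request: execute() returns the answer immediately.
  const char *answer = tonlib_client_json_execute(
      client, R"({"@type":"setLogVerbosityLevel","new_verbosity_level":1})");
  if (answer) {
    std::printf("execute: %s\n", answer);
  }

  // Asynchronous request: send() it, then poll receive() with a timeout in seconds.
  // The "@extra" field is echoed back so responses can be matched to requests.
  tonlib_client_json_send(client, R"({"@type":"getLogVerbosityLevel","@extra":"req-1"})");
  const char *response = tonlib_client_json_receive(client, 10.0);
  if (response) {
    std::printf("receive: %s\n", response);
  }

  tonlib_client_json_destroy(client);
  return 0;
}
```
In Java the same five entry points (create, send, receive, execute, destroy) would be bound through JNA instead of being called directly.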

View file

@ -33,8 +33,7 @@ mkdir -p build-$ARCH
cd build-$ARCH
cmake .. \
-DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -GNinja -DANDROID_ABI=${ABI} -DOPENSSL_ROOT_DIR=/Users/arseny30/Code/td_android/libtd/src/main/jni/third_party/crypto/${ARCH} -DTON_ARCH="" -DTON_ONLY_TONLIB=ON || exit 1
cmake .. -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -GNinja -DANDROID_ABI=${ABI} -DOPENSSL_ROOT_DIR=${OPENSSL_DIR}/${ARCH} -DTON_ARCH="" -DTON_ONLY_TONLIB=ON || exit 1
ninja native-lib || exit 1
popd

File diff suppressed because it is too large

View file

@ -205,10 +205,11 @@ class TestNode : public td::actor::Actor {
td::Status send_ext_msg_from_filename(std::string filename);
td::Status save_db_file(ton::FileHash file_hash, td::BufferSlice data);
bool get_account_state(ton::WorkchainId workchain, ton::StdSmcAddress addr, ton::BlockIdExt ref_blkid,
int addr_ext = 0, std::string filename = "", int mode = -1);
int addr_ext = 0, std::string filename = "", int mode = -1, bool prunned = false);
void got_account_state(ton::BlockIdExt ref_blk, ton::BlockIdExt blk, ton::BlockIdExt shard_blk,
td::BufferSlice shard_proof, td::BufferSlice proof, td::BufferSlice state,
ton::WorkchainId workchain, ton::StdSmcAddress addr, std::string filename, int mode);
ton::WorkchainId workchain, ton::StdSmcAddress addr, std::string filename, int mode,
bool prunned);
bool parse_run_method(ton::WorkchainId workchain, ton::StdSmcAddress addr, ton::BlockIdExt ref_blkid, int addr_ext,
std::string method_name, bool ext_mode);
bool after_parse_run_method(ton::WorkchainId workchain, ton::StdSmcAddress addr, ton::BlockIdExt ref_blkid,

View file

@ -89,12 +89,20 @@ void OverlayManager::delete_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdSho
}
void OverlayManager::create_public_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules, td::string scope) {
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules,
td::string scope) {
create_public_overlay_ex(local_id, std::move(overlay_id), std::move(callback), std::move(rules), std::move(scope),
true);
}
void OverlayManager::create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules,
td::string scope, bool announce_self) {
CHECK(!dht_node_.empty());
auto id = overlay_id.compute_short_id();
register_overlay(local_id, id,
Overlay::create(keyring_, adnl_, actor_id(this), dht_node_, local_id, std::move(overlay_id),
std::move(callback), std::move(rules), scope));
std::move(callback), std::move(rules), scope, announce_self));
}
void OverlayManager::create_public_overlay_external(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,

View file

@ -52,9 +52,9 @@ class OverlayManager : public Overlays {
void create_public_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules, td::string scope) override;
void create_public_overlay_external(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules,
td::string scope) override;
void create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules, td::string scope,
bool announce_self) override;
void create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::vector<adnl::AdnlNodeIdShort> nodes, std::unique_ptr<Callback> callback,
OverlayPrivacyRules rules) override;

View file

@ -43,6 +43,7 @@ void OverlayImpl::del_peer(adnl::AdnlNodeIdShort id) {
P->set_neighbour(false);
}
peers_.remove(id);
bad_peers_.erase(id);
update_neighbours(0);
}
@ -51,7 +52,16 @@ void OverlayImpl::del_some_peers() {
return;
}
while (peers_.size() > max_peers()) {
auto P = get_random_peer();
OverlayPeer *P;
if (bad_peers_.empty()) {
P = get_random_peer();
} else {
auto it = bad_peers_.upper_bound(next_bad_peer_);
if (it == bad_peers_.end()) {
it = bad_peers_.begin();
}
P = peers_.get(next_bad_peer_ = *it);
}
if (P) {
auto id = P->get_id();
del_peer(id);
@ -118,16 +128,35 @@ void OverlayImpl::add_peer(OverlayNode P) {
add_peer_in(std::move(P));
}
void OverlayImpl::receive_random_peers(adnl::AdnlNodeIdShort src, td::BufferSlice data) {
void OverlayImpl::on_ping_result(adnl::AdnlNodeIdShort peer, bool success) {
if (!public_) {
return;
}
if (OverlayPeer *p = peers_.get(peer)) {
p->on_ping_result(success);
if (p->is_alive()) {
bad_peers_.erase(peer);
} else {
bad_peers_.insert(peer);
}
}
}
void OverlayImpl::receive_random_peers(adnl::AdnlNodeIdShort src, td::Result<td::BufferSlice> R) {
CHECK(public_);
auto R = fetch_tl_object<ton_api::overlay_nodes>(std::move(data), true);
on_ping_result(src, R.is_ok());
if (R.is_error()) {
VLOG(OVERLAY_NOTICE) << this << ": failed getRandomPeers query: " << R.move_as_error();
return;
}
auto R2 = fetch_tl_object<ton_api::overlay_nodes>(R.move_as_ok(), true);
if (R2.is_error()) {
VLOG(OVERLAY_WARNING) << this << ": dropping incorrect answer to overlay.getRandomPeers query from " << src << ": "
<< R.move_as_error();
<< R2.move_as_error();
return;
}
auto res = R.move_as_ok();
auto res = R2.move_as_ok();
std::vector<OverlayNode> nodes;
for (auto &n : res->nodes_) {
@ -142,12 +171,12 @@ void OverlayImpl::receive_random_peers(adnl::AdnlNodeIdShort src, td::BufferSlic
void OverlayImpl::send_random_peers_cont(adnl::AdnlNodeIdShort src, OverlayNode node,
td::Promise<td::BufferSlice> promise) {
std::vector<tl_object_ptr<ton_api::overlay_node>> vec;
if (!is_external_) {
if (announce_self_) {
vec.emplace_back(node.tl());
}
for (td::uint32 i = 0; i < nodes_to_send(); i++) {
auto P = get_random_peer();
auto P = get_random_peer(true);
if (P) {
vec.emplace_back(P->get().tl());
} else {
@ -161,11 +190,7 @@ void OverlayImpl::send_random_peers_cont(adnl::AdnlNodeIdShort src, OverlayNode
} else {
auto P =
td::PromiseCreator::lambda([SelfId = actor_id(this), src, oid = print_id()](td::Result<td::BufferSlice> res) {
if (res.is_error()) {
VLOG(OVERLAY_NOTICE) << oid << ": failed getRandomPeers query: " << res.move_as_error();
return;
}
td::actor::send_closure(SelfId, &OverlayImpl::receive_random_peers, src, res.move_as_ok());
td::actor::send_closure(SelfId, &OverlayImpl::receive_random_peers, src, std::move(res));
});
auto Q =
create_tl_object<ton_api::overlay_getRandomPeers>(create_tl_object<ton_api::overlay_nodes>(std::move(vec)));
@ -218,6 +243,7 @@ void OverlayImpl::update_neighbours(td::uint32 nodes_to_change) {
neighbours_.pop_back();
X->set_neighbour(false);
}
bad_peers_.erase(X->get_id());
peers_.remove(X->get_id());
continue;
}
@ -246,15 +272,25 @@ void OverlayImpl::update_neighbours(td::uint32 nodes_to_change) {
}
}
OverlayPeer *OverlayImpl::get_random_peer() {
while (peers_.size() > 0) {
OverlayPeer *OverlayImpl::get_random_peer(bool only_alive) {
size_t skip_bad = 3;
while (peers_.size() > (only_alive ? bad_peers_.size() : 0)) {
auto P = peers_.get_random();
if (public_ && P->get_version() + 3600 < td::Clocks::system()) {
VLOG(OVERLAY_INFO) << this << ": deleting outdated peer " << P->get_id();
del_peer(P->get_id());
} else {
return P;
continue;
}
if (!P->is_alive()) {
if (only_alive) {
continue;
}
if (skip_bad > 0) {
--skip_bad;
continue;
}
}
return P;
}
return nullptr;
}
@ -263,17 +299,17 @@ void OverlayImpl::get_overlay_random_peers(td::uint32 max_peers,
td::Promise<std::vector<adnl::AdnlNodeIdShort>> promise) {
std::vector<adnl::AdnlNodeIdShort> v;
auto t = td::Clocks::system();
while (peers_.size() > v.size()) {
while (v.size() < max_peers && v.size() < peers_.size() - bad_peers_.size()) {
auto P = peers_.get_random();
if (P->get_version() + 3600 < t) {
VLOG(OVERLAY_INFO) << this << ": deleting outdated peer " << P->get_id();
del_peer(P->get_id());
} else {
} else if (P->is_alive()) {
bool dup = false;
for (auto &n : v) {
if (n == P->get_id()) {
dup = true;
continue;
break;
}
}
if (!dup) {

View file

@ -38,10 +38,10 @@ td::actor::ActorOwn<Overlay> Overlay::create(td::actor::ActorId<keyring::Keyring
td::actor::ActorId<OverlayManager> manager,
td::actor::ActorId<dht::Dht> dht_node, adnl::AdnlNodeIdShort local_id,
OverlayIdFull overlay_id, std::unique_ptr<Overlays::Callback> callback,
OverlayPrivacyRules rules, td::string scope, bool is_external) {
OverlayPrivacyRules rules, td::string scope, bool announce_self) {
auto R = td::actor::create_actor<OverlayImpl>("overlay", keyring, adnl, manager, dht_node, local_id,
std::move(overlay_id), true, std::vector<adnl::AdnlNodeIdShort>(),
std::move(callback), std::move(rules), scope, is_external);
std::move(callback), std::move(rules), scope, announce_self);
return td::actor::ActorOwn<Overlay>(std::move(R));
}
@ -61,7 +61,7 @@ OverlayImpl::OverlayImpl(td::actor::ActorId<keyring::Keyring> keyring, td::actor
td::actor::ActorId<OverlayManager> manager, td::actor::ActorId<dht::Dht> dht_node,
adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, bool pub,
std::vector<adnl::AdnlNodeIdShort> nodes, std::unique_ptr<Overlays::Callback> callback,
OverlayPrivacyRules rules, td::string scope, bool is_external)
OverlayPrivacyRules rules, td::string scope, bool announce_self)
: keyring_(keyring)
, adnl_(adnl)
, manager_(manager)
@ -72,15 +72,10 @@ OverlayImpl::OverlayImpl(td::actor::ActorId<keyring::Keyring> keyring, td::actor
, public_(pub)
, rules_(std::move(rules))
, scope_(scope)
, is_external_(is_external) {
, announce_self_(announce_self) {
overlay_id_ = id_full_.compute_short_id();
if (is_external_) {
CHECK(public_);
VLOG(OVERLAY_INFO) << this << ": creating public external";
} else {
VLOG(OVERLAY_INFO) << this << ": creating " << (public_ ? "public" : "private");
}
VLOG(OVERLAY_INFO) << this << ": creating " << (public_ ? "public" : "private");
for (auto &node : nodes) {
CHECK(!public_);
@ -139,13 +134,6 @@ void OverlayImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::overlay_getB
}
void OverlayImpl::receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice data, td::Promise<td::BufferSlice> promise) {
if (is_external_) {
LOG(OVERLAY_WARNING) << "dropping query in external overlay " << overlay_id_;
promise.set_error(td::Status::Error("overlay is external"));
td::actor::send_closure(manager_, &Overlays::send_message, src, local_id_, overlay_id_,
create_serialize_tl_object<ton_api::overlay_message_removePeer>());
return;
}
if (!public_) {
auto P = peers_.get(src);
if (P == nullptr) {
@ -153,6 +141,8 @@ void OverlayImpl::receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice data,
promise.set_error(td::Status::Error(ErrorCode::protoviolation, "overlay is private"));
return;
}
} else {
on_ping_result(src, true);
}
auto R = fetch_tl_object<ton_api::Function>(data.clone(), true);
@ -231,24 +221,10 @@ void OverlayImpl::receive_message(adnl::AdnlNodeIdShort src, td::BufferSlice dat
VLOG(OVERLAY_WARNING) << this << ": received query in private overlay from unknown source " << src;
return;
}
} else {
on_ping_result(src, true);
}
auto X = fetch_tl_object<ton_api::overlay_Broadcast>(data.clone(), true);
if (X.is_error()) {
auto Y = fetch_tl_object<ton_api::overlay_message_removePeer>(data.clone(), true);
if (Y.is_ok() && public_) {
VLOG(OVERLAY_DEBUG) << this << ": received removePeer message from " << src;
if (peers_.exists(src)) {
del_peer(src);
}
callback_->on_remove_peer(src);
return;
}
}
if (is_external_) {
LOG(OVERLAY_WARNING) << "dropping message in external overlay " << overlay_id_;
return;
}
if (X.is_error()) {
VLOG(OVERLAY_DEBUG) << this << ": received custom message";
callback_->receive_message(src, overlay_id_, std::move(data));
@ -349,7 +325,7 @@ void OverlayImpl::receive_dht_nodes(td::Result<dht::DhtValue> res, bool dummy) {
VLOG(OVERLAY_NOTICE) << this << ": can not get value from DHT: " << res.move_as_error();
}
if (is_external_) {
if (!announce_self_) {
return;
}
@ -555,9 +531,6 @@ void OverlayImpl::send_new_fec_broadcast_part(PublicKeyHash local_id, Overlay::B
}
void OverlayImpl::deliver_broadcast(PublicKeyHash source, td::BufferSlice data) {
if (is_external_) {
return;
}
callback_->receive_broadcast(source, overlay_id_, std::move(data));
}
@ -630,10 +603,6 @@ void OverlayImpl::set_privacy_rules(OverlayPrivacyRules rules) {
}
void OverlayImpl::check_broadcast(PublicKeyHash src, td::BufferSlice data, td::Promise<td::Unit> promise) {
if (is_external_) {
promise.set_result(td::Unit());
return;
}
callback_->check_broadcast(src, overlay_id_, std::move(data), std::move(promise));
}

View file

@ -42,7 +42,7 @@ class Overlay : public td::actor::Actor {
td::actor::ActorId<OverlayManager> manager,
td::actor::ActorId<dht::Dht> dht_node, adnl::AdnlNodeIdShort local_id,
OverlayIdFull overlay_id, std::unique_ptr<Overlays::Callback> callback,
OverlayPrivacyRules rules, td::string scope, bool is_external = false);
OverlayPrivacyRules rules, td::string scope, bool announce_self = true);
static td::actor::ActorOwn<Overlay> create(td::actor::ActorId<keyring::Keyring> keyring,
td::actor::ActorId<adnl::Adnl> adnl,
td::actor::ActorId<OverlayManager> manager,

View file

@ -79,7 +79,17 @@ class OverlayPeer {
td::int32 get_version() const {
return node_.version();
}
void on_ping_result(bool success) {
if (success) {
missed_pings_ = 0;
} else {
++missed_pings_;
}
}
bool is_alive() const {
return missed_pings_ < 3;
}
td::uint32 throughput_out_bytes = 0;
td::uint32 throughput_in_bytes = 0;
@ -105,6 +115,7 @@ class OverlayPeer {
adnl::AdnlNodeIdShort id_;
bool is_neighbour_ = false;
size_t missed_pings_ = 0;
};
class OverlayImpl : public Overlay {
@ -113,7 +124,7 @@ class OverlayImpl : public Overlay {
td::actor::ActorId<OverlayManager> manager, td::actor::ActorId<dht::Dht> dht_node,
adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, bool pub,
std::vector<adnl::AdnlNodeIdShort> nodes, std::unique_ptr<Overlays::Callback> callback,
OverlayPrivacyRules rules, td::string scope = "{ \"type\": \"undefined\" }", bool is_external = false);
OverlayPrivacyRules rules, td::string scope = "{ \"type\": \"undefined\" }", bool announce_self = true);
void update_dht_node(td::actor::ActorId<dht::Dht> dht) override {
dht_node_ = dht;
}
@ -138,7 +149,8 @@ class OverlayImpl : public Overlay {
alarm_timestamp() = td::Timestamp::in(1);
}
void receive_random_peers(adnl::AdnlNodeIdShort src, td::BufferSlice data);
void on_ping_result(adnl::AdnlNodeIdShort peer, bool success);
void receive_random_peers(adnl::AdnlNodeIdShort src, td::Result<td::BufferSlice> R);
void send_random_peers(adnl::AdnlNodeIdShort dst, td::Promise<td::BufferSlice> promise);
void send_random_peers_cont(adnl::AdnlNodeIdShort dst, OverlayNode node, td::Promise<td::BufferSlice> promise);
void get_overlay_random_peers(td::uint32 max_peers, td::Promise<std::vector<adnl::AdnlNodeIdShort>> promise) override;
@ -299,7 +311,7 @@ class OverlayImpl : public Overlay {
void add_peers(std::vector<OverlayNode> nodes);
void del_some_peers();
void del_peer(adnl::AdnlNodeIdShort id);
OverlayPeer *get_random_peer();
OverlayPeer *get_random_peer(bool only_alive = false);
td::actor::ActorId<keyring::Keyring> keyring_;
td::actor::ActorId<adnl::Adnl> adnl_;
@ -314,6 +326,8 @@ class OverlayImpl : public Overlay {
td::Timestamp update_db_at_;
td::Timestamp update_throughput_at_;
td::Timestamp last_throughput_update_;
std::set<adnl::AdnlNodeIdShort> bad_peers_;
adnl::AdnlNodeIdShort next_bad_peer_ = adnl::AdnlNodeIdShort::zero();
std::unique_ptr<Overlays::Callback> callback_;
@ -371,7 +385,7 @@ class OverlayImpl : public Overlay {
bool semi_public_ = false;
OverlayPrivacyRules rules_;
td::string scope_;
bool is_external_ = false;
bool announce_self_ = true;
std::map<PublicKeyHash, std::shared_ptr<Certificate>> certs_;
class CachedEncryptor : public td::ListNode {

View file

@ -195,10 +195,11 @@ class Overlays : public td::actor::Actor {
virtual void update_dht_node(td::actor::ActorId<dht::Dht> dht) = 0;
virtual void create_public_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules, td::string scope) = 0;
virtual void create_public_overlay_external(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules,
td::string scope) = 0;
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules,
td::string scope) = 0;
virtual void create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules,
td::string scope, bool announce_self) = 0;
virtual void create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::vector<adnl::AdnlNodeIdShort> nodes, std::unique_ptr<Callback> callback,
OverlayPrivacyRules rules) = 0;
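As a usage note, here is a hedged sketch (not from this diff) of calling the new create_public_overlay_ex entry point with announce_self disabled; the actor handle, arguments, and the scope string are assumed to be supplied by the caller.

```cpp
#include "overlay/overlays.h"

// Hypothetical helper, not from this diff.
void create_quiet_public_overlay(td::actor::ActorId<ton::overlay::Overlays> overlays,
                                 ton::adnl::AdnlNodeIdShort local_id,
                                 ton::overlay::OverlayIdFull overlay_id,
                                 std::unique_ptr<ton::overlay::Overlays::Callback> callback,
                                 ton::overlay::OverlayPrivacyRules rules) {
  // announce_self = false: the local node is neither added to overlay.getRandomPeers
  // answers nor announced in the DHT (see send_random_peers_cont / receive_dht_nodes above).
  td::actor::send_closure(overlays, &ton::overlay::Overlays::create_public_overlay_ex, local_id,
                          std::move(overlay_id), std::move(callback), std::move(rules),
                          std::string("{ \"type\": \"undefined\" }"), false);
}
```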

View file

@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR)
add_executable(rldp-http-proxy rldp-http-proxy.cpp DNSResolver.h TonlibClient.h TonlibClient.cpp DNSResolver.cpp)
add_executable(rldp-http-proxy rldp-http-proxy.cpp DNSResolver.h DNSResolver.cpp)
target_include_directories(rldp-http-proxy PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>)
target_link_libraries(rldp-http-proxy PRIVATE tonhttp rldp dht tonlib git)

View file

@ -30,7 +30,8 @@
static const double CACHE_TIMEOUT_HARD = 300.0;
static const double CACHE_TIMEOUT_SOFT = 270.0;
DNSResolver::DNSResolver(td::actor::ActorId<TonlibClient> tonlib_client) : tonlib_client_(std::move(tonlib_client)) {
DNSResolver::DNSResolver(td::actor::ActorId<tonlib::TonlibClientWrapper> tonlib_client)
: tonlib_client_(std::move(tonlib_client)) {
}
void DNSResolver::start_up() {
@ -39,14 +40,15 @@ void DNSResolver::start_up() {
void DNSResolver::sync() {
auto obj = tonlib_api::make_object<tonlib_api::sync>();
auto P = td::PromiseCreator::lambda([SelfId =
actor_id(this)](td::Result<tonlib_api::object_ptr<tonlib_api::Object>> R) {
auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](
td::Result<tonlib_api::object_ptr<tonlib_api::ton_blockIdExt>> R) {
if (R.is_error()) {
LOG(WARNING) << "Sync error: " << R.move_as_error();
ton::delay_action([SelfId]() { td::actor::send_closure(SelfId, &DNSResolver::sync); }, td::Timestamp::in(5.0));
}
});
td::actor::send_closure(tonlib_client_, &TonlibClient::send_request, std::move(obj), std::move(P));
td::actor::send_closure(tonlib_client_, &tonlib::TonlibClientWrapper::send_request<tonlib_api::sync>, std::move(obj),
std::move(P));
}
void DNSResolver::resolve(std::string host, td::Promise<ton::adnl::AdnlNodeIdShort> promise) {
@ -66,18 +68,13 @@ void DNSResolver::resolve(std::string host, td::Promise<ton::adnl::AdnlNodeIdSho
td::Bits256 category = td::sha256_bits256(td::Slice("site", 4));
auto obj = tonlib_api::make_object<tonlib_api::dns_resolve>(nullptr, host, category, 16);
auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), promise = std::move(promise), host = std::move(host)](
td::Result<tonlib_api::object_ptr<tonlib_api::Object>> R) mutable {
td::Result<tonlib_api::object_ptr<tonlib_api::dns_resolved>> R) mutable {
if (R.is_error()) {
if (promise) {
promise.set_result(R.move_as_error());
}
} else {
auto v = R.move_as_ok();
auto obj = dynamic_cast<tonlib_api::dns_resolved *>(v.get());
if (obj == nullptr) {
promise.set_result(td::Status::Error("invalid response from tonlib"));
return;
}
auto obj = R.move_as_ok();
ton::adnl::AdnlNodeIdShort id;
td::uint32 cnt = 0;
for (auto &e : obj->entries_) {
@ -106,7 +103,8 @@ void DNSResolver::resolve(std::string host, td::Promise<ton::adnl::AdnlNodeIdSho
}
}
});
td::actor::send_closure(tonlib_client_, &TonlibClient::send_request, std::move(obj), std::move(P));
td::actor::send_closure(tonlib_client_, &tonlib::TonlibClientWrapper::send_request<tonlib_api::dns_resolve>,
std::move(obj), std::move(P));
}
void DNSResolver::save_to_cache(std::string host, ton::adnl::AdnlNodeIdShort id) {
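Note: the resolver now goes through tonlib::TonlibClientWrapper::send_request<T>, which (as used above) delivers a promise typed with the concrete tonlib result object, so the old dynamic_cast of a generic tonlib_api::Object is no longer needed. A trimmed sketch of the same pattern, assuming tonlib_client_, host and category exist as in the file above:

auto obj = tonlib_api::make_object<tonlib_api::dns_resolve>(nullptr, host, category, 16);
auto P = td::PromiseCreator::lambda(
    [](td::Result<tonlib_api::object_ptr<tonlib_api::dns_resolved>> R) {
      if (R.is_error()) {
        LOG(WARNING) << "dns_resolve failed: " << R.move_as_error();
        return;
      }
      auto resolved = R.move_as_ok();  // already the concrete type, no dynamic_cast
      LOG(INFO) << "got " << resolved->entries_.size() << " dns entries";
    });
td::actor::send_closure(tonlib_client_, &tonlib::TonlibClientWrapper::send_request<tonlib_api::dns_resolve>,
                        std::move(obj), std::move(P));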

View file

@ -25,13 +25,13 @@
*/
#pragma once
#include "td/actor/actor.h"
#include "TonlibClient.h"
#include "tonlib/tonlib/TonlibClientWrapper.h"
#include "adnl/adnl.h"
#include "td/actor/PromiseFuture.h"
class DNSResolver : public td::actor::Actor {
public:
explicit DNSResolver(td::actor::ActorId<TonlibClient> tonlib_client);
explicit DNSResolver(td::actor::ActorId<tonlib::TonlibClientWrapper> tonlib_client);
void start_up() override;
void resolve(std::string host, td::Promise<ton::adnl::AdnlNodeIdShort> promise);
@ -40,7 +40,7 @@ class DNSResolver : public td::actor::Actor {
void sync();
void save_to_cache(std::string host, ton::adnl::AdnlNodeIdShort id);
td::actor::ActorId<TonlibClient> tonlib_client_;
td::actor::ActorId<tonlib::TonlibClientWrapper> tonlib_client_;
struct CacheEntry {
ton::adnl::AdnlNodeIdShort id_;

View file

@ -33,7 +33,6 @@
#include "td/utils/FileLog.h"
#include "td/utils/Random.h"
#include "td/utils/filesystem.h"
#include "td/utils/overloaded.h"
#include "auto/tl/ton_api_json.h"
#include "auto/tl/tonlib_api.hpp"
@ -55,7 +54,7 @@
#include "td/utils/BufferedFd.h"
#include "common/delay.h"
#include "TonlibClient.h"
#include "tonlib/tonlib/TonlibClientWrapper.h"
#include "DNSResolver.h"
#if TD_DARWIN || TD_LINUX
@ -134,6 +133,15 @@ td::BufferSlice create_error_response(const std::string &proto_version, int code
proto_version, code, reason, std::vector<ton::tl_object_ptr<ton::ton_api::http_header>>(), true);
}
const std::string PROXY_SITE_VERISON_HEADER_NAME = "Ton-Proxy-Site-Version";
const std::string PROXY_ENTRY_VERISON_HEADER_NAME = "Ton-Proxy-Entry-Version";
const std::string PROXY_VERSION_HEADER = PSTRING() << "Commit: " << GitMetadata::CommitSHA1()
<< ", Date: " << GitMetadata::CommitDate();
using RegisteredPayloadSenderGuard =
std::unique_ptr<std::pair<td::actor::ActorId<RldpHttpProxy>, td::Bits256>,
std::function<void(std::pair<td::actor::ActorId<RldpHttpProxy>, td::Bits256> *)>>;
class HttpRldpPayloadReceiver : public td::actor::Actor {
public:
HttpRldpPayloadReceiver(std::shared_ptr<ton::http::HttpPayload> payload, td::Bits256 transfer_id,
@ -244,10 +252,10 @@ class HttpRldpPayloadReceiver : public td::actor::Actor {
private:
static constexpr size_t watermark() {
return 1 << 15;
return (1 << 21) - (1 << 11);
}
static constexpr size_t chunk_size() {
return 1 << 17;
return (1 << 21) - (1 << 11);
}
std::shared_ptr<ton::http::HttpPayload> payload_;
@ -268,12 +276,14 @@ class HttpRldpPayloadSender : public td::actor::Actor {
public:
HttpRldpPayloadSender(std::shared_ptr<ton::http::HttpPayload> payload, td::Bits256 transfer_id,
ton::adnl::AdnlNodeIdShort local_id, td::actor::ActorId<ton::adnl::Adnl> adnl,
td::actor::ActorId<ton::rldp::Rldp> rldp, bool is_tunnel = false)
td::actor::ActorId<ton::rldp::Rldp> rldp, td::actor::ActorId<RldpHttpProxy> proxy,
bool is_tunnel = false)
: payload_(std::move(payload))
, id_(transfer_id)
, local_id_(local_id)
, adnl_(adnl)
, rldp_(rldp)
, proxy_(proxy)
, is_tunnel_(is_tunnel) {
}
@ -289,52 +299,10 @@ class HttpRldpPayloadSender : public td::actor::Actor {
return x;
}
void start_up() override {
class AdnlCb : public ton::adnl::Adnl::Callback {
public:
AdnlCb(td::actor::ActorId<HttpRldpPayloadSender> id) : self_id_(id) {
}
void receive_message(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst,
td::BufferSlice data) override {
LOG(INFO) << "http payload sender: dropping message";
}
void receive_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data,
td::Promise<td::BufferSlice> promise) override {
td::actor::send_closure(self_id_, &HttpRldpPayloadSender::receive_query, std::move(data), std::move(promise));
}
void start_up() override;
private:
td::actor::ActorId<HttpRldpPayloadSender> self_id_;
};
td::actor::send_closure(adnl_, &ton::adnl::Adnl::subscribe, local_id_, generate_prefix(),
std::make_unique<AdnlCb>(actor_id(this)));
class Cb : public ton::http::HttpPayload::Callback {
public:
Cb(td::actor::ActorId<HttpRldpPayloadSender> id, size_t watermark) : self_id_(id), watermark_(watermark) {
}
void run(size_t ready_bytes) override {
if (!reached_ && ready_bytes >= watermark_) {
reached_ = true;
td::actor::send_closure(self_id_, &HttpRldpPayloadSender::try_answer_query, false);
} else if (reached_ && ready_bytes < watermark_) {
reached_ = false;
}
}
void completed() override {
td::actor::send_closure(self_id_, &HttpRldpPayloadSender::try_answer_query, false);
}
private:
bool reached_ = false;
td::actor::ActorId<HttpRldpPayloadSender> self_id_;
size_t watermark_;
};
payload_->add_callback(
std::make_unique<Cb>(actor_id(this), is_tunnel_ ? 1 : ton::http::HttpRequest::low_watermark()));
alarm_timestamp() = td::Timestamp::in(is_tunnel_ ? 60.0 : 10.0);
void registered_sender(RegisteredPayloadSenderGuard guard) {
guard_ = std::move(guard);
}
void try_answer_query(bool from_timer = false) {
@ -391,13 +359,9 @@ class HttpRldpPayloadSender : public td::actor::Actor {
try_answer_query(false);
}
void receive_query(td::BufferSlice data, td::Promise<td::BufferSlice> promise) {
auto F = ton::fetch_tl_object<ton::ton_api::http_getNextPayloadPart>(data, true);
if (F.is_error()) {
LOG(INFO) << "failed to parse query: " << F.move_as_error();
return;
}
send_data(F.move_as_ok(), std::move(promise));
void receive_query(ton::tl_object_ptr<ton::ton_api::http_getNextPayloadPart> f,
td::Promise<td::BufferSlice> promise) {
send_data(std::move(f), std::move(promise));
}
void alarm() override {
@ -429,24 +393,22 @@ class HttpRldpPayloadSender : public td::actor::Actor {
stop();
}
void tear_down() override {
td::actor::send_closure(adnl_, &ton::adnl::Adnl::unsubscribe, local_id_, generate_prefix());
}
private:
static constexpr size_t watermark() {
return 1 << 15;
return (1 << 21) - (1 << 11);
}
std::shared_ptr<ton::http::HttpPayload> payload_;
td::Bits256 id_;
RegisteredPayloadSenderGuard guard_;
td::int32 seqno_ = 0;
ton::adnl::AdnlNodeIdShort local_id_;
td::actor::ActorId<ton::adnl::Adnl> adnl_;
td::actor::ActorId<ton::rldp::Rldp> rldp_;
td::actor::ActorId<RldpHttpProxy> proxy_;
size_t cur_query_size_;
td::Promise<td::BufferSlice> cur_query_promise_;
@ -462,7 +424,8 @@ class TcpToRldpRequestSender : public td::actor::Actor {
std::shared_ptr<ton::http::HttpPayload> request_payload,
td::Promise<std::pair<std::unique_ptr<ton::http::HttpResponse>, std::shared_ptr<ton::http::HttpPayload>>> promise,
td::actor::ActorId<ton::adnl::Adnl> adnl, td::actor::ActorId<ton::dht::Dht> dht,
td::actor::ActorId<ton::rldp::Rldp> rldp, td::actor::ActorId<DNSResolver> dns_resolver)
td::actor::ActorId<ton::rldp::Rldp> rldp, td::actor::ActorId<RldpHttpProxy> proxy,
td::actor::ActorId<DNSResolver> dns_resolver)
: local_id_(local_id)
, host_(std::move(host))
, request_(std::move(request))
@ -471,6 +434,7 @@ class TcpToRldpRequestSender : public td::actor::Actor {
, adnl_(adnl)
, dht_(dht)
, rldp_(rldp)
, proxy_(proxy)
, dns_resolver_(dns_resolver) {
}
void start_up() override {
@ -492,7 +456,7 @@ class TcpToRldpRequestSender : public td::actor::Actor {
});
td::actor::create_actor<HttpRldpPayloadSender>("HttpPayloadSender", request_payload_, id_, local_id_, adnl_, rldp_,
is_tunnel())
proxy_, is_tunnel())
.release();
auto f = ton::serialize_tl_object(request_->store_tl(id_), true);
@ -523,6 +487,7 @@ class TcpToRldpRequestSender : public td::actor::Actor {
}
response_->add_header(std::move(h));
}
response_->add_header({PROXY_ENTRY_VERISON_HEADER_NAME, PROXY_VERSION_HEADER});
auto S = response_->complete_parse_header();
if (S.is_error()) {
abort_query(S.move_as_error());
@ -579,6 +544,7 @@ class TcpToRldpRequestSender : public td::actor::Actor {
td::actor::ActorId<ton::adnl::Adnl> adnl_;
td::actor::ActorId<ton::dht::Dht> dht_;
td::actor::ActorId<ton::rldp::Rldp> rldp_;
td::actor::ActorId<RldpHttpProxy> proxy_;
td::actor::ActorId<DNSResolver> dns_resolver_;
std::unique_ptr<ton::http::HttpResponse> response_;
@ -588,47 +554,28 @@ class TcpToRldpRequestSender : public td::actor::Actor {
class RldpTcpTunnel : public td::actor::Actor, private td::ObserverBase {
public:
RldpTcpTunnel(td::Bits256 transfer_id, ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort local_id,
td::actor::ActorId<ton::adnl::Adnl> adnl, td::actor::ActorId<ton::rldp::Rldp> rldp, td::SocketFd fd)
td::actor::ActorId<ton::adnl::Adnl> adnl, td::actor::ActorId<ton::rldp::Rldp> rldp,
td::actor::ActorId<RldpHttpProxy> proxy, td::SocketFd fd)
: id_(transfer_id)
, src_(src)
, local_id_(local_id)
, adnl_(std::move(adnl))
, rldp_(std::move(rldp))
, proxy_(std::move(proxy))
, fd_(std::move(fd)) {
}
void start_up() override {
self_ = actor_id(this);
td::actor::SchedulerContext::get()->get_poll().subscribe(fd_.get_poll_info().extract_pollable_fd(this),
td::PollFlags::ReadWrite());
class Cb : public ton::adnl::Adnl::Callback {
public:
explicit Cb(td::actor::ActorId<RldpTcpTunnel> id) : self_id_(std::move(id)) {
}
void receive_message(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst,
td::BufferSlice data) override {
LOG(INFO) << "rldp tcp tunnel: dropping message";
}
void receive_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data,
td::Promise<td::BufferSlice> promise) override {
td::actor::send_closure(self_id_, &RldpTcpTunnel::receive_query, std::move(data), std::move(promise));
}
private:
td::actor::ActorId<RldpTcpTunnel> self_id_;
};
td::actor::send_closure(adnl_, &ton::adnl::Adnl::subscribe, local_id_, generate_prefix(),
std::make_unique<Cb>(actor_id(this)));
process();
}
void start_up() override;
void tear_down() override {
LOG(INFO) << "RldpTcpTunnel: tear_down";
td::actor::send_closure(adnl_, &ton::adnl::Adnl::unsubscribe, local_id_, generate_prefix());
td::actor::SchedulerContext::get()->get_poll().unsubscribe(fd_.get_poll_info().get_pollable_fd_ref());
}
void registered_sender(RegisteredPayloadSenderGuard guard) {
guard_ = std::move(guard);
}
void notify() override {
td::actor::send_closure(self_, &RldpTcpTunnel::process);
}
@ -642,19 +589,14 @@ class RldpTcpTunnel : public td::actor::Actor, private td::ObserverBase {
td::actor::send_closure(SelfId, &RldpTcpTunnel::got_data_from_rldp, std::move(R));
});
auto f = ton::create_serialize_tl_object<ton::ton_api::http_getNextPayloadPart>(id_, out_seqno_++, 1 << 17);
auto f = ton::create_serialize_tl_object<ton::ton_api::http_getNextPayloadPart>(id_, out_seqno_++,
(1 << 21) - (1 << 11));
td::actor::send_closure(rldp_, &ton::rldp::Rldp::send_query_ex, local_id_, src_, "payload part", std::move(P),
td::Timestamp::in(60.0), std::move(f), (1 << 18) + 1024);
td::Timestamp::in(60.0), std::move(f), (1 << 21) + 1024);
}
void receive_query(td::BufferSlice data, td::Promise<td::BufferSlice> promise) {
auto F = ton::fetch_tl_object<ton::ton_api::http_getNextPayloadPart>(data, true);
if (F.is_error()) {
LOG(INFO) << "failed to parse query: " << F.error();
promise.set_error(F.move_as_error());
return;
}
auto f = F.move_as_ok();
void receive_query(ton::tl_object_ptr<ton::ton_api::http_getNextPayloadPart> f,
td::Promise<td::BufferSlice> promise) {
if (cur_promise_) {
LOG(INFO) << "failed to process query: previous query is active";
promise.set_error(td::Status::Error("previous query is active"));
@ -772,11 +714,13 @@ class RldpTcpTunnel : public td::actor::Actor, private td::ObserverBase {
}
td::Bits256 id_;
RegisteredPayloadSenderGuard guard_;
ton::adnl::AdnlNodeIdShort src_;
ton::adnl::AdnlNodeIdShort local_id_;
td::actor::ActorId<ton::adnl::Adnl> adnl_;
td::actor::ActorId<ton::rldp::Rldp> rldp_;
td::actor::ActorId<RldpHttpProxy> proxy_;
td::BufferedFd<td::SocketFd> fd_;
@ -795,7 +739,7 @@ class RldpToTcpRequestSender : public td::actor::Actor {
std::unique_ptr<ton::http::HttpRequest> request,
std::shared_ptr<ton::http::HttpPayload> request_payload, td::Promise<td::BufferSlice> promise,
td::actor::ActorId<ton::adnl::Adnl> adnl, td::actor::ActorId<ton::rldp::Rldp> rldp,
td::actor::ActorId<HttpRemote> remote)
td::actor::ActorId<RldpHttpProxy> proxy, td::actor::ActorId<HttpRemote> remote)
: id_(id)
, local_id_(local_id)
, dst_(dst)
@ -805,6 +749,7 @@ class RldpToTcpRequestSender : public td::actor::Actor {
, promise_(std::move(promise))
, adnl_(adnl)
, rldp_(rldp)
, proxy_(proxy)
, remote_(std::move(remote)) {
}
void start_up() override {
@ -826,8 +771,9 @@ class RldpToTcpRequestSender : public td::actor::Actor {
void got_result(std::pair<std::unique_ptr<ton::http::HttpResponse>, std::shared_ptr<ton::http::HttpPayload>> R) {
td::actor::create_actor<HttpRldpPayloadSender>("HttpPayloadSender(R)", std::move(R.second), id_, local_id_, adnl_,
rldp_)
rldp_, proxy_)
.release();
R.first->add_header({PROXY_SITE_VERISON_HEADER_NAME, PROXY_VERSION_HEADER});
auto f = ton::serialize_tl_object(R.first->store_tl(), true);
promise_.set_value(std::move(f));
stop();
@ -853,6 +799,7 @@ class RldpToTcpRequestSender : public td::actor::Actor {
td::actor::ActorId<ton::adnl::Adnl> adnl_;
td::actor::ActorId<ton::rldp::Rldp> rldp_;
td::actor::ActorId<RldpHttpProxy> proxy_;
td::actor::ActorId<HttpRemote> remote_;
};
@ -972,7 +919,7 @@ class RldpHttpProxy : public td::actor::Actor {
auto tonlib_options = tonlib_api::make_object<tonlib_api::options>(
tonlib_api::make_object<tonlib_api::config>(conf_dataR.move_as_ok().as_slice().str(), "", false, false),
tonlib_api::make_object<tonlib_api::keyStoreTypeInMemory>());
tonlib_client_ = td::actor::create_actor<TonlibClient>("tonlibclient", std::move(tonlib_options));
tonlib_client_ = td::actor::create_actor<tonlib::TonlibClientWrapper>("tonlibclient", std::move(tonlib_options));
dns_resolver_ = td::actor::create_actor<DNSResolver>("dnsresolver", tonlib_client_.get());
}
@ -1067,6 +1014,22 @@ class RldpHttpProxy : public td::actor::Actor {
server_ = ton::http::HttpServer::create(port_, std::make_shared<Cb>(actor_id(this)));
}
class AdnlPayloadCb : public ton::adnl::Adnl::Callback {
public:
AdnlPayloadCb(td::actor::ActorId<RldpHttpProxy> id) : self_id_(id) {
}
void receive_message(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst,
td::BufferSlice data) override {
}
void receive_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data,
td::Promise<td::BufferSlice> promise) override {
td::actor::send_closure(self_id_, &RldpHttpProxy::receive_payload_part_request, std::move(data),
std::move(promise));
}
private:
td::actor::ActorId<RldpHttpProxy> self_id_;
};
for (auto &serv_id : server_ids_) {
class AdnlCb : public ton::adnl::Adnl::Callback {
public:
@ -1087,7 +1050,15 @@ class RldpHttpProxy : public td::actor::Actor {
td::actor::send_closure(adnl_, &ton::adnl::Adnl::subscribe, serv_id,
ton::adnl::Adnl::int_to_bytestring(ton::ton_api::http_request::ID),
std::make_unique<AdnlCb>(actor_id(this)));
if (local_id_ != serv_id) {
td::actor::send_closure(adnl_, &ton::adnl::Adnl::subscribe, serv_id,
ton::adnl::Adnl::int_to_bytestring(ton::ton_api::http_getNextPayloadPart::ID),
std::make_unique<AdnlPayloadCb>(actor_id(this)));
}
}
td::actor::send_closure(adnl_, &ton::adnl::Adnl::subscribe, local_id_,
ton::adnl::Adnl::int_to_bytestring(ton::ton_api::http_getNextPayloadPart::ID),
std::make_unique<AdnlPayloadCb>(actor_id(this)));
rldp_ = ton::rldp::Rldp::create(adnl_.get());
td::actor::send_closure(rldp_, &ton::rldp::Rldp::set_default_mtu, 16 << 10);
@ -1141,7 +1112,7 @@ class RldpHttpProxy : public td::actor::Actor {
td::actor::create_actor<TcpToRldpRequestSender>("outboundreq", local_id_, host, std::move(request),
std::move(payload), std::move(promise), adnl_.get(), dht_.get(),
rldp_.get(), dns_resolver_.get())
rldp_.get(), actor_id(this), dns_resolver_.get())
.release();
}
@ -1237,7 +1208,7 @@ class RldpHttpProxy : public td::actor::Actor {
LOG(INFO) << "starting HTTP over RLDP request";
td::actor::create_actor<RldpToTcpRequestSender>("inboundreq", f->id_, dst, src, std::move(request),
payload.move_as_ok(), std::move(promise), adnl_.get(), rldp_.get(),
server.http_remote_.get())
actor_id(this), server.http_remote_.get())
.release();
}
@ -1249,10 +1220,52 @@ class RldpHttpProxy : public td::actor::Actor {
return;
}
td::actor::create_actor<RldpTcpTunnel>(td::actor::ActorOptions().with_name("tunnel").with_poll(), id, src, local_id,
adnl_.get(), rldp_.get(), fd.move_as_ok()).release();
adnl_.get(), rldp_.get(), actor_id(this), fd.move_as_ok())
.release();
std::vector<ton::tl_object_ptr<ton::ton_api::http_header>> headers;
headers.push_back(
ton::create_tl_object<ton::ton_api::http_header>(PROXY_SITE_VERISON_HEADER_NAME, PROXY_VERSION_HEADER));
promise.set_result(ton::create_serialize_tl_object<ton::ton_api::http_response>(
http_version, 200, "Connection Established", std::vector<ton::tl_object_ptr<ton::ton_api::http_header>>(),
false));
http_version, 200, "Connection Established", std::move(headers), false));
}
void receive_payload_part_request(td::BufferSlice data, td::Promise<td::BufferSlice> promise) {
auto F = ton::fetch_tl_object<ton::ton_api::http_getNextPayloadPart>(data, true);
if (F.is_error()) {
LOG(INFO) << "failed to parse query: " << F.error();
promise.set_error(F.move_as_error());
return;
}
auto f = F.move_as_ok();
auto it = payload_senders_.find(f->id_);
if (it == payload_senders_.end()) {
LOG(INFO) << "failed to answer query: unknown request id";
promise.set_error(td::Status::Error("unknown request id"));
return;
}
it->second(std::move(f), std::move(promise));
}
void register_payload_sender(
td::Bits256 id,
std::function<void(ton::tl_object_ptr<ton::ton_api::http_getNextPayloadPart>, td::Promise<td::BufferSlice>)> f,
td::Promise<RegisteredPayloadSenderGuard> promise) {
auto &f1 = payload_senders_[id];
if (f1) {
promise.set_error(td::Status::Error("duplicate id"));
return;
}
f1 = std::move(f);
promise.set_result(RegisteredPayloadSenderGuard(
new std::pair<td::actor::ActorId<RldpHttpProxy>, td::Bits256>(actor_id(this), id),
[](std::pair<td::actor::ActorId<RldpHttpProxy>, td::Bits256> *x) {
td::actor::send_closure(x->first, &RldpHttpProxy::unregister_payload_sender, x->second);
delete x;
}));
}
void unregister_payload_sender(td::Bits256 id) {
payload_senders_.erase(id);
}
void add_adnl_addr(ton::adnl::AdnlNodeIdShort id) {
@ -1302,8 +1315,12 @@ class RldpHttpProxy : public td::actor::Actor {
std::string db_root_ = ".";
bool proxy_all_ = false;
td::actor::ActorOwn<TonlibClient> tonlib_client_;
td::actor::ActorOwn<tonlib::TonlibClientWrapper> tonlib_client_;
td::actor::ActorOwn<DNSResolver> dns_resolver_;
std::map<td::Bits256,
std::function<void(ton::tl_object_ptr<ton::ton_api::http_getNextPayloadPart>, td::Promise<td::BufferSlice>)>>
payload_senders_;
};
void TcpToRldpRequestSender::resolve() {
@ -1329,6 +1346,67 @@ void TcpToRldpRequestSender::resolve() {
td::actor::send_closure(dns_resolver_, &DNSResolver::resolve, host_, std::move(P));
}
void HttpRldpPayloadSender::start_up() {
td::actor::send_closure(
proxy_, &RldpHttpProxy::register_payload_sender, id_,
[SelfId = actor_id(this)](ton::tl_object_ptr<ton::ton_api::http_getNextPayloadPart> f,
td::Promise<td::BufferSlice> promise) {
td::actor::send_closure(SelfId, &HttpRldpPayloadSender::receive_query, std::move(f), std::move(promise));
},
[SelfId = actor_id(this)](td::Result<RegisteredPayloadSenderGuard> R) {
if (R.is_error()) {
LOG(INFO) << "Failed to register request sender: " << R.move_as_error();
}
td::actor::send_closure(SelfId, &HttpRldpPayloadSender::registered_sender, R.move_as_ok());
});
class Cb : public ton::http::HttpPayload::Callback {
public:
Cb(td::actor::ActorId<HttpRldpPayloadSender> id, size_t watermark) : self_id_(id), watermark_(watermark) {
}
void run(size_t ready_bytes) override {
if (!reached_ && ready_bytes >= watermark_) {
reached_ = true;
td::actor::send_closure(self_id_, &HttpRldpPayloadSender::try_answer_query, false);
} else if (reached_ && ready_bytes < watermark_) {
reached_ = false;
}
}
void completed() override {
td::actor::send_closure(self_id_, &HttpRldpPayloadSender::try_answer_query, false);
}
private:
bool reached_ = false;
td::actor::ActorId<HttpRldpPayloadSender> self_id_;
size_t watermark_;
};
payload_->add_callback(
std::make_unique<Cb>(actor_id(this), is_tunnel_ ? 1 : ton::http::HttpRequest::low_watermark()));
alarm_timestamp() = td::Timestamp::in(is_tunnel_ ? 60.0 : 10.0);
}
void RldpTcpTunnel::start_up() {
self_ = actor_id(this);
td::actor::SchedulerContext::get()->get_poll().subscribe(fd_.get_poll_info().extract_pollable_fd(this),
td::PollFlags::ReadWrite());
td::actor::send_closure(
proxy_, &RldpHttpProxy::register_payload_sender, id_,
[SelfId = actor_id(this)](ton::tl_object_ptr<ton::ton_api::http_getNextPayloadPart> f,
td::Promise<td::BufferSlice> promise) {
td::actor::send_closure(SelfId, &RldpTcpTunnel::receive_query, std::move(f), std::move(promise));
},
[SelfId = actor_id(this)](td::Result<RegisteredPayloadSenderGuard> R) {
if (R.is_error()) {
LOG(INFO) << "Failed to register request sender: " << R.move_as_error();
}
td::actor::send_closure(SelfId, &RldpTcpTunnel::registered_sender, R.move_as_ok());
});
process();
}
int main(int argc, char *argv[]) {
SET_VERBOSITY_LEVEL(verbosity_WARNING);
@ -1340,7 +1418,7 @@ int main(int argc, char *argv[]) {
td::log_interface = td::default_log_interface;
};
auto add_local_host = [&](const std::string& local, const std::string& remote) -> td::Status {
auto add_local_host = [&](const std::string &local, const std::string &remote) -> td::Status {
std::string host;
std::vector<td::uint16> ports;
auto p = local.find(':');
@ -1357,7 +1435,7 @@ int main(int argc, char *argv[]) {
}
try {
ports.push_back((td::uint16)std::stoul(local.substr(p, p2 - p)));
} catch (const std::logic_error& e) {
} catch (const std::logic_error &e) {
return td::Status::Error(PSLICE() << "Invalid port: " << local.substr(p, p2 - p));
}
p = p2 + 1;
@ -1427,9 +1505,7 @@ int main(int argc, char *argv[]) {
p.add_checked_option('L', "local",
"<hosthame>:<ports>, hostname that will be proxied to localhost\n"
"<ports> is a comma-separated list of ports (may be omitted, default: 80, 443)\n",
[&](td::Slice arg) -> td::Status {
return add_local_host(arg.str(), "127.0.0.1");
});
[&](td::Slice arg) -> td::Status { return add_local_host(arg.str(), "127.0.0.1"); });
p.add_option('D', "db", "db root",
[&](td::Slice arg) { td::actor::send_closure(x, &RldpHttpProxy::set_db_root, arg.str()); });
p.add_checked_option(
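Note on this file: two changes are worth spelling out. First, the per-transfer ADNL subscriptions are replaced by a single http_getNextPayloadPart handler on the proxy that dispatches by transfer id through payload_senders_; each sender registers itself and holds a RegisteredPayloadSenderGuard, a unique_ptr whose custom deleter sends unregister_payload_sender back to the proxy, so deregistration happens automatically when the sender or tunnel actor dies. Second, the payload watermark and chunk size grow from 1 << 15 and 1 << 17 bytes to (1 << 21) - (1 << 11) = 2 095 104 bytes (2 MiB minus 2 KiB of headroom), and the RLDP answer limit grows to (1 << 21) + 1024 accordingly. A simplified stand-alone sketch of the guard idiom (plain types, direct calls instead of actor messages; not the proxy's real classes):

#include <functional>
#include <map>
#include <memory>

struct Registry {
  using Handler = std::function<void(int part_request)>;
  using Guard = std::unique_ptr<int, std::function<void(int *)>>;

  Guard register_sender(int id, Handler h) {
    handlers_[id] = std::move(h);
    return Guard(new int(id), [this](int *p) {
      handlers_.erase(*p);  // automatic unregister when the guard is destroyed
      delete p;
    });
  }
  void dispatch(int id, int part_request) {
    auto it = handlers_.find(id);
    if (it != handlers_.end()) {
      it->second(part_request);  // route the request to the registered sender
    }
  }
  std::map<int, Handler> handlers_;
};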

View file

@ -100,7 +100,7 @@ class RldpIn : public RldpImpl {
void receive_message(adnl::AdnlNodeIdShort source, adnl::AdnlNodeIdShort local_id, TransferId transfer_id,
td::BufferSlice data);
void in_transfer_completed(TransferId transfer_id);
void in_transfer_completed(TransferId transfer_id, bool success);
void add_id(adnl::AdnlNodeIdShort local_id) override;
void get_conn_ip_str(adnl::AdnlNodeIdShort l_id, adnl::AdnlNodeIdShort p_id, td::Promise<td::string> promise) override;

View file

@ -141,11 +141,11 @@ void RldpIn::process_message_part(adnl::AdnlNodeIdShort source, adnl::AdnlNodeId
}
auto P = td::PromiseCreator::lambda(
[SelfId = actor_id(this), source, local_id, transfer_id = part.transfer_id_](td::Result<td::BufferSlice> R) {
td::actor::send_closure(SelfId, &RldpIn::in_transfer_completed, transfer_id, R.is_ok());
if (R.is_error()) {
VLOG(RLDP_INFO) << "failed to receive: " << R.move_as_error();
return;
}
td::actor::send_closure(SelfId, &RldpIn::in_transfer_completed, transfer_id);
td::actor::send_closure(SelfId, &RldpIn::receive_message, source, local_id, transfer_id, R.move_as_ok());
});
@ -250,8 +250,9 @@ void RldpIn::transfer_completed(TransferId transfer_id) {
VLOG(RLDP_DEBUG) << "rldp: completed transfer " << transfer_id << "; " << senders_.size() << " out transfer pending ";
}
void RldpIn::in_transfer_completed(TransferId transfer_id) {
if (lru_set_.count(transfer_id) == 1) {
void RldpIn::in_transfer_completed(TransferId transfer_id, bool success) {
receivers_.erase(transfer_id);
if (!success || lru_set_.count(transfer_id) == 1) {
return;
}
while (lru_size_ >= lru_size()) {

View file

@ -18,6 +18,7 @@
*/
#include "BdwStats.h"
#include "rldp.hpp"
namespace ton {
namespace rldp2 {
@ -39,7 +40,7 @@ void BdwStats::on_packet_ack(const PacketInfo &info, td::Timestamp sent_at, td::
auto ack_passed = now.at() - info.delivered_now.at();
auto passed = td::max(sent_passed, ack_passed);
if (passed < 0.01) {
LOG(ERROR) << "Invalid passed " << passed;
VLOG(RLDP_WARNING) << "Invalid passed " << passed;
}
auto delivered = delivered_count - info.delivered_count;
on_rate_sample((double)delivered / passed, now, info.is_paused);

View file

@ -18,6 +18,7 @@
*/
#include "RldpConnection.h"
#include "rldp.hpp"
#include "td/utils/overloaded.h"
#include "td/utils/Random.h"
@ -83,7 +84,7 @@ td::Timestamp RldpConnection::loop_limits(td::Timestamp now) {
outbound_transfers_.erase(it);
to_on_sent_.emplace_back(limit->transfer_id, std::move(error));
} else {
LOG(ERROR) << "Timeout on unknown transfer " << limit->transfer_id.to_hex();
VLOG(RLDP_WARNING) << "Timeout on unknown transfer " << limit->transfer_id.to_hex();
}
}
limits_set_.erase(*limit);
@ -113,7 +114,7 @@ void RldpConnection::send(TransferId transfer_id, td::BufferSlice data, td::Time
td::Random::secure_bytes(transfer_id.as_slice());
} else {
if (outbound_transfers_.find(transfer_id) != outbound_transfers_.end()) {
LOG(WARNING) << "Skip resend of " << transfer_id.to_hex();
VLOG(RLDP_WARNING) << "Skip resend of " << transfer_id.to_hex();
return;
}
}
@ -143,17 +144,6 @@ void RldpConnection::loop_bbr(td::Timestamp now) {
double speed = bbr_.get_rate();
td::uint32 congestion_window = bbr_.get_window_size();
static td::Timestamp next;
//FIXME: remove this UNSAFE debug output
if (next.is_in_past(now)) {
next = td::Timestamp::in(1, now);
if (td::actor::core::ActorExecuteContext::get()->actor().get_actor_info_ptr()->get_name() == "Alice") {
LOG(ERROR) << "speed=" << td::format::as_size((td::int64)speed * 768) << " "
<< "cgw=" << td::format::as_size((td::int64)congestion_window * 768) << " "
<< "loss=" << loss_stats_.loss * 100 << "%";
}
}
pacer_.set_speed(speed);
congestion_window_ = congestion_window;
}
@ -301,7 +291,7 @@ void RldpConnection::receive_raw_obj(ton::ton_api::rldp2_messagePart &part) {
max_size = limit_it->max_size;
}
if (total_size > max_size) {
LOG(INFO) << "Drop too big rldp query " << part.total_size_ << " > " << max_size;
VLOG(RLDP_INFO) << "Drop too big rldp query " << part.total_size_ << " > " << max_size;
return;
}
@ -324,7 +314,7 @@ void RldpConnection::receive_raw_obj(ton::ton_api::rldp2_messagePart &part) {
}
return {};
}
if (in_part->receiver.on_received(part.seqno_, td::Timestamp::now())) {
if (in_part->receiver.on_received(part.seqno_ + 1, td::Timestamp::now())) {
TRY_STATUS_PREFIX(in_part->decoder->add_symbol({static_cast<td::uint32>(part.seqno_), std::move(part.data_)}),
td::Status::Error(ErrorCode::protoviolation, "invalid symbol"));
if (in_part->decoder->may_try_decode()) {

View file

@ -33,9 +33,8 @@ void RldpReceiver::on_ack_sent(td::Timestamp now) {
//LOG(ERROR) << "RESEND ACK " << cnt_;
}
cnt_++;
if (cnt_ > 7) {
send_ack_at_ = {};
} else {
send_ack_at_ = {};
if (cnt_ <= 7) {
send_ack_at_.relax(td::Timestamp::at(now.at() + config_.ack_delay * (1 << cnt_)));
}
}
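Note: the rewritten branch always clears send_ack_at_ first and, for the first seven resends, re-arms it with an exponentially growing delay, so acknowledgements are retransmitted at ack_delay * 2, 4, 8, ... and then stop. A tiny worked example of the schedule, assuming ack_delay = 0.01 s (the real config value is not shown in this diff):

#include <cstdio>

int main() {
  // Same formula as send_ack_at_.relax(now + config_.ack_delay * (1 << cnt_)).
  const double ack_delay = 0.01;
  for (int cnt = 1; cnt <= 7; ++cnt) {
    std::printf("resend #%d scheduled %.2f s after the previous ack\n", cnt, ack_delay * (1 << cnt));
  }
  // After the 7th resend cnt_ exceeds 7 and send_ack_at_ stays unset: no further resends.
}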

View file

@ -49,7 +49,6 @@ SenderPackets::Update RldpSender::on_ack(const Ack &ack, double ack_delay, td::T
//LOG(ERROR) << "ON ACK " << ack.max_seqno << " " << ack.received_mask << " " << ack.received_count;
auto update = packets_.on_ack(ack);
if (!update.was_max_updated) {
CHECK(!update.new_received);
return update;
}

View file

@ -18,17 +18,18 @@
*/
#include "RttStats.h"
#include "rldp.hpp"
#include <cmath>
namespace ton {
namespace rldp2 {
void RttStats::on_rtt_sample(double rtt_sample, double ack_delay, td::Timestamp now) {
if (rtt_sample < 0.001 || rtt_sample > 10) {
LOG(WARNING) << "Suspicious rtt sample " << rtt_sample;
VLOG(RLDP_WARNING) << "Suspicious rtt sample " << rtt_sample;
return;
}
if (ack_delay < -1e-9 || ack_delay > 10) {
LOG(WARNING) << "Suspicious ack_delay " << ack_delay;
VLOG(RLDP_WARNING) << "Suspicious ack_delay " << ack_delay;
return;
}
rtt_sample = td::max(0.01, rtt_sample);

View file

@ -53,13 +53,28 @@ struct Bitset {
}
auto mask = 1 << bit_i;
if ((bits_[i] & mask) == 0) {
bits_[i] |= mask;
bits_[i] |= (char)mask;
count_++;
return true;
}
return false;
}
bool set_zero(size_t offset) {
auto i = offset / 8;
if (i >= bits_.size()) {
return false;
}
auto bit_i = offset % 8;
auto mask = 1 << bit_i;
if (bits_[i] & mask) {
bits_[i] &= (char)~mask;
count_--;
return true;
}
return false;
}
size_t ones_count() const {
return count_;
}
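Note: set_zero is the symmetric counterpart of set_one: it clears a bit that is currently set and keeps count_ (the number of ones) consistent; the (char) casts just silence narrowing warnings, presumably because the underlying storage holds chars. A small self-contained equivalent over std::vector<char>, assuming the same semantics:

#include <cstddef>
#include <vector>

struct TinyBitset {
  std::vector<char> bits_;
  std::size_t count_ = 0;  // number of bits currently set to one

  bool set_zero(std::size_t offset) {
    auto i = offset / 8;
    if (i >= bits_.size()) {
      return false;             // bit was never allocated, nothing to clear
    }
    auto mask = 1 << (offset % 8);
    if (bits_[i] & mask) {
      bits_[i] &= (char)~mask;  // clear the bit
      count_--;
      return true;              // the bit really changed
    }
    return false;               // already zero
  }
};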

View file

@ -17,6 +17,7 @@ set(STORAGE_SOURCE
TorrentInfo.cpp
TorrentMeta.cpp
db.h
Bitset.h
LoadSpeed.h
MerkleTree.h
@ -24,32 +25,33 @@ set(STORAGE_SOURCE
PartsHelper.h
PeerActor.h
PeerState.h
SharedState.h
Torrent.h
TorrentCreator.h
TorrentHeader.h
TorrentInfo.h
TorrentMeta.h
)
PeerManager.h
MicrochunkTree.h MicrochunkTree.cpp)
set(STORAGE_CLI_SOURCE
storage-cli.cpp
)
add_library(storage ${STORAGE_SOURCE})
target_link_libraries(storage tdutils tdactor tddb ton_crypto tl_api ${JEMALLOC_LIBRARIES})
target_include_directories(storage PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
target_include_directories(storage PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
)
add_executable(storage-cli ${STORAGE_CLI_SOURCE})
target_link_libraries(storage-cli storage overlay tdutils tdactor adnl tl_api dht
rldp rldp2 catchain validatorsession full-node validator ton_validator validator
fift-lib memprof terminal git ${JEMALLOC_LIBRARIES})
rldp rldp2 fift-lib memprof terminal git ${JEMALLOC_LIBRARIES})
set(STORAGE_TEST_SOURCE
${CMAKE_CURRENT_SOURCE_DIR}/test/storage.cpp
PARENT_SCOPE
)
add_subdirectory(storage-daemon)
# Do not install it yet
#install(TARGETS storage-cli RUNTIME DESTINATION bin)

View file

@ -29,7 +29,7 @@ void LoadSpeed::add(std::size_t size, td::Timestamp now) {
}
double LoadSpeed::speed(td::Timestamp now) const {
update(now);
return total_size_ / duration();
return (double)total_size_ / duration(now);
}
td::StringBuilder &operator<<(td::StringBuilder &sb, const LoadSpeed &speed) {
@ -37,15 +37,15 @@ td::StringBuilder &operator<<(td::StringBuilder &sb, const LoadSpeed &speed) {
}
void LoadSpeed::update(td::Timestamp now) const {
while (duration() > 60) {
while (duration(now) > 30) {
total_size_ -= events_.front().size;
events_.pop();
}
}
double LoadSpeed::duration() const {
double LoadSpeed::duration(td::Timestamp now) const {
double res = 5;
if (events_.size() > 1) {
res = std::max(res, events_.back().at.at() - events_.front().at.at());
if (!events_.empty()) {
res = std::max(res, now.at() - events_.front().at.at());
}
return res;
}

View file

@ -26,19 +26,19 @@
namespace ton {
class LoadSpeed {
public:
void add(std::size_t size, td::Timestamp now);
void add(td::uint64 size, td::Timestamp now = td::Timestamp::now());
double speed(td::Timestamp now = td::Timestamp::now()) const;
friend td::StringBuilder &operator<<(td::StringBuilder &sb, const LoadSpeed &speed);
private:
struct Event {
std::size_t size;
td::uint64 size;
td::Timestamp at;
};
mutable td::VectorQueue<Event> events_;
mutable std::size_t total_size_{0};
mutable td::uint64 total_size_{0};
double duration() const;
double duration(td::Timestamp now) const;
void update(td::Timestamp now) const;
};
} // namespace ton
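Note: LoadSpeed now measures throughput over a sliding window: add() records (size, timestamp) events, update() drops events once the window exceeds 30 seconds, and speed() divides the remaining byte count by the window duration, where the duration runs from the oldest retained event to now and is clamped to at least 5 seconds so a single fresh event cannot produce an absurdly high rate. A compact sketch of the same idea, with plain doubles standing in for td::Timestamp:

#include <algorithm>
#include <cstdint>
#include <deque>

class SlidingSpeed {
 public:
  void add(uint64_t size, double now) {
    events_.push_back({size, now});
    total_ += size;
    trim(now);
  }
  double speed(double now) {
    trim(now);
    return static_cast<double>(total_) / duration(now);
  }

 private:
  struct Event { uint64_t size; double at; };
  void trim(double now) {
    while (!events_.empty() && now - events_.front().at > 30.0) {  // 30 s window
      total_ -= events_.front().size;
      events_.pop_front();
    }
  }
  double duration(double now) const {
    double res = 5.0;  // lower bound keeps the estimate sane for sparse data
    if (!events_.empty()) {
      res = std::max(res, now - events_.front().at);
    }
    return res;
  }
  std::deque<Event> events_;
  uint64_t total_ = 0;
};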

View file

@ -28,152 +28,51 @@
#include "vm/excno.hpp"
namespace ton {
static td::Ref<vm::Cell> unpack_proof(td::Ref<vm::Cell> root) {
static td::Result<td::Ref<vm::Cell>> unpack_proof(td::Ref<vm::Cell> root) {
vm::CellSlice cs(vm::NoVm(), root);
CHECK(cs.special_type() == vm::Cell::SpecialType::MerkleProof);
if (cs.special_type() != vm::Cell::SpecialType::MerkleProof) {
return td::Status::Error("Not a merkle proof");
}
return cs.fetch_ref();
}
td::uint32 MerkleTree::get_depth() const {
return log_n_;
}
td::Ref<vm::Cell> MerkleTree::get_root(size_t depth_limit) const {
if (depth_limit > log_n_ || root_proof_.is_null()) {
return root_proof_;
MerkleTree::MerkleTree(size_t pieces_count, td::Bits256 root_hash)
: pieces_count_(pieces_count), root_hash_(root_hash) {
depth_ = 0;
n_ = 1;
while (n_ < pieces_count_) {
++depth_;
n_ <<= 1;
}
auto usage_tree = std::make_shared<vm::CellUsageTree>();
auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1);
auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr());
do_gen_proof(std::move(usage_cell), unpack_proof(root_proof_), depth_limit);
auto res = vm::MerkleProof::generate(root_raw, usage_tree.get());
CHECK(res.not_null());
return res;
}
void MerkleTree::do_gen_proof(td::Ref<vm::Cell> node, td::Ref<vm::Cell> node_raw, size_t depth_limit) const {
if (depth_limit == 0) {
return;
static td::Ref<vm::Cell> build_tree(td::Bits256 *hashes, size_t len) {
if (len == 1) {
return vm::CellBuilder().store_bytes(hashes[0].as_slice()).finalize();
}
// check if it is possible to load node without breaking virtualization
vm::CellSlice cs_raw(vm::NoVm(), std::move(node_raw));
if (cs_raw.is_special()) {
return;
td::Ref<vm::Cell> l = build_tree(hashes, len / 2);
td::Ref<vm::Cell> r = build_tree(hashes + len / 2, len / 2);
return vm::CellBuilder().store_ref(l).store_ref(r).finalize();
};
MerkleTree::MerkleTree(std::vector<td::Bits256> hashes) : pieces_count_(hashes.size()) {
depth_ = 0;
n_ = 1;
while (n_ < pieces_count_) {
++depth_;
n_ <<= 1;
}
hashes.resize(n_, td::Bits256::zero());
td::Ref<vm::Cell> root = build_tree(hashes.data(), n_);
root_hash_ = root->get_hash().bits();
root_proof_ = vm::CellBuilder::create_merkle_proof(std::move(root));
}
static td::Status do_validate_proof(td::Ref<vm::Cell> node, size_t depth) {
if (node->get_depth(0) != depth) {
return td::Status::Error("Depth mismatch");
}
vm::CellSlice cs(vm::NoVm(), std::move(node));
while (cs.have_refs()) {
do_gen_proof(cs.fetch_ref(), cs_raw.fetch_ref(), depth_limit - 1);
}
}
td::Bits256 MerkleTree::get_root_hash() const {
CHECK(root_hash_);
return root_hash_.value();
}
MerkleTree::MerkleTree(size_t chunks_count, td::Bits256 root_hash) {
init_begin(chunks_count);
root_hash_ = root_hash;
init_finish();
}
MerkleTree::MerkleTree(size_t chunks_count, td::Ref<vm::Cell> root_proof) {
init_begin(chunks_count);
root_hash_ = unpack_proof(root_proof)->get_hash(0).as_array();
root_proof_ = std::move(root_proof);
init_finish();
}
MerkleTree::MerkleTree(td::Span<Chunk> chunks) {
init_begin(chunks.size());
for (size_t i = 0; i < chunks.size(); i++) {
CHECK(chunks[i].index == i);
init_add_chunk(i, chunks[i].hash.as_slice());
}
init_finish();
}
void MerkleTree::init_begin(size_t chunks_count) {
log_n_ = 0;
while ((size_t(1) << log_n_) < chunks_count) {
log_n_++;
}
n_ = size_t(1) << log_n_;
total_blocks_ = chunks_count;
mark_.resize(n_ * 2);
proof_.resize(n_ * 2);
td::UInt256 null{};
auto cell = vm::CellBuilder().store_bytes(null.as_slice()).finalize();
for (auto i = chunks_count; i < n_; i++) {
proof_[i + n_] = cell;
}
}
void MerkleTree::init_add_chunk(size_t index, td::Slice hash) {
CHECK(index < total_blocks_);
CHECK(proof_[index + n_].is_null());
proof_[index + n_] = vm::CellBuilder().store_bytes(hash).finalize();
}
void MerkleTree::init_finish() {
for (size_t i = n_ - 1; i >= 1; i--) {
auto j = i * 2;
if (proof_[j].is_null()) {
continue;
}
if (i + 1 < n_ && proof_[i + 1].not_null() && proof_[j]->get_hash() == proof_[j + 2]->get_hash() &&
proof_[j + 1]->get_hash() == proof_[j + 3]->get_hash()) {
// minor optimization for same chunks
proof_[i] = proof_[i + 1];
} else {
proof_[i] = vm::CellBuilder().store_ref(proof_[j]).store_ref(proof_[j + 1]).finalize();
}
}
if (proof_[1].not_null()) {
init_proof();
}
CHECK(root_hash_);
}
void MerkleTree::remove_chunk(std::size_t index) {
CHECK(index < n_);
index += n_;
while (proof_[index].not_null()) {
proof_[index] = {};
index /= 2;
}
}
bool MerkleTree::has_chunk(std::size_t index) const {
CHECK(index < n_);
index += n_;
return proof_[index].not_null();
}
void MerkleTree::add_chunk(std::size_t index, td::Slice hash) {
CHECK(hash.size() == 32);
CHECK(index < n_);
index += n_;
auto cell = vm::CellBuilder().store_bytes(hash).finalize();
CHECK(proof_[index].is_null());
proof_[index] = std::move(cell);
mark_[index] = mark_id_;
for (index /= 2; index != 0; index /= 2) {
CHECK(proof_[index].is_null());
auto &left = proof_[index * 2];
auto &right = proof_[index * 2 + 1];
if (left.not_null() && right.not_null()) {
proof_[index] = vm::CellBuilder().store_ref(left).store_ref(right).finalize();
mark_[index] = mark_id_;
}
}
}
static td::Status do_validate(td::Ref<vm::Cell> ref, size_t depth) {
vm::CellSlice cs(vm::NoVm(), std::move(ref));
if (cs.is_special()) {
if (cs.special_type() != vm::Cell::SpecialType::PrunnedBranch) {
return td::Status::Error("Unexpected special cell");
@ -194,154 +93,65 @@ static td::Status do_validate(td::Ref<vm::Cell> ref, size_t depth) {
if (cs.size_refs() != 2) {
return td::Status::Error("Node in proof must have two refs");
}
TRY_STATUS(do_validate(cs.fetch_ref(), depth - 1));
TRY_STATUS(do_validate(cs.fetch_ref(), depth - 1));
TRY_STATUS(do_validate_proof(cs.fetch_ref(), depth - 1));
TRY_STATUS(do_validate_proof(cs.fetch_ref(), depth - 1));
}
return td::Status::OK();
}
td::Status MerkleTree::validate_proof(td::Ref<vm::Cell> new_root) {
// 1. depth <= log_n
// 2. each non special node has two refs and nothing else
// 3. each list contains only hash
// 4. all special nodes are merkle proofs
vm::CellSlice cs(vm::NoVm(), new_root);
if (cs.special_type() != vm::Cell::SpecialType::MerkleProof) {
return td::Status::Error("Proof must be a mekle proof cell");
td::Status MerkleTree::add_proof(td::Ref<vm::Cell> proof) {
if (proof.is_null()) {
return td::Status::OK();
}
auto root = cs.fetch_ref();
if (root_hash_ && root->get_hash(0).as_slice() != root_hash_.value().as_slice()) {
return td::Status::Error("Proof has invalid root hash");
TRY_RESULT(proof_raw, unpack_proof(proof));
if (root_hash_ != proof_raw->get_hash(0).bits()) {
return td::Status::Error("Root hash mismatch");
}
return do_validate(std::move(root), log_n_);
}
td::Status MerkleTree::add_proof(td::Ref<vm::Cell> new_root) {
CHECK(root_proof_.not_null() || root_hash_);
TRY_STATUS(validate_proof(new_root));
if (root_proof_.not_null()) {
auto combined = vm::MerkleProof::combine_fast(root_proof_, std::move(new_root));
TRY_STATUS(do_validate_proof(proof_raw, depth_));
if (root_proof_.is_null()) {
root_proof_ = std::move(proof);
} else {
auto combined = vm::MerkleProof::combine_fast(root_proof_, std::move(proof));
if (combined.is_null()) {
return td::Status::Error("Can't combine proofs");
}
root_proof_ = std::move(combined);
} else {
root_proof_ = std::move(new_root);
}
return td::Status::OK();
}
td::Status MerkleTree::validate_existing_chunk(const Chunk &chunk) {
vm::CellSlice cs(vm::NoVm(), proof_[chunk.index + n_]);
CHECK(cs.size() == chunk.hash.size());
if (cs.as_bitslice().compare(chunk.hash.cbits()) != 0) {
return td::Status::Error("Hash mismatch");
td::Result<td::Bits256> MerkleTree::get_piece_hash(size_t idx) const {
if (idx >= n_) {
return td::Status::Error("Index is too big");
}
return td::Status::OK();
}
td::Status MerkleTree::try_add_chunks(td::Span<Chunk> chunks) {
td::Bitset bitmask;
add_chunks(chunks, bitmask);
for (size_t i = 0; i < chunks.size(); i++) {
if (!bitmask.get(i)) {
return td::Status::Error(PSLICE() << "Invalid chunk #" << chunks[i].index);
}
}
return td::Status::OK();
}
void MerkleTree::add_chunks(td::Span<Chunk> chunks, td::Bitset &bitmask) {
if (root_proof_.is_null()) {
return;
return td::Status::Error("Hash is not known");
}
mark_id_++;
bitmask.reserve(chunks.size());
for (size_t i = 0; i < chunks.size(); i++) {
const auto &chunk = chunks[i];
if (has_chunk(chunk.index)) {
if (validate_existing_chunk(chunk).is_ok()) {
bitmask.set_one(i);
}
continue;
size_t l = 0, r = n_ - 1;
td::Ref<vm::Cell> node = unpack_proof(root_proof_).move_as_ok();
while (true) {
vm::CellSlice cs(vm::NoVm(), std::move(node));
if (cs.is_special()) {
return td::Status::Error("Hash is not known");
}
add_chunk(chunk.index, chunk.hash.as_slice());
}
root_proof_ = vm::CellBuilder::create_merkle_proof(merge(unpack_proof(root_proof_), 1));
for (size_t i = 0; i < chunks.size(); i++) {
const auto &chunk = chunks[i];
if (has_chunk(chunk.index) && mark_[chunk.index + n_] == mark_id_) {
bitmask.set_one(i);
if (l == r) {
td::Bits256 hash;
CHECK(cs.fetch_bits_to(hash.bits(), 256));
return hash;
}
}
}
td::Ref<vm::Cell> MerkleTree::merge(td::Ref<vm::Cell> root, size_t index) {
const auto &down = proof_[index];
if (down.not_null()) {
if (down->get_hash() != root->get_hash(0)) {
proof_[index] = {};
CHECK(cs.size_refs() == 2);
size_t mid = (l + r) / 2;
if (idx <= mid) {
node = cs.prefetch_ref(0);
r = mid;
} else {
return down;
node = cs.prefetch_ref(1);
l = mid + 1;
}
}
if (mark_[index] != mark_id_ || index >= n_) {
return root;
}
vm::CellSlice cs(vm::NoVm(), root);
if (cs.is_special()) {
cleanup_add(index);
return root;
}
CHECK(cs.size_refs() == 2);
vm::CellBuilder cb;
cb.store_bits(cs.fetch_bits(cs.size()));
auto left = merge(cs.fetch_ref(), index * 2);
auto right = merge(cs.fetch_ref(), index * 2 + 1);
cb.store_ref(std::move(left)).store_ref(std::move(right));
return cb.finalize();
}
void MerkleTree::cleanup_add(size_t index) {
if (mark_[index] != mark_id_) {
return;
}
proof_[index] = {};
if (index >= n_) {
return;
}
cleanup_add(index * 2);
cleanup_add(index * 2 + 1);
}
void MerkleTree::init_proof() {
CHECK(proof_[1].not_null());
td::Bits256 new_root_hash = proof_[1]->get_hash(0).as_array();
CHECK(!root_hash_ || root_hash_.value() == new_root_hash);
root_hash_ = new_root_hash;
root_proof_ = vm::CellBuilder::create_merkle_proof(proof_[1]);
}
td::Result<td::Ref<vm::Cell>> MerkleTree::gen_proof(size_t l, size_t r) {
if (root_proof_.is_null()) {
return td::Status::Error("got no proofs yet");
}
auto usage_tree = std::make_shared<vm::CellUsageTree>();
auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1);
auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr());
TRY_STATUS(TRY_VM(do_gen_proof(std::move(usage_cell), 0, n_ - 1, l, r)));
auto res = vm::MerkleProof::generate(root_raw, usage_tree.get());
CHECK(res.not_null());
return res;
}
td::Status MerkleTree::do_gen_proof(td::Ref<vm::Cell> node, size_t il, size_t ir, size_t l, size_t r) const {
static td::Status do_gen_proof(td::Ref<vm::Cell> node, size_t il, size_t ir, size_t l, size_t r) {
if (ir < l || il > r) {
return td::Status::OK();
}
@ -358,4 +168,114 @@ td::Status MerkleTree::do_gen_proof(td::Ref<vm::Cell> node, size_t il, size_t ir
TRY_STATUS(do_gen_proof(cs.fetch_ref(), ic + 1, ir, l, r));
return td::Status::OK();
}
td::Result<td::Ref<vm::Cell>> MerkleTree::gen_proof(size_t l, size_t r) const {
if (root_proof_.is_null()) {
return td::Status::Error("Got no proofs yet");
}
auto usage_tree = std::make_shared<vm::CellUsageTree>();
auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1);
auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr());
TRY_STATUS(TRY_VM(do_gen_proof(std::move(usage_cell), 0, n_ - 1, l, r)));
auto res = vm::MerkleProof::generate(root_raw, usage_tree.get());
CHECK(res.not_null());
return res;
}
static void do_gen_proof(td::Ref<vm::Cell> node, td::Ref<vm::Cell> node_raw, size_t depth_limit) {
if (depth_limit == 0) {
return;
}
// check if it is possible to load node without breaking virtualization
vm::CellSlice cs_raw(vm::NoVm(), std::move(node_raw));
if (cs_raw.is_special()) {
return;
}
vm::CellSlice cs(vm::NoVm(), std::move(node));
while (cs.have_refs()) {
do_gen_proof(cs.fetch_ref(), cs_raw.fetch_ref(), depth_limit - 1);
}
}
td::Ref<vm::Cell> MerkleTree::get_root(size_t depth_limit) const {
if (depth_limit > depth_ || root_proof_.is_null()) {
return root_proof_;
}
auto usage_tree = std::make_shared<vm::CellUsageTree>();
auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1);
auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr());
do_gen_proof(std::move(usage_cell), unpack_proof(root_proof_).move_as_ok(), depth_limit);
auto res = vm::MerkleProof::generate(root_raw, usage_tree.get());
CHECK(res.not_null());
return res;
}
static td::Ref<vm::Cell> build_from_hashes(std::pair<size_t, td::Bits256> *p, std::pair<size_t, td::Bits256> *pend,
size_t len) {
if (len == 1) {
return vm::CellBuilder().store_bytes((p < pend ? p->second : td::Bits256::zero()).as_slice()).finalize();
}
td::Ref<vm::Cell> l = build_from_hashes(p, pend, len / 2);
td::Ref<vm::Cell> r = build_from_hashes(p + len / 2, pend, len / 2);
return vm::CellBuilder().store_ref(l).store_ref(r).finalize();
}
td::Ref<vm::Cell> MerkleTree::do_add_pieces(td::Ref<vm::Cell> node, std::vector<size_t> &ok_pieces, size_t il,
size_t ir, std::pair<size_t, td::Bits256> *pl,
std::pair<size_t, td::Bits256> *pr) {
if (pl == pr || il >= pieces_count_) {
return node;
}
vm::CellSlice cs;
if (node.is_null() || (cs = vm::CellSlice(vm::NoVm(), node)).is_special() || il + 1 == ir) {
if ((size_t)(pr - pl) != std::min(ir, pieces_count_) - il) {
return node;
}
td::Ref<vm::Cell> new_node = build_from_hashes(pl, pr, ir - il);
td::Bits256 new_hash = new_node->get_hash().bits();
if (new_hash != (node.is_null() ? root_hash_ : node->get_hash(0).bits())) {
return node;
}
for (auto p = pl; p != pr; ++p) {
ok_pieces.push_back(p->first);
}
if (node.is_null() || cs.is_special()) {
node = std::move(new_node);
}
return node;
}
size_t imid = (il + ir) / 2;
auto pmid = pl;
while (pmid != pr && pmid->first < imid) {
++pmid;
}
td::Ref<vm::Cell> l = do_add_pieces(cs.prefetch_ref(0), ok_pieces, il, imid, pl, pmid);
td::Ref<vm::Cell> r = do_add_pieces(cs.prefetch_ref(1), ok_pieces, imid, ir, pmid, pr);
if (l != cs.prefetch_ref(0) || r != cs.prefetch_ref(1)) {
node = vm::CellBuilder().store_ref(l).store_ref(r).finalize();
}
return node;
}
std::vector<size_t> MerkleTree::add_pieces(std::vector<std::pair<size_t, td::Bits256>> pieces) {
if (pieces.empty()) {
return {};
}
std::sort(pieces.begin(), pieces.end());
for (size_t i = 0; i + 1 < pieces.size(); ++i) {
CHECK(pieces[i].first != pieces[i + 1].first);
}
CHECK(pieces.back().first < pieces_count_);
std::vector<size_t> ok_pieces;
td::Ref<vm::Cell> root;
if (!root_proof_.is_null()) {
root = unpack_proof(root_proof_).move_as_ok();
}
root = do_add_pieces(root, ok_pieces, 0, n_, pieces.data(), pieces.data() + pieces.size());
if (!root.is_null()) {
root_proof_ = vm::CellBuilder::create_merkle_proof(std::move(root));
}
return ok_pieces;
}
} // namespace ton
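Note: the rewritten MerkleTree no longer keeps a cell per node; it stores only the root hash, the piece count, and a single Merkle-proof cell that is extended as proofs and verified pieces arrive (add_proof, add_pieces) and queried with get_piece_hash and gen_proof. The tree is a complete binary tree over n_ = 2^depth_ leaves, with missing leaves padded by zero hashes, mirroring build_tree() above. A stand-alone sketch of that root computation; the combine() stand-in is NOT the real hashing, since the actual code builds TVM cells and uses their representation hashes:

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// Placeholder combine: concatenation only illustrates the tree shape,
// it is not cryptographically meaningful.
std::string combine(const std::string &l, const std::string &r) {
  return l + r;
}

std::string root_of(std::vector<std::string> hashes) {
  size_t n = 1;
  while (n < hashes.size()) {
    n <<= 1;                                  // round the leaf count up to a power of two
  }
  hashes.resize(n, std::string(32, '\0'));    // zero-hash padding, as in the diff
  while (hashes.size() > 1) {
    std::vector<std::string> next;
    for (size_t i = 0; i < hashes.size(); i += 2) {
      next.push_back(combine(hashes[i], hashes[i + 1]));  // pair nodes bottom-up
    }
    hashes = std::move(next);
  }
  return hashes[0];
}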

View file

@ -24,6 +24,7 @@
#include "vm/cells.h"
#include "Bitset.h"
#include <map>
namespace ton {
// merkle_node$_ {n:#} left:^(ton::MerkleTree n) right:^(ton::MerkleTree n) = ton::MerkleTree (n + 1);
@ -31,66 +32,33 @@ namespace ton {
class MerkleTree {
public:
td::uint32 get_depth() const;
td::Ref<vm::Cell> get_root(size_t depth_limit = std::numeric_limits<size_t>::max()) const;
td::Bits256 get_root_hash() const;
MerkleTree(size_t chunks_count, td::Bits256 root_hash);
MerkleTree(size_t chunks_count, td::Ref<vm::Cell> root_proof);
struct Chunk {
std::size_t index{0};
td::Bits256 hash;
};
explicit MerkleTree(td::Span<Chunk> chunks);
MerkleTree() = default;
void init_begin(size_t chunks_count);
void init_add_chunk(std::size_t index, td::Slice hash);
void init_finish();
MerkleTree(size_t pieces_count, td::Bits256 root_hash);
explicit MerkleTree(std::vector<td::Bits256> hashes);
// merge external proof with an existing proof
td::Status add_proof(td::Ref<vm::Cell> new_root);
// generate proof for all chunks from l to r inclusive
td::Result<td::Ref<vm::Cell>> gen_proof(size_t l, size_t r);
td::Status add_proof(td::Ref<vm::Cell> proof);
td::Result<td::Bits256> get_piece_hash(size_t idx) const;
td::Result<td::Ref<vm::Cell>> gen_proof(size_t l, size_t r) const;
td::Ref<vm::Cell> get_root(size_t depth_limit = std::numeric_limits<size_t>::max()) const;
// Trying to add and validate a list of chunks simultaneously
td::Status try_add_chunks(td::Span<Chunk> chunks);
std::vector<size_t> add_pieces(std::vector<std::pair<size_t, td::Bits256>> pieces);
// Returns bitmask of successfully added chunks
// Intended to be used during validation of a torrent.
// We got arbitrary chunks read from disk, and we got an arbitrary proof.
// Now we can say about some chunks that they are correct. This is a general way
// to do this.
//
// NB: already added chunks are simply validated. One should be careful
// not to process them twice
void add_chunks(td::Span<Chunk> chunks, td::Bitset &bitmask);
size_t get_depth() const {
return depth_;
}
td::Bits256 get_root_hash() const {
return root_hash_;
}
private:
td::uint64 total_blocks_;
std::size_t n_; // n = 2^log_n
td::uint32 log_n_;
std::size_t mark_id_{0};
std::vector<std::size_t> mark_; // n_ * 2
std::vector<td::Ref<vm::Cell>> proof_; // n_ * 2
td::optional<td::Bits256> root_hash_;
size_t pieces_count_{0};
td::Bits256 root_hash_ = td::Bits256::zero();
size_t depth_{0}, n_{1};
td::Ref<vm::Cell> root_proof_;
td::Status validate_proof(td::Ref<vm::Cell> new_root);
bool has_chunk(std::size_t index) const;
void remove_chunk(std::size_t index);
void add_chunk(std::size_t index, td::Slice hash);
void init_proof();
td::Ref<vm::Cell> merge(td::Ref<vm::Cell> root, size_t index);
void cleanup_add(size_t index);
td::Status do_gen_proof(td::Ref<vm::Cell> node, size_t il, size_t ir, size_t l, size_t r) const;
void do_gen_proof(td::Ref<vm::Cell> node, td::Ref<vm::Cell> node_raw, size_t depth_limit) const;
td::Status validate_existing_chunk(const Chunk &chunk);
td::Ref<vm::Cell> do_add_pieces(td::Ref<vm::Cell> node, std::vector<size_t> &ok_pieces, size_t il, size_t ir,
std::pair<size_t, td::Bits256> *pl, std::pair<size_t, td::Bits256> *pr);
};
} // namespace ton
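Note: taken together, the new header boils down to a small workflow: the creator builds the tree from all piece hashes, a downloader constructs it from pieces_count and the known root hash, merges peer proofs with add_proof, verifies downloaded pieces via add_pieces (which returns the indices that checked out), and serves proofs with gen_proof. A usage sketch under those assumptions; proof_cell, known_root_hash, l, r and hash_l are placeholders:

// Downloader side (sketch; error handling trimmed).
ton::MerkleTree tree(pieces_count, known_root_hash);

// 1. Merge a proof received from a peer.
auto S = tree.add_proof(proof_cell);
if (S.is_error()) { /* the peer sent a bad proof */ }

// 2. Check freshly downloaded pieces against the tree.
std::vector<std::pair<size_t, td::Bits256>> candidates = {{l, hash_l} /* , ... */};
std::vector<size_t> ok = tree.add_pieces(std::move(candidates));

// 3. Serve our own proof for a range when another peer asks for it.
auto proof_for_peer = tree.gen_proof(l, r);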

214
storage/MicrochunkTree.cpp Normal file
View file

@ -0,0 +1,214 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
*/
#include "MicrochunkTree.h"
#include "Torrent.h"
#include "vm/cells/CellSlice.h"
#include "vm/cells/MerkleProof.h"
namespace ton {
static td::Ref<vm::Cell> prun(const td::Ref<vm::Cell> &node) {
vm::CellBuilder cb;
cb.store_long(static_cast<td::uint8>(vm::Cell::SpecialType::PrunnedBranch), 8);
cb.store_long(1, 8);
cb.store_bytes(node->get_hash(0).as_slice());
cb.store_long(node->get_depth(0), 16);
return cb.finalize(true);
}
MicrochunkTree::Builder::Builder(td::uint64 file_size, td::uint64 prun_size)
: file_size_(file_size), prun_size_(prun_size) {
total_size_ = MICROCHUNK_SIZE;
while (total_size_ < file_size) {
total_size_ *= 2;
}
}
void MicrochunkTree::Builder::add_data(td::Slice s) {
CHECK(cur_size_ + s.size() <= file_size_);
while (s.size() > 0) {
size_t buf_ptr = cur_size_ % MICROCHUNK_SIZE;
size_t buf_remaining = MICROCHUNK_SIZE - buf_ptr;
if (buf_remaining > s.size()) {
memcpy(cur_microchunk_ + buf_ptr, s.data(), s.size());
cur_size_ += s.size();
return;
}
memcpy(cur_microchunk_ + buf_ptr, s.data(), buf_remaining);
cur_size_ += buf_remaining;
s.remove_prefix(buf_remaining);
add_microchunk(td::Slice(cur_microchunk_, MICROCHUNK_SIZE));
}
}
MicrochunkTree MicrochunkTree::Builder::finalize() {
CHECK(cur_size_ == file_size_);
if (cur_size_ % MICROCHUNK_SIZE != 0) {
size_t buf_ptr = cur_size_ % MICROCHUNK_SIZE;
size_t buf_remaining = MICROCHUNK_SIZE - buf_ptr;
memset(cur_microchunk_ + buf_ptr, 0, buf_remaining);
cur_size_ += buf_remaining;
add_microchunk(td::Slice(cur_microchunk_, MICROCHUNK_SIZE));
}
memset(cur_microchunk_, 0, MICROCHUNK_SIZE);
while (cur_size_ < total_size_) {
add_microchunk(td::Slice(cur_microchunk_, MICROCHUNK_SIZE));
cur_size_ += MICROCHUNK_SIZE;
}
CHECK(proof_.size() == 1);
MicrochunkTree tree(vm::CellBuilder::create_merkle_proof(std::move(proof_[0])));
CHECK(tree.total_size_ == total_size_);
return tree;
}
void MicrochunkTree::Builder::add_microchunk(td::Slice s) {
CHECK(s.size() == MICROCHUNK_SIZE);
td::Ref<vm::Cell> node = vm::CellBuilder().store_zeroes(2).store_bytes(s).finalize_novm();
while (!proof_.empty() && proof_.back()->get_depth(0) == node->get_depth(0)) {
td::Ref<vm::Cell> left = std::move(proof_.back());
proof_.pop_back();
node = vm::CellBuilder().store_zeroes(2).store_ref(std::move(left)).store_ref(std::move(node)).finalize_novm();
if ((MICROCHUNK_SIZE << node->get_depth(0)) <= prun_size_) {
node = prun(node);
}
}
proof_.push_back(std::move(node));
}
MicrochunkTree::MicrochunkTree(td::Ref<vm::Cell> root_proof) : root_proof_(root_proof) {
td::Ref<vm::Cell> virt_root = vm::MerkleProof::virtualize(root_proof_, 1);
CHECK(!virt_root.is_null());
CHECK(virt_root->get_depth() <= 50);
total_size_ = MICROCHUNK_SIZE << virt_root->get_depth();
root_hash_ = virt_root->get_hash().bits();
}
class GetMicrochunkProof {
public:
GetMicrochunkProof(td::uint64 l, td::uint64 r, Torrent &torrent) : l(l), r(r), torrent(torrent) {
}
td::Result<td::Ref<vm::Cell>> unprun(td::uint64 il, td::uint64 ir) {
if (ir - il == MicrochunkTree::MICROCHUNK_SIZE) {
TRY_RESULT(data, get_microchunk(il));
return vm::CellBuilder().store_zeroes(2).store_bytes(data).finalize_novm();
}
td::uint64 imid = (il + ir) / 2;
TRY_RESULT(node_l, unprun(il, imid));
TRY_RESULT(node_r, unprun(imid, ir));
td::Ref<vm::Cell> node =
vm::CellBuilder().store_zeroes(2).store_ref(std::move(node_l)).store_ref(std::move(node_r)).finalize_novm();
if (l >= ir || il >= r) {
node = prun(node);
}
return node;
}
td::Result<td::Ref<vm::Cell>> unprun(const td::Ref<vm::Cell> &node, td::uint64 il, td::uint64 ir) {
vm::CellSlice cs(vm::NoVm(), node);
if (!cs.is_special()) {
return node;
}
TRY_RESULT(result, unprun(il, ir));
if (result->get_hash(0) != node->get_hash(0)) {
return td::Status::Error("Hash mismatch");
}
return result;
}
td::Result<td::Ref<vm::Cell>> get_proof(td::Ref<vm::Cell> node, td::uint64 il, td::uint64 ir) {
if (l >= ir || il >= r) {
return prun(node);
}
if (ir - il == MicrochunkTree::MICROCHUNK_SIZE) {
return unprun(node, il, ir);
}
if (l <= il && ir <= r) {
return prun(node);
}
td::uint64 imid = (il + ir) / 2;
TRY_RESULT_ASSIGN(node, unprun(node, il, ir));
vm::CellSlice cs(vm::NoVm(), node);
if (cs.size_ext() != 2 + (2 << 16)) {
return td::Status::Error("Invalid node in microchunk tree");
}
TRY_RESULT(node_l, get_proof(cs.prefetch_ref(0), il, imid));
TRY_RESULT(node_r, get_proof(cs.prefetch_ref(1), imid, ir));
return vm::CellBuilder().store_zeroes(2).store_ref(std::move(node_l)).store_ref(std::move(node_r)).finalize_novm();
}
private:
td::uint64 l, r;
Torrent &torrent;
td::uint64 cache_offset = 0;
std::string cache;
td::Result<td::Slice> get_microchunk(td::uint64 l) {
DCHECK(l % MicrochunkTree::MICROCHUNK_SIZE == 0);
td::uint64 r = l + MicrochunkTree::MICROCHUNK_SIZE;
if (!(cache_offset <= l && r <= cache_offset + cache.size())) {
td::uint64 piece_size = torrent.get_info().piece_size;
td::uint64 piece_i = l / piece_size;
if (piece_i < torrent.get_info().pieces_count()) {
TRY_RESULT(piece, torrent.get_piece_data(piece_i));
piece.resize(piece_size, '\0');
cache = std::move(piece);
} else {
cache = std::string(piece_size, '\0');
}
cache_offset = piece_i * piece_size;
}
return td::Slice{cache.data() + (l - cache_offset), MicrochunkTree::MICROCHUNK_SIZE};
}
};
td::Result<td::Ref<vm::Cell>> MicrochunkTree::get_proof(td::uint64 l, td::uint64 r, Torrent &torrent) const {
if (root_proof_.is_null()) {
return td::Status::Error("Empty microchunk tree");
}
if (l % MICROCHUNK_SIZE != 0 || r % MICROCHUNK_SIZE != 0 || l >= r || r > total_size_) {
return td::Status::Error("Invalid range");
}
if (!torrent.inited_info()) {
return td::Status::Error("Torrent info is not ready");
}
if (torrent.get_info().piece_size % MICROCHUNK_SIZE != 0) {
return td::Status::Error("Invalid piece size in torrent");
}
td::Ref<vm::Cell> root_raw = vm::CellSlice(vm::NoVm(), root_proof_).prefetch_ref();
TRY_RESULT(result, GetMicrochunkProof(l, r, torrent).get_proof(std::move(root_raw), 0, total_size_));
return vm::CellBuilder::create_merkle_proof(std::move(result));
}
td::Result<MicrochunkTree> MicrochunkTree::Builder::build_for_torrent(Torrent &torrent, td::uint64 prun_size) {
if (!torrent.inited_info()) {
return td::Status::Error("Torrent info is not available");
}
const TorrentInfo &info = torrent.get_info();
Builder builder(info.file_size, prun_size);
td::uint64 pieces_count = info.pieces_count();
for (td::uint64 i = 0; i < pieces_count; ++i) {
TRY_RESULT(piece, torrent.get_piece_data(i));
builder.add_data(piece);
}
MicrochunkTree tree = builder.finalize();
return tree;
}
} // namespace ton
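Note: MicrochunkTree builds a second, much finer Merkle tree over fixed-size microchunks of the file, pruning every subtree smaller than prun_size so the stored proof stays compact; get_proof(l, r, torrent) later un-prunes only the subtrees overlapping [l, r) by re-reading the corresponding pieces. The size bookkeeping follows the Builder constructor: total_size_ is file_size rounded up to the next power-of-two multiple of MICROCHUNK_SIZE, which also fixes the tree depth. A tiny arithmetic sketch; the MICROCHUNK_SIZE value below is assumed for illustration only, the real constant lives in MicrochunkTree.h and is not shown in this diff:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t MICROCHUNK_SIZE = 16;  // assumed value
  uint64_t file_size = 1000;

  // Same rounding as in Builder: double total_size until the file fits.
  uint64_t total_size = MICROCHUNK_SIZE;
  unsigned depth = 0;
  while (total_size < file_size) {
    total_size *= 2;
    ++depth;
  }
  std::printf("file_size=%llu -> total_size=%llu, depth=%u, microchunks=%llu\n",
              (unsigned long long)file_size, (unsigned long long)total_size, depth,
              (unsigned long long)(total_size / MICROCHUNK_SIZE));
}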

Some files were not shown because too many files have changed in this diff