mirror of
https://github.com/ton-blockchain/ton
synced 2025-03-09 15:40:10 +00:00
Merge branch 'testnet' into accelerator
This commit is contained in:
commit
d74d3f1fc2
45 changed files with 1770 additions and 126 deletions
37
.github/workflows/docker-ubuntu-image.yml
vendored
37
.github/workflows/docker-ubuntu-image.yml
vendored
|
@ -20,22 +20,49 @@ jobs:
|
||||||
submodules: 'recursive'
|
submodules: 'recursive'
|
||||||
|
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v1
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v1
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
- name: Login to GitHub Container Registry
|
- name: Login to GitHub Container Registry
|
||||||
uses: docker/login-action@v1
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
registry: ${{ env.REGISTRY }}
|
registry: ${{ env.REGISTRY }}
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Build and export to Docker
|
||||||
|
uses: docker/build-push-action@v6
|
||||||
|
with:
|
||||||
|
load: true
|
||||||
|
context: ./
|
||||||
|
tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:test
|
||||||
|
|
||||||
|
- name: Test
|
||||||
|
run: |
|
||||||
|
docker run --rm -e "TEST=1" ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:test
|
||||||
|
|
||||||
|
- name: Get next tag
|
||||||
|
id: tag
|
||||||
|
run: |
|
||||||
|
git fetch --all --tags
|
||||||
|
git tag -l
|
||||||
|
NEW_TAG=v$(date +'%Y.%m')
|
||||||
|
FOUND=$(git tag -l | grep $NEW_TAG | wc -l)
|
||||||
|
if [ $FOUND -eq 0 ]; then
|
||||||
|
echo "TAG=$NEW_TAG" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "TAG=$NEW_TAG-$FOUND" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
- name: Build and push
|
- name: Build and push
|
||||||
id: docker_build
|
id: docker_build
|
||||||
uses: docker/build-push-action@v2
|
uses: docker/build-push-action@v6
|
||||||
with:
|
with:
|
||||||
|
platforms: linux/amd64,linux/arm64
|
||||||
push: true
|
push: true
|
||||||
context: ./
|
context: ./
|
||||||
tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest
|
tags: |
|
||||||
|
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest
|
||||||
|
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tag.outputs.TAG }}
|
||||||
|
|
|
@ -573,6 +573,7 @@ add_test(test-cells test-cells ${TEST_OPTIONS})
|
||||||
add_test(test-smartcont test-smartcont)
|
add_test(test-smartcont test-smartcont)
|
||||||
add_test(test-net test-net)
|
add_test(test-net test-net)
|
||||||
add_test(test-actors test-tdactor)
|
add_test(test-actors test-tdactor)
|
||||||
|
add_test(test-emulator test-emulator)
|
||||||
|
|
||||||
#BEGIN tonlib
|
#BEGIN tonlib
|
||||||
add_test(test-tdutils test-tdutils)
|
add_test(test-tdutils test-tdutils)
|
||||||
|
|
34
Dockerfile
34
Dockerfile
|
@ -1,10 +1,15 @@
|
||||||
FROM ubuntu:22.04 as builder
|
FROM ubuntu:22.04 AS builder
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git ninja-build libsecp256k1-dev libsodium-dev libmicrohttpd-dev liblz4-dev pkg-config autoconf automake libtool libjemalloc-dev && \
|
DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git ninja-build libsecp256k1-dev libsodium-dev libmicrohttpd-dev liblz4-dev pkg-config autoconf automake libtool libjemalloc-dev lsb-release software-properties-common gnupg
|
||||||
rm -rf /var/lib/apt/lists/*
|
|
||||||
ENV CC clang
|
RUN wget https://apt.llvm.org/llvm.sh && \
|
||||||
ENV CXX clang++
|
chmod +x llvm.sh && \
|
||||||
ENV CCACHE_DISABLE 1
|
./llvm.sh 16 all && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
ENV CC=/usr/bin/clang-16
|
||||||
|
ENV CXX=/usr/bin/clang++-16
|
||||||
|
ENV CCACHE_DISABLE=1
|
||||||
|
|
||||||
WORKDIR /
|
WORKDIR /
|
||||||
RUN mkdir ton
|
RUN mkdir ton
|
||||||
|
@ -13,17 +18,16 @@ WORKDIR /ton
|
||||||
COPY ./ ./
|
COPY ./ ./
|
||||||
|
|
||||||
RUN mkdir build && \
|
RUN mkdir build && \
|
||||||
cd build && \
|
cd build && \
|
||||||
cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= -DTON_USE_JEMALLOC=ON .. && \
|
cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= -DTON_USE_JEMALLOC=ON .. && \
|
||||||
ninja storage-daemon storage-daemon-cli tonlibjson fift func validator-engine validator-engine-console generate-random-id dht-server lite-client
|
ninja storage-daemon storage-daemon-cli tonlibjson fift func validator-engine validator-engine-console generate-random-id dht-server lite-client
|
||||||
|
|
||||||
FROM ubuntu:22.04
|
FROM ubuntu:22.04
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y wget libatomic1 openssl libsecp256k1-dev libsodium-dev libmicrohttpd-dev liblz4-dev libjemalloc-dev && \
|
apt-get install -y wget curl libatomic1 openssl libsecp256k1-dev libsodium-dev libmicrohttpd-dev liblz4-dev libjemalloc-dev htop net-tools netcat iptraf-ng jq tcpdump pv plzip && \
|
||||||
rm -rf /var/lib/apt/lists/*
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
RUN mkdir -p /var/ton-work/db && \
|
RUN mkdir -p /var/ton-work/db /var/ton-work/scripts
|
||||||
mkdir -p /var/ton-work/db/static
|
|
||||||
|
|
||||||
COPY --from=builder /ton/build/storage/storage-daemon/storage-daemon /usr/local/bin/
|
COPY --from=builder /ton/build/storage/storage-daemon/storage-daemon /usr/local/bin/
|
||||||
COPY --from=builder /ton/build/storage/storage-daemon/storage-daemon-cli /usr/local/bin/
|
COPY --from=builder /ton/build/storage/storage-daemon/storage-daemon-cli /usr/local/bin/
|
||||||
|
@ -33,7 +37,7 @@ COPY --from=builder /ton/build/validator-engine-console/validator-engine-console
|
||||||
COPY --from=builder /ton/build/utils/generate-random-id /usr/local/bin/
|
COPY --from=builder /ton/build/utils/generate-random-id /usr/local/bin/
|
||||||
|
|
||||||
WORKDIR /var/ton-work/db
|
WORKDIR /var/ton-work/db
|
||||||
COPY ./docker/init.sh ./docker/control.template ./
|
COPY ./docker/init.sh ./docker/control.template /var/ton-work/scripts/
|
||||||
RUN chmod +x init.sh
|
RUN chmod +x /var/ton-work/scripts/init.sh
|
||||||
|
|
||||||
ENTRYPOINT ["/var/ton-work/db/init.sh"]
|
ENTRYPOINT ["/var/ton-work/scripts/init.sh"]
|
||||||
|
|
|
@ -1,4 +1,5 @@
|
||||||
pipeline {
|
pipeline {
|
||||||
|
|
||||||
agent none
|
agent none
|
||||||
stages {
|
stages {
|
||||||
stage('Run Builds') {
|
stage('Run Builds') {
|
||||||
|
@ -12,7 +13,7 @@ pipeline {
|
||||||
sh '''
|
sh '''
|
||||||
cp assembly/native/build-ubuntu-shared.sh .
|
cp assembly/native/build-ubuntu-shared.sh .
|
||||||
chmod +x build-ubuntu-shared.sh
|
chmod +x build-ubuntu-shared.sh
|
||||||
./build-ubuntu-shared.sh -t -a
|
./build-ubuntu-shared.sh -a
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd artifacts
|
cd artifacts
|
||||||
|
@ -31,7 +32,7 @@ pipeline {
|
||||||
sh '''
|
sh '''
|
||||||
cp assembly/nix/build-linux-x86-64-nix.sh .
|
cp assembly/nix/build-linux-x86-64-nix.sh .
|
||||||
chmod +x build-linux-x86-64-nix.sh
|
chmod +x build-linux-x86-64-nix.sh
|
||||||
./build-linux-x86-64-nix.sh -t
|
./build-linux-x86-64-nix.sh
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd artifacts
|
cd artifacts
|
||||||
|
@ -50,7 +51,7 @@ pipeline {
|
||||||
sh '''
|
sh '''
|
||||||
cp assembly/native/build-ubuntu-shared.sh .
|
cp assembly/native/build-ubuntu-shared.sh .
|
||||||
chmod +x build-ubuntu-shared.sh
|
chmod +x build-ubuntu-shared.sh
|
||||||
./build-ubuntu-shared.sh -t -a
|
./build-ubuntu-shared.sh -a
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd artifacts
|
cd artifacts
|
||||||
|
@ -69,7 +70,7 @@ pipeline {
|
||||||
sh '''
|
sh '''
|
||||||
cp assembly/nix/build-linux-arm64-nix.sh .
|
cp assembly/nix/build-linux-arm64-nix.sh .
|
||||||
chmod +x build-linux-arm64-nix.sh
|
chmod +x build-linux-arm64-nix.sh
|
||||||
./build-linux-arm64-nix.sh -t
|
./build-linux-arm64-nix.sh
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd artifacts
|
cd artifacts
|
||||||
|
@ -88,7 +89,7 @@ pipeline {
|
||||||
sh '''
|
sh '''
|
||||||
cp assembly/native/build-macos-shared.sh .
|
cp assembly/native/build-macos-shared.sh .
|
||||||
chmod +x build-macos-shared.sh
|
chmod +x build-macos-shared.sh
|
||||||
./build-macos-shared.sh -t -a
|
./build-macos-shared.sh -a
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd artifacts
|
cd artifacts
|
||||||
|
@ -107,7 +108,7 @@ pipeline {
|
||||||
sh '''
|
sh '''
|
||||||
cp assembly/nix/build-macos-nix.sh .
|
cp assembly/nix/build-macos-nix.sh .
|
||||||
chmod +x build-macos-nix.sh
|
chmod +x build-macos-nix.sh
|
||||||
./build-macos-nix.sh -t
|
./build-macos-nix.sh
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd artifacts
|
cd artifacts
|
||||||
|
@ -126,7 +127,7 @@ pipeline {
|
||||||
sh '''
|
sh '''
|
||||||
cp assembly/native/build-macos-shared.sh .
|
cp assembly/native/build-macos-shared.sh .
|
||||||
chmod +x build-macos-shared.sh
|
chmod +x build-macos-shared.sh
|
||||||
./build-macos-shared.sh -t -a
|
./build-macos-shared.sh -a
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd artifacts
|
cd artifacts
|
||||||
|
@ -145,7 +146,7 @@ pipeline {
|
||||||
sh '''
|
sh '''
|
||||||
cp assembly/nix/build-macos-nix.sh .
|
cp assembly/nix/build-macos-nix.sh .
|
||||||
chmod +x build-macos-nix.sh
|
chmod +x build-macos-nix.sh
|
||||||
./build-macos-nix.sh -t
|
./build-macos-nix.sh
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd artifacts
|
cd artifacts
|
||||||
|
@ -164,7 +165,7 @@ pipeline {
|
||||||
sh '''
|
sh '''
|
||||||
cp assembly/native/build-macos-shared.sh .
|
cp assembly/native/build-macos-shared.sh .
|
||||||
chmod +x build-macos-shared.sh
|
chmod +x build-macos-shared.sh
|
||||||
./build-macos-shared.sh -t -a
|
./build-macos-shared.sh -a
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd artifacts
|
cd artifacts
|
||||||
|
@ -182,7 +183,7 @@ pipeline {
|
||||||
timeout(time: 180, unit: 'MINUTES') {
|
timeout(time: 180, unit: 'MINUTES') {
|
||||||
bat '''
|
bat '''
|
||||||
copy assembly\\native\\build-windows.bat .
|
copy assembly\\native\\build-windows.bat .
|
||||||
build-windows.bat -t
|
build-windows.bat
|
||||||
'''
|
'''
|
||||||
bat '''
|
bat '''
|
||||||
cd artifacts
|
cd artifacts
|
||||||
|
|
|
@ -158,7 +158,7 @@ if [ "$with_tests" = true ]; then
|
||||||
http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \
|
http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \
|
||||||
test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont \
|
test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont \
|
||||||
test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp \
|
test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp \
|
||||||
test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state
|
test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state test-emulator
|
||||||
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
||||||
else
|
else
|
||||||
ninja storage-daemon storage-daemon-cli blockchain-explorer \
|
ninja storage-daemon storage-daemon-cli blockchain-explorer \
|
||||||
|
|
|
@ -86,7 +86,7 @@ if [ "$with_tests" = true ]; then
|
||||||
http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \
|
http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \
|
||||||
test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont \
|
test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont \
|
||||||
test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp \
|
test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp \
|
||||||
test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state
|
test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state test-emulator
|
||||||
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
||||||
else
|
else
|
||||||
ninja storage-daemon storage-daemon-cli blockchain-explorer \
|
ninja storage-daemon storage-daemon-cli blockchain-explorer \
|
||||||
|
|
|
@ -150,7 +150,7 @@ ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \
|
||||||
adnl-proxy create-state emulator test-ed25519 test-ed25519-crypto test-bigint \
|
adnl-proxy create-state emulator test-ed25519 test-ed25519-crypto test-bigint \
|
||||||
test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils \
|
test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils \
|
||||||
test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain \
|
test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain \
|
||||||
test-fec test-tddb test-db test-validator-session-state
|
test-fec test-tddb test-db test-validator-session-state test-emulator
|
||||||
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
||||||
else
|
else
|
||||||
ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \
|
ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \
|
||||||
|
|
|
@ -58,7 +58,7 @@ ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \
|
||||||
adnl-proxy create-state emulator test-ed25519 test-ed25519-crypto test-bigint \
|
adnl-proxy create-state emulator test-ed25519 test-ed25519-crypto test-bigint \
|
||||||
test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils \
|
test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils \
|
||||||
test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain \
|
test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain \
|
||||||
test-fec test-tddb test-db test-validator-session-state
|
test-fec test-tddb test-db test-validator-session-state test-emulator
|
||||||
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
||||||
else
|
else
|
||||||
ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \
|
ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \
|
||||||
|
|
|
@ -160,7 +160,7 @@ tonlib-cli validator-engine lite-client pow-miner validator-engine-console gener
|
||||||
json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator ^
|
json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator ^
|
||||||
test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net ^
|
test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net ^
|
||||||
test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain ^
|
test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain ^
|
||||||
test-fec test-tddb test-db test-validator-session-state
|
test-fec test-tddb test-db test-validator-session-state test-emulator
|
||||||
IF %errorlevel% NEQ 0 (
|
IF %errorlevel% NEQ 0 (
|
||||||
echo Can't compile TON
|
echo Can't compile TON
|
||||||
exit /b %errorlevel%
|
exit /b %errorlevel%
|
||||||
|
|
|
@ -161,7 +161,7 @@ tonlib-cli validator-engine lite-client pow-miner validator-engine-console gener
|
||||||
json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator ^
|
json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator ^
|
||||||
test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net ^
|
test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net ^
|
||||||
test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain ^
|
test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain ^
|
||||||
test-fec test-tddb test-db test-validator-session-state
|
test-fec test-tddb test-db test-validator-session-state test-emulator
|
||||||
IF %errorlevel% NEQ 0 (
|
IF %errorlevel% NEQ 0 (
|
||||||
echo Can't compile TON
|
echo Can't compile TON
|
||||||
exit /b %errorlevel%
|
exit /b %errorlevel%
|
||||||
|
|
|
@ -810,7 +810,7 @@ size_limits_config#01 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells
|
||||||
max_ext_msg_size:uint32 max_ext_msg_depth:uint16 = SizeLimitsConfig;
|
max_ext_msg_size:uint32 max_ext_msg_depth:uint16 = SizeLimitsConfig;
|
||||||
size_limits_config_v2#02 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells:uint32 max_vm_data_depth:uint16
|
size_limits_config_v2#02 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells:uint32 max_vm_data_depth:uint16
|
||||||
max_ext_msg_size:uint32 max_ext_msg_depth:uint16 max_acc_state_cells:uint32 max_acc_state_bits:uint32
|
max_ext_msg_size:uint32 max_ext_msg_depth:uint16 max_acc_state_cells:uint32 max_acc_state_bits:uint32
|
||||||
max_acc_public_libraries:uint32 = SizeLimitsConfig;
|
max_acc_public_libraries:uint32 defer_out_queue_size_limit:uint32 = SizeLimitsConfig;
|
||||||
_ SizeLimitsConfig = ConfigParam 43;
|
_ SizeLimitsConfig = ConfigParam 43;
|
||||||
|
|
||||||
// key is [ wc:int32 addr:uint256 ]
|
// key is [ wc:int32 addr:uint256 ]
|
||||||
|
|
|
@ -1956,6 +1956,7 @@ td::Result<SizeLimitsConfig> Config::do_get_size_limits_config(td::Ref<vm::CellS
|
||||||
limits.max_acc_state_bits = rec.max_acc_state_bits;
|
limits.max_acc_state_bits = rec.max_acc_state_bits;
|
||||||
limits.max_acc_state_cells = rec.max_acc_state_cells;
|
limits.max_acc_state_cells = rec.max_acc_state_cells;
|
||||||
limits.max_acc_public_libraries = rec.max_acc_public_libraries;
|
limits.max_acc_public_libraries = rec.max_acc_public_libraries;
|
||||||
|
limits.defer_out_queue_size_limit = rec.defer_out_queue_size_limit;
|
||||||
};
|
};
|
||||||
gen::SizeLimitsConfig::Record_size_limits_config rec_v1;
|
gen::SizeLimitsConfig::Record_size_limits_config rec_v1;
|
||||||
gen::SizeLimitsConfig::Record_size_limits_config_v2 rec_v2;
|
gen::SizeLimitsConfig::Record_size_limits_config_v2 rec_v2;
|
||||||
|
|
|
@ -395,6 +395,7 @@ struct SizeLimitsConfig {
|
||||||
td::uint32 max_acc_state_cells = 1 << 16;
|
td::uint32 max_acc_state_cells = 1 << 16;
|
||||||
td::uint32 max_acc_state_bits = (1 << 16) * 1023;
|
td::uint32 max_acc_state_bits = (1 << 16) * 1023;
|
||||||
td::uint32 max_acc_public_libraries = 256;
|
td::uint32 max_acc_public_libraries = 256;
|
||||||
|
td::uint32 defer_out_queue_size_limit = 256;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct CatchainValidatorsConfig {
|
struct CatchainValidatorsConfig {
|
||||||
|
|
|
@ -2392,8 +2392,12 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
||||||
if (!tlb::csr_unpack(msg.info, info) || !block::tlb::t_CurrencyCollection.validate_csr(info.value)) {
|
if (!tlb::csr_unpack(msg.info, info) || !block::tlb::t_CurrencyCollection.validate_csr(info.value)) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
fwd_fee = block::tlb::t_Grams.as_integer(info.fwd_fee);
|
if (cfg.disable_custom_fess) {
|
||||||
ihr_fee = block::tlb::t_Grams.as_integer(info.ihr_fee);
|
fwd_fee = ihr_fee = td::zero_refint();
|
||||||
|
} else {
|
||||||
|
fwd_fee = block::tlb::t_Grams.as_integer(info.fwd_fee);
|
||||||
|
ihr_fee = block::tlb::t_Grams.as_integer(info.ihr_fee);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// set created_at and created_lt to correct values
|
// set created_at and created_lt to correct values
|
||||||
info.created_at = now;
|
info.created_at = now;
|
||||||
|
@ -3755,6 +3759,7 @@ td::Status FetchConfigParams::fetch_config_params(
|
||||||
action_phase_cfg->action_fine_enabled = config.get_global_version() >= 4;
|
action_phase_cfg->action_fine_enabled = config.get_global_version() >= 4;
|
||||||
action_phase_cfg->bounce_on_fail_enabled = config.get_global_version() >= 4;
|
action_phase_cfg->bounce_on_fail_enabled = config.get_global_version() >= 4;
|
||||||
action_phase_cfg->message_skip_enabled = config.get_global_version() >= 8;
|
action_phase_cfg->message_skip_enabled = config.get_global_version() >= 8;
|
||||||
|
action_phase_cfg->disable_custom_fess = config.get_global_version() >= 8;
|
||||||
action_phase_cfg->mc_blackhole_addr = config.get_burning_config().blackhole_addr;
|
action_phase_cfg->mc_blackhole_addr = config.get_burning_config().blackhole_addr;
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
|
|
|
@ -168,6 +168,7 @@ struct ActionPhaseConfig {
|
||||||
bool action_fine_enabled{false};
|
bool action_fine_enabled{false};
|
||||||
bool bounce_on_fail_enabled{false};
|
bool bounce_on_fail_enabled{false};
|
||||||
bool message_skip_enabled{false};
|
bool message_skip_enabled{false};
|
||||||
|
bool disable_custom_fess{false};
|
||||||
td::optional<td::Bits256> mc_blackhole_addr;
|
td::optional<td::Bits256> mc_blackhole_addr;
|
||||||
const MsgPrices& fetch_msg_prices(bool is_masterchain) const {
|
const MsgPrices& fetch_msg_prices(bool is_masterchain) const {
|
||||||
return is_masterchain ? fwd_mc : fwd_std;
|
return is_masterchain ? fwd_mc : fwd_std;
|
||||||
|
|
|
@ -109,3 +109,4 @@ Operations for working with Merkle proofs, where cells can have non-zero level a
|
||||||
- Slightly change random seed generation to fix mix of `addr_rewrite` and `addr`.
|
- Slightly change random seed generation to fix mix of `addr_rewrite` and `addr`.
|
||||||
- Fill in `skipped_actions` for both invalid and valid messages with `IGNORE_ERROR` mode that can't be sent.
|
- Fill in `skipped_actions` for both invalid and valid messages with `IGNORE_ERROR` mode that can't be sent.
|
||||||
- Allow unfreeze through external messages.
|
- Allow unfreeze through external messages.
|
||||||
|
- Don't use user-provided `fwd_fee` and `ihr_fee` for internal messages.
|
545
docker/README.md
545
docker/README.md
|
@ -1,28 +1,525 @@
|
||||||
# The Open Network Node
|
# Official TON Docker image
|
||||||
Dockerfile for The Open Network Node
|
|
||||||
|
1. [Dockerfile](#docker)
|
||||||
|
2. [Kubernetes deployment on-premises](#deploy-on-premises-with-metallb-load-balancer-)
|
||||||
|
3. [Kubernetes deployment on AWS](#deploy-on-aws-cloud-amazon-web-services)
|
||||||
|
4. [Kubernetes deployment on GCP](#deploy-on-gcp-google-cloud-platform)
|
||||||
|
5. [Kubernetes deployment on AliCloud](#deploy-on-ali-cloud)
|
||||||
|
6. [Troubleshooting](#troubleshooting)
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
The TON node, whether it is validator or fullnode, requires a public IP address.
|
||||||
|
If your server is within an internal network or kubernetes you have to make sure that the required ports are available from the outside.
|
||||||
|
|
||||||
|
Also pay attention at [hardware requirements](https://docs.ton.org/participate/run-nodes/full-node) for TON fullnodes and validators. Pods and StatefulSets in this guide imply these requirements.
|
||||||
|
|
||||||
|
It is recommended to everyone to read Docker chapter first in order to get a better understanding about TON Docker image and its parameters.
|
||||||
|
|
||||||
|
## Docker
|
||||||
|
|
||||||
|
### Installation
|
||||||
|
```docker pull ghcr.io/ton-blockchain/ton:latest```
|
||||||
|
|
||||||
|
### Configuration
|
||||||
|
TON validator-engine supports number of command line parameters,
|
||||||
|
these parameters can be handed over to the container via environment variables.
|
||||||
|
Below is the list of supported arguments and their default values:
|
||||||
|
|
||||||
|
| Argument | Description | Mandatory? | Default value |
|
||||||
|
|:------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:----------:|:-------------------------------------------------------:|
|
||||||
|
| PUBLIC_IP | This will be a public IP address of your TON node. Normally it is the same IP address as your server's external IP. This also can be your proxy server or load balancer IP address. | yes | |
|
||||||
|
| GLOBAL_CONFIG_URL | TON global configuration file. Mainnet - https://ton.org/global-config.json, Testnet - https://ton.org/testnet-global.config.json | no | https://api.tontech.io/ton/wallet-mainnet.autoconf.json |
|
||||||
|
| DUMP_URL | URL to TON dump. Specify dump from https://dump.ton.org. If you are using testnet dump, make sure to download global config for testnet. | no | |
|
||||||
|
| VALIDATOR_PORT | UDP port that must be available from the outside. Used for communication with other nodes. | no | 30001 |
|
||||||
|
| CONSOLE_PORT | This TCP port is used to access validator's console. Not necessarily to be opened for external access. | no | 30002 |
|
||||||
|
| LITE_PORT | Lite-server's TCP port. Used by lite-client. | no | 30003 |
|
||||||
|
| LITESERVER | true or false. Set to true if you want up and running lite-server. | no | false |
|
||||||
|
| STATE_TTL | Node's state will be gc'd after this time (in seconds). | no | 86400 |
|
||||||
|
| ARCHIVE_TTL | Node's archived blocks will be deleted after this time (in seconds). | no | 86400 |
|
||||||
|
| THREADS | Number of threads used by validator-engine. | no | 8 |
|
||||||
|
| VERBOSITY | Verbosity level. | no | 3 |
|
||||||
|
| CUSTOM_ARG | validator-engine might have some undocumented arguments. This is reserved for the test purposes.<br/>For example you can pass **--logname /var/ton-work/log** in order to have log files. | no | |
|
||||||
|
|
||||||
|
### Run the node - the quick way
|
||||||
|
The below command runs docker container with a TON node, that will start synchronization process.
|
||||||
|
|
||||||
|
Notice **--network host** option, means that the Docker container will use the network namespace of the host machine.
|
||||||
|
In this case there is no need to map ports between the host and the container. The container will use the same IP address and ports as the host.
|
||||||
|
This approach simplifies networking configuration for the container, and usually is used on the dedicated server with assigned public IP.
|
||||||
|
|
||||||
|
Keep in mind that this option can also introduce security concerns because the container has access to the host's network interfaces directly, which might not be desirable in a multi-tenant environment.
|
||||||
|
|
||||||
|
Check your firewall configuration and make sure that at least UDP port 43677 is publicly available.
|
||||||
|
Find out your PUBLIC_IP:
|
||||||
|
```
|
||||||
|
curl -4 ifconfig.me
|
||||||
|
```
|
||||||
|
and replace it in the command below:
|
||||||
|
```
|
||||||
|
docker run -d --name ton-node -v /data/db:/var/ton-work/db \
|
||||||
|
-e "PUBLIC_IP=<PUBLIC_IP>" \
|
||||||
|
-e "LITESERVER=true" \
|
||||||
|
-e "DUMP_URL=https://dump.ton.org/dumps/latest.tar.lz" \
|
||||||
|
--network host \
|
||||||
|
-it ghcr.io/ton-blockchain/ton
|
||||||
|
```
|
||||||
|
If you don't need Lite-server, then remove -e "LITESERVER=true".
|
||||||
|
|
||||||
|
### Run the node - isolated way
|
||||||
|
In production environments it is recommended to use **Port mapping** feature of Docker's default bridge network.
|
||||||
|
When you use port mapping, Docker allocates a specific port on the host to forward traffic to a port inside the container.
|
||||||
|
This is ideal for running multiple containers with isolated networks on the same host.
|
||||||
|
```
|
||||||
|
docker run -d --name ton-node -v /data/db:/var/ton-work/db \
|
||||||
|
-e "PUBLIC_IP=<PUBLIC_IP>" \
|
||||||
|
-e "DUMP_URL=https://dump.ton.org/dumps/latest.tar.lz" \
|
||||||
|
-e "VALIDATOR_PORT=443" \
|
||||||
|
-e "CONSOLE_PORT=88" \
|
||||||
|
-e "LITE_PORT=443" \
|
||||||
|
-e "LITESERVER=true" \
|
||||||
|
-p 443:443/udp \
|
||||||
|
-p 88:88/tcp \
|
||||||
|
-p 443:443/tcp \
|
||||||
|
-it ghcr.io/ton-blockchain/ton
|
||||||
|
```
|
||||||
|
Adjust ports per your need.
|
||||||
|
Check your firewall configuration and make sure that customized ports (443/udp, 88/tcp and 443/tcp in this example) are publicly available.
|
||||||
|
|
||||||
|
### Verify if TON node is operating correctly
|
||||||
|
After executing above command check the log files:
|
||||||
|
|
||||||
|
```docker logs ton-node```
|
||||||
|
|
||||||
|
This is totally fine if in the log output for some time (up to 15 minutes) you see messages like:
|
||||||
|
|
||||||
|
```log
|
||||||
|
failed to download proof link: [Error : 651 : no nodes]
|
||||||
|
```
|
||||||
|
|
||||||
|
After some time you should be able to see multiple messages similar to these below:
|
||||||
|
```log
|
||||||
|
failed to download key blocks: [Error : 652 : adnl query timeout]
|
||||||
|
last key block is [ w=-1 s=9223372036854775808 seq=34879845 rcEsfLF3E80PqQPWesW+rlOY2EpXd5UDrW32SzRWgus= C1Hs+q2Vew+WxbGL6PU1P6R2iYUJVJs4032CTS/DQzI= ]
|
||||||
|
getnextkey: [Error : 651 : not inited]
|
||||||
|
downloading state (-1,8000000000000000,38585739):9E86E166AE7E24BAA22762766381440C625F47E2B11D72967BB58CE8C90F7EBA:5BFFF759380097DF178325A7151E9C0571C4E452A621441A03A0CECAED970F57: total=1442840576 (71MB/s)downloading state (-1,8000000000000000,38585739):9E86E166AE7E24BAA22762766381440C625F47E2B11D72967BB58CE8C90F7EBA:5BFFF759380097DF178325A7151E9C0571C4E452A621441A03A0CECAED970F57: total=1442840576 (71MB/s)
|
||||||
|
finished downloading state (-1,8000000000000000,38585739):9E86E166AE7E24BAA22762766381440C625F47E2B11D72967BB58CE8C90F7EBA:5BFFF759380097DF178325A7151E9C0571C4E452A621441A03A0CECAED970F57: total=4520747390
|
||||||
|
getnextkey: [Error : 651 : not inited]
|
||||||
|
getnextkey: [Error : 651 : not inited]
|
||||||
|
```
|
||||||
|
As you noticed we have mounted docker volume to a local folder **/data/db**.
|
||||||
|
Go inside this folder on your server and check if its size is growing (```sudo du -h .*```)
|
||||||
|
|
||||||
|
Now connect to the running container:
|
||||||
|
```
|
||||||
|
docker exec -ti ton-node /bin/bash
|
||||||
|
```
|
||||||
|
and try to connect and execute **getconfig** command via validator-engine-console:
|
||||||
|
```
|
||||||
|
validator-engine-console -k client -p server.pub -a localhost:$(jq .control[].port <<< cat /var/ton-work/db/config.json) -c getconfig
|
||||||
|
```
|
||||||
|
if you see a json output that means that validator-engine is up, now execute **last** command with a lite-client:
|
||||||
|
```
|
||||||
|
lite-client -a localhost:$(jq .liteservers[].port <<< cat /var/ton-work/db/config.json) -p liteserver.pub -c last
|
||||||
|
```
|
||||||
|
if you see the following output:
|
||||||
|
```
|
||||||
|
conn ready
|
||||||
|
failed query: [Error : 652 : adnl query timeout]
|
||||||
|
cannot get server version and time (server too old?)
|
||||||
|
server version is too old (at least 1.1 with capabilities 1 required), some queries are unavailable
|
||||||
|
fatal error executing command-line queries, skipping the rest
|
||||||
|
```
|
||||||
|
it means that the lite-server is up, but the node is not synchronized yet.
|
||||||
|
Once the node is synchronized, the output of **last** command will be similar to this one:
|
||||||
|
|
||||||
|
```
|
||||||
|
conn ready
|
||||||
|
server version is 1.1, capabilities 7
|
||||||
|
server time is 1719306580 (delta 0)
|
||||||
|
last masterchain block is (-1,8000000000000000,20435927):47A517265B25CE4F2C8B3058D46343C070A4B31C5C37745390CE916C7D1CE1C5:279F9AA88C8146257E6C9B537905238C26E37DC2E627F2B6F1D558CB29A6EC82
|
||||||
|
server time is 1719306580 (delta 0)
|
||||||
|
zerostate id set to -1:823F81F306FF02694F935CF5021548E3CE2B86B529812AF6A12148879E95A128:67E20AC184B9E039A62667ACC3F9C00F90F359A76738233379EFA47604980CE8
|
||||||
|
```
|
||||||
|
If you can't make it working, refer to the [Troubleshooting](#troubleshooting) section below.
|
||||||
|
### Use validator-engine-console
|
||||||
|
```docker exec -ti ton-node /bin/bash```
|
||||||
|
|
||||||
|
```validator-engine-console -k client -p server.pub -a 127.0.0.1:$(jq .control[].port <<< cat /var/ton-work/db/config.json)```
|
||||||
|
|
||||||
|
### Use lite-client
|
||||||
|
```docker exec -ti ton-node /bin/bash```
|
||||||
|
|
||||||
|
```lite-client -p liteserver.pub -a 127.0.0.1:$(jq .liteservers[].port <<< cat /var/ton-work/db/config.json)```
|
||||||
|
|
||||||
|
If you use lite-client outside the Docker container, copy the **liteserver.pub** from the container:
|
||||||
|
|
||||||
|
```docker cp ton-node:/var/ton-work/db/liteserver.pub /your/path```
|
||||||
|
|
||||||
|
```lite-client -p /your/path/liteserver.pub -a <PUBLIC_IP>:<LITE_PORT>```
|
||||||
|
|
||||||
|
### Stop TON docker container
|
||||||
|
```
|
||||||
|
docker stop ton-node
|
||||||
|
```
|
||||||
|
|
||||||
|
## Kubernetes
|
||||||
|
### Deploy in a quick way (without load balancer)
|
||||||
|
If the nodes within your kubernetes cluster have external IPs,
|
||||||
|
make sure that the PUBLIC_IP used for validator-engine matches the node's external IP.
|
||||||
|
If all Kubernetes nodes are inside DMZ - skip this section.
|
||||||
|
|
||||||
|
#### Prepare
|
||||||
|
If you are using **flannel** network driver you can find node's IP this way:
|
||||||
|
```yaml
|
||||||
|
kubectl get nodes
|
||||||
|
kubectl describe node <NODE_NAME> | grep public-ip
|
||||||
|
```
|
||||||
|
for **calico** driver use:
|
||||||
|
```yaml
|
||||||
|
kubectl describe node <NODE_NAME> | grep IPv4Address
|
||||||
|
```
|
||||||
|
Double check if your Kubernetes node's external IP coincides with the host's IP address:
|
||||||
|
```
|
||||||
|
kubectl run --image=ghcr.io/ton-blockchain/ton:latest validator-engine-pod --env="HOST_IP=1.1.1.1" --env="PUBLIC_IP=1.1.1.1"
|
||||||
|
kubectl exec -it validator-engine-pod -- curl -4 ifconfig.me
|
||||||
|
kubectl delete pod validator-engine-pod
|
||||||
|
```
|
||||||
|
If IPs do not match, refer to the sections where load balancers are used.
|
||||||
|
|
||||||
|
Now do the following:
|
||||||
|
* Add a label to this particular node.
|
||||||
|
* By this label our pod will know where to be deployed and what storage to use:
|
||||||
|
```
|
||||||
|
kubectl label nodes <NODE_NAME> node_type=ton-validator
|
||||||
|
```
|
||||||
|
* Replace **<PUBLIC_IP>** (and ports if needed) in file [ton-node-port.yaml](ton-node-port.yaml).
|
||||||
|
* Replace **<LOCAL_STORAGE_PATH>** with a real path on host for Persistent Volume.
|
||||||
|
* If you change the ports, make sure you specify appropriate env vars in Pod section.
|
||||||
|
* If you want to use dynamic storage provisioning via volumeClaimTemplates, feel free to create own StorageClass.
|
||||||
|
|
||||||
#### Install
|
#### Install
|
||||||
```docker pull ghcr.io/ton-blockchain/ton:latest```
|
```yaml
|
||||||
#### Create volume
|
kubectl apply -f ton-node-port.yaml
|
||||||
```docker volume create ton-db```
|
```
|
||||||
#### Run
|
|
||||||
```docker run -d --name ton-node --mount source=ton-db,target=/var/ton-work/db --network host -e "PUBLIC_IP=<YOUR_PUBLIC_IP>" -e "CONSOLE_PORT=<TCP-PORT1>" -e "LITESERVER=true" -e "LITE_PORT=<TCP-PORT2>" -it ghcr.io/ton-blockchain/ton```
|
This deployment uses the host's network stack (**hostNetwork: true**) option and a service of **NodePort** type.
|
||||||
|
Actually you can also use service of type **LoadBalancer**.
|
||||||
|
This way the service will get public IP assigned to the endpoints.
|
||||||
|
|
||||||
|
#### Verify installation
|
||||||
|
See if service endpoints were correctly created:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
kubectl get endpoints
|
||||||
|
|
||||||
|
NAME ENDPOINTS
|
||||||
|
validator-engine-srv <PUBLIC_IP>:30002,<PUBLIC_IP>:30001,<PUBLIC_IP>:30003
|
||||||
|
```
|
||||||
|
Check the logs for the deployment status:
|
||||||
|
```yaml
|
||||||
|
kubectl logs validator-engine-pod
|
||||||
|
```
|
||||||
|
or go inside the pod and check if blockchain size is growing:
|
||||||
|
```yaml
|
||||||
|
kubectl exec --stdin --tty validator-engine-pod -- /bin/bash
|
||||||
|
du -h .
|
||||||
|
```
|
||||||
|
### Deploy on-premises with metalLB load balancer
|
||||||
|
|
||||||
|
Often Kubernetes cluster is located in DMZ, is behind corporate firewall and access is controlled via proxy configuration.
|
||||||
|
In this case we can't use host's network stack (**hostNetwork: true**) within a Pod and must manually proxy the access to the pod.
|
||||||
|
|
||||||
|
A **LoadBalancer** service type automatically provisions an external load balancer (such as those provided by cloud providers like AWS, GCP, Azure) and assigns a public IP address to your service. In a non-cloud environment or in a DMZ setup, you need to manually configure the load balancer.
|
||||||
|
|
||||||
|
If you are running your Kubernetes cluster on-premises or in an environment where an external load balancer is not automatically provided, you can use a load balancer implementation like MetalLB.
|
||||||
|
|
||||||
|
#### Prepare
|
||||||
|
Select the node where persistent storage will be located for TON validator.
|
||||||
|
* Add a label to this particular node. By this label our pod will know where to be deployed:
|
||||||
|
```
|
||||||
|
kubectl label nodes <NODE_NAME> node_type=ton-validator
|
||||||
|
```
|
||||||
|
* Replace **<PUBLIC_IP>** (and ports if needed) in file [ton-metal-lb.yaml](ton-metal-lb.yaml).
|
||||||
|
* Replace **<LOCAL_STORAGE_PATH>** with a real path on host for Persistent Volume.
|
||||||
|
* If you change the ports, make sure you specify appropriate env vars in Pod section.
|
||||||
|
* If you want to use dynamic storage provisioning via volumeClaimTemplates, feel free to create own StorageClass.
|
||||||
|
|
||||||
|
* Install MetalLB
|
||||||
|
```yaml
|
||||||
|
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.5/config/manifests/metallb-native.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
* Configure MetalLB
|
||||||
|
Create a configuration map to define the IP address range that MetalLB can use for external load balancer services.
|
||||||
|
```yaml
|
||||||
|
apiVersion: metallb.io/v1beta1
|
||||||
|
kind: IPAddressPool
|
||||||
|
metadata:
|
||||||
|
name: first-pool
|
||||||
|
namespace: metallb-system
|
||||||
|
spec:
|
||||||
|
addresses:
|
||||||
|
- 10.244.1.0/24 <-- your CIDR address
|
||||||
|
```
|
||||||
|
Apply the configuration:
|
||||||
|
```yaml
|
||||||
|
kubectl apply -f metallb-config.yaml
|
||||||
|
```
|
||||||
|
#### Install
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
kubectl apply -f ton-metal-lb.yaml
|
||||||
|
```
|
||||||
|
We do not use Pod Node Affinity here, since the Pod will remember the host with local storage it was bound to.
|
||||||
|
|
||||||
|
#### Verify installation
|
||||||
|
Assume your network CIDR (**--pod-network-cidr**) within cluster is 10.244.1.0/24, then you can compare the output with the one below:
|
||||||
|
```yaml
|
||||||
|
kubectl get service
|
||||||
|
|
||||||
|
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||||
|
kubernetes ClusterIP <NOT_IMPORTANT> <none> 443/TCP 28h
|
||||||
|
validator-engine-srv LoadBalancer <NOT_IMPORTANT> 10.244.1.1 30001:30001/UDP,30002:30002/TCP,30003:30003/TCP 60m
|
||||||
|
```
|
||||||
|
You can see that the endpoints point to the MetalLB subnet:
|
||||||
|
```
|
||||||
|
kubectl get endpoints
|
||||||
|
|
||||||
|
NAME ENDPOINTS
|
||||||
|
kubernetes <IP>:6443
|
||||||
|
validator-engine-srv 10.244.1.10:30002,10.244.1.10:30001,10.244.1.10:30003
|
||||||
|
```
|
||||||
|
and MetalLB itself operates with the right endpoint:
|
||||||
|
```
|
||||||
|
kubectl describe service metallb-webhook-service -n metallb-system
|
||||||
|
|
||||||
|
Name: metallb-webhook-service
|
||||||
|
Namespace: metallb-system
|
||||||
|
Selector: component=controller
|
||||||
|
Type: ClusterIP
|
||||||
|
IP: <NOT_IMPORTANT_IP>
|
||||||
|
IPs: <NOT_IMPORTANT_IP>
|
||||||
|
Port: <unset> 443/TCP
|
||||||
|
TargetPort: 9443/TCP
|
||||||
|
Endpoints: 10.244.2.3:9443 <-- CIDR
|
||||||
|
```
|
||||||
|
|
||||||
|
Use the commands from the previous chapter to see if node operates properly.
|
||||||
|
|
||||||
|
### Deploy on AWS cloud (Amazon Web Services)
|
||||||
|
|
||||||
|
#### Prepare
|
||||||
|
* AWS EKS is configured with worker nodes with selected add-ons:
|
||||||
|
* CoreDNS - Enable service discovery within your cluster.
|
||||||
|
* kube-proxy - Enable service networking within your cluster.
|
||||||
|
* Amazon VPC CNI - Enable pod networking within your cluster.
|
||||||
|
* Allocate Elastic IP.
|
||||||
|
* Replace **<PUBLIC_IP>** with the newly created Elastic IP in [ton-aws.yaml](ton-aws.yaml)
|
||||||
|
* Replace **<ELASTIC_IP_ID>** with Elastic IP allocation ID (see in AWS console).
|
||||||
|
* Adjust StorageClass name. Make sure you are providing fast storage.
|
||||||
|
|
||||||
|
#### Install
|
||||||
|
|
||||||
|
```kubectl apply -f ton-aws.yaml```
|
||||||
|
|
||||||
|
#### Verify installation
|
||||||
|
Use instructions from the previous sections.
|
||||||
|
|
||||||
|
### Deploy on GCP (Google Cloud Platform)
|
||||||
|
|
||||||
|
#### Prepare
|
||||||
|
* Kubernetes cluster of type Standard (not Autopilot).
|
||||||
|
* Premium static IP address.
|
||||||
|
* Adjust firewall rules and security groups to allow ports 30001/udp, 30002/tcp and 30003/tcp (default ones).
|
||||||
|
* Replace **<PUBLIC_IP>** (and ports if needed) in file [ton-gcp.yaml](ton-gcp.yaml).
|
||||||
|
* Adjust StorageClass name. Make sure you are providing fast storage.
|
||||||
|
|
||||||
|
* Load Balancer will be created automatically according to Kubernetes service in yaml file.
|
||||||
|
|
||||||
|
#### Install
|
||||||
|
```kubectl apply -f ton-gcp.yaml```
|
||||||
|
|
||||||
|
#### Verify installation
|
||||||
|
Use instructions from the previous sections.
|
||||||
|
|
||||||
|
### Deploy on Ali Cloud
|
||||||
|
|
||||||
|
#### Prepare
|
||||||
|
* AliCloud kubernetes cluster.
|
||||||
|
* Elastic IP.
|
||||||
|
* Replace **<ELASTIC_IP_ID>** with Elastic IP allocation ID (see in AliCloud console).
|
||||||
|
* Replace **<PUBLIC_IP>** (and ports if needed) in file [ton-ali.yaml](ton-ali.yaml) with the elastic IP attached to your CLB.
|
||||||
|
* Adjust StorageClass name. Make sure you are providing fast storage.
|
||||||
|
|
||||||
|
#### Install
|
||||||
|
```kubectl apply -f ton-ali.yaml```
|
||||||
|
|
||||||
|
As a result CLB (classic internal Load Balancer) will be created automatically with assigned external IP.
|
||||||
|
|
||||||
|
#### Verify installation
|
||||||
|
Use instructions from the previous sections.
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
## Docker
|
||||||
|
### TON node cannot synchronize, constantly see messages [Error : 651 : no nodes] in the log
|
||||||
|
|
||||||
|
Start the new container without starting validator-engine:
|
||||||
|
|
||||||
|
```
|
||||||
|
docker run -it -v /data/db:/var/ton-work/db \
|
||||||
|
-e "HOST_IP=<PUBLIC_IP>" \
|
||||||
|
-e "PUBLIC_IP=<PUBLIC_IP>" \
|
||||||
|
-e "LITESERVER=true" \
|
||||||
|
-p 43677:43677/udp \
|
||||||
|
-p 43678:43678/tcp \
|
||||||
|
-p 43679:43679/tcp \
|
||||||
|
--entrypoint /bin/bash \
|
||||||
|
ghcr.io/ton-blockchain/ton
|
||||||
|
```
|
||||||
|
identify your PUBLIC_IP:
|
||||||
|
```
|
||||||
|
curl -4 ifconfig.me
|
||||||
|
```
|
||||||
|
Check whether the resulting IP coincides with your <PUBLIC_IP>.
|
||||||
|
If it doesn't, exit container and launch it with the correct public IP.
|
||||||
|
Then open UDP port (inside the container) you plan to allocate for TON node using netcat utility:
|
||||||
|
```
|
||||||
|
nc -ul 30001
|
||||||
|
```
|
||||||
|
and from any **other** linux machine check if you can reach this UDP port by sending a test message to that port:
|
||||||
|
```
|
||||||
|
echo "test" | nc -u <PUBLIC_IP> 30001
|
||||||
|
```
|
||||||
|
As a result, you should receive the "test" message inside the container.
|
||||||
|
|
||||||
|
If you don't get the message inside the docker container, that means that either your firewall, LoadBalancer, NAT or proxy is blocking it.
|
||||||
|
Ask your system administrator for assistance.
|
||||||
|
|
||||||
|
In the same way you can check if TCP port is available:
|
||||||
|
|
||||||
|
Execute inside the container ```nc -l 30003``` and test connection from another server
|
||||||
|
```nc -vz <PUBLIC_IP> 30003```
|
||||||
|
|
||||||
|
### Can't connect to lite-server
|
||||||
|
* check if lite-server was enabled on start by passing **"LITESERVER=true"** argument;
|
||||||
|
* check if TCP port (LITE_PORT) is available from the outside. From any other linux machine execute:
|
||||||
|
```
|
||||||
|
nc -vz <PUBLIC_IP> <LITE_PORT>
|
||||||
|
```
|
||||||
|
### How to see what traffic is generated inside the TON docker container?
|
||||||
|
A traffic monitoring utility is available inside the container; just execute:
|
||||||
|
```
|
||||||
|
iptraf-ng
|
||||||
|
```
|
||||||
|
Other tools like **tcpdump**, **nc**, **wget**, **curl**, **ifconfig**, **pv**, **plzip**, **jq** and **netstat** are also available.
|
||||||
|
|
||||||
|
### How to build TON docker image from sources?
|
||||||
|
```
|
||||||
|
git clone --recursive https://github.com/ton-blockchain/ton.git
|
||||||
|
cd ton
|
||||||
|
docker build .
|
||||||
|
```
|
||||||
|
|
||||||
|
## Kubernetes
|
||||||
|
### AWS
|
||||||
|
#### After installing AWS LB, load balancer is still not available (pending):
|
||||||
|
```
|
||||||
|
kubectl get deployment -n kube-system aws-load-balancer-controller
|
||||||
|
```
|
||||||
|
Solution:
|
||||||
|
|
||||||
|
Try to install AWS LoadBalancer using ```Helm``` way.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### After installing AWS LB and running ton node, service shows error:
|
||||||
|
|
||||||
|
```k describe service validator-engine-srv```
|
||||||
|
|
||||||
|
```log
|
||||||
|
Failed build model due to unable to resolve at least one subnet (0 match VPC and tags: [kubernetes.io/role/elb])
|
||||||
|
```
|
||||||
|
Solution:
|
||||||
|
|
||||||
|
You haven't labeled the AWS subnets with the correct resource tags.
|
||||||
|
|
||||||
|
* Public Subnets should be resource tagged with: kubernetes.io/role/elb: 1
|
||||||
|
* Private Subnets should be tagged with: kubernetes.io/role/internal-elb: 1
|
||||||
|
* Both private and public subnets should be tagged with: kubernetes.io/cluster/${your-cluster-name}: owned
|
||||||
|
* or if the subnets are also used by non-EKS resources kubernetes.io/cluster/${your-cluster-name}: shared
|
||||||
|
|
||||||
|
So create tags for at least one subnet:
|
||||||
|
```
|
||||||
|
kubernetes.io/role/elb: 1
|
||||||
|
kubernetes.io/cluster/<YOUR_CLUSTER_NAME>: owned
|
||||||
|
```
|
||||||
|
---
|
||||||
|
#### AWS Load Balancer works, but I still see ```[no nodes]``` in validator's log
|
||||||
|
It is required to add the security group for the EC2 instances to the load balancer along with the default security group.
|
||||||
|
It's misleading that the default security group has "everything open."
|
||||||
|
|
||||||
|
Add security group (default name is usually something like 'launch-wizard-1').
|
||||||
|
And make sure you allow the ports you specified or default ports 30001/udp, 30002/tcp and 30003/tcp.
|
||||||
|
|
||||||
|
You can also set inbound and outbound rules of new security group to allow ALL ports and for ALL protocols and for source CIDR 0.0.0.0/0 for testing purposes.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### Pending PersistentVolumeClaim ```Waiting for a volume to be created either by the external provisioner 'ebs.csi.aws.com' or manually by the system administrator.```
|
||||||
|
|
||||||
|
Solution:
|
||||||
|
|
||||||
|
Configure Amazon EBS CSI driver for working PersistentVolumes in EKS.
|
||||||
|
|
||||||
|
1. Enable IAM OIDC provider
|
||||||
|
```
|
||||||
|
eksctl utils associate-iam-oidc-provider --region=us-west-2 --cluster=k8s-my --approve
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Create Amazon EBS CSI driver IAM role
|
||||||
|
```
|
||||||
|
eksctl create iamserviceaccount \
|
||||||
|
--region us-west-2 \
|
||||||
|
--name ebs-csi-controller-sa \
|
||||||
|
--namespace kube-system \
|
||||||
|
--cluster k8s-my \
|
||||||
|
--attach-policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy \
|
||||||
|
--approve \
|
||||||
|
--role-only \
|
||||||
|
--role-name AmazonEKS_EBS_CSI_DriverRole
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Add the Amazon EBS CSI add-on
|
||||||
|
```yaml
|
||||||
|
eksctl create addon --name aws-ebs-csi-driver --cluster k8s-my --service-account-role-arn arn:aws:iam::$(aws sts get-caller-identity --query Account --output text):role/AmazonEKS_EBS_CSI_DriverRole --force
|
||||||
|
```
|
||||||
|
### Google Cloud
|
||||||
|
#### Load Balancer cannot obtain external IP (pending)
|
||||||
|
|
||||||
|
```
|
||||||
|
kubectl describe service validator-engine-srv
|
||||||
|
|
||||||
|
Events:
|
||||||
|
Type Reason Age From Message
|
||||||
|
---- ------ ---- ---- -------
|
||||||
|
Warning LoadBalancerMixedProtocolNotSupported 7m8s g-cloudprovider LoadBalancers with multiple protocols are not supported.
|
||||||
|
Normal EnsuringLoadBalancer 113s (x7 over 7m8s) service-controller Ensuring load balancer
|
||||||
|
Warning SyncLoadBalancerFailed 113s (x7 over 7m8s) service-controller Error syncing load balancer: failed to ensure load balancer: mixed protocol is not supported for LoadBalancer
|
||||||
|
```
|
||||||
|
Solution:
|
||||||
|
|
||||||
|
Create static IP address of type Premium in GCP console and use it as a value for field ```loadBalancerIP``` in Kubernetes service.
|
||||||
|
|
||||||
|
### Ali Cloud
|
||||||
|
|
||||||
|
#### Validator logs always show
|
||||||
|
```
|
||||||
|
Client got error [PosixError : Connection reset by peer : 104 : Error on [fd:45]]
|
||||||
|
[!NetworkManager][&ADNL_WARNING] [networkmanager]: received too small proxy packet of size 21
|
||||||
|
```
|
||||||
|
Solution:
|
||||||
|
|
||||||
|
The node is synchronizing, but very slowly.
|
||||||
|
Try to use Network Load Balancer (NLB) instead of default CLB.
|
||||||
|
|
||||||
|
|
||||||
If you don't need Liteserver, then remove -e "LITESERVER=true".
|
|
||||||
|
|
||||||
#### Use
|
|
||||||
```docker exec -ti <container-id> /bin/bash```
|
|
||||||
|
|
||||||
```./validator-engine-console -k client -p server.pub -a <IP>:<TCP-PORT1>```
|
|
||||||
|
|
||||||
IP:PORT is shown at start of container.
|
|
||||||
|
|
||||||
#### Lite-client
|
|
||||||
To use lite-client you need to get liteserver.pub from container.
|
|
||||||
|
|
||||||
```docker cp <container-id>:/var/ton-work/db/liteserver.pub /your/path```
|
|
||||||
|
|
||||||
Then you can connect to it, but be sure you use right port, it's different from fullnode console port.
|
|
||||||
|
|
||||||
```lite-client -a <IP>:<TCP-PORT2> -p liteserver.pub```
|
|
||||||
|
|
|
@ -6,4 +6,4 @@
|
||||||
"permissions" : 15
|
"permissions" : 15
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
112
docker/init.sh
112
docker/init.sh
|
@ -1,30 +1,93 @@
|
||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
# global config
|
if [ ! -z "$TEST" ]; then
|
||||||
if [ ! -z "$GCONFURL" ]; then
|
echo -e "Running simple validator-engine test..."
|
||||||
|
validator-engine -h
|
||||||
|
test $? -eq 2 || { echo "simple validator-engine test failed"; exit 1; }
|
||||||
|
exit 0;
|
||||||
|
fi
|
||||||
|
|
||||||
|
# global config
|
||||||
|
if [ ! -z "$GLOBAL_CONFIG_URL" ]; then
|
||||||
echo -e "\e[1;32m[+]\e[0m Downloading provided global config."
|
echo -e "\e[1;32m[+]\e[0m Downloading provided global config."
|
||||||
wget -q $GCONFURL -O /var/ton-work/db/ton-global.config
|
wget -q $GLOBAL_CONFIG_URL -O /var/ton-work/db/ton-global.config
|
||||||
else
|
else
|
||||||
echo -e "\e[1;33m[=]\e[0m No global config provided, downloading default."
|
echo -e "\e[1;33m[=]\e[0m No global config provided, downloading mainnet default."
|
||||||
wget -q https://api.tontech.io/ton/wallet-mainnet.autoconf.json -O /var/ton-work/db/ton-global.config
|
wget -q https://api.tontech.io/ton/wallet-mainnet.autoconf.json -O /var/ton-work/db/ton-global.config
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ -z "$VALIDATOR_PORT" ]; then
|
||||||
|
VALIDATOR_PORT=30001
|
||||||
|
echo -e "\e[1;33m[=]\e[0m Using default VALIDATOR_PORT $VALIDATOR_PORT udp"
|
||||||
|
else
|
||||||
|
echo -e "\e[1;33m[=]\e[0m Using VALIDATOR_PORT $VALIDATOR_PORT udp"
|
||||||
|
fi
|
||||||
|
|
||||||
# Init local config with IP:PORT
|
# Init local config with IP:PORT
|
||||||
if [ ! -z "$PUBLIC_IP" ]; then
|
if [ ! -z "$PUBLIC_IP" ]; then
|
||||||
if [ -z "$CONSOLE_PORT" ]; then
|
echo -e "\e[1;32m[+]\e[0m Using provided IP: $PUBLIC_IP:$VALIDATOR_PORT"
|
||||||
CONSOLE_PORT="43678"
|
|
||||||
fi
|
|
||||||
echo -e "\e[1;32m[+]\e[0m Using provided IP: $PUBLIC_IP:$CONSOLE_PORT"
|
|
||||||
validator-engine -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --ip "$PUBLIC_IP:$CONSOLE_PORT"
|
|
||||||
else
|
else
|
||||||
echo -e "\e[1;31m[!]\e[0m No IP:PORT provided, exiting"
|
echo -e "\e[1;31m[!]\e[0m No PUBLIC_IP provided, exiting..."
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ ! -f "/var/ton-work/db/config.json" ]; then
|
||||||
|
echo -e "\e[1;32m[+]\e[0m Initializing validator-engine:"
|
||||||
|
echo validator-engine -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --ip "$PUBLIC_IP:$VALIDATOR_PORT"
|
||||||
|
validator-engine -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --ip "$PUBLIC_IP:$VALIDATOR_PORT"
|
||||||
|
test $? -eq 0 || { echo "Cannot initialize validator-engine"; exit 2; }
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -z "$DUMP_URL" ]; then
|
||||||
|
echo -e "\e[1;32m[+]\e[0m Using provided dump $DUMP_URL"
|
||||||
|
if [ ! -f "dump_downloaded" ]; then
|
||||||
|
echo -e "\e[1;32m[+]\e[0m Downloading dump..."
|
||||||
|
curl --retry 10 --retry-delay 30 -Ls $DUMP_URL | pv | plzip -d -n8 | tar -xC /var/ton-work/db
|
||||||
|
touch dump_downloaded
|
||||||
|
else
|
||||||
|
echo -e "\e[1;32m[+]\e[0m Dump has been already used."
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$STATE_TTL" ]; then
|
||||||
|
STATE_TTL=86400
|
||||||
|
echo -e "\e[1;33m[=]\e[0m Using default STATE_TTL $STATE_TTL"
|
||||||
|
else
|
||||||
|
echo -e "\e[1;33m[=]\e[0m Using STATE_TTL $STATE_TTL"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$ARCHIVE_TTL" ]; then
|
||||||
|
ARCHIVE_TTL=86400
|
||||||
|
echo -e "\e[1;33m[=]\e[0m Using default ARCHIVE_TTL $ARCHIVE_TTL"
|
||||||
|
else
|
||||||
|
echo -e "\e[1;33m[=]\e[0m Using ARCHIVE_TTL $ARCHIVE_TTL"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$THREADS" ]; then
|
||||||
|
THREADS=8
|
||||||
|
echo -e "\e[1;33m[=]\e[0m Using default THREADS $THREADS"
|
||||||
|
else
|
||||||
|
echo -e "\e[1;33m[=]\e[0m Using THREADS $THREADS"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$VERBOSITY" ]; then
|
||||||
|
VERBOSITY=3
|
||||||
|
echo -e "\e[1;33m[=]\e[0m Using default VERBOSITY $VERBOSITY"
|
||||||
|
else
|
||||||
|
echo -e "\e[1;33m[=]\e[0m Using VERBOSITY $VERBOSITY"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$CONSOLE_PORT" ]; then
|
||||||
|
CONSOLE_PORT=30002
|
||||||
|
echo -e "\e[1;33m[=]\e[0m Using default CONSOLE_PORT $CONSOLE_PORT tcp"
|
||||||
|
else
|
||||||
|
echo -e "\e[1;33m[=]\e[0m Using CONSOLE_PORT $CONSOLE_PORT tcp"
|
||||||
|
fi
|
||||||
|
|
||||||
# Generating server certificate
|
# Generating server certificate
|
||||||
if [ -f "./server" ]; then
|
if [ -f "./server" ]; then
|
||||||
echo -e "\e[1;33m[=]\e[0m Found existing server certificate, skipping"
|
echo -e "\e[1;33m[=]\e[0m Found existing server certificate, skipping"
|
||||||
else
|
else
|
||||||
echo -e "\e[1;32m[+]\e[0m Generating and installing server certificate for remote control"
|
echo -e "\e[1;32m[+]\e[0m Generating and installing server certificate for remote control"
|
||||||
read -r SERVER_ID1 SERVER_ID2 <<< $(generate-random-id -m keys -n server)
|
read -r SERVER_ID1 SERVER_ID2 <<< $(generate-random-id -m keys -n server)
|
||||||
echo "Server IDs: $SERVER_ID1 $SERVER_ID2"
|
echo "Server IDs: $SERVER_ID1 $SERVER_ID2"
|
||||||
|
@ -32,16 +95,16 @@ else
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Generating client certificate
|
# Generating client certificate
|
||||||
if [ -f "./client" ]; then
|
if [ -f "./client" ]; then
|
||||||
echo -e "\e[1;33m[=]\e[0m Found existing client certificate, skipping"
|
echo -e "\e[1;33m[=]\e[0m Found existing client certificate, skipping"
|
||||||
else
|
else
|
||||||
read -r CLIENT_ID1 CLIENT_ID2 <<< $(generate-random-id -m keys -n client)
|
read -r CLIENT_ID1 CLIENT_ID2 <<< $(generate-random-id -m keys -n client)
|
||||||
echo -e "\e[1;32m[+]\e[0m Generated client private certificate $CLIENT_ID1 $CLIENT_ID2"
|
echo -e "\e[1;32m[+]\e[0m Generated client private certificate $CLIENT_ID1 $CLIENT_ID2"
|
||||||
echo -e "\e[1;32m[+]\e[0m Generated client public certificate"
|
echo -e "\e[1;32m[+]\e[0m Generated client public certificate"
|
||||||
# Adding client permissions
|
# Adding client permissions
|
||||||
sed -e "s/CONSOLE-PORT/\"$(printf "%q" $CONSOLE_PORT)\"/g" -e "s~SERVER-ID~\"$(printf "%q" $SERVER_ID2)\"~g" -e "s~CLIENT-ID~\"$(printf "%q" $CLIENT_ID2)\"~g" control.template > control.new
|
sed -e "s/CONSOLE-PORT/\"$(printf "%q" $CONSOLE_PORT)\"/g" -e "s~SERVER-ID~\"$(printf "%q" $SERVER_ID2)\"~g" -e "s~CLIENT-ID~\"$(printf "%q" $CLIENT_ID2)\"~g" /var/ton-work/scripts/control.template > control.new
|
||||||
sed -e "s~\"control\"\ \:\ \[~$(printf "%q" $(cat control.new))~g" config.json > config.json.new
|
sed -e "s~\"control\"\ \:\ \[~$(printf "%q" $(cat control.new))~g" /var/ton-work/db/config.json > config.json.new
|
||||||
mv config.json.new config.json
|
mv config.json.new /var/ton-work/db/config.json
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Liteserver
|
# Liteserver
|
||||||
|
@ -50,20 +113,25 @@ if [ -z "$LITESERVER" ]; then
|
||||||
else
|
else
|
||||||
if [ -f "./liteserver" ]; then
|
if [ -f "./liteserver" ]; then
|
||||||
echo -e "\e[1;33m[=]\e[0m Found existing liteserver certificate, skipping"
|
echo -e "\e[1;33m[=]\e[0m Found existing liteserver certificate, skipping"
|
||||||
else
|
else
|
||||||
echo -e "\e[1;32m[+]\e[0m Generating and installing liteserver certificate for remote control"
|
echo -e "\e[1;32m[+]\e[0m Generating and installing liteserver certificate for remote control"
|
||||||
read -r LITESERVER_ID1 LITESERVER_ID2 <<< $(generate-random-id -m keys -n liteserver)
|
read -r LITESERVER_ID1 LITESERVER_ID2 <<< $(generate-random-id -m keys -n liteserver)
|
||||||
echo "Liteserver IDs: $LITESERVER_ID1 $LITESERVER_ID2"
|
echo "Liteserver IDs: $LITESERVER_ID1 $LITESERVER_ID2"
|
||||||
cp liteserver /var/ton-work/db/keyring/$LITESERVER_ID1
|
cp liteserver /var/ton-work/db/keyring/$LITESERVER_ID1
|
||||||
|
|
||||||
if [ -z "$LITE_PORT" ]; then
|
if [ -z "$LITE_PORT" ]; then
|
||||||
LITE_PORT="43679"
|
LITE_PORT=30003
|
||||||
|
echo -e "\e[1;33m[=]\e[0m Using default LITE_PORT $LITE_PORT tcp"
|
||||||
|
else
|
||||||
|
echo -e "\e[1;33m[=]\e[0m Using LITE_PORT $LITE_PORT tcp"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
LITESERVERS=$(printf "%q" "\"liteservers\":[{\"id\":\"$LITESERVER_ID2\",\"port\":\"$LITE_PORT\"}")
|
LITESERVERS=$(printf "%q" "\"liteservers\":[{\"id\":\"$LITESERVER_ID2\",\"port\":\"$LITE_PORT\"}")
|
||||||
sed -e "s~\"liteservers\"\ \:\ \[~$LITESERVERS~g" config.json > config.json.liteservers
|
sed -e "s~\"liteservers\"\ \:\ \[~$LITESERVERS~g" /var/ton-work/db/config.json > config.json.liteservers
|
||||||
mv config.json.liteservers config.json
|
mv config.json.liteservers /var/ton-work/db/config.json
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo -e "\e[1;32m[+]\e[0m Running validator-engine"
|
echo -e "\e[1;32m[+]\e[0m Starting validator-engine:"
|
||||||
|
echo validator-engine -c /var/ton-work/db/config.json -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --state-ttl $STATE_TTL --archive-ttl $ARCHIVE_TTL --threads $THREADS --verbosity $VERBOSITY $CUSTOM_ARG
|
||||||
exec validator-engine -c /var/ton-work/db/config.json -C /var/ton-work/db/ton-global.config --db /var/ton-work/db
|
exec validator-engine -c /var/ton-work/db/config.json -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --state-ttl $STATE_TTL --archive-ttl $ARCHIVE_TTL --threads $THREADS --verbosity $VERBOSITY $CUSTOM_ARG
|
||||||
|
|
121
docker/ton-ali.yaml
Normal file
121
docker/ton-ali.yaml
Normal file
|
@ -0,0 +1,121 @@
|
||||||
|
apiVersion: "apps/v1"
|
||||||
|
kind: StatefulSet
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-pod
|
||||||
|
labels:
|
||||||
|
name: validator-engine-pod
|
||||||
|
spec:
|
||||||
|
volumeClaimTemplates:
|
||||||
|
- metadata:
|
||||||
|
name: validator-engine-pvc
|
||||||
|
spec:
|
||||||
|
storageClassName: alicloud-disk-ssd
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: 800Gi
|
||||||
|
serviceName: validator-engine-srv-headless
|
||||||
|
replicas: 1
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
name: validator-engine-pod
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
name: validator-engine-pod
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: validator-engine-container
|
||||||
|
image: ghcr.io/neodix42/ton:latest
|
||||||
|
env:
|
||||||
|
- name: PUBLIC_IP
|
||||||
|
value: "<PUBLIC_IP>"
|
||||||
|
- name: GLOBAL_CONFIG_URL
|
||||||
|
value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json"
|
||||||
|
- name: DUMP_URL
|
||||||
|
value: "https://dump.ton.org/dumps/latest.tar.lz"
|
||||||
|
- name: LITESERVER
|
||||||
|
value: "true"
|
||||||
|
- name: VALIDATOR_PORT
|
||||||
|
value: "30001"
|
||||||
|
- name: CONSOLE_PORT
|
||||||
|
value: "30002"
|
||||||
|
- name: LITE_PORT
|
||||||
|
value: "30003"
|
||||||
|
- name: STATE_TTL
|
||||||
|
value: "86400"
|
||||||
|
- name: ARCHIVE_TTL
|
||||||
|
value: "86400"
|
||||||
|
- name: THREADS
|
||||||
|
value: "8"
|
||||||
|
- name: VERBOSITY
|
||||||
|
value: "3"
|
||||||
|
ports:
|
||||||
|
- containerPort: 30001
|
||||||
|
protocol: UDP
|
||||||
|
- containerPort: 30002
|
||||||
|
protocol: TCP
|
||||||
|
- containerPort: 30003
|
||||||
|
protocol: TCP
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: "/var/ton-work/db"
|
||||||
|
name: validator-engine-pvc
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
memory: "64Gi"
|
||||||
|
cpu: "16"
|
||||||
|
limits:
|
||||||
|
memory: "128Gi"
|
||||||
|
cpu: "32"
|
||||||
|
---
|
||||||
|
kind: Service
|
||||||
|
apiVersion: v1
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-srv
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/alibaba-cloud-loadbalancer-eip-ids: "<ELASTIC_IP_ID>"
|
||||||
|
service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "intranet"
|
||||||
|
spec:
|
||||||
|
type: LoadBalancer
|
||||||
|
externalTrafficPolicy: Local
|
||||||
|
ports:
|
||||||
|
- name: validator-udp
|
||||||
|
nodePort: 30001
|
||||||
|
port: 30001
|
||||||
|
targetPort: 30001
|
||||||
|
protocol: UDP
|
||||||
|
- name: console-tcp
|
||||||
|
nodePort: 30002
|
||||||
|
port: 30002
|
||||||
|
targetPort: 30002
|
||||||
|
protocol: TCP
|
||||||
|
- name: ls-tcp
|
||||||
|
nodePort: 30003
|
||||||
|
port: 30003
|
||||||
|
targetPort: 30003
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
name: validator-engine-pod
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-srv-headless
|
||||||
|
spec:
|
||||||
|
clusterIP: None
|
||||||
|
ports:
|
||||||
|
- name: validator-udp
|
||||||
|
port: 30001
|
||||||
|
targetPort: 30001
|
||||||
|
protocol: UDP
|
||||||
|
- name: console-tcp
|
||||||
|
port: 30002
|
||||||
|
targetPort: 30002
|
||||||
|
protocol: TCP
|
||||||
|
- name: ls-tcp
|
||||||
|
port: 30003
|
||||||
|
targetPort: 30003
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
name: validator-engine-pod
|
122
docker/ton-aws.yaml
Normal file
122
docker/ton-aws.yaml
Normal file
|
@ -0,0 +1,122 @@
|
||||||
|
apiVersion: "apps/v1"
|
||||||
|
kind: StatefulSet
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-pod
|
||||||
|
labels:
|
||||||
|
name: validator-engine-pod
|
||||||
|
spec:
|
||||||
|
volumeClaimTemplates:
|
||||||
|
- metadata:
|
||||||
|
name: validator-engine-pvc
|
||||||
|
spec:
|
||||||
|
storageClassName: gp2
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: 800Gi
|
||||||
|
serviceName: validator-engine-srv-headless
|
||||||
|
replicas: 1
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
name: validator-engine-pod
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
name: validator-engine-pod
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: validator-engine-container
|
||||||
|
image: ghcr.io/neodix42/ton:latest
|
||||||
|
env:
|
||||||
|
- name: PUBLIC_IP
|
||||||
|
value: "<PUBLIC_IP>"
|
||||||
|
- name: GLOBAL_CONFIG_URL
|
||||||
|
value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json"
|
||||||
|
- name: DUMP_URL
|
||||||
|
value: "https://dump.ton.org/dumps/latest.tar.lz"
|
||||||
|
- name: LITESERVER
|
||||||
|
value: "true"
|
||||||
|
- name: VALIDATOR_PORT
|
||||||
|
value: "30001"
|
||||||
|
- name: CONSOLE_PORT
|
||||||
|
value: "30002"
|
||||||
|
- name: LITE_PORT
|
||||||
|
value: "30003"
|
||||||
|
- name: STATE_TTL
|
||||||
|
value: "86400"
|
||||||
|
- name: ARCHIVE_TTL
|
||||||
|
value: "86400"
|
||||||
|
- name: THREADS
|
||||||
|
value: "8"
|
||||||
|
- name: VERBOSITY
|
||||||
|
value: "3"
|
||||||
|
ports:
|
||||||
|
- containerPort: 30001
|
||||||
|
protocol: UDP
|
||||||
|
- containerPort: 30002
|
||||||
|
protocol: TCP
|
||||||
|
- containerPort: 30003
|
||||||
|
protocol: TCP
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: "/var/ton-work/db"
|
||||||
|
name: validator-engine-pvc
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
memory: "64Gi"
|
||||||
|
cpu: "16"
|
||||||
|
limits:
|
||||||
|
memory: "128Gi"
|
||||||
|
cpu: "32"
|
||||||
|
---
|
||||||
|
kind: Service
|
||||||
|
apiVersion: v1
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-srv
|
||||||
|
annotations:
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
||||||
|
service.beta.kubernetes.io/aws-load-balancer-eip-allocations: "<ELASTIC_IP_ID>" # Replace with your EIP allocation ID
|
||||||
|
spec:
|
||||||
|
type: LoadBalancer
|
||||||
|
ports:
|
||||||
|
- name: validator-udp
|
||||||
|
nodePort: 30001
|
||||||
|
port: 30001
|
||||||
|
targetPort: 30001
|
||||||
|
protocol: UDP
|
||||||
|
- name: console-tcp
|
||||||
|
nodePort: 30002
|
||||||
|
port: 30002
|
||||||
|
targetPort: 30002
|
||||||
|
protocol: TCP
|
||||||
|
- name: ls-tcp
|
||||||
|
nodePort: 30003
|
||||||
|
port: 30003
|
||||||
|
targetPort: 30003
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
name: validator-engine-pod
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-srv-headless
|
||||||
|
spec:
|
||||||
|
clusterIP: None
|
||||||
|
ports:
|
||||||
|
- name: validator-udp
|
||||||
|
port: 30001
|
||||||
|
targetPort: 30001
|
||||||
|
protocol: UDP
|
||||||
|
- name: console-tcp
|
||||||
|
port: 30002
|
||||||
|
targetPort: 30002
|
||||||
|
protocol: TCP
|
||||||
|
- name: ls-tcp
|
||||||
|
port: 30003
|
||||||
|
targetPort: 30003
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
name: validator-engine-pod
|
134
docker/ton-gcp.yaml
Normal file
134
docker/ton-gcp.yaml
Normal file
|
@ -0,0 +1,134 @@
|
||||||
|
apiVersion: "apps/v1"
|
||||||
|
kind: StatefulSet
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-pod
|
||||||
|
labels:
|
||||||
|
name: validator-engine-pod
|
||||||
|
spec:
|
||||||
|
volumeClaimTemplates:
|
||||||
|
- metadata:
|
||||||
|
name: validator-engine-pvc
|
||||||
|
spec:
|
||||||
|
storageClassName: standard-rwo
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: 800Gi
|
||||||
|
serviceName: validator-engine-srv-headless
|
||||||
|
replicas: 1
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
name: validator-engine-pod
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
name: validator-engine-pod
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: validator-engine-container
|
||||||
|
image: ghcr.io/neodix42/ton:latest
|
||||||
|
env:
|
||||||
|
- name: PUBLIC_IP
|
||||||
|
value: "<PUBLIC_IP>"
|
||||||
|
- name: GLOBAL_CONFIG_URL
|
||||||
|
value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json"
|
||||||
|
- name: DUMP_URL
|
||||||
|
value: "https://dump.ton.org/dumps/latest.tar.lz"
|
||||||
|
- name: LITESERVER
|
||||||
|
value: "true"
|
||||||
|
- name: VALIDATOR_PORT
|
||||||
|
value: "30001"
|
||||||
|
- name: CONSOLE_PORT
|
||||||
|
value: "30002"
|
||||||
|
- name: LITE_PORT
|
||||||
|
value: "30003"
|
||||||
|
- name: STATE_TTL
|
||||||
|
value: "86400"
|
||||||
|
- name: ARCHIVE_TTL
|
||||||
|
value: "86400"
|
||||||
|
- name: THREADS
|
||||||
|
value: "8"
|
||||||
|
- name: VERBOSITY
|
||||||
|
value: "3"
|
||||||
|
ports:
|
||||||
|
- containerPort: 30001
|
||||||
|
protocol: UDP
|
||||||
|
- containerPort: 30002
|
||||||
|
protocol: TCP
|
||||||
|
- containerPort: 30003
|
||||||
|
protocol: TCP
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: "/var/ton-work/db"
|
||||||
|
name: validator-engine-pvc
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
memory: "64Gi"
|
||||||
|
cpu: "16"
|
||||||
|
limits:
|
||||||
|
memory: "128Gi"
|
||||||
|
cpu: "32"
|
||||||
|
---
|
||||||
|
kind: Service
|
||||||
|
apiVersion: v1
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-srv
|
||||||
|
spec:
|
||||||
|
type: LoadBalancer
|
||||||
|
loadBalancerIP: <PUBLIC_IP>
|
||||||
|
ports:
|
||||||
|
- port: 30001
|
||||||
|
targetPort: 30001
|
||||||
|
protocol: UDP
|
||||||
|
selector:
|
||||||
|
name: validator-engine-pod
|
||||||
|
---
|
||||||
|
kind: Service
|
||||||
|
apiVersion: v1
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-console-srv
|
||||||
|
spec:
|
||||||
|
type: LoadBalancer
|
||||||
|
loadBalancerIP: <PUBLIC_IP>
|
||||||
|
ports:
|
||||||
|
- port: 30002
|
||||||
|
targetPort: 30002
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
name: validator-engine-pod
|
||||||
|
---
|
||||||
|
kind: Service
|
||||||
|
apiVersion: v1
|
||||||
|
metadata:
|
||||||
|
name: lite-server-srv
|
||||||
|
spec:
|
||||||
|
type: LoadBalancer
|
||||||
|
loadBalancerIP: <PUBLIC_IP>
|
||||||
|
ports:
|
||||||
|
- port: 30003
|
||||||
|
targetPort: 30003
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
name: validator-engine-pod
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-srv-headless
|
||||||
|
spec:
|
||||||
|
clusterIP: None
|
||||||
|
ports:
|
||||||
|
- name: validator-udp
|
||||||
|
port: 30001
|
||||||
|
targetPort: 30001
|
||||||
|
protocol: UDP
|
||||||
|
- name: console-tcp
|
||||||
|
port: 30002
|
||||||
|
targetPort: 30002
|
||||||
|
protocol: TCP
|
||||||
|
- name: ls-tcp
|
||||||
|
port: 30003
|
||||||
|
targetPort: 30003
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
name: validator-engine-pod
|
118
docker/ton-metal-lb.yaml
Normal file
118
docker/ton-metal-lb.yaml
Normal file
|
@ -0,0 +1,118 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Pod
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-pod
|
||||||
|
labels:
|
||||||
|
name: validator-engine-pod
|
||||||
|
spec:
|
||||||
|
volumes:
|
||||||
|
- name: validator-engine-pv
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: validator-engine-pvc
|
||||||
|
containers:
|
||||||
|
- name: validator-engine-container
|
||||||
|
image: ghcr.io/neodix42/ton:latest
|
||||||
|
env:
|
||||||
|
- name: PUBLIC_IP
|
||||||
|
value: "<PUBLIC_IP>"
|
||||||
|
- name: GLOBAL_CONFIG_URL
|
||||||
|
value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json"
|
||||||
|
- name: DUMP_URL
|
||||||
|
value: "https://dump.ton.org/dumps/latest.tar.lz"
|
||||||
|
- name: LITESERVER
|
||||||
|
value: "true"
|
||||||
|
- name: VALIDATOR_PORT
|
||||||
|
value: "30001"
|
||||||
|
- name: CONSOLE_PORT
|
||||||
|
value: "30002"
|
||||||
|
- name: LITE_PORT
|
||||||
|
value: "30003"
|
||||||
|
- name: STATE_TTL
|
||||||
|
value: "86400"
|
||||||
|
- name: ARCHIVE_TTL
|
||||||
|
value: "86400"
|
||||||
|
- name: THREADS
|
||||||
|
value: "8"
|
||||||
|
- name: VERBOSITY
|
||||||
|
value: "3"
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: "/var/ton-work/db"
|
||||||
|
name: validator-engine-pv
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
memory: "64Gi"
|
||||||
|
cpu: "16"
|
||||||
|
limits:
|
||||||
|
memory: "128Gi"
|
||||||
|
cpu: "32"
|
||||||
|
---
|
||||||
|
kind: Service
|
||||||
|
apiVersion: v1
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-srv
|
||||||
|
annotations:
|
||||||
|
metallb.universe.tf/address-pool: first-pool
|
||||||
|
spec:
|
||||||
|
type: LoadBalancer
|
||||||
|
ports:
|
||||||
|
- name: validator-engine-public-udp-port
|
||||||
|
nodePort: 30001
|
||||||
|
port: 30001
|
||||||
|
targetPort: 30001
|
||||||
|
protocol: UDP
|
||||||
|
- name: validator-console-tcp-port
|
||||||
|
nodePort: 30002
|
||||||
|
port: 30002
|
||||||
|
targetPort: 30002
|
||||||
|
protocol: TCP
|
||||||
|
- name: lite-server-tcp-port
|
||||||
|
nodePort: 30003
|
||||||
|
port: 30003
|
||||||
|
targetPort: 30003
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
name: validator-engine-pod
|
||||||
|
---
|
||||||
|
apiVersion: storage.k8s.io/v1
|
||||||
|
kind: StorageClass
|
||||||
|
metadata:
|
||||||
|
name: local-storage
|
||||||
|
provisioner: kubernetes.io/no-provisioner
|
||||||
|
volumeBindingMode: WaitForFirstConsumer
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: PersistentVolume
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-pv
|
||||||
|
labels:
|
||||||
|
type: local
|
||||||
|
spec:
|
||||||
|
storageClassName: local-storage
|
||||||
|
capacity:
|
||||||
|
storage: 800Gi
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
- ReadOnlyMany
|
||||||
|
persistentVolumeReclaimPolicy: Retain
|
||||||
|
local:
|
||||||
|
path: <LOCAL_STORAGE_PATH>
|
||||||
|
nodeAffinity:
|
||||||
|
required:
|
||||||
|
nodeSelectorTerms:
|
||||||
|
- matchExpressions:
|
||||||
|
- key: node_type
|
||||||
|
operator: In
|
||||||
|
values:
|
||||||
|
- ton-validator
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: PersistentVolumeClaim
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-pvc
|
||||||
|
spec:
|
||||||
|
storageClassName: local-storage
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: 800Gi
|
126
docker/ton-node-port.yaml
Normal file
126
docker/ton-node-port.yaml
Normal file
|
@ -0,0 +1,126 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Pod
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-pod
|
||||||
|
labels:
|
||||||
|
name: validator-engine-pod
|
||||||
|
spec:
|
||||||
|
affinity:
|
||||||
|
nodeAffinity:
|
||||||
|
requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
nodeSelectorTerms:
|
||||||
|
- matchExpressions:
|
||||||
|
- key: node_type
|
||||||
|
operator: In
|
||||||
|
values:
|
||||||
|
- ton-validator
|
||||||
|
hostNetwork: true
|
||||||
|
volumes:
|
||||||
|
- name: validator-engine-pv
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: validator-engine-pvc
|
||||||
|
containers:
|
||||||
|
- name: validator-engine-container
|
||||||
|
image: ghcr.io/neodix42/ton:latest
|
||||||
|
env:
|
||||||
|
- name: PUBLIC_IP
|
||||||
|
value: "<PUBLIC_IP>"
|
||||||
|
- name: GLOBAL_CONFIG_URL
|
||||||
|
value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json"
|
||||||
|
- name: DUMP_URL
|
||||||
|
value: "https://dump.ton.org/dumps/latest.tar.lz"
|
||||||
|
- name: LITESERVER
|
||||||
|
value: "true"
|
||||||
|
- name: VALIDATOR_PORT
|
||||||
|
value: "30001"
|
||||||
|
- name: CONSOLE_PORT
|
||||||
|
value: "30002"
|
||||||
|
- name: LITE_PORT
|
||||||
|
value: "30003"
|
||||||
|
- name: STATE_TTL
|
||||||
|
value: "86400"
|
||||||
|
- name: ARCHIVE_TTL
|
||||||
|
value: "86400"
|
||||||
|
- name: THREADS
|
||||||
|
value: "8"
|
||||||
|
- name: VERBOSITY
|
||||||
|
value: "3"
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: "/var/ton-work/db"
|
||||||
|
name: validator-engine-pv
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
memory: "64Gi"
|
||||||
|
cpu: "16"
|
||||||
|
limits:
|
||||||
|
memory: "128Gi"
|
||||||
|
cpu: "32"
|
||||||
|
---
|
||||||
|
kind: Service
|
||||||
|
apiVersion: v1
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-srv
|
||||||
|
spec:
|
||||||
|
type: NodePort
|
||||||
|
ports:
|
||||||
|
- name: validator-engine-public-udp-port
|
||||||
|
nodePort: 30001
|
||||||
|
port: 30001
|
||||||
|
targetPort: 30001
|
||||||
|
protocol: UDP
|
||||||
|
- name: validator-console-tcp-port
|
||||||
|
nodePort: 30002
|
||||||
|
port: 30002
|
||||||
|
targetPort: 30002
|
||||||
|
protocol: TCP
|
||||||
|
- name: lite-server-tcp-port
|
||||||
|
nodePort: 30003
|
||||||
|
port: 30003
|
||||||
|
targetPort: 30003
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
name: validator-engine-pod
|
||||||
|
---
|
||||||
|
apiVersion: storage.k8s.io/v1
|
||||||
|
kind: StorageClass
|
||||||
|
metadata:
|
||||||
|
name: local-storage
|
||||||
|
provisioner: kubernetes.io/no-provisioner
|
||||||
|
volumeBindingMode: WaitForFirstConsumer
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: PersistentVolume
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-pv
|
||||||
|
labels:
|
||||||
|
type: local
|
||||||
|
spec:
|
||||||
|
storageClassName: local-storage
|
||||||
|
capacity:
|
||||||
|
storage: 800Gi
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
- ReadOnlyMany
|
||||||
|
persistentVolumeReclaimPolicy: Retain
|
||||||
|
local:
|
||||||
|
path: <LOCAL_STORAGE_PATH>
|
||||||
|
nodeAffinity:
|
||||||
|
required:
|
||||||
|
nodeSelectorTerms:
|
||||||
|
- matchExpressions:
|
||||||
|
- key: node_type
|
||||||
|
operator: In
|
||||||
|
values:
|
||||||
|
- ton-validator
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: PersistentVolumeClaim
|
||||||
|
metadata:
|
||||||
|
name: validator-engine-pvc
|
||||||
|
spec:
|
||||||
|
storageClassName: local-storage
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: 800Gi
|
File diff suppressed because one or more lines are too long
|
@ -630,6 +630,11 @@ engine.validator.customOverlayNode adnl_id:int256 msg_sender:Bool msg_sender_pri
|
||||||
engine.validator.customOverlay name:string nodes:(vector engine.validator.customOverlayNode) = engine.validator.CustomOverlay;
|
engine.validator.customOverlay name:string nodes:(vector engine.validator.customOverlayNode) = engine.validator.CustomOverlay;
|
||||||
engine.validator.customOverlaysConfig overlays:(vector engine.validator.customOverlay) = engine.validator.CustomOverlaysConfig;
|
engine.validator.customOverlaysConfig overlays:(vector engine.validator.customOverlay) = engine.validator.CustomOverlaysConfig;
|
||||||
|
|
||||||
|
engine.validator.collatorOptions
|
||||||
|
deferring_enabled:Bool defer_messages_after:int defer_out_queue_size_limit:long
|
||||||
|
dispatch_phase_2_max_total:int dispatch_phase_3_max_total:int
|
||||||
|
dispatch_phase_2_max_per_initiator:int dispatch_phase_3_max_per_initiator:int = engine.validator.CollatorOptions;
|
||||||
|
|
||||||
engine.validator.collatorsList.collator adnl_id:int256 trusted:Bool = engine.validator.collatorsList.Collator;
|
engine.validator.collatorsList.collator adnl_id:int256 trusted:Bool = engine.validator.collatorsList.Collator;
|
||||||
engine.validator.collatorsList.shard shard_id:tonNode.shardId collators:(vector engine.validator.collatorsList.collator)
|
engine.validator.collatorsList.shard shard_id:tonNode.shardId collators:(vector engine.validator.collatorsList.collator)
|
||||||
= engine.validator.collatorsList.Shard;
|
= engine.validator.collatorsList.Shard;
|
||||||
|
@ -755,6 +760,8 @@ engine.validator.showCustomOverlays = engine.validator.CustomOverlaysConfig;
|
||||||
|
|
||||||
engine.validator.setStateSerializerEnabled enabled:Bool = engine.validator.Success;
|
engine.validator.setStateSerializerEnabled enabled:Bool = engine.validator.Success;
|
||||||
|
|
||||||
|
engine.validator.setCollatorOptionsJson json:string = engine.validator.Success;
|
||||||
|
|
||||||
engine.validator.getValidatorSessionsInfo = engine.validator.ValidatorSessionsInfo;
|
engine.validator.getValidatorSessionsInfo = engine.validator.ValidatorSessionsInfo;
|
||||||
|
|
||||||
engine.validator.addCollator adnl_id:int256 shard:tonNode.shardId = engine.validator.Success;
|
engine.validator.addCollator adnl_id:int256 shard:tonNode.shardId = engine.validator.Success;
|
||||||
|
|
Binary file not shown.
|
@ -1215,6 +1215,45 @@ td::Status SetStateSerializerEnabledQuery::receive(td::BufferSlice data) {
|
||||||
return td::Status::OK();
|
return td::Status::OK();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
td::Status SetCollatorOptionsJsonQuery::run() {
|
||||||
|
TRY_RESULT_ASSIGN(file_name_, tokenizer_.get_token<std::string>());
|
||||||
|
TRY_STATUS(tokenizer_.check_endl());
|
||||||
|
return td::Status::OK();
|
||||||
|
}
|
||||||
|
|
||||||
|
td::Status SetCollatorOptionsJsonQuery::send() {
|
||||||
|
TRY_RESULT(data, td::read_file(file_name_));
|
||||||
|
auto b =
|
||||||
|
ton::create_serialize_tl_object<ton::ton_api::engine_validator_setCollatorOptionsJson>(data.as_slice().str());
|
||||||
|
td::actor::send_closure(console_, &ValidatorEngineConsole::envelope_send_query, std::move(b), create_promise());
|
||||||
|
return td::Status::OK();
|
||||||
|
}
|
||||||
|
|
||||||
|
td::Status SetCollatorOptionsJsonQuery::receive(td::BufferSlice data) {
|
||||||
|
TRY_RESULT_PREFIX(f, ton::fetch_tl_object<ton::ton_api::engine_validator_success>(data.as_slice(), true),
|
||||||
|
"received incorrect answer: ");
|
||||||
|
td::TerminalIO::out() << "success\n";
|
||||||
|
return td::Status::OK();
|
||||||
|
}
|
||||||
|
|
||||||
|
td::Status ResetCollatorOptionsQuery::run() {
|
||||||
|
TRY_STATUS(tokenizer_.check_endl());
|
||||||
|
return td::Status::OK();
|
||||||
|
}
|
||||||
|
|
||||||
|
td::Status ResetCollatorOptionsQuery::send() {
|
||||||
|
auto b = ton::create_serialize_tl_object<ton::ton_api::engine_validator_setCollatorOptionsJson>("{}");
|
||||||
|
td::actor::send_closure(console_, &ValidatorEngineConsole::envelope_send_query, std::move(b), create_promise());
|
||||||
|
return td::Status::OK();
|
||||||
|
}
|
||||||
|
|
||||||
|
td::Status ResetCollatorOptionsQuery::receive(td::BufferSlice data) {
|
||||||
|
TRY_RESULT_PREFIX(f, ton::fetch_tl_object<ton::ton_api::engine_validator_success>(data.as_slice(), true),
|
||||||
|
"received incorrect answer: ");
|
||||||
|
td::TerminalIO::out() << "success\n";
|
||||||
|
return td::Status::OK();
|
||||||
|
}
|
||||||
|
|
||||||
td::Status GetValidatorSessionsInfoQuery::run() {
|
td::Status GetValidatorSessionsInfoQuery::run() {
|
||||||
TRY_STATUS(tokenizer_.check_endl());
|
TRY_STATUS(tokenizer_.check_endl());
|
||||||
return td::Status::OK();
|
return td::Status::OK();
|
||||||
|
|
|
@ -1230,6 +1230,47 @@ class SetStateSerializerEnabledQuery : public Query {
|
||||||
bool enabled_;
|
bool enabled_;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
class SetCollatorOptionsJsonQuery : public Query {
|
||||||
|
public:
|
||||||
|
SetCollatorOptionsJsonQuery(td::actor::ActorId<ValidatorEngineConsole> console, Tokenizer tokenizer)
|
||||||
|
: Query(console, std::move(tokenizer)) {
|
||||||
|
}
|
||||||
|
td::Status run() override;
|
||||||
|
td::Status send() override;
|
||||||
|
td::Status receive(td::BufferSlice data) override;
|
||||||
|
static std::string get_name() {
|
||||||
|
return "setcollatoroptionsjson";
|
||||||
|
}
|
||||||
|
static std::string get_help() {
|
||||||
|
return "setcollatoroptionsjson <filename>\tset collator options from file <filename>";
|
||||||
|
}
|
||||||
|
std::string name() const override {
|
||||||
|
return get_name();
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
std::string file_name_;
|
||||||
|
};
|
||||||
|
|
||||||
|
class ResetCollatorOptionsQuery : public Query {
|
||||||
|
public:
|
||||||
|
ResetCollatorOptionsQuery(td::actor::ActorId<ValidatorEngineConsole> console, Tokenizer tokenizer)
|
||||||
|
: Query(console, std::move(tokenizer)) {
|
||||||
|
}
|
||||||
|
td::Status run() override;
|
||||||
|
td::Status send() override;
|
||||||
|
td::Status receive(td::BufferSlice data) override;
|
||||||
|
static std::string get_name() {
|
||||||
|
return "resetcollatoroptions";
|
||||||
|
}
|
||||||
|
static std::string get_help() {
|
||||||
|
return "resetcollatoroptions\tset collator options to default values";
|
||||||
|
}
|
||||||
|
std::string name() const override {
|
||||||
|
return get_name();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
class GetValidatorSessionsInfoQuery : public Query {
|
class GetValidatorSessionsInfoQuery : public Query {
|
||||||
public:
|
public:
|
||||||
GetValidatorSessionsInfoQuery(td::actor::ActorId<ValidatorEngineConsole> console, Tokenizer tokenizer)
|
GetValidatorSessionsInfoQuery(td::actor::ActorId<ValidatorEngineConsole> console, Tokenizer tokenizer)
|
||||||
|
|
|
@ -147,6 +147,8 @@ void ValidatorEngineConsole::run() {
|
||||||
add_query_runner(std::make_unique<QueryRunnerImpl<DelCustomOverlayQuery>>());
|
add_query_runner(std::make_unique<QueryRunnerImpl<DelCustomOverlayQuery>>());
|
||||||
add_query_runner(std::make_unique<QueryRunnerImpl<ShowCustomOverlaysQuery>>());
|
add_query_runner(std::make_unique<QueryRunnerImpl<ShowCustomOverlaysQuery>>());
|
||||||
add_query_runner(std::make_unique<QueryRunnerImpl<SetStateSerializerEnabledQuery>>());
|
add_query_runner(std::make_unique<QueryRunnerImpl<SetStateSerializerEnabledQuery>>());
|
||||||
|
add_query_runner(std::make_unique<QueryRunnerImpl<SetCollatorOptionsJsonQuery>>());
|
||||||
|
add_query_runner(std::make_unique<QueryRunnerImpl<ResetCollatorOptionsQuery>>());
|
||||||
add_query_runner(std::make_unique<QueryRunnerImpl<GetValidatorSessionsInfoQuery>>());
|
add_query_runner(std::make_unique<QueryRunnerImpl<GetValidatorSessionsInfoQuery>>());
|
||||||
add_query_runner(std::make_unique<QueryRunnerImpl<AddCollatorQuery>>());
|
add_query_runner(std::make_unique<QueryRunnerImpl<AddCollatorQuery>>());
|
||||||
add_query_runner(std::make_unique<QueryRunnerImpl<AddShardQuery>>());
|
add_query_runner(std::make_unique<QueryRunnerImpl<AddShardQuery>>());
|
||||||
|
|
|
@ -1471,7 +1471,6 @@ td::Status ValidatorEngine::load_global_config() {
|
||||||
h.push_back(b);
|
h.push_back(b);
|
||||||
}
|
}
|
||||||
validator_options_.write().set_hardforks(std::move(h));
|
validator_options_.write().set_hardforks(std::move(h));
|
||||||
validator_options_.write().set_state_serializer_enabled(config_.state_serializer_enabled);
|
|
||||||
|
|
||||||
return td::Status::OK();
|
return td::Status::OK();
|
||||||
}
|
}
|
||||||
|
@ -1937,6 +1936,9 @@ void ValidatorEngine::started_overlays() {
|
||||||
|
|
||||||
void ValidatorEngine::start_validator() {
|
void ValidatorEngine::start_validator() {
|
||||||
validator_options_.write().set_allow_blockchain_init(config_.validators.size() > 0);
|
validator_options_.write().set_allow_blockchain_init(config_.validators.size() > 0);
|
||||||
|
validator_options_.write().set_state_serializer_enabled(config_.state_serializer_enabled);
|
||||||
|
load_collator_options();
|
||||||
|
|
||||||
validator_manager_ = ton::validator::ValidatorManagerFactory::create(
|
validator_manager_ = ton::validator::ValidatorManagerFactory::create(
|
||||||
validator_options_, db_root_, keyring_.get(), adnl_.get(), rldp_.get(), overlay_manager_.get());
|
validator_options_, db_root_, keyring_.get(), adnl_.get(), rldp_.get(), overlay_manager_.get());
|
||||||
|
|
||||||
|
@ -2547,6 +2549,69 @@ void ValidatorEngine::del_custom_overlay_from_config(std::string name, td::Promi
|
||||||
promise.set_error(td::Status::Error(PSTRING() << "no overlay \"" << name << "\" in config"));
|
promise.set_error(td::Status::Error(PSTRING() << "no overlay \"" << name << "\" in config"));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static td::Result<td::Ref<ton::validator::CollatorOptions>> parse_collator_options(td::MutableSlice json_str) {
|
||||||
|
td::Ref<ton::validator::CollatorOptions> ref{true};
|
||||||
|
ton::validator::CollatorOptions& opts = ref.write();
|
||||||
|
|
||||||
|
// Set default values (from_json leaves missing fields as is)
|
||||||
|
ton::ton_api::engine_validator_collatorOptions f;
|
||||||
|
f.deferring_enabled_ = opts.deferring_enabled;
|
||||||
|
f.defer_out_queue_size_limit_ = opts.defer_out_queue_size_limit;
|
||||||
|
f.defer_messages_after_ = opts.defer_messages_after;
|
||||||
|
f.dispatch_phase_2_max_total_ = opts.dispatch_phase_2_max_total;
|
||||||
|
f.dispatch_phase_3_max_total_ = opts.dispatch_phase_3_max_total;
|
||||||
|
f.dispatch_phase_2_max_per_initiator_ = opts.dispatch_phase_2_max_per_initiator;
|
||||||
|
f.dispatch_phase_3_max_per_initiator_ =
|
||||||
|
opts.dispatch_phase_3_max_per_initiator ? opts.dispatch_phase_3_max_per_initiator.value() : -1;
|
||||||
|
|
||||||
|
TRY_RESULT_PREFIX(json, td::json_decode(json_str), "failed to parse json: ");
|
||||||
|
TRY_STATUS_PREFIX(ton::ton_api::from_json(f, json.get_object()), "json does not fit TL scheme: ");
|
||||||
|
|
||||||
|
if (f.defer_messages_after_ <= 0) {
|
||||||
|
return td::Status::Error("defer_messages_after should be positive");
|
||||||
|
}
|
||||||
|
if (f.defer_out_queue_size_limit_ < 0) {
|
||||||
|
return td::Status::Error("defer_out_queue_size_limit should be non-negative");
|
||||||
|
}
|
||||||
|
if (f.dispatch_phase_2_max_total_ < 0) {
|
||||||
|
return td::Status::Error("dispatch_phase_2_max_total should be non-negative");
|
||||||
|
}
|
||||||
|
if (f.dispatch_phase_3_max_total_ < 0) {
|
||||||
|
return td::Status::Error("dispatch_phase_3_max_total should be non-negative");
|
||||||
|
}
|
||||||
|
if (f.dispatch_phase_2_max_per_initiator_ < 0) {
|
||||||
|
return td::Status::Error("dispatch_phase_2_max_per_initiator should be non-negative");
|
||||||
|
}
|
||||||
|
|
||||||
|
opts.deferring_enabled = f.deferring_enabled_;
|
||||||
|
opts.defer_messages_after = f.defer_messages_after_;
|
||||||
|
opts.defer_out_queue_size_limit = f.defer_out_queue_size_limit_;
|
||||||
|
opts.dispatch_phase_2_max_total = f.dispatch_phase_2_max_total_;
|
||||||
|
opts.dispatch_phase_3_max_total = f.dispatch_phase_3_max_total_;
|
||||||
|
opts.dispatch_phase_2_max_per_initiator = f.dispatch_phase_2_max_per_initiator_;
|
||||||
|
if (f.dispatch_phase_3_max_per_initiator_ >= 0) {
|
||||||
|
opts.dispatch_phase_3_max_per_initiator = f.dispatch_phase_3_max_per_initiator_;
|
||||||
|
} else {
|
||||||
|
opts.dispatch_phase_3_max_per_initiator = {};
|
||||||
|
}
|
||||||
|
|
||||||
|
return ref;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ValidatorEngine::load_collator_options() {
|
||||||
|
auto r_data = td::read_file(collator_options_file());
|
||||||
|
if (r_data.is_error()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
td::BufferSlice data = r_data.move_as_ok();
|
||||||
|
auto r_collator_options = parse_collator_options(data.as_slice());
|
||||||
|
if (r_collator_options.is_error()) {
|
||||||
|
LOG(ERROR) << "Failed to read collator options from file: " << r_collator_options.move_as_error();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
validator_options_.write().set_collator_options(r_collator_options.move_as_ok());
|
||||||
|
}
|
||||||
|
|
||||||
void ValidatorEngine::check_key(ton::PublicKeyHash id, td::Promise<td::Unit> promise) {
|
void ValidatorEngine::check_key(ton::PublicKeyHash id, td::Promise<td::Unit> promise) {
|
||||||
if (keys_.count(id) == 1) {
|
if (keys_.count(id) == 1) {
|
||||||
promise.set_value(td::Unit());
|
promise.set_value(td::Unit());
|
||||||
|
@ -3817,6 +3882,33 @@ void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_setStateS
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_setCollatorOptionsJson &query,
|
||||||
|
td::BufferSlice data, ton::PublicKeyHash src, td::uint32 perm,
|
||||||
|
td::Promise<td::BufferSlice> promise) {
|
||||||
|
if (!(perm & ValidatorEnginePermissions::vep_modify)) {
|
||||||
|
promise.set_value(create_control_query_error(td::Status::Error(ton::ErrorCode::error, "not authorized")));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (!started_) {
|
||||||
|
promise.set_value(create_control_query_error(td::Status::Error(ton::ErrorCode::notready, "not started")));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
auto r_collator_options = parse_collator_options(query.json_);
|
||||||
|
if (r_collator_options.is_error()) {
|
||||||
|
promise.set_value(create_control_query_error(r_collator_options.move_as_error_prefix("failed to parse json: ")));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
auto S = td::write_file(collator_options_file(), query.json_);
|
||||||
|
if (S.is_error()) {
|
||||||
|
promise.set_value(create_control_query_error(r_collator_options.move_as_error_prefix("failed to write file: ")));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
validator_options_.write().set_collator_options(r_collator_options.move_as_ok());
|
||||||
|
td::actor::send_closure(validator_manager_, &ton::validator::ValidatorManagerInterface::update_options,
|
||||||
|
validator_options_);
|
||||||
|
promise.set_value(ton::create_serialize_tl_object<ton::ton_api::engine_validator_success>());
|
||||||
|
}
|
||||||
|
|
||||||
void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_setCollatorsList &query, td::BufferSlice data,
|
void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_setCollatorsList &query, td::BufferSlice data,
|
||||||
ton::PublicKeyHash src, td::uint32 perm, td::Promise<td::BufferSlice> promise) {
|
ton::PublicKeyHash src, td::uint32 perm, td::Promise<td::BufferSlice> promise) {
|
||||||
if (!(perm & ValidatorEnginePermissions::vep_modify)) {
|
if (!(perm & ValidatorEnginePermissions::vep_modify)) {
|
||||||
|
|
|
@ -408,6 +408,9 @@ class ValidatorEngine : public td::actor::Actor {
|
||||||
std::string custom_overlays_config_file() const {
|
std::string custom_overlays_config_file() const {
|
||||||
return db_root_ + "/custom-overlays.json";
|
return db_root_ + "/custom-overlays.json";
|
||||||
}
|
}
|
||||||
|
std::string collator_options_file() const {
|
||||||
|
return db_root_ + "/collator-options.json";
|
||||||
|
}
|
||||||
std::string collators_list_file() const {
|
std::string collators_list_file() const {
|
||||||
return db_root_ + "/collators-list.json";
|
return db_root_ + "/collators-list.json";
|
||||||
}
|
}
|
||||||
|
@ -417,6 +420,7 @@ class ValidatorEngine : public td::actor::Actor {
|
||||||
void add_custom_overlay_to_config(
|
void add_custom_overlay_to_config(
|
||||||
ton::tl_object_ptr<ton::ton_api::engine_validator_customOverlay> overlay, td::Promise<td::Unit> promise);
|
ton::tl_object_ptr<ton::ton_api::engine_validator_customOverlay> overlay, td::Promise<td::Unit> promise);
|
||||||
void del_custom_overlay_from_config(std::string name, td::Promise<td::Unit> promise);
|
void del_custom_overlay_from_config(std::string name, td::Promise<td::Unit> promise);
|
||||||
|
void load_collator_options();
|
||||||
|
|
||||||
void check_key(ton::PublicKeyHash id, td::Promise<td::Unit> promise);
|
void check_key(ton::PublicKeyHash id, td::Promise<td::Unit> promise);
|
||||||
|
|
||||||
|
@ -514,6 +518,8 @@ class ValidatorEngine : public td::actor::Actor {
|
||||||
ton::PublicKeyHash src, td::uint32 perm, td::Promise<td::BufferSlice> promise);
|
ton::PublicKeyHash src, td::uint32 perm, td::Promise<td::BufferSlice> promise);
|
||||||
void run_control_query(ton::ton_api::engine_validator_setStateSerializerEnabled &query, td::BufferSlice data,
|
void run_control_query(ton::ton_api::engine_validator_setStateSerializerEnabled &query, td::BufferSlice data,
|
||||||
ton::PublicKeyHash src, td::uint32 perm, td::Promise<td::BufferSlice> promise);
|
ton::PublicKeyHash src, td::uint32 perm, td::Promise<td::BufferSlice> promise);
|
||||||
|
void run_control_query(ton::ton_api::engine_validator_setCollatorOptionsJson &query, td::BufferSlice data,
|
||||||
|
ton::PublicKeyHash src, td::uint32 perm, td::Promise<td::BufferSlice> promise);
|
||||||
void run_control_query(ton::ton_api::engine_validator_setCollatorsList &query, td::BufferSlice data,
|
void run_control_query(ton::ton_api::engine_validator_setCollatorsList &query, td::BufferSlice data,
|
||||||
ton::PublicKeyHash src, td::uint32 perm, td::Promise<td::BufferSlice> promise);
|
ton::PublicKeyHash src, td::uint32 perm, td::Promise<td::BufferSlice> promise);
|
||||||
void run_control_query(ton::ton_api::engine_validator_showCollatorsList &query, td::BufferSlice data,
|
void run_control_query(ton::ton_api::engine_validator_showCollatorsList &query, td::BufferSlice data,
|
||||||
|
|
|
@ -26,9 +26,14 @@
|
||||||
|
|
||||||
namespace ton::validator {
|
namespace ton::validator {
|
||||||
|
|
||||||
CollatorNode::CollatorNode(adnl::AdnlNodeIdShort local_id, td::actor::ActorId<ValidatorManager> manager,
|
CollatorNode::CollatorNode(adnl::AdnlNodeIdShort local_id, td::Ref<ValidatorManagerOptions> opts,
|
||||||
td::actor::ActorId<adnl::Adnl> adnl, td::actor::ActorId<rldp::Rldp> rldp)
|
td::actor::ActorId<ValidatorManager> manager, td::actor::ActorId<adnl::Adnl> adnl,
|
||||||
: local_id_(local_id), manager_(std::move(manager)), adnl_(std::move(adnl)), rldp_(std::move(rldp)) {
|
td::actor::ActorId<rldp::Rldp> rldp)
|
||||||
|
: local_id_(local_id)
|
||||||
|
, opts_(std::move(opts))
|
||||||
|
, manager_(std::move(manager))
|
||||||
|
, adnl_(std::move(adnl))
|
||||||
|
, rldp_(std::move(rldp)) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void CollatorNode::start_up() {
|
void CollatorNode::start_up() {
|
||||||
|
@ -383,7 +388,7 @@ void CollatorNode::generate_block(ShardIdFull shard, CatchainSeqno cc_seqno, std
|
||||||
cache_entry->block_seqno = block_seqno;
|
cache_entry->block_seqno = block_seqno;
|
||||||
run_collate_query(
|
run_collate_query(
|
||||||
shard, last_masterchain_state_->get_block_id(), std::move(prev_blocks), Ed25519_PublicKey{td::Bits256::zero()},
|
shard, last_masterchain_state_->get_block_id(), std::move(prev_blocks), Ed25519_PublicKey{td::Bits256::zero()},
|
||||||
last_masterchain_state_->get_validator_set(shard), manager_, timeout,
|
last_masterchain_state_->get_validator_set(shard), opts_->get_collator_options(), manager_, timeout,
|
||||||
[=, SelfId = actor_id(this), timer = td::Timer{}](td::Result<BlockCandidate> R) {
|
[=, SelfId = actor_id(this), timer = td::Timer{}](td::Result<BlockCandidate> R) {
|
||||||
LOG(INFO) << "generate block result"
|
LOG(INFO) << "generate block result"
|
||||||
<< ": shard=" << shard.to_str() << ", cc_seqno=" << cc_seqno << ", next_block_seqno=" << block_seqno
|
<< ": shard=" << shard.to_str() << ", cc_seqno=" << cc_seqno << ", next_block_seqno=" << block_seqno
|
||||||
|
|
|
@ -26,8 +26,9 @@ class ValidatorManager;
|
||||||
|
|
||||||
class CollatorNode : public td::actor::Actor {
|
class CollatorNode : public td::actor::Actor {
|
||||||
public:
|
public:
|
||||||
CollatorNode(adnl::AdnlNodeIdShort local_id, td::actor::ActorId<ValidatorManager> manager,
|
CollatorNode(adnl::AdnlNodeIdShort local_id, td::Ref<ValidatorManagerOptions> opts,
|
||||||
td::actor::ActorId<adnl::Adnl> adnl, td::actor::ActorId<rldp::Rldp> rldp);
|
td::actor::ActorId<ValidatorManager> manager, td::actor::ActorId<adnl::Adnl> adnl,
|
||||||
|
td::actor::ActorId<rldp::Rldp> rldp);
|
||||||
void start_up() override;
|
void start_up() override;
|
||||||
void tear_down() override;
|
void tear_down() override;
|
||||||
void add_shard(ShardIdFull shard);
|
void add_shard(ShardIdFull shard);
|
||||||
|
@ -36,12 +37,17 @@ class CollatorNode : public td::actor::Actor {
|
||||||
void new_masterchain_block_notification(td::Ref<MasterchainState> state);
|
void new_masterchain_block_notification(td::Ref<MasterchainState> state);
|
||||||
void update_validator_group_info(ShardIdFull shard, std::vector<BlockIdExt> prev, CatchainSeqno cc_seqno);
|
void update_validator_group_info(ShardIdFull shard, std::vector<BlockIdExt> prev, CatchainSeqno cc_seqno);
|
||||||
|
|
||||||
|
void update_options(td::Ref<ValidatorManagerOptions> opts) {
|
||||||
|
opts_ = std::move(opts);
|
||||||
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
void receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice data, td::Promise<td::BufferSlice> promise);
|
void receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice data, td::Promise<td::BufferSlice> promise);
|
||||||
|
|
||||||
bool can_collate_shard(ShardIdFull shard) const;
|
bool can_collate_shard(ShardIdFull shard) const;
|
||||||
|
|
||||||
adnl::AdnlNodeIdShort local_id_;
|
adnl::AdnlNodeIdShort local_id_;
|
||||||
|
td::Ref<ValidatorManagerOptions> opts_;
|
||||||
td::actor::ActorId<ValidatorManager> manager_;
|
td::actor::ActorId<ValidatorManager> manager_;
|
||||||
td::actor::ActorId<adnl::Adnl> adnl_;
|
td::actor::ActorId<adnl::Adnl> adnl_;
|
||||||
td::actor::ActorId<rldp::Rldp> rldp_;
|
td::actor::ActorId<rldp::Rldp> rldp_;
|
||||||
|
|
|
@ -87,9 +87,9 @@ void run_validate_query(ShardIdFull shard, BlockIdExt min_masterchain_block_id,
|
||||||
td::Promise<ValidateCandidateResult> promise, unsigned mode = 0);
|
td::Promise<ValidateCandidateResult> promise, unsigned mode = 0);
|
||||||
void run_collate_query(ShardIdFull shard, const BlockIdExt& min_masterchain_block_id, std::vector<BlockIdExt> prev,
|
void run_collate_query(ShardIdFull shard, const BlockIdExt& min_masterchain_block_id, std::vector<BlockIdExt> prev,
|
||||||
Ed25519_PublicKey creator, td::Ref<ValidatorSet> validator_set,
|
Ed25519_PublicKey creator, td::Ref<ValidatorSet> validator_set,
|
||||||
td::actor::ActorId<ValidatorManager> manager, td::Timestamp timeout,
|
td::Ref<CollatorOptions> collator_opts, td::actor::ActorId<ValidatorManager> manager,
|
||||||
td::Promise<BlockCandidate> promise, td::CancellationToken cancellation_token = {},
|
td::Timestamp timeout, td::Promise<BlockCandidate> promise,
|
||||||
unsigned mode = 0);
|
td::CancellationToken cancellation_token = {}, unsigned mode = 0);
|
||||||
void run_collate_hardfork(ShardIdFull shard, const BlockIdExt& min_masterchain_block_id, std::vector<BlockIdExt> prev,
|
void run_collate_hardfork(ShardIdFull shard, const BlockIdExt& min_masterchain_block_id, std::vector<BlockIdExt> prev,
|
||||||
td::actor::ActorId<ValidatorManager> manager, td::Timestamp timeout,
|
td::actor::ActorId<ValidatorManager> manager, td::Timestamp timeout,
|
||||||
td::Promise<BlockCandidate> promise);
|
td::Promise<BlockCandidate> promise);
|
||||||
|
|
|
@ -70,6 +70,7 @@ class Collator final : public td::actor::Actor {
|
||||||
std::vector<Ref<ShardState>> prev_states;
|
std::vector<Ref<ShardState>> prev_states;
|
||||||
std::vector<Ref<BlockData>> prev_block_data;
|
std::vector<Ref<BlockData>> prev_block_data;
|
||||||
Ed25519_PublicKey created_by_;
|
Ed25519_PublicKey created_by_;
|
||||||
|
Ref<CollatorOptions> collator_opts_;
|
||||||
Ref<ValidatorSet> validator_set_;
|
Ref<ValidatorSet> validator_set_;
|
||||||
td::actor::ActorId<ValidatorManager> manager;
|
td::actor::ActorId<ValidatorManager> manager;
|
||||||
td::Timestamp timeout;
|
td::Timestamp timeout;
|
||||||
|
@ -89,9 +90,9 @@ class Collator final : public td::actor::Actor {
|
||||||
|
|
||||||
public:
|
public:
|
||||||
Collator(ShardIdFull shard, bool is_hardfork, BlockIdExt min_masterchain_block_id, std::vector<BlockIdExt> prev,
|
Collator(ShardIdFull shard, bool is_hardfork, BlockIdExt min_masterchain_block_id, std::vector<BlockIdExt> prev,
|
||||||
Ref<ValidatorSet> validator_set, Ed25519_PublicKey collator_id, td::actor::ActorId<ValidatorManager> manager,
|
Ref<ValidatorSet> validator_set, Ed25519_PublicKey collator_id, Ref<CollatorOptions> collator_opts,
|
||||||
td::Timestamp timeout, td::Promise<BlockCandidate> promise, td::CancellationToken cancellation_token,
|
td::actor::ActorId<ValidatorManager> manager, td::Timestamp timeout, td::Promise<BlockCandidate> promise,
|
||||||
unsigned mode);
|
td::CancellationToken cancellation_token, unsigned mode);
|
||||||
~Collator() override = default;
|
~Collator() override = default;
|
||||||
bool is_busy() const {
|
bool is_busy() const {
|
||||||
return busy_;
|
return busy_;
|
||||||
|
@ -199,6 +200,7 @@ class Collator final : public td::actor::Actor {
|
||||||
sibling_out_msg_queue_;
|
sibling_out_msg_queue_;
|
||||||
std::map<StdSmcAddress, size_t> unprocessed_deferred_messages_; // number of messages from dispatch queue in new_msgs
|
std::map<StdSmcAddress, size_t> unprocessed_deferred_messages_; // number of messages from dispatch queue in new_msgs
|
||||||
td::uint64 out_msg_queue_size_ = 0;
|
td::uint64 out_msg_queue_size_ = 0;
|
||||||
|
td::uint64 old_out_msg_queue_size_ = 0;
|
||||||
bool have_out_msg_queue_size_in_state_ = false;
|
bool have_out_msg_queue_size_in_state_ = false;
|
||||||
std::unique_ptr<vm::Dictionary> ihr_pending;
|
std::unique_ptr<vm::Dictionary> ihr_pending;
|
||||||
std::shared_ptr<block::MsgProcessedUptoCollection> processed_upto_, sibling_processed_upto_;
|
std::shared_ptr<block::MsgProcessedUptoCollection> processed_upto_, sibling_processed_upto_;
|
||||||
|
@ -219,6 +221,8 @@ class Collator final : public td::actor::Actor {
|
||||||
unsigned dispatch_queue_ops_{0};
|
unsigned dispatch_queue_ops_{0};
|
||||||
std::map<StdSmcAddress, LogicalTime> last_dispatch_queue_emitted_lt_;
|
std::map<StdSmcAddress, LogicalTime> last_dispatch_queue_emitted_lt_;
|
||||||
bool have_unprocessed_account_dispatch_queue_ = true;
|
bool have_unprocessed_account_dispatch_queue_ = true;
|
||||||
|
td::uint64 defer_out_queue_size_limit_;
|
||||||
|
td::uint64 hard_defer_out_queue_size_limit_;
|
||||||
|
|
||||||
bool msg_metadata_enabled_ = false;
|
bool msg_metadata_enabled_ = false;
|
||||||
bool deferring_messages_enabled_ = false;
|
bool deferring_messages_enabled_ = false;
|
||||||
|
|
|
@ -50,7 +50,6 @@ static const td::uint32 SPLIT_MAX_QUEUE_SIZE = 100000;
|
||||||
static const td::uint32 MERGE_MAX_QUEUE_SIZE = 2047;
|
static const td::uint32 MERGE_MAX_QUEUE_SIZE = 2047;
|
||||||
static const td::uint32 SKIP_EXTERNALS_QUEUE_SIZE = 8000;
|
static const td::uint32 SKIP_EXTERNALS_QUEUE_SIZE = 8000;
|
||||||
static const int HIGH_PRIORITY_EXTERNAL = 10; // don't skip high priority externals when queue is big
|
static const int HIGH_PRIORITY_EXTERNAL = 10; // don't skip high priority externals when queue is big
|
||||||
static const int DEFER_MESSAGES_AFTER = 10; // 10'th and later messages from address will be deferred
|
|
||||||
|
|
||||||
#define DBG(__n) dbg(__n)&&
|
#define DBG(__n) dbg(__n)&&
|
||||||
#define DSTART int __dcnt = 0;
|
#define DSTART int __dcnt = 0;
|
||||||
|
@ -71,19 +70,22 @@ static inline bool dbg(int c) {
|
||||||
* @param prev A vector of BlockIdExt representing the previous blocks.
|
* @param prev A vector of BlockIdExt representing the previous blocks.
|
||||||
* @param validator_set A reference to the ValidatorSet.
|
* @param validator_set A reference to the ValidatorSet.
|
||||||
* @param collator_id The public key of the block creator.
|
* @param collator_id The public key of the block creator.
|
||||||
|
* @param collator_opts A reference to CollatorOptions.
|
||||||
* @param manager The ActorId of the ValidatorManager.
|
* @param manager The ActorId of the ValidatorManager.
|
||||||
* @param timeout The timeout for the collator.
|
* @param timeout The timeout for the collator.
|
||||||
* @param promise The promise to return the result.
|
* @param promise The promise to return the result.
|
||||||
*/
|
*/
|
||||||
Collator::Collator(ShardIdFull shard, bool is_hardfork, BlockIdExt min_masterchain_block_id,
|
Collator::Collator(ShardIdFull shard, bool is_hardfork, BlockIdExt min_masterchain_block_id,
|
||||||
std::vector<BlockIdExt> prev, td::Ref<ValidatorSet> validator_set, Ed25519_PublicKey collator_id,
|
std::vector<BlockIdExt> prev, td::Ref<ValidatorSet> validator_set, Ed25519_PublicKey collator_id,
|
||||||
td::actor::ActorId<ValidatorManager> manager, td::Timestamp timeout,
|
Ref<CollatorOptions> collator_opts, td::actor::ActorId<ValidatorManager> manager,
|
||||||
td::Promise<BlockCandidate> promise, td::CancellationToken cancellation_token, unsigned mode)
|
td::Timestamp timeout, td::Promise<BlockCandidate> promise, td::CancellationToken cancellation_token,
|
||||||
|
unsigned mode)
|
||||||
: shard_(shard)
|
: shard_(shard)
|
||||||
, is_hardfork_(is_hardfork)
|
, is_hardfork_(is_hardfork)
|
||||||
, min_mc_block_id{min_masterchain_block_id}
|
, min_mc_block_id{min_masterchain_block_id}
|
||||||
, prev_blocks(std::move(prev))
|
, prev_blocks(std::move(prev))
|
||||||
, created_by_(collator_id)
|
, created_by_(collator_id)
|
||||||
|
, collator_opts_(collator_opts)
|
||||||
, validator_set_(std::move(validator_set))
|
, validator_set_(std::move(validator_set))
|
||||||
, manager(manager)
|
, manager(manager)
|
||||||
, timeout(timeout)
|
, timeout(timeout)
|
||||||
|
@ -1868,6 +1870,7 @@ bool Collator::try_collate() {
|
||||||
last_proc_int_msg_.second.set_zero();
|
last_proc_int_msg_.second.set_zero();
|
||||||
first_unproc_int_msg_.first = ~0ULL;
|
first_unproc_int_msg_.first = ~0ULL;
|
||||||
first_unproc_int_msg_.second.set_ones();
|
first_unproc_int_msg_.second.set_ones();
|
||||||
|
old_out_msg_queue_size_ = out_msg_queue_size_;
|
||||||
if (is_masterchain()) {
|
if (is_masterchain()) {
|
||||||
LOG(DEBUG) << "getting the list of special smart contracts";
|
LOG(DEBUG) << "getting the list of special smart contracts";
|
||||||
auto res = config_->get_special_smartcontracts();
|
auto res = config_->get_special_smartcontracts();
|
||||||
|
@ -2052,6 +2055,10 @@ bool Collator::fetch_config_params() {
|
||||||
return fatal_error(res.move_as_error());
|
return fatal_error(res.move_as_error());
|
||||||
}
|
}
|
||||||
compute_phase_cfg_.libraries = std::make_unique<vm::Dictionary>(config_->get_libraries_root(), 256);
|
compute_phase_cfg_.libraries = std::make_unique<vm::Dictionary>(config_->get_libraries_root(), 256);
|
||||||
|
defer_out_queue_size_limit_ = std::max<td::uint64>(collator_opts_->defer_out_queue_size_limit,
|
||||||
|
compute_phase_cfg_.size_limits.defer_out_queue_size_limit);
|
||||||
|
// This one is checked in validate-query
|
||||||
|
hard_defer_out_queue_size_limit_ = compute_phase_cfg_.size_limits.defer_out_queue_size_limit;
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3176,8 +3183,10 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R
|
||||||
bool is_special_account = is_masterchain() && config_->is_special_smartcontract(src_addr);
|
bool is_special_account = is_masterchain() && config_->is_special_smartcontract(src_addr);
|
||||||
bool defer = false;
|
bool defer = false;
|
||||||
if (!from_dispatch_queue) {
|
if (!from_dispatch_queue) {
|
||||||
if (deferring_messages_enabled_ && !is_special && !is_special_account && msg.msg_idx != 0) {
|
if (deferring_messages_enabled_ && collator_opts_->deferring_enabled && !is_special && !is_special_account &&
|
||||||
if (++sender_generated_messages_count_[src_addr] >= DEFER_MESSAGES_AFTER) {
|
msg.msg_idx != 0) {
|
||||||
|
if (++sender_generated_messages_count_[src_addr] >= collator_opts_->defer_messages_after ||
|
||||||
|
out_msg_queue_size_ > defer_out_queue_size_limit_) {
|
||||||
defer = true;
|
defer = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -3776,18 +3785,24 @@ int Collator::process_external_message(Ref<vm::Cell> msg) {
|
||||||
* @returns True if the processing was successful, false otherwise.
|
* @returns True if the processing was successful, false otherwise.
|
||||||
*/
|
*/
|
||||||
bool Collator::process_dispatch_queue() {
|
bool Collator::process_dispatch_queue() {
|
||||||
|
if (out_msg_queue_size_ > defer_out_queue_size_limit_ && old_out_msg_queue_size_ > hard_defer_out_queue_size_limit_) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
have_unprocessed_account_dispatch_queue_ = true;
|
have_unprocessed_account_dispatch_queue_ = true;
|
||||||
size_t max_total_count[3] = {1 << 30, 150, 150};
|
size_t max_total_count[3] = {1 << 30, collator_opts_->dispatch_phase_2_max_total,
|
||||||
size_t max_per_initiator[3] = {1 << 30, 20, 0};
|
collator_opts_->dispatch_phase_3_max_total};
|
||||||
if (out_msg_queue_size_ <= 256) {
|
size_t max_per_initiator[3] = {1 << 30, collator_opts_->dispatch_phase_2_max_per_initiator, 0};
|
||||||
|
if (collator_opts_->dispatch_phase_3_max_per_initiator) {
|
||||||
|
max_per_initiator[2] = collator_opts_->dispatch_phase_3_max_per_initiator.value();
|
||||||
|
} else if (out_msg_queue_size_ <= 256) {
|
||||||
max_per_initiator[2] = 10;
|
max_per_initiator[2] = 10;
|
||||||
} else if (out_msg_queue_size_ <= 512) {
|
} else if (out_msg_queue_size_ <= 512) {
|
||||||
max_per_initiator[2] = 2;
|
max_per_initiator[2] = 2;
|
||||||
} else if (out_msg_queue_size_ <= 2048) {
|
} else if (out_msg_queue_size_ <= 1500) {
|
||||||
max_per_initiator[2] = 1;
|
max_per_initiator[2] = 1;
|
||||||
}
|
}
|
||||||
for (int iter = 0; iter < 3; ++iter) {
|
for (int iter = 0; iter < 3; ++iter) {
|
||||||
if (max_per_initiator[iter] == 0) {
|
if (max_per_initiator[iter] == 0 || max_total_count[iter] == 0) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
vm::AugmentedDictionary cur_dispatch_queue{dispatch_queue_->get_root(), 256, block::tlb::aug_DispatchQueue};
|
vm::AugmentedDictionary cur_dispatch_queue{dispatch_queue_->get_root(), 256, block::tlb::aug_DispatchQueue};
|
||||||
|
@ -3826,7 +3841,8 @@ bool Collator::process_dispatch_queue() {
|
||||||
|
|
||||||
// Remove message from DispatchQueue
|
// Remove message from DispatchQueue
|
||||||
bool ok;
|
bool ok;
|
||||||
if (iter == 0 || (iter == 1 && sender_generated_messages_count_[src_addr] >= DEFER_MESSAGES_AFTER)) {
|
if (iter == 0 ||
|
||||||
|
(iter == 1 && sender_generated_messages_count_[src_addr] >= collator_opts_->defer_messages_after)) {
|
||||||
ok = cur_dispatch_queue.lookup_delete(src_addr).not_null();
|
ok = cur_dispatch_queue.lookup_delete(src_addr).not_null();
|
||||||
} else {
|
} else {
|
||||||
dict.lookup_delete(key);
|
dict.lookup_delete(key);
|
||||||
|
|
|
@ -214,8 +214,9 @@ void run_validate_query(ShardIdFull shard, BlockIdExt min_masterchain_block_id,
|
||||||
|
|
||||||
void run_collate_query(ShardIdFull shard, const BlockIdExt& min_masterchain_block_id, std::vector<BlockIdExt> prev,
|
void run_collate_query(ShardIdFull shard, const BlockIdExt& min_masterchain_block_id, std::vector<BlockIdExt> prev,
|
||||||
Ed25519_PublicKey creator, td::Ref<ValidatorSet> validator_set,
|
Ed25519_PublicKey creator, td::Ref<ValidatorSet> validator_set,
|
||||||
td::actor::ActorId<ValidatorManager> manager, td::Timestamp timeout,
|
td::Ref<CollatorOptions> collator_opts, td::actor::ActorId<ValidatorManager> manager,
|
||||||
td::Promise<BlockCandidate> promise, td::CancellationToken cancellation_token, unsigned mode) {
|
td::Timestamp timeout, td::Promise<BlockCandidate> promise,
|
||||||
|
td::CancellationToken cancellation_token, unsigned mode) {
|
||||||
BlockSeqno seqno = 0;
|
BlockSeqno seqno = 0;
|
||||||
for (auto& p : prev) {
|
for (auto& p : prev) {
|
||||||
if (p.seqno() > seqno) {
|
if (p.seqno() > seqno) {
|
||||||
|
@ -224,8 +225,8 @@ void run_collate_query(ShardIdFull shard, const BlockIdExt& min_masterchain_bloc
|
||||||
}
|
}
|
||||||
td::actor::create_actor<Collator>(PSTRING() << "collate" << shard.to_str() << ":" << (seqno + 1), shard, false,
|
td::actor::create_actor<Collator>(PSTRING() << "collate" << shard.to_str() << ":" << (seqno + 1), shard, false,
|
||||||
min_masterchain_block_id, std::move(prev), std::move(validator_set), creator,
|
min_masterchain_block_id, std::move(prev), std::move(validator_set), creator,
|
||||||
std::move(manager), timeout, std::move(promise), std::move(cancellation_token),
|
std::move(collator_opts), std::move(manager), timeout, std::move(promise),
|
||||||
mode)
|
std::move(cancellation_token), mode)
|
||||||
.release();
|
.release();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -240,8 +241,8 @@ void run_collate_hardfork(ShardIdFull shard, const BlockIdExt& min_masterchain_b
|
||||||
}
|
}
|
||||||
td::actor::create_actor<Collator>(PSTRING() << "collate" << shard.to_str() << ":" << (seqno + 1), shard, true,
|
td::actor::create_actor<Collator>(PSTRING() << "collate" << shard.to_str() << ":" << (seqno + 1), shard, true,
|
||||||
min_masterchain_block_id, std::move(prev), td::Ref<ValidatorSet>{},
|
min_masterchain_block_id, std::move(prev), td::Ref<ValidatorSet>{},
|
||||||
Ed25519_PublicKey{Bits256::zero()}, std::move(manager), timeout, std::move(promise),
|
Ed25519_PublicKey{Bits256::zero()}, td::Ref<CollatorOptions>{true},
|
||||||
td::CancellationToken{}, 0)
|
std::move(manager), timeout, std::move(promise), td::CancellationToken{}, 0)
|
||||||
.release();
|
.release();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1004,6 +1004,7 @@ bool ValidateQuery::fetch_config_params() {
|
||||||
action_phase_cfg_.action_fine_enabled = config_->get_global_version() >= 4;
|
action_phase_cfg_.action_fine_enabled = config_->get_global_version() >= 4;
|
||||||
action_phase_cfg_.bounce_on_fail_enabled = config_->get_global_version() >= 4;
|
action_phase_cfg_.bounce_on_fail_enabled = config_->get_global_version() >= 4;
|
||||||
action_phase_cfg_.message_skip_enabled = config_->get_global_version() >= 8;
|
action_phase_cfg_.message_skip_enabled = config_->get_global_version() >= 8;
|
||||||
|
action_phase_cfg_.disable_custom_fess = config_->get_global_version() >= 8;
|
||||||
action_phase_cfg_.mc_blackhole_addr = config_->get_burning_config().blackhole_addr;
|
action_phase_cfg_.mc_blackhole_addr = config_->get_burning_config().blackhole_addr;
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
|
@ -2298,6 +2299,7 @@ bool ValidateQuery::prepare_out_msg_queue_size() {
|
||||||
// if after_split then out_msg_queue_size is always present, since it is calculated during split
|
// if after_split then out_msg_queue_size is always present, since it is calculated during split
|
||||||
old_out_msg_queue_size_ = ps_.out_msg_queue_size_.value();
|
old_out_msg_queue_size_ = ps_.out_msg_queue_size_.value();
|
||||||
out_msg_queue_size_known_ = true;
|
out_msg_queue_size_known_ = true;
|
||||||
|
have_out_msg_queue_size_in_state_ = true;
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (!store_out_msg_queue_size_) { // Don't need it
|
if (!store_out_msg_queue_size_) { // Don't need it
|
||||||
|
@ -3499,7 +3501,7 @@ bool ValidateQuery::check_account_dispatch_queue_update(td::Bits256 addr, Ref<vm
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (old_dict_size > 0 && max_removed_lt == 0) {
|
if (old_dict_size > 0 && max_removed_lt == 0) {
|
||||||
have_unprocessed_account_dispatch_queue_ = true;
|
++processed_account_dispatch_queues_;
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -3526,6 +3528,28 @@ bool ValidateQuery::unpack_dispatch_queue_update() {
|
||||||
if (!res) {
|
if (!res) {
|
||||||
return reject_query("invalid DispatchQueue dictionary in the new state");
|
return reject_query("invalid DispatchQueue dictionary in the new state");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (have_out_msg_queue_size_in_state_ &&
|
||||||
|
old_out_msg_queue_size_ <= compute_phase_cfg_.size_limits.defer_out_queue_size_limit) {
|
||||||
|
// Check that at least one message was taken from each AccountDispatchQueue
|
||||||
|
try {
|
||||||
|
have_unprocessed_account_dispatch_queue_ = false;
|
||||||
|
td::uint64 total_account_dispatch_queues = 0;
|
||||||
|
ps_.dispatch_queue_->check_for_each([&](Ref<vm::CellSlice>, td::ConstBitPtr, int n) -> bool {
|
||||||
|
assert(n == 352);
|
||||||
|
++total_account_dispatch_queues;
|
||||||
|
if (total_account_dispatch_queues > processed_account_dispatch_queues_) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
});
|
||||||
|
have_unprocessed_account_dispatch_queue_ =
|
||||||
|
(total_account_dispatch_queues != processed_account_dispatch_queues_);
|
||||||
|
} catch (vm::VmVirtError&) {
|
||||||
|
// VmVirtError can happen if we have only a proof of ShardState
|
||||||
|
have_unprocessed_account_dispatch_queue_ = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
} catch (vm::VmError& err) {
|
} catch (vm::VmError& err) {
|
||||||
return reject_query("invalid DispatchQueue dictionary difference between the old and the new state: "s +
|
return reject_query("invalid DispatchQueue dictionary difference between the old and the new state: "s +
|
||||||
err.get_msg());
|
err.get_msg());
|
||||||
|
@ -3788,7 +3812,8 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref<vm::CellSlice> in_msg)
|
||||||
}
|
}
|
||||||
if (have_unprocessed_account_dispatch_queue_ && tag != block::gen::InMsg::msg_import_ext &&
|
if (have_unprocessed_account_dispatch_queue_ && tag != block::gen::InMsg::msg_import_ext &&
|
||||||
tag != block::gen::InMsg::msg_import_deferred_tr && tag != block::gen::InMsg::msg_import_deferred_fin) {
|
tag != block::gen::InMsg::msg_import_deferred_tr && tag != block::gen::InMsg::msg_import_deferred_fin) {
|
||||||
// Collator is requeired to take at least one message from each AccountDispatchQueue (unless the block is full)
|
// Collator is requeired to take at least one message from each AccountDispatchQueue
|
||||||
|
// (unless the block is full or unless out_msg_queue_size is big)
|
||||||
// If some AccountDispatchQueue is unporcessed then it's not allowed to import other messages except for externals
|
// If some AccountDispatchQueue is unporcessed then it's not allowed to import other messages except for externals
|
||||||
return reject_query("required DispatchQueue processing is not done, but some other internal messages are imported");
|
return reject_query("required DispatchQueue processing is not done, but some other internal messages are imported");
|
||||||
}
|
}
|
||||||
|
|
|
@ -237,11 +237,13 @@ class ValidateQuery : public td::actor::Actor {
|
||||||
std::set<StdSmcAddress> account_expected_defer_all_messages_;
|
std::set<StdSmcAddress> account_expected_defer_all_messages_;
|
||||||
td::uint64 old_out_msg_queue_size_ = 0, new_out_msg_queue_size_ = 0;
|
td::uint64 old_out_msg_queue_size_ = 0, new_out_msg_queue_size_ = 0;
|
||||||
bool out_msg_queue_size_known_ = false;
|
bool out_msg_queue_size_known_ = false;
|
||||||
|
bool have_out_msg_queue_size_in_state_ = false;
|
||||||
|
|
||||||
bool msg_metadata_enabled_ = false;
|
bool msg_metadata_enabled_ = false;
|
||||||
bool deferring_messages_enabled_ = false;
|
bool deferring_messages_enabled_ = false;
|
||||||
bool store_out_msg_queue_size_ = false;
|
bool store_out_msg_queue_size_ = false;
|
||||||
|
|
||||||
|
td::uint64 processed_account_dispatch_queues_ = 0;
|
||||||
bool have_unprocessed_account_dispatch_queue_ = false;
|
bool have_unprocessed_account_dispatch_queue_ = false;
|
||||||
|
|
||||||
td::PerfWarningTimer perf_timer_;
|
td::PerfWarningTimer perf_timer_;
|
||||||
|
|
|
@ -128,8 +128,8 @@ void ValidatorManagerImpl::sync_complete(td::Promise<td::Unit> promise) {
|
||||||
}
|
}
|
||||||
Ed25519_PublicKey created_by{td::Bits256::zero()};
|
Ed25519_PublicKey created_by{td::Bits256::zero()};
|
||||||
td::as<td::uint32>(created_by.as_bits256().data() + 32 - 4) = ((unsigned)std::time(nullptr) >> 8);
|
td::as<td::uint32>(created_by.as_bits256().data() + 32 - 4) = ((unsigned)std::time(nullptr) >> 8);
|
||||||
run_collate_query(shard_id, last_masterchain_block_id_, prev, created_by, val_set, actor_id(this),
|
run_collate_query(shard_id, last_masterchain_block_id_, prev, created_by, val_set, td::Ref<CollatorOptions>{true},
|
||||||
td::Timestamp::in(10.0), std::move(P));
|
actor_id(this), td::Timestamp::in(10.0), std::move(P));
|
||||||
}
|
}
|
||||||
|
|
||||||
void ValidatorManagerImpl::validate_fake(BlockCandidate candidate, std::vector<BlockIdExt> prev, BlockIdExt last,
|
void ValidatorManagerImpl::validate_fake(BlockCandidate candidate, std::vector<BlockIdExt> prev, BlockIdExt last,
|
||||||
|
|
|
@ -3365,6 +3365,9 @@ void ValidatorManagerImpl::update_options(td::Ref<ValidatorManagerOptions> opts)
|
||||||
td::actor::send_closure(group.second.actor, &ValidatorGroup::update_options, opts,
|
td::actor::send_closure(group.second.actor, &ValidatorGroup::update_options, opts,
|
||||||
opts->need_monitor(group.second.shard, last_masterchain_state_));
|
opts->need_monitor(group.second.shard, last_masterchain_state_));
|
||||||
}
|
}
|
||||||
|
for (auto &collator : collator_nodes_) {
|
||||||
|
td::actor::send_closure(collator.second.actor, &CollatorNode::update_options, opts);
|
||||||
|
}
|
||||||
opts_ = std::move(opts);
|
opts_ = std::move(opts);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3409,7 +3412,7 @@ void ValidatorManagerImpl::add_collator(adnl::AdnlNodeIdShort id, ShardIdFull sh
|
||||||
auto it = collator_nodes_.find(id);
|
auto it = collator_nodes_.find(id);
|
||||||
if (it == collator_nodes_.end()) {
|
if (it == collator_nodes_.end()) {
|
||||||
it = collator_nodes_.emplace(id, Collator()).first;
|
it = collator_nodes_.emplace(id, Collator()).first;
|
||||||
it->second.actor = td::actor::create_actor<CollatorNode>("collatornode", id, actor_id(this), adnl_, rldp_);
|
it->second.actor = td::actor::create_actor<CollatorNode>("collatornode", id, opts_, actor_id(this), adnl_, rldp_);
|
||||||
}
|
}
|
||||||
if (!it->second.shards.insert(shard).second) {
|
if (!it->second.shards.insert(shard).second) {
|
||||||
return;
|
return;
|
||||||
|
|
|
@ -514,8 +514,8 @@ void ValidatorGroup::collate_block(td::uint32 round_id, td::Timestamp timeout, t
|
||||||
}
|
}
|
||||||
if (self_collate) {
|
if (self_collate) {
|
||||||
run_collate_query(shard_, min_masterchain_block_id_, prev_block_ids_,
|
run_collate_query(shard_, min_masterchain_block_id_, prev_block_ids_,
|
||||||
Ed25519_PublicKey{local_id_full_.ed25519_value().raw()}, validator_set_, manager_,
|
Ed25519_PublicKey{local_id_full_.ed25519_value().raw()}, validator_set_,
|
||||||
td::Timestamp::in(10.0), std::move(promise));
|
opts_->get_collator_options(), manager_, td::Timestamp::in(10.0), std::move(promise));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
if (collator_adnl_id.is_zero()) {
|
if (collator_adnl_id.is_zero()) {
|
||||||
|
|
|
@ -142,6 +142,9 @@ struct ValidatorManagerOptionsImpl : public ValidatorManagerOptions {
|
||||||
bool get_state_serializer_enabled() const override {
|
bool get_state_serializer_enabled() const override {
|
||||||
return state_serializer_enabled_;
|
return state_serializer_enabled_;
|
||||||
}
|
}
|
||||||
|
td::Ref<CollatorOptions> get_collator_options() const override {
|
||||||
|
return collator_options_;
|
||||||
|
}
|
||||||
td::Ref<CollatorsList> get_collators_list() const override {
|
td::Ref<CollatorsList> get_collators_list() const override {
|
||||||
return collators_list_;
|
return collators_list_;
|
||||||
}
|
}
|
||||||
|
@ -228,6 +231,9 @@ struct ValidatorManagerOptionsImpl : public ValidatorManagerOptions {
|
||||||
void set_state_serializer_enabled(bool value) override {
|
void set_state_serializer_enabled(bool value) override {
|
||||||
state_serializer_enabled_ = value;
|
state_serializer_enabled_ = value;
|
||||||
}
|
}
|
||||||
|
void set_collator_options(td::Ref<CollatorOptions> value) override {
|
||||||
|
collator_options_ = std::move(value);
|
||||||
|
}
|
||||||
void set_collators_list(td::Ref<CollatorsList> list) override {
|
void set_collators_list(td::Ref<CollatorsList> list) override {
|
||||||
collators_list_ = std::move(list);
|
collators_list_ = std::move(list);
|
||||||
}
|
}
|
||||||
|
@ -281,6 +287,7 @@ struct ValidatorManagerOptionsImpl : public ValidatorManagerOptions {
|
||||||
bool celldb_preload_all_ = false;
|
bool celldb_preload_all_ = false;
|
||||||
td::optional<double> catchain_max_block_delay_;
|
td::optional<double> catchain_max_block_delay_;
|
||||||
bool state_serializer_enabled_ = true;
|
bool state_serializer_enabled_ = true;
|
||||||
|
td::Ref<CollatorOptions> collator_options_{true};
|
||||||
td::Ref<CollatorsList> collators_list_{true, CollatorsList{}};
|
td::Ref<CollatorsList> collators_list_{true, CollatorsList{}};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -52,6 +52,21 @@ struct PerfTimerStats {
|
||||||
std::deque<std::pair<double, double>> stats; // <Time::now(), duration>
|
std::deque<std::pair<double, double>> stats; // <Time::now(), duration>
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct CollatorOptions : public td::CntObject {
|
||||||
|
bool deferring_enabled = true;
|
||||||
|
|
||||||
|
// Defer messages from account after Xth message in block (excluding first messages from transactions)
|
||||||
|
td::uint32 defer_messages_after = 10;
|
||||||
|
// Defer all messages if out msg queue size is greater than X (excluding first messages from transactions)
|
||||||
|
td::uint64 defer_out_queue_size_limit = 2048;
|
||||||
|
|
||||||
|
// See Collator::process_dispatch_queue
|
||||||
|
td::uint32 dispatch_phase_2_max_total = 150;
|
||||||
|
td::uint32 dispatch_phase_3_max_total = 150;
|
||||||
|
td::uint32 dispatch_phase_2_max_per_initiator = 20;
|
||||||
|
td::optional<td::uint32> dispatch_phase_3_max_per_initiator; // Default - depends on out msg queue size
|
||||||
|
};
|
||||||
|
|
||||||
struct CollatorsList : public td::CntObject {
|
struct CollatorsList : public td::CntObject {
|
||||||
struct Collator {
|
struct Collator {
|
||||||
adnl::AdnlNodeIdShort adnl_id;
|
adnl::AdnlNodeIdShort adnl_id;
|
||||||
|
@ -105,6 +120,7 @@ struct ValidatorManagerOptions : public td::CntObject {
|
||||||
virtual bool get_celldb_preload_all() const = 0;
|
virtual bool get_celldb_preload_all() const = 0;
|
||||||
virtual td::optional<double> get_catchain_max_block_delay() const = 0;
|
virtual td::optional<double> get_catchain_max_block_delay() const = 0;
|
||||||
virtual bool get_state_serializer_enabled() const = 0;
|
virtual bool get_state_serializer_enabled() const = 0;
|
||||||
|
virtual td::Ref<CollatorOptions> get_collator_options() const = 0;
|
||||||
virtual td::Ref<CollatorsList> get_collators_list() const = 0;
|
virtual td::Ref<CollatorsList> get_collators_list() const = 0;
|
||||||
|
|
||||||
virtual void set_zero_block_id(BlockIdExt block_id) = 0;
|
virtual void set_zero_block_id(BlockIdExt block_id) = 0;
|
||||||
|
@ -134,6 +150,7 @@ struct ValidatorManagerOptions : public td::CntObject {
|
||||||
virtual void set_celldb_preload_all(bool value) = 0;
|
virtual void set_celldb_preload_all(bool value) = 0;
|
||||||
virtual void set_catchain_max_block_delay(double value) = 0;
|
virtual void set_catchain_max_block_delay(double value) = 0;
|
||||||
virtual void set_state_serializer_enabled(bool value) = 0;
|
virtual void set_state_serializer_enabled(bool value) = 0;
|
||||||
|
virtual void set_collator_options(td::Ref<CollatorOptions> value) = 0;
|
||||||
virtual void set_collators_list(td::Ref<CollatorsList> list) = 0;
|
virtual void set_collators_list(td::Ref<CollatorsList> list) = 0;
|
||||||
|
|
||||||
static td::Ref<ValidatorManagerOptions> create(
|
static td::Ref<ValidatorManagerOptions> create(
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue