1
0
Fork 0
mirror of https://github.com/ton-blockchain/ton synced 2025-03-09 15:40:10 +00:00

updated tonlib + fixes in vm

This commit is contained in:
ton 2020-02-20 19:56:18 +04:00
parent 28735ddc9e
commit efd47af432
42 changed files with 750 additions and 307 deletions

View file

@ -1845,7 +1845,7 @@ bool Transaction::compute_state() {
// code:(Maybe ^Cell) data:(Maybe ^Cell) library:(HashmapE 256 SimpleLib) // code:(Maybe ^Cell) data:(Maybe ^Cell) library:(HashmapE 256 SimpleLib)
auto frozen_state = cb2.finalize(); auto frozen_state = cb2.finalize();
frozen_hash = frozen_state->get_hash().bits(); frozen_hash = frozen_state->get_hash().bits();
if (verbosity >= 3 * 0) { // !!!DEBUG!!! if (verbosity >= 3 * 1) { // !!!DEBUG!!!
std::cerr << "freezing state of smart contract: "; std::cerr << "freezing state of smart contract: ";
block::gen::t_StateInit.print_ref(std::cerr, frozen_state); block::gen::t_StateInit.print_ref(std::cerr, frozen_state);
CHECK(block::gen::t_StateInit.validate_ref(frozen_state)); CHECK(block::gen::t_StateInit.validate_ref(frozen_state));

View file

@ -466,6 +466,7 @@ x{CF28} @Defop STILE4
x{CF29} @Defop STULE4 x{CF29} @Defop STULE4
x{CF2A} @Defop STILE8 x{CF2A} @Defop STILE8
x{CF2B} @Defop STULE8 x{CF2B} @Defop STULE8
x{CF30} @Defop BDEPTH
x{CF31} @Defop BBITS x{CF31} @Defop BBITS
x{CF32} @Defop BREFS x{CF32} @Defop BREFS
x{CF33} @Defop BBITREFS x{CF33} @Defop BBITREFS
@ -493,6 +494,7 @@ x{CF42} @Defop STSAME
} : STSLICECONST } : STSLICECONST
x{CF81} @Defop STZERO x{CF81} @Defop STZERO
x{CF83} @Defop STONE x{CF83} @Defop STONE
// cell deserialization (CellSlice primitives) // cell deserialization (CellSlice primitives)
x{D0} @Defop CTOS x{D0} @Defop CTOS
x{D1} @Defop ENDS x{D1} @Defop ENDS
@ -593,6 +595,8 @@ x{D75F} @Defop PLDULE8Q
x{D760} @Defop LDZEROES x{D760} @Defop LDZEROES
x{D761} @Defop LDONES x{D761} @Defop LDONES
x{D762} @Defop LDSAME x{D762} @Defop LDSAME
x{D764} @Defop SDEPTH
x{D765} @Defop CDEPTH
// //
// continuation / flow control primitives // continuation / flow control primitives
x{D8} dup @Defop EXECUTE @Defop CALLX x{D8} dup @Defop EXECUTE @Defop CALLX

View file

@ -195,3 +195,6 @@ recursive append-long-bytes {
} : parse-adnl-addr } : parse-adnl-addr
{ adnl>$ type } : .adnl { adnl>$ type } : .adnl
{ bl word parse-adnl-addr 1 'nop } ::_ adnl: { bl word parse-adnl-addr 1 'nop } ::_ adnl:
// ( x a b -- a<=x<=b )
{ 2 pick >= -rot >= and } : in-range?

View file

@ -1271,6 +1271,12 @@ struct StackTransform {
bool is_pop(int* i) const; bool is_pop(int* i) const;
bool is_rot() const; bool is_rot() const;
bool is_rotrev() const; bool is_rotrev() const;
bool is_push_rot(int i) const;
bool is_push_rot(int* i) const;
bool is_push_rotrev(int i) const;
bool is_push_rotrev(int* i) const;
bool is_push_xchg(int i, int j, int k) const;
bool is_push_xchg(int* i, int* j, int* k) const;
bool is_xchg2(int i, int j) const; bool is_xchg2(int i, int j) const;
bool is_xchg2(int* i, int* j) const; bool is_xchg2(int* i, int* j) const;
bool is_xcpu(int i, int j) const; bool is_xcpu(int i, int j) const;
@ -1404,6 +1410,9 @@ struct Optimizer {
bool is_push(int* i); bool is_push(int* i);
bool is_pop(int* i); bool is_pop(int* i);
bool is_nop(); bool is_nop();
bool is_push_rot(int* i);
bool is_push_rotrev(int* i);
bool is_push_xchg(int* i, int* j, int* k);
bool is_xchg2(int* i, int* j); bool is_xchg2(int* i, int* j);
bool is_xcpu(int* i, int* j); bool is_xcpu(int* i, int* j);
bool is_puxc(int* i, int* j); bool is_puxc(int* i, int* j);

View file

@ -401,6 +401,19 @@ bool Optimizer::is_pop(int* i) {
return is_pred([i](const auto& t) { return t.is_pop(i) && *i < 256; }); return is_pred([i](const auto& t) { return t.is_pop(i) && *i < 256; });
} }
bool Optimizer::is_push_rot(int* i) {
return is_pred([i](const auto& t) { return t.is_push_rot(i) && *i < 16; }, 3);
}
bool Optimizer::is_push_rotrev(int* i) {
return is_pred([i](const auto& t) { return t.is_push_rotrev(i) && *i < 16; }, 3);
}
bool Optimizer::is_push_xchg(int* i, int* j, int* k) {
return is_pred([i, j, k](const auto& t) { return t.is_push_xchg(i, j, k) && *i < 16 && *j < 16 && *k < 16; }) &&
!(p_ == 2 && op_[0]->is_push() && op_[1]->is_xchg());
}
bool Optimizer::is_xchg2(int* i, int* j) { bool Optimizer::is_xchg2(int* i, int* j) {
return is_pred([i, j](const auto& t) { return t.is_xchg2(i, j) && *i < 16 && *j < 16; }); return is_pred([i, j](const auto& t) { return t.is_xchg2(i, j) && *i < 16 && *j < 16; });
} }
@ -434,7 +447,8 @@ bool Optimizer::is_xcpu2(int* i, int* j, int* k) {
} }
bool Optimizer::is_puxc2(int* i, int* j, int* k) { bool Optimizer::is_puxc2(int* i, int* j, int* k) {
return is_pred([i, j, k](const auto& t) { return t.is_puxc2(i, j, k) && *i < 16 && *j < 15 && *k < 15; }); return is_pred(
[i, j, k](const auto& t) { return t.is_puxc2(i, j, k) && *i < 16 && *j < 15 && *k < 15 && *j + *k != -1; });
} }
bool Optimizer::is_puxcpu(int* i, int* j, int* k) { bool Optimizer::is_puxcpu(int* i, int* j, int* k) {
@ -545,6 +559,9 @@ bool Optimizer::find_at_least(int pb) {
(is_xcpu(&i, &j) && rewrite(AsmOp::XcPu(i, j))) || (is_puxc(&i, &j) && rewrite(AsmOp::PuXc(i, j))) || (is_xcpu(&i, &j) && rewrite(AsmOp::XcPu(i, j))) || (is_puxc(&i, &j) && rewrite(AsmOp::PuXc(i, j))) ||
(is_push2(&i, &j) && rewrite(AsmOp::Push2(i, j))) || (is_blkswap(&i, &j) && rewrite(AsmOp::BlkSwap(i, j))) || (is_push2(&i, &j) && rewrite(AsmOp::Push2(i, j))) || (is_blkswap(&i, &j) && rewrite(AsmOp::BlkSwap(i, j))) ||
(is_blkpush(&i, &j) && rewrite(AsmOp::BlkPush(i, j))) || (is_blkdrop(&i) && rewrite(AsmOp::BlkDrop(i))) || (is_blkpush(&i, &j) && rewrite(AsmOp::BlkPush(i, j))) || (is_blkdrop(&i) && rewrite(AsmOp::BlkDrop(i))) ||
(is_push_rot(&i) && rewrite(AsmOp::Push(i), AsmOp::Custom("ROT"))) ||
(is_push_rotrev(&i) && rewrite(AsmOp::Push(i), AsmOp::Custom("-ROT"))) ||
(is_push_xchg(&i, &j, &k) && rewrite(AsmOp::Push(i), AsmOp::Xchg(j, k))) ||
(is_reverse(&i, &j) && rewrite(AsmOp::BlkReverse(i, j))) || (is_reverse(&i, &j) && rewrite(AsmOp::BlkReverse(i, j))) ||
(is_nip_seq(&i, &j) && rewrite(AsmOp::Xchg(i, j), AsmOp::BlkDrop(i))) || (is_nip_seq(&i, &j) && rewrite(AsmOp::Xchg(i, j), AsmOp::BlkDrop(i))) ||
(is_pop_blkdrop(&i, &k) && rewrite(AsmOp::Pop(i), AsmOp::BlkDrop(k))) || (is_pop_blkdrop(&i, &k) && rewrite(AsmOp::Pop(i), AsmOp::BlkDrop(k))) ||

View file

@ -454,6 +454,53 @@ bool StackTransform::is_rotrev() const {
return equal(rot_rev, true); return equal(rot_rev, true);
} }
// PUSH i ; ROT == 1 i 0 2 3
bool StackTransform::is_push_rot(int i) const {
return is_valid() && d == -1 && i >= 0 && is_trivial_after(3) && get(0) == 1 && get(1) == i && get(2) == 0;
}
bool StackTransform::is_push_rot(int *i) const {
return is_valid() && (*i = get(1)) >= 0 && is_push_rot(*i);
}
// PUSH i ; -ROT == 0 1 i 2 3
bool StackTransform::is_push_rotrev(int i) const {
return is_valid() && d == -1 && i >= 0 && is_trivial_after(3) && get(0) == 0 && get(1) == 1 && get(2) == i;
}
bool StackTransform::is_push_rotrev(int *i) const {
return is_valid() && (*i = get(2)) >= 0 && is_push_rotrev(*i);
}
// PUSH s(i) ; XCHG s(j),s(k) --> i 0 1 .. i ..
// PUSH s(i) ; XCHG s(0),s(k) --> k-1 0 1 .. k-2 i k ..
bool StackTransform::is_push_xchg(int i, int j, int k) const {
StackTransform t;
return is_valid() && d == -1 && n <= 3 && t.apply_push(i) && t.apply_xchg(j, k) && t <= *this;
}
bool StackTransform::is_push_xchg(int *i, int *j, int *k) const {
if (!(is_valid() && d == -1 && n <= 3 && n > 0)) {
return false;
}
int s = get(0);
if (s < 0) {
return false;
}
*i = s;
*j = 0;
if (n == 1) {
*k = 0;
} else if (n == 2) {
*k = s + 1;
*i = get(s + 1);
} else {
*j = A[1].first + 1;
*k = A[2].first + 1;
}
return is_push_xchg(*i, *j, *k);
}
// XCHG s1,s(i) ; XCHG s0,s(j) // XCHG s1,s(i) ; XCHG s0,s(j)
bool StackTransform::is_xchg2(int i, int j) const { bool StackTransform::is_xchg2(int i, int j) const {
StackTransform t; StackTransform t;

View file

@ -16,7 +16,7 @@
{- {-
Data structure: Data structure:
Root cell: [OptRef<1b+1r?>:Hashmap<PfxDict:Slice->UInt<32b>,CatTable>:domains] Root cell: [OptRef<1b+1r?>:Hashmap<PfxDict:Slice->UInt<32b>,CatTable>:domains]
[OptRef<1b+1r?>:Hashmap<UInt<64b>(Time|Hash32)->Slice(DomName)>:gc] [OptRef<1b+1r?>:Hashmap<UInt<160b>(Time|Hash128)->Slice(DomName)>:gc]
[UInt<32b>:stdperiod] [Gram:PPReg] [Gram:PPCell] [Gram:PPBit] [UInt<32b>:stdperiod] [Gram:PPReg] [Gram:PPCell] [Gram:PPBit]
[UInt<32b>:lasthousekeeping] [UInt<32b>:lasthousekeeping]
<CatTable> := HashmapE 16 ^DNSRecord <CatTable> := HashmapE 16 ^DNSRecord
@ -30,12 +30,13 @@
\1com\0goo\0 which will return \1com\0 (as per pfx tree) with -1 cat \1com\0goo\0 which will return \1com\0 (as per pfx tree) with -1 cat
-} -}
(cell, cell, [int, int, int, int], int, int) load_data() inline_ref { (cell, cell, cell, [int, int, int, int], int, int) load_data() inline_ref {
slice cs = get_data().begin_parse(); slice cs = get_data().begin_parse();
return ( return (
cs~load_ref(), ;; control data
cs~load_dict(), ;; pfx tree: domains data and exp cs~load_dict(), ;; pfx tree: domains data and exp
cs~load_dict(), ;; gc auxillary with expiry and 32 bit hash slice cs~load_dict(), ;; gc auxillary with expiration and 128-bit hash slice
[ cs~load_uint(32), ;; length of this period of time in seconds [ cs~load_uint(30), ;; length of this period of time in seconds
cs~load_grams(), ;; standard payment for registering a new subdomain cs~load_grams(), ;; standard payment for registering a new subdomain
cs~load_grams(), ;; price paid for each cell (PPC) cs~load_grams(), ;; price paid for each cell (PPC)
cs~load_grams() ], ;; and bit (PPB) cs~load_grams() ], ;; and bit (PPB)
@ -46,16 +47,17 @@
(int, int, int, int) load_prices() inline_ref { (int, int, int, int) load_prices() inline_ref {
slice cs = get_data().begin_parse(); slice cs = get_data().begin_parse();
(cs~load_dict(), cs~load_dict()); (cs~load_ref(), cs~load_dict(), cs~load_dict());
return (cs~load_uint(32), cs~load_grams(), cs~load_grams(), cs~load_grams()); return (cs~load_uint(30), cs~load_grams(), cs~load_grams(), cs~load_grams());
} }
() store_data(cell dd, cell gc, prices, int nhk, int lhk) impure { () store_data(cell ctl, cell dd, cell gc, prices, int nhk, int lhk) impure {
var [sp, ppr, ppc, ppb] = prices; var [sp, ppr, ppc, ppb] = prices;
set_data(begin_cell() set_data(begin_cell()
.store_ref(ctl) ;; control data
.store_dict(dd) ;; domains data and exp .store_dict(dd) ;; domains data and exp
.store_dict(gc) ;; keyed expiration time and 32 bit hash slice .store_dict(gc) ;; keyed expiration time and 128-bit hash slice
.store_int(sp, 32) ;; standard period .store_uint(sp, 30) ;; standard period
.store_grams(ppr) ;; price per registration .store_grams(ppr) ;; price per registration
.store_grams(ppc) ;; price per cell .store_grams(ppc) ;; price per cell
.store_grams(ppb) ;; price per bit .store_grams(ppb) ;; price per bit
@ -94,40 +96,42 @@ global var query_info;
return send_message(addr, 0xef6b6179, query_id, op, 0, 128); return send_message(addr, 0xef6b6179, query_id, op, 0, 128);
} }
() housekeeping(cell dd, cell gc, prices, int nhk, int lhk) impure { () housekeeping(cell ctl, cell dd, cell gc, prices, int nhk, int lhk, int max_steps) impure {
int n = now(); int n = now();
if (n < max(nhk, lhk + 60)) { ;; housekeeping cooldown: 1 minute if (n < max(nhk, lhk + 60)) { ;; housekeeping cooldown: 1 minute
;; if housekeeping was done recently, or if next housekeeping is in the future, just save ;; if housekeeping was done recently, or if next housekeeping is in the future, just save
return store_data(dd, gc, prices, nhk, lhk); return store_data(ctl, dd, gc, prices, nhk, lhk);
} }
;; need to do some housekeeping - maybe remove entry with ;; need to do some housekeeping - maybe remove entry with
;; least expiration but only if it is already expired ;; least expiration but only if it is already expired
;; no iterating and deleting all to not put too much gas gc ;; no iterating and deleting all to not put too much gas gc
;; burden on any random specific user request ;; burden on any random specific user request
;; over time it will do the garbage collection required ;; over time it will do the garbage collection required
(int mkey, cell name, int found?) = gc.udict_get_min_ref?(64); (int mkey, cell domain, int found?) = gc.udict_get_min_ref?(32 + 128);
if (found?) { ;; no short circuit optimization, two nested ifs while (found? & max_steps) { ;; no short circuit optimization, two nested ifs
nhk = (mkey >> 32); nhk = (mkey >> 128);
if (nhk < n) { if (nhk < n) {
slice sname = name.begin_parse(); slice sdomain = domain.begin_parse();
(_, slice val, _, found?) = dd.pfxdict_get?(1023, sname); (_, slice val, _, found?) = dd.pfxdict_get?(1023, sdomain);
if (found?) { if (found?) {
int exp = val.preload_uint(32); int exp = val.preload_uint(32);
if (exp <= n) { if (exp <= n) {
dd~pfxdict_delete?(1023, sname); dd~pfxdict_delete?(1023, sdomain);
} }
} }
gc~udict_delete?(64, mkey); gc~udict_delete?(32 + 128, mkey);
(mkey, _, found?) = gc.udict_get_min_ref?(64); (mkey, domain, found?) = gc.udict_get_min_ref?(32 + 128);
nhk = (found? ? mkey >> 32 : 0xffffffff); nhk = (found? ? mkey >> 32 : 0xffffffff);
max_steps -= 1;
} }
} }
store_data(dd, gc, prices, nhk, n); store_data(ctl, dd, gc, prices, nhk, n);
} }
int _calcprice(cell data, ppc, ppb) inline_ref { ;; only for internal calcs int calcprice_internal(slice domain, cell data, ppc, ppb) inline_ref { ;; only for internal calcs
var (_, bits, refs) = compute_data_size(data, 100); ;; 100 cells max var (_, bits, refs) = compute_data_size(data, 100); ;; 100 cells max
return ppc * refs + ppb * bits; bits += slice_bits(domain) * 2 + (128 + 32 + 32);
return ppc * (refs + 2) + ppb * bits;
} }
int check_owner(cell cat_table, int src_wc, int src_addr) inline_ref { int check_owner(cell cat_table, int src_wc, int src_addr) inline_ref {
@ -165,13 +169,43 @@ int check_owner(cell cat_table, int src_wc, int src_addr) inline_ref {
value:CurrencyCollection ihr_fee:Grams fwd_fee:Grams value:CurrencyCollection ihr_fee:Grams fwd_fee:Grams
created_lt:uint64 created_at:uint32 created_lt:uint64 created_at:uint32
Internal message data structure: Internal message data structure:
[UInt<32b>:op] [UInt<64b>:query_id] [Ref<1r>:name] [UInt<32b>:op] [UInt<64b>:query_id] [Ref<1r>:domain]
(if not prolong: [Ref<1r>:value->CatTable]) (if not prolong: [Ref<1r>:value->CatTable])
-} -}
;; Control operations: permitted only to the owner of this smartcontract
() perform_ctl_op(int op, int src_wc, int src_addr, slice in_msg) impure inline_ref {
var (ctl, domdata, gc, prices, nhk, lhk) = load_data();
var cs = ctl.begin_parse();
if ((cs~load_int(8) != src_wc) | (cs~load_uint(256) != src_addr)) {
return send_error(0xee6f776e);
}
if (op == 0x43685072) { ;; ChPr = Change Prices
var (stdper, ppr, ppc, ppb) = (in_msg~load_uint(32), in_msg~load_grams(), in_msg~load_grams(), in_msg~load_grams());
in_msg.end_parse();
;; NB: stdper == 0 -> disable new actions
store_data(ctl, domdata, gc, [stdper, ppr, ppc, ppb], nhk, lhk);
return send_ok(0);
}
if (op == 0x4344656c) { ;; CDel = destroy smart contract
ifnot (domdata.null?()) {
;; domain dictionary not empty, force gc
housekeeping(ctl, domdata, gc, prices, nhk, 1, -1);
}
(ctl, domdata, gc, prices, nhk, lhk) = load_data();
ifnot (domdata.null?()) {
;; domain dictionary still not empty, error
return send_error(0xee74656d);
}
var (addr, query_id, op) = query_info;
return send_message(addr, 0xef6b6179, query_id, op, 0, 128 + 32);
}
return send_error(0xffffffff);
}
;; Must send at least GR$1 more for possible gas fees! ;; Must send at least GR$1 more for possible gas fees!
() recv_internal(int ct_bal, int msg_value, cell in_msg_cell, slice in_msg) impure { () recv_internal(int msg_value, cell in_msg_cell, slice in_msg) impure {
;; this time very interested in internal messages ;; this time very interested in internal messages
if (in_msg.slice_bits() < 32) { if (in_msg.slice_bits() < 32) {
return (); ;; simple transfer or short return (); ;; simple transfer or short
@ -197,47 +231,60 @@ int check_owner(cell cat_table, int src_wc, int src_addr) inline_ref {
if (op & (1 << 31)) { if (op & (1 << 31)) {
return (); ;; an answer to our query return (); ;; an answer to our query
} }
if ((op >> 24) == 0x43) {
;; Control operations
return perform_ctl_op(op, src_wc, src_addr, in_msg);
}
int qt = (op == 0x72656764) * 1 + (op == 0x70726f6c) * 2 + (op == 0x75706464) * 4 + (op == 0x676f6763) * 8; int qt = (op == 0x72656764) * 1 + (op == 0x70726f6c) * 2 + (op == 0x75706464) * 4 + (op == 0x676f6763) * 8;
ifnot (qt) { ;; unknown query, return error ifnot (qt) { ;; unknown query, return error
return send_error(0xffffffff); return send_error(0xffffffff);
} }
qt = - qt; qt = - qt;
(cell domdata, cell gc, [int, int, int, int] prices, int nhk, int lhk) = load_data(); (cell ctl, cell domdata, cell gc, [int, int, int, int] prices, int nhk, int lhk) = load_data();
if (qt == 8) { ;; 0x676f6763 -> GO, GC! go!!! if (qt == 8) { ;; 0x676f6763 -> GO, GC! go!!!
;; Manual garbage collection iteration ;; Manual garbage collection iteration
housekeeping(domdata, gc, prices, nhk, 1); ;; forced int max_steps = in_msg~load_int(32); ;; -1 = infty
housekeeping(ctl, domdata, gc, prices, nhk, 1, max_steps); ;; forced
return send_error(0xef6b6179); return send_error(0xef6b6179);
} }
slice name = null(); slice domain = null();
cell name_cell = in_msg~load_maybe_ref(); cell domain_cell = in_msg~load_maybe_ref();
if (name_cell.null?()) { int fail = 0;
if (domain_cell.null?()) {
int bytes = in_msg~load_uint(6); int bytes = in_msg~load_uint(6);
name = in_msg~load_bits(bytes * 8); fail = (bytes == 0);
domain = in_msg~load_bits(bytes * 8);
} else { } else {
name = name_cell.begin_parse(); domain = domain_cell.begin_parse();
var (bits, refs) = slice_bits_refs(domain);
fail = (refs | ((bits - 8) & (7 - 128)));
} }
(_, int name_last_byte) = name.slice_last(8).load_uint(8); ifnot (fail) {
if (name_last_byte != 0) { ;; name must end with \0! no\0 error ;; domain must end with \0! no\0 error
fail = domain.slice_last(8).preload_uint(8);
}
if (fail) {
return send_error(0xee6f5c30); return send_error(0xee6f5c30);
} }
int zeros = 0; int zeros = 0;
slice cname = name; slice cdomain = domain;
repeat (cname.slice_bits() ^>> 3) { repeat (cdomain.slice_bits() ^>> 3) {
int c = cname~load_uint(8); int c = cdomain~load_uint(8);
zeros -= (c == 0); zeros -= (c == 0);
} }
;; if (zeros != 1) { ;; too much zero chars (overflow): ov\0 ;; if (zeros != 1) { ;; too much zero chars (overflow): ov\0
;; return send_error(0xef765c30); } ;; return send_error(0xef765c30); }
name = begin_cell().store_uint(zeros, 7).store_slice(name).end_cell().begin_parse(); domain = begin_cell().store_uint(zeros, 7).store_slice(domain).end_cell().begin_parse();
(slice pfx, slice val, slice tail, int found?) = domdata.pfxdict_get?(1023, name); (slice pfx, slice val, slice tail, int found?) = domdata.pfxdict_get?(1023, domain);
int n = now(); int n = now();
cell cat_table = null(); cell cat_table = null();
int exp = 0; int exp = 0;
@ -270,17 +317,22 @@ int check_owner(cell cat_table, int src_wc, int src_addr) inline_ref {
data = in_msg~load_ref(); data = in_msg~load_ref();
;; basic integrity check of (client-provided) dictionary ;; basic integrity check of (client-provided) dictionary
ifnot (data.dict_empty?()) { ;; 1000 gas! ifnot (data.dict_empty?()) { ;; 1000 gas!
(int dmin, _, int minok) = idict_get_min?(data, 16); (_, _, int minok) = idict_get_min?(data, 16);
(int dmax, _, int maxok) = idict_get_max?(data, 16); (_, _, int maxok) = idict_get_max?(data, 16);
throw_unless(31, minok & maxok & (dmin <= dmax)); throw_unless(31, minok & maxok);
} }
} else { } else {
data = cat_table; data = cat_table;
} }
;; compute action price ;; load prices
var [stdper, ppr, ppc, ppb] = prices; var [stdper, ppr, ppc, ppb] = prices;
int price = _calcprice(data, ppc, ppb) + (ppr & (qt != 4)); ifnot (stdper) { ;; smart contract disabled by owner, no new actions
return send_error(0xd34f4646);
}
;; compute action price
int price = calcprice_internal(domain, data, ppc, ppb) + (ppr & (qt != 4));
if (msg_value - (1 << 30) < price) { ;; gr<p: grams - GR$1 < price if (msg_value - (1 << 30) < price) { ;; gr<p: grams - GR$1 < price
return send_error(0xe7723c70); return send_error(0xe7723c70);
} }
@ -290,19 +342,22 @@ int check_owner(cell cat_table, int src_wc, int src_addr) inline_ref {
;; ########################################################################## ;; ##########################################################################
if (qt == 2) { ;; 0x70726f6c -> prol | prolong domain if (qt == 2) { ;; 0x70726f6c -> prol | prolong domain
if (exp > n + stdper) { ;; does not expire soon, cannot prolong
return send_error(0xf365726f);
}
slice value = begin_cell().store_uint(exp + stdper, 32).store_ref(data).end_cell().begin_parse(); slice value = begin_cell().store_uint(exp + stdper, 32).store_ref(data).end_cell().begin_parse();
ifnot (domdata~pfxdict_set?(1023, name, value)) { ;; Set ERR | 2^31 ifnot (domdata~pfxdict_set?(1023, domain, value)) { ;; Set ERR | 2^31
return send_error(0xf3657272); return send_error(0xf3657272);
} }
int sh_low = name.slice_hash() & ((1 << 32) - 1); int sh_low = domain.slice_hash() & ((1 << 128) - 1);
int gckeyO = (exp << 32) + sh_low; int gckeyO = (exp << 128) + sh_low;
int gckeyN = gckeyO + (stdper << 32); int gckeyN = gckeyO + (stdper << 128);
gc~udict_delete?(64, gckeyO); ;; delete old gc entry, add new gc~udict_delete?(32 + 128, gckeyO); ;; delete old gc entry, add new
gc~udict_set_ref(64, gckeyN, begin_cell().store_slice(name).end_cell()); gc~udict_set_ref(32 + 128, gckeyN, begin_cell().store_slice(domain).end_cell());
housekeeping(domdata, gc, prices, nhk, lhk); housekeeping(ctl, domdata, gc, prices, nhk, lhk, 1);
return send_ok(price); return send_ok(price);
} }
@ -313,14 +368,14 @@ int check_owner(cell cat_table, int src_wc, int src_addr) inline_ref {
} }
int expires_at = n + stdper; int expires_at = n + stdper;
slice value = begin_cell().store_uint(expires_at, 32).store_ref(data).end_cell().begin_parse(); slice value = begin_cell().store_uint(expires_at, 32).store_ref(data).end_cell().begin_parse();
ifnot (domdata~pfxdict_set?(1023, name, value)) { ;; Set ERR | 2^31 ifnot (domdata~pfxdict_set?(1023, domain, value)) { ;; Set ERR | 2^31
return send_error(0xf3657272); return send_error(0xf3657272);
} }
int gckey = (expires_at << 32) | (name.slice_hash() & ((1 << 32) - 1)); int gckey = (expires_at << 128) | (domain.slice_hash() & ((1 << 128) - 1));
gc~udict_set_ref(64, gckey, begin_cell().store_slice(name).end_cell()); gc~udict_set_ref(32 + 128, gckey, begin_cell().store_slice(domain).end_cell());
;; using ref requires additional cell, but using value (DICTUSET) may ;; using ref requires additional cell, but using value (DICTUSET) may
;; cause problems with very long names or complex dictionaries ;; cause problems with very long domains or complex dictionaries
housekeeping(domdata, gc, prices, min(nhk, expires_at), lhk); housekeeping(ctl, domdata, gc, prices, min(nhk, expires_at), lhk, 1);
return send_ok(price); return send_ok(price);
} }
@ -328,11 +383,10 @@ int check_owner(cell cat_table, int src_wc, int src_addr) inline_ref {
if (qt == 4) { ;; 0x75706464 -> updd | update domain (data) if (qt == 4) { ;; 0x75706464 -> updd | update domain (data)
slice value = begin_cell().store_uint(exp, 32).store_ref(data).end_cell().begin_parse(); slice value = begin_cell().store_uint(exp, 32).store_ref(data).end_cell().begin_parse();
ifnot (domdata~pfxdict_set?(1023, name, value)) { ;; Set ERR | 2^31 ifnot (domdata~pfxdict_set?(1023, domain, value)) { ;; Set ERR | 2^31
return send_error(0xf3657272); return send_error(0xf3657272);
} }
;; no need to update gc here housekeeping(ctl, domdata, gc, prices, nhk, lhk, 1);
housekeeping(domdata, gc, prices, nhk, lhk);
return send_ok(price); return send_ok(price);
} }
;; ########################################################################## ;; ##########################################################################
@ -345,11 +399,11 @@ int check_owner(cell cat_table, int src_wc, int src_addr) inline_ref {
;;===========================================================================;; ;;===========================================================================;;
() recv_external(slice in_msg) impure { () recv_external(slice in_msg) impure {
;; not interested at all! but need to init! ;; only for initialization
(cell dd, cell gc, var prices, int nhk, int lhk) = load_data(); (cell ctl, cell dd, cell gc, var prices, int nhk, int lhk) = load_data();
ifnot (lhk) { ifnot (lhk) {
accept_message(); accept_message();
store_data(dd, gc, prices, 0, now()); return store_data(ctl, dd, gc, prices, 0xffffffff, now());
} }
} }
@ -357,16 +411,16 @@ int check_owner(cell cat_table, int src_wc, int src_addr) inline_ref {
;; Getter methods ;; ;; Getter methods ;;
;;===========================================================================;; ;;===========================================================================;;
(int, cell, int, slice) dnsdictlookup(slice subdomain, int nowtime) inline_ref { (int, cell, int, slice) dnsdictlookup(slice domain, int nowtime) inline_ref {
int bits = subdomain.slice_bits(); int bits = domain.slice_bits();
ifnot (bits) { ifnot (bits) {
return (0, null(), 0, null()); ;; zero-length input return (0, null(), 0, null()); ;; zero-length input
} }
throw_if(30, bits & 7); ;; malformed input (~ 8n-bit) throw_if(30, bits & 7); ;; malformed input (~ 8n-bit)
int name_last_byte = subdomain.slice_last(8).preload_uint(8); int domain_last_byte = domain.slice_last(8).preload_uint(8);
if (name_last_byte) { if (domain_last_byte) {
subdomain = begin_cell().store_slice(subdomain) ;; append zero byte domain = begin_cell().store_slice(domain) ;; append zero byte
.store_uint(0, 8).end_cell().begin_parse(); .store_uint(0, 8).end_cell().begin_parse();
bits += 8; bits += 8;
} }
@ -375,22 +429,21 @@ int check_owner(cell cat_table, int src_wc, int src_addr) inline_ref {
} }
(_, cell root) = get_data().begin_parse().load_dict(); (_, cell root) = get_data().begin_parse().load_dict();
slice cname = subdomain; slice sd_tail = domain;
int zeros = 0; int zeros = 0;
repeat (bits >> 3) { repeat (bits >> 3) {
int c = cname~load_uint(8); int c = sd_tail~load_uint(8);
zeros -= (c == 0); zeros -= (c == 0);
} }
;; can't move these declarations lower, will cause errors! ;; can't move these declarations lower, will cause errors!
slice pfx = cname; slice tail = slice pfx = sd_tail;
slice val = null(); slice val = null();
slice tail = cname;
int exp = 0; int exp = 0;
do { do {
slice pfxname = begin_cell().store_uint(zeros, 7) slice pfxname = begin_cell().store_uint(zeros, 7)
.store_slice(subdomain).end_cell().begin_parse(); .store_slice(domain).end_cell().begin_parse();
(pfx, val, tail, int succ) = root.pfxdict_get?(1023, pfxname); (pfx, val, tail, int succ) = root.pfxdict_get?(1023, pfxname);
if (succ) { if (succ) {
int exp = val~load_uint(32); int exp = val~load_uint(32);
@ -410,8 +463,8 @@ int check_owner(cell cat_table, int src_wc, int src_addr) inline_ref {
} }
;;8m dns-record-value ;;8m dns-record-value
(int, cell) dnsresolve(slice subdomain, int category) method_id { (int, cell) dnsresolve(slice domain, int category) method_id {
(int exp, cell cat_table, int exact?, slice pfx) = dnsdictlookup(subdomain, now()); (int exp, cell cat_table, int exact?, slice pfx) = dnsdictlookup(domain, now());
ifnot (exp) { ifnot (exp) {
return (0, null()); return (0, null());
} }
@ -434,13 +487,13 @@ int check_owner(cell cat_table, int src_wc, int src_addr) inline_ref {
;; getexpiration needs to know the current time to skip any possible expired ;; getexpiration needs to know the current time to skip any possible expired
;; subdomains in the chain. it will return 0 if not found or expired. ;; subdomains in the chain. it will return 0 if not found or expired.
int getexpirationx(slice subdomain, int nowtime) inline method_id { int getexpirationx(slice domain, int nowtime) inline method_id {
(int exp, _, _, _) = dnsdictlookup(subdomain, nowtime); (int exp, _, _, _) = dnsdictlookup(domain, nowtime);
return exp; return exp;
} }
int getexpiration(slice subdomain) method_id { int getexpiration(slice domain) method_id {
return getexpirationx(subdomain, now()); return getexpirationx(domain, now());
} }
int getstdperiod() method_id { int getstdperiod() method_id {
@ -463,12 +516,12 @@ int getppb() method_id {
return ppb; return ppb;
} }
int calcprice(cell val) method_id { ;; only for external gets (not efficient) int calcprice(slice domain, cell val) method_id { ;; only for external gets (not efficient)
(_, _, int ppc, int ppb) = load_prices(); (_, _, int ppc, int ppb) = load_prices();
return _calcprice(val, ppc, ppb); return calcprice_internal(domain, val, ppc, ppb);
} }
int calcregprice(cell val) method_id { ;; only for external gets (not efficient) int calcregprice(slice domain, cell val) method_id { ;; only for external gets (not efficient)
(_, int ppr, int ppc, int ppb) = load_prices(); (_, int ppr, int ppc, int ppb) = load_prices();
return ppr + _calcprice(val, ppc, ppb); return ppr + calcprice_internal(domain, val, ppc, ppb);
} }

View file

@ -176,8 +176,8 @@ elector_addr config.elector_smc!
config.special! config.special!
// gas_price gas_limit special_gas_limit gas_credit block_gas_limit freeze_due_limit delete_due_limit flat_gas_limit flat_gas_price -- // gas_price gas_limit special_gas_limit gas_credit block_gas_limit freeze_due_limit delete_due_limit flat_gas_limit flat_gas_price --
1000 sg* 1 *M dup 10000 10 *M GR$0.1 GR$1.0 100 100000 config.gas_prices! 1000 sg* 1 *M dup 10000 10 *M GR$0.1 GR$1.0 1000 1000000 config.gas_prices!
10000 sg* 1 *M 10 *M 10000 10 *M GR$0.1 GR$1.0 100 1000000 config.mc_gas_prices! 10000 sg* 1 *M 10 *M 10000 10 *M GR$0.1 GR$1.0 1000 10000000 config.mc_gas_prices!
// lump_price bit_price cell_price ihr_factor first_frac next_frac // lump_price bit_price cell_price ihr_factor first_frac next_frac
1000000 1000 sg* 100000 sg* 3/2 sg*/ 1/3 sg*/ 1/3 sg*/ config.fwd_prices! 1000000 1000 sg* 100000 sg* 3/2 sg*/ 1/3 sg*/ 1/3 sg*/ config.fwd_prices!
10000000 10000 sg* 1000000 sg* 3/2 sg*/ 1/3 sg*/ 1/3 sg*/ config.mc_fwd_prices! 10000000 10000 sg* 1000000 sg* 3/2 sg*/ 1/3 sg*/ 1/3 sg*/ config.mc_fwd_prices!

View file

@ -0,0 +1,66 @@
#!/usr/bin/fift -s
// Creates the initialization (external) message for a new automatic DNS
// smart contract and saves it as a serialized BoC file. Usage/arguments are
// described in the generic-help text below.
"TonUtil.fif" include
"Asm.fif" include
"GetOpt.fif" include
// Print the auto-generated option help and abort with exit code 1.
{ show-options-help 1 halt } : usage
// --- defaults (may be overridden by -w / -r / -o options) ---
Basechain =: wc // create smart contract in basechain
"new-dns-query.boc" =: savefile
0 =: contract-id
// dns-dict holds the initial (empty) subdomain dictionary
variable dns-dict dictnew dns-dict !
// --- option/help declarations (GetOpt.fif) ---
begin-options
     "<filename-base> <address> <reg-period> <reg-price> <ng-per-bit> <ng-per-cell> [-w<workchain>] [-r<random-id>] [-o<savefile-boc>]" +cr +tab
    +"Creates a new automatic dns smart contract with 32-bit identifier <random-id> controlled from wallet with address <address> "
    +"and saves it into <savefile-boc> ('" savefile $+ +"' by default)"
    disable-digit-options generic-help-setopt
  "w" "--workchain" { parse-workchain-id =: wc } short-long-option-arg
    "Selects workchain to create smart contract (" wc (.) $+ +" by default)" option-help
  "r" "--random-id" { parse-int =: contract-id } short-long-option-arg
    "Sets 'random' smart contract identifier (" contract-id (.) $+ +" by default)" option-help
  "o" "--output" { =: savefile } short-long-option-arg
    "Sets output file for generated initialization message ('" savefile $+ +"' by default)" option-help
  "h" "--help" { usage } short-long-option
    "Shows a help message" option-help
parse-options
// --- positional arguments: exactly 6 required ---
$# 6 <> ' usage if
6 :$1..n
$1 =: file-base
// controlling wallet address: (wc, addr) pair kept as a double-word constant
$2 false parse-load-address drop 2=: ctl-addr
// registration period must lie in [0, 10000000) seconds
$3 parse-int dup 0 10000000 in-range? ' usage ifnot =: reg-period
$4 $>GR =: reg-price
// per-bit and per-cell prices (nanograms) must be non-negative
$5 parse-int dup 0< ' usage if =: ng-pb
$6 parse-int dup 0< ' usage if =: ng-pc
// contract-id must fit into a signed 32-bit field (stored in ctl cell below)
contract-id 32 fits ' usage ifnot
// Appends the contract id to a string when it is non-zero (used for filenames)
{ contract-id ?dup { (.) $+ } if } : +contractid
."Creating new automatic DNS smart contract in workchain " wc .
."with random id " contract-id . cr
."Controlling wallet (smart contract) is " ctl-addr 6 .Addr cr
."Subdomain registration period is " reg-period . ."seconds" cr
."Subdomain registration price is " reg-price .GR
."+ " ng-pc . ."per cell + " ng-pb . ."per bit" cr
// Create new automatic DNS; source code included from `auto/dns-auto-code.fif`
"auto/dns-auto-code.fif" include
// code
// --- build the initial data cell; field layout per trailing comments ---
<b <b ctl-addr -rot 8 i, swap 256 u, contract-id 32 i, b> ref,  // ctl
   dns-dict @ dict, dictnew dict,  // dom_dict gc
   reg-period 30 u, reg-price Gram, ng-pc Gram, ng-pb Gram,  // stdper ppc ppb
   0 64 u,  // nhk lhk
b> // data
null // no libraries
// StateInit = code + data + (no split_depth/special/libraries)
<b b{0011} s, 3 roll ref, rot ref, swap dict, b>  // create StateInit
dup ."StateInit: " <s csr. cr
// contract address = (workchain, hash(StateInit))
dup hashu wc swap 2dup 2constant wallet_addr
."new automatic DNS smartcontract address = " 2dup .addr cr
2dup file-base +"-dns" +contractid +".addr" save-address-verbose
."Non-bounceable address (for init): " 2dup 7 .Addr cr
."Bounceable address (for later access): " 6 .Addr cr
// External inbound message carrying the StateInit and an empty body
// (header bit patterns follow the ext_in_msg_info$10 TL-B layout)
<b b{1000100} s, wallet_addr addr, b{000010} s, swap <s s, b{0} s, b>
dup ."External message for initialization is " <s csr. cr
// serialize to BoC (mode 2: with index) and write to the output file
2 boc+>B dup Bx. cr
savefile tuck B>file
."(Saved dns smart-contract creating query to file " type .")" cr

View file

@ -82,6 +82,7 @@ slice skip_dict(slice s) asm "SKIPDICT";
cell preload_maybe_ref(slice s) asm "PLDOPTREF"; cell preload_maybe_ref(slice s) asm "PLDOPTREF";
builder store_maybe_ref(builder b, cell c) asm(c b) "STOPTREF"; builder store_maybe_ref(builder b, cell c) asm(c b) "STOPTREF";
int cell_depth(cell c) asm "CDEPTH";
int slice_refs(slice s) asm "SREFS"; int slice_refs(slice s) asm "SREFS";
int slice_bits(slice s) asm "SBITS"; int slice_bits(slice s) asm "SBITS";
@ -89,9 +90,11 @@ int slice_bits(slice s) asm "SBITS";
int slice_empty?(slice s) asm "SEMPTY"; int slice_empty?(slice s) asm "SEMPTY";
int slice_data_empty?(slice s) asm "SDEMPTY"; int slice_data_empty?(slice s) asm "SDEMPTY";
int slice_refs_empty?(slice s) asm "SREMPTY"; int slice_refs_empty?(slice s) asm "SREMPTY";
int slice_depth(slice s) asm "SDEPTH";
int builder_refs(builder b) asm "BREFS"; int builder_refs(builder b) asm "BREFS";
int builder_bits(builder b) asm "BBITS"; int builder_bits(builder b) asm "BBITS";
int builder_depth(builder b) asm "BDEPTH";
builder begin_cell() asm "NEWC"; builder begin_cell() asm "NEWC";
cell end_cell(builder b) asm "ENDC"; cell end_cell(builder b) asm "ENDC";

View file

@ -81,7 +81,7 @@ td::Ref<vm::Cell> HighloadWallet::make_a_gift_message(const td::Ed25519::Private
} }
td::Ref<vm::Cell> HighloadWallet::get_init_code() noexcept { td::Ref<vm::Cell> HighloadWallet::get_init_code() noexcept {
return SmartContractCode::highload_wallet(); return SmartContractCode::get_code(SmartContractCode::HighloadWalletV1);
} }
vm::CellHash HighloadWallet::get_init_code_hash() noexcept { vm::CellHash HighloadWallet::get_init_code_hash() noexcept {

View file

@ -104,7 +104,7 @@ td::Ref<vm::Cell> HighloadWalletV2::make_a_gift_message(const td::Ed25519::Priva
} }
td::Ref<vm::Cell> HighloadWalletV2::get_init_code(td::int32 revision) noexcept { td::Ref<vm::Cell> HighloadWalletV2::get_init_code(td::int32 revision) noexcept {
return SmartContractCode::highload_wallet_v2(revision); return SmartContractCode::get_code(SmartContractCode::HighloadWalletV2, revision);
} }
vm::CellHash HighloadWalletV2::get_init_code_hash() noexcept { vm::CellHash HighloadWalletV2::get_init_code_hash() noexcept {

View file

@ -178,15 +178,17 @@ td::Result<std::vector<DnsInterface::Entry>> DnsInterface::resolve(td::Slice nam
*/ */
// creation // creation
td::Ref<ManualDns> ManualDns::create(td::Ref<vm::Cell> data, int revision) { td::Ref<ManualDns> ManualDns::create(td::Ref<vm::Cell> data, int revision) {
return td::Ref<ManualDns>(true, State{ton::SmartContractCode::dns_manual(revision), std::move(data)}); return td::Ref<ManualDns>(
true, State{ton::SmartContractCode::get_code(ton::SmartContractCode::ManualDns, revision), std::move(data)});
} }
td::Ref<ManualDns> ManualDns::create(const td::Ed25519::PublicKey& public_key, td::uint32 wallet_id, int revision) { td::Ref<ManualDns> ManualDns::create(const td::Ed25519::PublicKey& public_key, td::uint32 wallet_id, int revision) {
return create(create_init_data_fast(public_key, wallet_id), revision); return create(create_init_data_fast(public_key, wallet_id), revision);
} }
td::optional<td::int32> ManualDns::guess_revision(const vm::Cell::Hash& code_hash) { td::optional<td::int32> ManualDns::guess_revision(const vm::Cell::Hash& code_hash) {
for (auto i : {-1, 1}) { for (auto i : ton::SmartContractCode::get_revisions(ton::SmartContractCode::ManualDns)) {
if (ton::SmartContractCode::dns_manual(i)->get_hash() == code_hash) { if (ton::SmartContractCode::get_code(ton::SmartContractCode::ManualDns, i)->get_hash() == code_hash) {
return i; return i;
} }
} }

View file

@ -48,7 +48,8 @@ td::Ref<vm::Cell> MultisigWallet::QueryBuilder::create(td::int32 id, td::Ed25519
} }
td::Ref<MultisigWallet> MultisigWallet::create(td::Ref<vm::Cell> data) { td::Ref<MultisigWallet> MultisigWallet::create(td::Ref<vm::Cell> data) {
return td::Ref<MultisigWallet>(true, State{ton::SmartContractCode::multisig(), std::move(data)}); return td::Ref<MultisigWallet>(
true, State{ton::SmartContractCode::get_code(ton::SmartContractCode::Multisig), std::move(data)});
} }
int MultisigWallet::processed(td::uint64 query_id) const { int MultisigWallet::processed(td::uint64 query_id) const {

View file

@ -96,19 +96,6 @@ const auto& get_map() {
"FwCEMQLTAAHAAZPUAdCY0wUBqgLXGAHiINdJwg/" "FwCEMQLTAAHAAZPUAdCY0wUBqgLXGAHiINdJwg/"
"ypiB41yLXCwfyaHBTEddJqTYCmNMHAcAAEqEB5DDIywYBzxbJ0FADACBZ9KhvpSCUAvQEMJIybeICACg0A4AQ9FqZECOECUBE8AEBkjAx4gBmM" "ypiB41yLXCwfyaHBTEddJqTYCmNMHAcAAEqEB5DDIywYBzxbJ0FADACBZ9KhvpSCUAvQEMJIybeICACg0A4AQ9FqZECOECUBE8AEBkjAx4gBmM"
"SLAFZwy9AQQI4QJUELwAQHgIsAWmDIChAn0czAB4DAyIMAfkzD0BODAIJJtAeDyLG0B"); "SLAFZwy9AQQI4QJUELwAQHgIsAWmDIChAn0czAB4DAyIMAfkzD0BODAIJJtAeDyLG0B");
//auto check_revision = [&](td::Slice name, td::int32 default_revision) {
//auto it = map.find(name);
//CHECK(it != map.end());
//auto other_it = map.find(PSLICE() << name << "-r" << default_revision);
//CHECK(other_it != map.end());
//CHECK(it->second->get_hash() == other_it->second->get_hash());
//};
//check_revision("highload-wallet", HIGHLOAD_WALLET_REVISION);
//check_revision("highload-wallet-v2", HIGHLOAD_WALLET2_REVISION);
//check_revision("simple-wallet", WALLET_REVISION);
//check_revision("wallet", WALLET2_REVISION);
//check_revision("wallet3", WALLET3_REVISION);
return map; return map;
}(); }();
return map; return map;
@ -123,60 +110,93 @@ td::Result<td::Ref<vm::Cell>> SmartContractCode::load(td::Slice name) {
} }
return it->second; return it->second;
} }
td::Ref<vm::Cell> SmartContractCode::multisig() {
auto res = load("multisig").move_as_ok(); td::Span<int> SmartContractCode::get_revisions(Type type) {
switch (type) {
case Type::WalletV1: {
static int res[] = {1, 2};
return res; return res;
} }
td::Ref<vm::Cell> SmartContractCode::wallet3(int revision) { case Type::WalletV2: {
if (revision == 0) { static int res[] = {1, 2};
revision = WALLET3_REVISION;
}
auto res = load(PSLICE() << "wallet3-r" << revision).move_as_ok();
return res; return res;
} }
td::Ref<vm::Cell> SmartContractCode::wallet(int revision) { case Type::WalletV3: {
if (revision == 0) { static int res[] = {1, 2};
revision = WALLET2_REVISION;
}
auto res = load(PSLICE() << "wallet-r" << revision).move_as_ok();
return res; return res;
} }
td::Ref<vm::Cell> SmartContractCode::simple_wallet(int revision) { case Type::WalletV1Ext: {
if (revision == 0) { static int res[] = {-1};
revision = WALLET_REVISION;
}
auto res = load(PSLICE() << "simple-wallet-r" << revision).move_as_ok();
return res; return res;
} }
td::Ref<vm::Cell> SmartContractCode::simple_wallet_ext() { case Type::HighloadWalletV1: {
static auto res = load("simple-wallet-ext").move_as_ok(); static int res[] = {-1, 1, 2};
return res; return res;
} }
td::Ref<vm::Cell> SmartContractCode::highload_wallet(int revision) { case Type::HighloadWalletV2: {
static int res[] = {-1, 1, 2};
return res;
}
case Type::Multisig: {
static int res[] = {-1};
return res;
}
case Type::ManualDns: {
static int res[] = {-1, 1};
return res;
}
}
UNREACHABLE();
return {};
}
td::Result<int> SmartContractCode::validate_revision(Type type, int revision) {
auto revisions = get_revisions(type);
if (revision == -1) { if (revision == -1) {
return load("highload-wallet").move_as_ok(); if (revisions[0] == -1) {
return -1;
}
return revisions[revisions.size() - 1];
} }
if (revision == 0) { if (revision == 0) {
revision = HIGHLOAD_WALLET_REVISION; return revisions[revisions.size() - 1];
} }
return load(PSLICE() << "highload-wallet-r" << revision).move_as_ok(); for (auto x : revisions) {
if (x == revision) {
return revision;
} }
td::Ref<vm::Cell> SmartContractCode::highload_wallet_v2(int revision) { }
return td::Status::Error("No such revision");
}
td::Ref<vm::Cell> SmartContractCode::get_code(Type type, int ext_revision) {
auto revision = validate_revision(type, ext_revision).move_as_ok();
auto basename = [](Type type) -> td::Slice {
switch (type) {
case Type::WalletV1:
return "simple-wallet";
case Type::WalletV2:
return "wallet";
case Type::WalletV3:
return "wallet3";
case Type::WalletV1Ext:
return "simple-wallet-ext";
case Type::HighloadWalletV1:
return "highload-wallet";
case Type::HighloadWalletV2:
return "highload-wallet-v2";
case Type::Multisig:
return "multisig";
case Type::ManualDns:
return "dns-manual";
}
UNREACHABLE();
return "";
}(type);
if (revision == -1) { if (revision == -1) {
return load("highload-wallet-v2").move_as_ok(); return load(basename).move_as_ok();
} }
if (revision == 0) { return load(PSLICE() << basename << "-r" << revision).move_as_ok();
revision = HIGHLOAD_WALLET2_REVISION;
}
return load(PSLICE() << "highload-wallet-v2-r" << revision).move_as_ok();
}
td::Ref<vm::Cell> SmartContractCode::dns_manual(int revision) {
if (revision == -1) {
return load("dns-manual").move_as_ok();
}
if (revision == 0) {
revision = DNS_REVISION;
}
return load(PSLICE() << "dns-manual-r" << revision).move_as_ok();
} }
} // namespace ton } // namespace ton

View file

@ -18,17 +18,16 @@
*/ */
#include "vm/cells.h" #include "vm/cells.h"
#include "td/utils/Span.h"
namespace ton { namespace ton {
class SmartContractCode { class SmartContractCode {
public: public:
static td::Result<td::Ref<vm::Cell>> load(td::Slice name); static td::Result<td::Ref<vm::Cell>> load(td::Slice name);
static td::Ref<vm::Cell> multisig();
static td::Ref<vm::Cell> wallet3(int revision = 0); enum Type { WalletV1 = 1, WalletV1Ext, WalletV2, WalletV3, HighloadWalletV1, HighloadWalletV2, ManualDns, Multisig };
static td::Ref<vm::Cell> wallet(int revision = 0); static td::Span<int> get_revisions(Type type);
static td::Ref<vm::Cell> simple_wallet(int revision = 0); static td::Result<int> validate_revision(Type type, int revision);
static td::Ref<vm::Cell> simple_wallet_ext(); static td::Ref<vm::Cell> get_code(Type type, int revision = 0);
static td::Ref<vm::Cell> highload_wallet(int revision = 0);
static td::Ref<vm::Cell> highload_wallet_v2(int revision = 0);
static td::Ref<vm::Cell> dns_manual(int revision = 0);
}; };
} // namespace ton } // namespace ton

View file

@ -64,7 +64,7 @@ td::Ref<vm::Cell> TestWallet::make_a_gift_message_static(const td::Ed25519::Priv
} }
td::Ref<vm::Cell> TestWallet::get_init_code(td::int32 revision) noexcept { td::Ref<vm::Cell> TestWallet::get_init_code(td::int32 revision) noexcept {
return ton::SmartContractCode::simple_wallet(revision); return ton::SmartContractCode::get_code(ton::SmartContractCode::WalletV1, revision);
} }
vm::CellHash TestWallet::get_init_code_hash() noexcept { vm::CellHash TestWallet::get_init_code_hash() noexcept {

View file

@ -70,7 +70,7 @@ td::Ref<vm::Cell> Wallet::make_a_gift_message(const td::Ed25519::PrivateKey& pri
} }
td::Ref<vm::Cell> Wallet::get_init_code(td::int32 revision) noexcept { td::Ref<vm::Cell> Wallet::get_init_code(td::int32 revision) noexcept {
return SmartContractCode::wallet(revision); return SmartContractCode::get_code(ton::SmartContractCode::WalletV2, revision);
} }
vm::CellHash Wallet::get_init_code_hash() noexcept { vm::CellHash Wallet::get_init_code_hash() noexcept {

View file

@ -80,7 +80,7 @@ td::Ref<vm::Cell> WalletV3::make_a_gift_message(const td::Ed25519::PrivateKey& p
} }
td::Ref<vm::Cell> WalletV3::get_init_code(td::int32 revision) noexcept { td::Ref<vm::Cell> WalletV3::get_init_code(td::int32 revision) noexcept {
return SmartContractCode::wallet3(revision); return SmartContractCode::get_code(ton::SmartContractCode::WalletV3, revision);
} }
vm::CellHash WalletV3::get_init_code_hash() noexcept { vm::CellHash WalletV3::get_init_code_hash() noexcept {

View file

@ -497,13 +497,16 @@ class SimpleWallet : public ton::SmartContract {
} }
static td::Ref<SimpleWallet> create_empty() { static td::Ref<SimpleWallet> create_empty() {
return td::Ref<SimpleWallet>(true, State{ton::SmartContractCode::simple_wallet_ext(), {}}); return td::Ref<SimpleWallet>(true,
State{ton::SmartContractCode::get_code(ton::SmartContractCode::WalletV1Ext), {}});
} }
static td::Ref<SimpleWallet> create(td::Ref<vm::Cell> data) { static td::Ref<SimpleWallet> create(td::Ref<vm::Cell> data) {
return td::Ref<SimpleWallet>(true, State{ton::SmartContractCode::simple_wallet_ext(), std::move(data)}); return td::Ref<SimpleWallet>(
true, State{ton::SmartContractCode::get_code(ton::SmartContractCode::WalletV1Ext), std::move(data)});
} }
static td::Ref<SimpleWallet> create_fast(td::Ref<vm::Cell> data) { static td::Ref<SimpleWallet> create_fast(td::Ref<vm::Cell> data) {
return td::Ref<SimpleWallet>(true, State{ton::SmartContractCode::simple_wallet(), std::move(data)}); return td::Ref<SimpleWallet>(
true, State{ton::SmartContractCode::get_code(ton::SmartContractCode::WalletV1), std::move(data)});
} }
td::int32 seqno() const { td::int32 seqno() const {

View file

@ -14,7 +14,7 @@
You should have received a copy of the GNU Lesser General Public License You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>. along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
Copyright 2017-2019 Telegram Systems LLP Copyright 2017-2020 Telegram Systems LLP
*/ */
#include <tl/tlblib.hpp> #include <tl/tlblib.hpp>
@ -138,22 +138,23 @@ bool TLB::print_skip(PrettyPrinter& pp, vm::CellSlice& cs) const {
return pp.fail("invalid value"); return pp.fail("invalid value");
} }
pp.raw_nl(); pp.raw_nl();
cs_copy.print_rec(pp.os, pp.indent); return cs_copy.print_rec(pp.os, &pp.limit, pp.indent) && pp.mkindent() && pp.close();
return pp.mkindent() && pp.close();
} }
bool TLB::print_special(PrettyPrinter& pp, vm::CellSlice& cs) const { bool TLB::print_special(PrettyPrinter& pp, vm::CellSlice& cs) const {
pp.open("raw@"); pp.open("raw@");
pp << *this << ' '; pp << *this << ' ';
pp.raw_nl(); pp.raw_nl();
cs.print_rec(pp.os, pp.indent); return cs.print_rec(pp.os, &pp.limit, pp.indent) && pp.mkindent() && pp.close();
return pp.mkindent() && pp.close();
} }
bool TLB::print_ref(PrettyPrinter& pp, Ref<vm::Cell> cell_ref) const { bool TLB::print_ref(PrettyPrinter& pp, Ref<vm::Cell> cell_ref) const {
if (cell_ref.is_null()) { if (cell_ref.is_null()) {
return pp.fail("null cell reference"); return pp.fail("null cell reference");
} }
if (!pp.register_recursive_call()) {
return pp.fail("too many recursive calls while printing a TL-B value");
}
bool is_special; bool is_special;
auto cs = load_cell_slice_special(std::move(cell_ref), is_special); auto cs = load_cell_slice_special(std::move(cell_ref), is_special);
if (is_special) { if (is_special) {
@ -163,18 +164,21 @@ bool TLB::print_ref(PrettyPrinter& pp, Ref<vm::Cell> cell_ref) const {
} }
} }
bool TLB::print_skip(std::ostream& os, vm::CellSlice& cs, int indent) const { bool TLB::print_skip(std::ostream& os, vm::CellSlice& cs, int indent, int rec_limit) const {
PrettyPrinter pp{os, indent}; PrettyPrinter pp{os, indent};
pp.set_limit(rec_limit);
return pp.fail_unless(print_skip(pp, cs)); return pp.fail_unless(print_skip(pp, cs));
} }
bool TLB::print(std::ostream& os, const vm::CellSlice& cs, int indent) const { bool TLB::print(std::ostream& os, const vm::CellSlice& cs, int indent, int rec_limit) const {
PrettyPrinter pp{os, indent}; PrettyPrinter pp{os, indent};
pp.set_limit(rec_limit);
return pp.fail_unless(print(pp, cs)); return pp.fail_unless(print(pp, cs));
} }
bool TLB::print_ref(std::ostream& os, Ref<vm::Cell> cell_ref, int indent) const { bool TLB::print_ref(std::ostream& os, Ref<vm::Cell> cell_ref, int indent, int rec_limit) const {
PrettyPrinter pp{os, indent}; PrettyPrinter pp{os, indent};
pp.set_limit(rec_limit);
return pp.fail_unless(print_ref(pp, std::move(cell_ref))); return pp.fail_unless(print_ref(pp, std::move(cell_ref)));
} }

View file

@ -14,7 +14,7 @@
You should have received a copy of the GNU Lesser General Public License You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>. along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
Copyright 2017-2019 Telegram Systems LLP Copyright 2017-2020 Telegram Systems LLP
*/ */
#pragma once #pragma once
#include <iostream> #include <iostream>
@ -208,12 +208,12 @@ class TLB {
bool print(PrettyPrinter& pp, Ref<vm::CellSlice> cs_ref) const { bool print(PrettyPrinter& pp, Ref<vm::CellSlice> cs_ref) const {
return print(pp, *cs_ref); return print(pp, *cs_ref);
} }
bool print_skip(std::ostream& os, vm::CellSlice& cs, int indent = 0) const; bool print_skip(std::ostream& os, vm::CellSlice& cs, int indent = 0, int rec_limit = 0) const;
bool print(std::ostream& os, const vm::CellSlice& cs, int indent = 0) const; bool print(std::ostream& os, const vm::CellSlice& cs, int indent = 0, int rec_limit = 0) const;
bool print(std::ostream& os, Ref<vm::CellSlice> cs_ref, int indent = 0) const { bool print(std::ostream& os, Ref<vm::CellSlice> cs_ref, int indent = 0, int rec_limit = 0) const {
return print(os, *cs_ref, indent); return print(os, *cs_ref, indent, rec_limit);
} }
bool print_ref(std::ostream& os, Ref<vm::Cell> cell_ref, int indent = 0) const; bool print_ref(std::ostream& os, Ref<vm::Cell> cell_ref, int indent = 0, int rec_limit = 0) const;
std::string as_string_skip(vm::CellSlice& cs, int indent = 0) const; std::string as_string_skip(vm::CellSlice& cs, int indent = 0) const;
std::string as_string(const vm::CellSlice& cs, int indent = 0) const; std::string as_string(const vm::CellSlice& cs, int indent = 0) const;
std::string as_string(Ref<vm::CellSlice> cs_ref, int indent = 0) const { std::string as_string(Ref<vm::CellSlice> cs_ref, int indent = 0) const {
@ -456,15 +456,20 @@ bool store_from(vm::CellBuilder& cb, const T& tlb_type, Ref<vm::CellSlice> field
namespace tlb { namespace tlb {
struct PrettyPrinter { struct PrettyPrinter {
enum { default_print_limit = 4096 };
std::ostream& os; std::ostream& os;
int indent; int indent;
int level; int level;
bool failed; bool failed;
bool nl_used; bool nl_used;
int mode; int mode;
int limit{default_print_limit};
PrettyPrinter(std::ostream& _os, int _indent = 0, int _mode = 1) PrettyPrinter(std::ostream& _os, int _indent = 0, int _mode = 1)
: os(_os), indent(_indent), level(0), failed(false), nl_used(false), mode(_mode) { : os(_os), indent(_indent), level(0), failed(false), nl_used(false), mode(_mode) {
} }
PrettyPrinter(int _limit, std::ostream& _os, int _indent = 0, int _mode = 1)
: os(_os), indent(_indent), level(0), failed(false), nl_used(false), mode(_mode), limit(_limit) {
}
~PrettyPrinter(); ~PrettyPrinter();
bool ok() const { bool ok() const {
return !failed && !level; return !failed && !level;
@ -489,6 +494,14 @@ struct PrettyPrinter {
bool field_int(long long value, std::string name); bool field_int(long long value, std::string name);
bool field_uint(unsigned long long value); bool field_uint(unsigned long long value);
bool field_uint(unsigned long long value, std::string name); bool field_uint(unsigned long long value, std::string name);
bool register_recursive_call() {
return limit--;
}
void set_limit(int new_limit) {
if (new_limit > 0) {
limit = new_limit;
}
}
bool out(std::string str) { bool out(std::string str) {
os << str; os << str;
return true; return true;

View file

@ -814,6 +814,9 @@ void register_cell_serialize_ops(OpcodeTable& cp0) {
compute_len_store_const_ref)) compute_len_store_const_ref))
.insert(OpcodeInstr::mksimple(0xcf23, 16, "ENDXC", exec_builder_to_special_cell)) .insert(OpcodeInstr::mksimple(0xcf23, 16, "ENDXC", exec_builder_to_special_cell))
.insert(OpcodeInstr::mkfixed(0xcf28 >> 2, 14, 2, dump_store_le_int, exec_store_le_int)) .insert(OpcodeInstr::mkfixed(0xcf28 >> 2, 14, 2, dump_store_le_int, exec_store_le_int))
.insert(OpcodeInstr::mksimple(
0xcf30, 16, "BDEPTH",
std::bind(exec_int_builder_func, _1, "BDEPTH", [](Ref<CellBuilder> b) { return b->get_depth(); })))
.insert(OpcodeInstr::mksimple( .insert(OpcodeInstr::mksimple(
0xcf31, 16, "BBITS", 0xcf31, 16, "BBITS",
std::bind(exec_int_builder_func, _1, "BBITS", [](Ref<CellBuilder> b) { return b->size(); }))) std::bind(exec_int_builder_func, _1, "BBITS", [](Ref<CellBuilder> b) { return b->size(); })))
@ -1321,6 +1324,22 @@ int exec_load_same(VmState* st, const char* name, int x) {
return 0; return 0;
} }
int exec_cell_depth(VmState* st) {
Stack& stack = st->get_stack();
VM_LOG(st) << "execute CDEPTH";
auto cell = stack.pop_maybe_cell();
stack.push_smallint(cell.not_null() ? cell->get_depth() : 0);
return 0;
}
int exec_slice_depth(VmState* st) {
Stack& stack = st->get_stack();
VM_LOG(st) << "execute SDEPTH";
auto cs = stack.pop_cellslice();
stack.push_smallint(cs->get_depth());
return 0;
}
void register_cell_deserialize_ops(OpcodeTable& cp0) { void register_cell_deserialize_ops(OpcodeTable& cp0) {
using namespace std::placeholders; using namespace std::placeholders;
cp0.insert(OpcodeInstr::mksimple(0xd0, 8, "CTOS", exec_cell_to_slice)) cp0.insert(OpcodeInstr::mksimple(0xd0, 8, "CTOS", exec_cell_to_slice))
@ -1407,7 +1426,9 @@ void register_cell_deserialize_ops(OpcodeTable& cp0) {
.insert(OpcodeInstr::mkfixed(0xd75, 12, 4, dump_load_le_int, exec_load_le_int)) .insert(OpcodeInstr::mkfixed(0xd75, 12, 4, dump_load_le_int, exec_load_le_int))
.insert(OpcodeInstr::mksimple(0xd760, 16, "LDZEROES", std::bind(exec_load_same, _1, "LDZEROES", 0))) .insert(OpcodeInstr::mksimple(0xd760, 16, "LDZEROES", std::bind(exec_load_same, _1, "LDZEROES", 0)))
.insert(OpcodeInstr::mksimple(0xd761, 16, "LDONES", std::bind(exec_load_same, _1, "LDONES", 1))) .insert(OpcodeInstr::mksimple(0xd761, 16, "LDONES", std::bind(exec_load_same, _1, "LDONES", 1)))
.insert(OpcodeInstr::mksimple(0xd762, 16, "LDSAME", std::bind(exec_load_same, _1, "LDSAME", -1))); .insert(OpcodeInstr::mksimple(0xd762, 16, "LDSAME", std::bind(exec_load_same, _1, "LDSAME", -1)))
.insert(OpcodeInstr::mksimple(0xd764, 16, "SDEPTH", exec_slice_depth))
.insert(OpcodeInstr::mksimple(0xd765, 16, "CDEPTH", exec_cell_depth));
} }
void register_cell_ops(OpcodeTable& cp0) { void register_cell_ops(OpcodeTable& cp0) {

View file

@ -52,7 +52,7 @@ Ref<DataCell> CellBuilder::finalize_copy(bool special) const {
} }
auto res = DataCell::create(data, size(), td::span(refs.data(), size_refs()), special); auto res = DataCell::create(data, size(), td::span(refs.data(), size_refs()), special);
if (res.is_error()) { if (res.is_error()) {
LOG(ERROR) << res.error(); LOG(DEBUG) << res.error();
throw CellWriteError{}; throw CellWriteError{};
} }
auto cell = res.move_as_ok(); auto cell = res.move_as_ok();
@ -60,7 +60,7 @@ Ref<DataCell> CellBuilder::finalize_copy(bool special) const {
if (vm_state_interface) { if (vm_state_interface) {
vm_state_interface->register_new_cell(cell); vm_state_interface->register_new_cell(cell);
if (cell.is_null()) { if (cell.is_null()) {
LOG(ERROR) << "cannot register new data cell"; LOG(DEBUG) << "cannot register new data cell";
throw CellWriteError{}; throw CellWriteError{};
} }
} }
@ -71,7 +71,7 @@ Ref<DataCell> CellBuilder::finalize_novm(bool special) {
auto res = DataCell::create(data, size(), td::mutable_span(refs.data(), size_refs()), special); auto res = DataCell::create(data, size(), td::mutable_span(refs.data(), size_refs()), special);
bits = refs_cnt = 0; bits = refs_cnt = 0;
if (res.is_error()) { if (res.is_error()) {
LOG(ERROR) << res.error(); LOG(DEBUG) << res.error();
throw CellWriteError{}; throw CellWriteError{};
} }
CHECK(res.ok().not_null()); CHECK(res.ok().not_null());
@ -87,7 +87,7 @@ Ref<DataCell> CellBuilder::finalize(bool special) {
auto cell = finalize_novm(special); auto cell = finalize_novm(special);
vm_state_interface->register_new_cell(cell); vm_state_interface->register_new_cell(cell);
if (cell.is_null()) { if (cell.is_null()) {
LOG(ERROR) << "cannot register new data cell"; LOG(DEBUG) << "cannot register new data cell";
throw CellWriteError{}; throw CellWriteError{};
} }
return cell; return cell;
@ -102,6 +102,7 @@ Ref<Cell> CellBuilder::create_pruned_branch(Ref<Cell> cell, td::uint32 new_level
} }
return do_create_pruned_branch(std::move(cell), new_level, virt_level); return do_create_pruned_branch(std::move(cell), new_level, virt_level);
} }
Ref<DataCell> CellBuilder::do_create_pruned_branch(Ref<Cell> cell, td::uint32 new_level, td::uint32 virt_level) { Ref<DataCell> CellBuilder::do_create_pruned_branch(Ref<Cell> cell, td::uint32 new_level, td::uint32 virt_level) {
auto level_mask = cell->get_level_mask().apply(virt_level); auto level_mask = cell->get_level_mask().apply(virt_level);
auto level = level_mask.get_level(); auto level = level_mask.get_level();
@ -386,6 +387,14 @@ CellBuilder& CellBuilder::store_ref(Ref<Cell> ref) {
return ensure_pass(store_ref_bool(std::move(ref))); return ensure_pass(store_ref_bool(std::move(ref)));
} }
td::uint16 CellBuilder::get_depth() const {
int d = 0;
for (unsigned i = 0; i < refs_cnt; i++) {
d = std::max(d, 1 + refs[i]->get_depth());
}
return static_cast<td::uint16>(d);
}
bool CellBuilder::append_data_cell_bool(const DataCell& cell) { bool CellBuilder::append_data_cell_bool(const DataCell& cell) {
unsigned len = cell.size(); unsigned len = cell.size();
if (can_extend_by(len, cell.size_refs())) { if (can_extend_by(len, cell.size_refs())) {

View file

@ -14,7 +14,7 @@
You should have received a copy of the GNU Lesser General Public License You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>. along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
Copyright 2017-2019 Telegram Systems LLP Copyright 2017-2020 Telegram Systems LLP
*/ */
#pragma once #pragma once
#include "vm/cells/DataCell.h" #include "vm/cells/DataCell.h"
@ -78,6 +78,7 @@ class CellBuilder : public td::CntObject {
const unsigned char* get_data() const { const unsigned char* get_data() const {
return data; return data;
} }
td::uint16 get_depth() const;
td::ConstBitPtr data_bits() const { td::ConstBitPtr data_bits() const {
return data; return data;
} }

View file

@ -776,6 +776,14 @@ bool CellSlice::fetch_maybe_ref(Ref<vm::Cell>& res) {
} }
} }
td::uint16 CellSlice::get_depth() const {
int d = 0;
for (unsigned i = 0; i < size_refs(); ++i) {
d = std::max(d, prefetch_ref(i)->get_depth() + 1);
}
return static_cast<td::uint16>(d);
}
bool CellSlice::begins_with(unsigned bits, unsigned long long value) const { bool CellSlice::begins_with(unsigned bits, unsigned long long value) const {
return have(bits) && !((prefetch_ulong(bits) ^ value) & ((1ULL << bits) - 1)); return have(bits) && !((prefetch_ulong(bits) ^ value) & ((1ULL << bits) - 1));
} }
@ -980,13 +988,18 @@ void CellSlice::dump_hex(std::ostream& os, int mode, bool endl) const {
} }
} }
void CellSlice::print_rec(std::ostream& os, int indent) const { bool CellSlice::print_rec(std::ostream& os, int* limit, int indent) const {
for (int i = 0; i < indent; i++) { for (int i = 0; i < indent; i++) {
os << ' '; os << ' ';
} }
if (!limit || *limit <= 0) {
os << "<cell output limit reached>" << std::endl;
return false;
}
--*limit;
if (cell.is_null()) { if (cell.is_null()) {
os << "NULL" << std::endl; os << "NULL" << std::endl;
return; return true;
} }
if (is_special()) { if (is_special()) {
os << "SPECIAL "; os << "SPECIAL ";
@ -994,9 +1007,21 @@ void CellSlice::print_rec(std::ostream& os, int indent) const {
os << "x{" << as_bitslice().to_hex() << '}' << std::endl; os << "x{" << as_bitslice().to_hex() << '}' << std::endl;
for (unsigned i = 0; i < size_refs(); i++) { for (unsigned i = 0; i < size_refs(); i++) {
CellSlice cs{NoVm(), prefetch_ref(i)}; CellSlice cs{NoVm(), prefetch_ref(i)};
cs.print_rec(os, indent + 1); if (!cs.print_rec(os, limit, indent + 1)) {
return false;
} }
} }
return true;
}
bool CellSlice::print_rec(std::ostream& os, int indent) const {
int limit = default_recursive_print_limit;
return print_rec(os, &limit, indent);
}
bool CellSlice::print_rec(int limit, std::ostream& os, int indent) const {
return print_rec(os, &limit, indent);
}
td::StringBuilder& operator<<(td::StringBuilder& sb, const CellSlice& cs) { td::StringBuilder& operator<<(td::StringBuilder& sb, const CellSlice& cs) {
std::ostringstream os; std::ostringstream os;

View file

@ -44,6 +44,7 @@ class CellSlice : public td::CntObject {
public: public:
static constexpr long long fetch_long_eof = (static_cast<unsigned long long>(-1LL) << 63); static constexpr long long fetch_long_eof = (static_cast<unsigned long long>(-1LL) << 63);
static constexpr unsigned long long fetch_ulong_eof = (unsigned long long)-1LL; static constexpr unsigned long long fetch_ulong_eof = (unsigned long long)-1LL;
enum { default_recursive_print_limit = 100 };
struct CellReadError {}; struct CellReadError {};
CellSlice(NoVm, Ref<Cell> cell_ref); CellSlice(NoVm, Ref<Cell> cell_ref);
@ -129,6 +130,7 @@ class CellSlice : public td::CntObject {
const unsigned char* data() const { const unsigned char* data() const {
return cell->get_data(); return cell->get_data();
} }
td::uint16 get_depth() const;
td::ConstBitPtr data_bits() const { td::ConstBitPtr data_bits() const {
return td::ConstBitPtr{data(), (int)cur_pos()}; return td::ConstBitPtr{data(), (int)cur_pos()};
} }
@ -252,7 +254,9 @@ class CellSlice : public td::CntObject {
bool contents_equal(const CellSlice& cs2) const; bool contents_equal(const CellSlice& cs2) const;
void dump(std::ostream& os, int level = 0, bool endl = true) const; void dump(std::ostream& os, int level = 0, bool endl = true) const;
void dump_hex(std::ostream& os, int mode = 0, bool endl = false) const; void dump_hex(std::ostream& os, int mode = 0, bool endl = false) const;
void print_rec(std::ostream& os, int indent = 0) const; bool print_rec(std::ostream& os, int indent = 0) const;
bool print_rec(std::ostream& os, int* limit, int indent = 0) const;
bool print_rec(int limit, std::ostream& os, int indent = 0) const;
void error() const { void error() const {
throw CellReadError{}; throw CellReadError{};
} }

View file

@ -77,7 +77,7 @@ int exec_set_gas_limit(VmState* st) {
int exec_commit(VmState* st) { int exec_commit(VmState* st) {
VM_LOG(st) << "execute COMMIT"; VM_LOG(st) << "execute COMMIT";
st->commit(); st->force_commit();
return 0; return 0;
} }

View file

@ -434,12 +434,33 @@ int VmState::run() {
} }
} while (!res); } while (!res);
// LOG(INFO) << "[EN] data cells: " << DataCell::get_total_data_cells(); // LOG(INFO) << "[EN] data cells: " << DataCell::get_total_data_cells();
if ((res | 1) == -1) { if ((res | 1) == -1 && !try_commit()) {
commit(); VM_LOG(this) << "automatic commit failed (new data or action cells too deep)";
get_stack().clear();
get_stack().push_smallint(0);
return ~(int)Excno::cell_ov;
} }
return res; return res;
} }
bool VmState::try_commit() {
if (cr.d[0].not_null() && cr.d[1].not_null() && cr.d[0]->get_depth() <= max_data_depth &&
cr.d[1]->get_depth() <= max_data_depth) {
cstate.c4 = cr.d[0];
cstate.c5 = cr.d[1];
cstate.committed = true;
return true;
} else {
return false;
}
}
// Commits c4/c5 into the committed state, throwing a cell-overflow VmError if
// the commit is not possible (e.g., the new data or action cells are too deep).
void VmState::force_commit() {
  bool ok = try_commit();
  if (!ok) {
    throw VmError{Excno::cell_ov, "cannot commit too deep cells as new data/actions"};
  }
}
ControlData* force_cdata(Ref<Continuation>& cont) { ControlData* force_cdata(Ref<Continuation>& cont) {
if (!cont->get_cdata()) { if (!cont->get_cdata()) {
cont = Ref<ArgContExt>{true, cont}; cont = Ref<ArgContExt>{true, cont};

View file

@ -103,7 +103,8 @@ class VmState final : public VmStateInterface {
cell_reload_gas_price = 25, cell_reload_gas_price = 25,
cell_create_gas_price = 500, cell_create_gas_price = 500,
exception_gas_price = 50, exception_gas_price = 50,
tuple_entry_gas_price = 1 tuple_entry_gas_price = 1,
max_data_depth = 512
}; };
VmState(); VmState();
VmState(Ref<CellSlice> _code); VmState(Ref<CellSlice> _code);
@ -291,11 +292,8 @@ class VmState final : public VmStateInterface {
return cont->is_unique() ? cont.unique_write().jump_w(this) : cont->jump(this); return cont->is_unique() ? cont.unique_write().jump_w(this) : cont->jump(this);
} }
static Ref<CellSlice> convert_code_cell(Ref<Cell> code_cell); static Ref<CellSlice> convert_code_cell(Ref<Cell> code_cell);
void commit() { bool try_commit();
cstate.c4 = cr.d[0]; void force_commit();
cstate.c5 = cr.d[1];
cstate.committed = true;
}
void set_chksig_always_succeed(bool flag) { void set_chksig_always_succeed(bool flag) {
chksig_always_succeed = flag; chksig_always_succeed = flag;

View file

@ -94,39 +94,41 @@
\clearpage \clearpage
\mysection{Overview}\label{sect:overview} \mysection{Overview}\label{sect:overview}
The Catchain Consensus protocol builds upon the overlay network construction protocol and the overlay network broadcast protocol of TON Network (\cite{TON}). The catchain protocol itself can be decomposed into two separate protocols, one more low-level and general-purpose (the {\em Catchain protocol\/}\footnote{The original name of this protocol used during the initial stage of the research and development phase was {\em catch-chain} or {\em catchchain}, because it essentially is a special block{\em chain} dedicated to {\em catch\/}ing all events important for the consensus protocol; after saying and writing this name a lot of times it gradually got contracted to ``catchain''.}), and the other the high-level {\em (TON) block consensus protocol}, which makes use of the Catchain protocol. Higher levels in the TON protocol stack are occupied by the block generation and validation levels; however, all of them are executed essentially locally on one (logical) machine, with the problem of achieving consensus on the newly-generated block delegated to the Catchain protocol level. The Catchain Consensus protocol builds upon the overlay network construction protocol and the overlay network broadcast protocol of TON Network (\cite{TON}). The Catchain Consensus protocol itself can be decomposed into two separate protocols, one more low-level and general-purpose (the {\em Catchain protocol\/}\footnote{The original name of this protocol used during the initial stage of the research and development phase was {\em catch-chain} or {\em catchchain}, because it essentially is a special block{\em chain} dedicated to {\em catch\/}ing all events important for the consensus protocol; after saying and writing this name a lot of times it gradually got contracted to ``catchain''.}), and the other the high-level {\em Block Consensus Protocol (BCP)}, which makes use of the Catchain protocol. 
Higher levels in the TON protocol stack are occupied by the block generation and validation levels; however, all of them are executed essentially locally on one (logical) machine, with the problem of achieving consensus on the newly-generated block delegated to the Catchain protocol level.
Here is an approximate diagram of the protocol stack employed by TON for block generation and distribution, showing the correct place of the Catchain Consensus protocol (or rather its two component protocols): Here is an approximate diagram of the protocol stack employed by TON for block generation and distribution, showing the correct place of the Catchain Consensus protocol (or rather its two component protocols):
\begin{itemize} \begin{itemize}
\item {\it Top-level:} Block generation and block validation software, logically running on a stand-alone logical machine, with all the inputs provided and outputs handled by the lower-level protocols. The job of this software is to either generate a new valid block for a blockchain (a shardchain or the masterchain of the TON Blockchain; cf.~\cite{TON} for a discussion of the shardchains and the masterchain), or to check the validity of a block generated by somebody else. \item {\it Top-level:} Block generation and block validation software, logically running on a stand-alone logical machine, with all the inputs provided and outputs handled by the lower-level protocols. The job of this software is to either generate a new valid block for a blockchain (a shardchain or the masterchain of the TON Blockchain; cf.~\cite{TON} for a discussion of shardchains and the masterchain), or to check the validity of a block generated by somebody else.
\item {\it (TON) Block consensus protocol:\/} Achieves (byzantine fault tolerant) consensus on the block to be accepted as the next one in the current validator group for the masterchain or a shardchain. This level makes use of (the abstract interface of) the block generation and validation software, and builds upon the lower-level Catchain protocol. This protocol is explained in more detail in Section~\ptref{sect:blk.consensus}. \item {\it (TON) Block consensus protocol:\/} Achieves (byzantine fault tolerant) consensus on the block to be accepted as the next one in the current validator group for the masterchain or a shardchain. This level makes use of (the abstract interface of) the block generation and validation software, and builds upon the lower-level Catchain protocol. This protocol is explained in more detail in Section~\ptref{sect:blk.consensus}.
\item {\it Catchain protocol:\/} Provides secure persistent broadcasts in an overlay network (e.g., the task group of validators for a specific shardchain or the masterchain dedicated to generation, validation, and propagation of new blocks in this shardchain or masterchain), and detects attempts of ``cheating'' (protocol violation) on the part of some participants. This protocol is explained in more detail in Section~\ptref{sect:catchain}. \item {\it Catchain protocol:\/} Provides secure persistent broadcasts in an overlay network (e.g., the task group of validators for a specific shardchain or the masterchain dedicated to generation, validation, and propagation of new blocks in this shardchain or masterchain), and detects attempts of ``cheating'' (protocol violation) on the part of some participants. This protocol is explained in more detail in Section~\ptref{sect:catchain}.
\item {\it (TON Network) overlay broadcast protocol:\/} A simple best-effort broadcast protocol for overlay networks in the TON Network as described in \cite{TON}. Simply broadcasts received broadcast messages to all neighbors in the same overlay network that did not receive a copy of these messages before, with minimal effort dedicated to keeping copies of undelivered broadcast messages for a short interval of time. \item {\it (TON Network) overlay broadcast protocol:\/} A simple best-effort broadcast protocol for overlay networks in the TON Network as described in \cite{TON}. Simply broadcasts received broadcast messages to all neighbors in the same overlay network that did not receive a copy of these messages before, with minimal effort dedicated to keeping copies of undelivered broadcast messages for a short period of time.
\item {\it (TON Network) overlay protocol:\/} Creates overlay networks (cf.~\cite{TON}) inside the ADNL protocol network, manages neighbor lists for these overlay networks. Each participant of an overlay network tracks several neighbors in the same overlay network and keeps dedicated ADNL connections (called {\em ``channels''\/}) to them, so that incoming messages can be efficiently broadcasted to all neighbors with minimal overhead. \item {\it (TON Network) overlay protocol:\/} Creates overlay networks (cf.~\cite{TON}) inside the ADNL protocol network, manages neighbor lists for these overlay networks. Each participant of an overlay network tracks several neighbors in the same overlay network and keeps dedicated ADNL connections (called {\em ``channels''\/}) to them, so that incoming messages can be efficiently broadcast to all neighbors with minimal overhead.
\item {\it Abstract Datagram Network Layer (ADNL) protocol\/}: The basic protocol of the TON Network, that delivers packets (datagrams) between network nodes identified only by 256-bit abstract (ADNL) addresses, which effectively are cryptographic keys (or their hashes). \item {\it Abstract Datagram Network Layer (ADNL) protocol\/}: The basic protocol of the TON Network, that delivers packets (datagrams) between network nodes identified only by 256-bit abstract (ADNL) addresses, which effectively are cryptographic keys (or their hashes).
\end{itemize} \end{itemize}
This text aims to describe only the second and the third protocol in this suite, namely, the (TON) block consensus protocol and the (TON) Catchain protocol. This text aims to describe only the second and the third protocol in this suite, namely, the (TON) block consensus protocol and the (TON) Catchain protocol.
A few words on the efficiency of the combined Catchain Consensus protocol. Firstly, it is a true Byzantine Fault Tolerant (BFT) protocol, in the sense that it eventually achieves consensus on a valid next block of the blockchain even if some participants (validators) exhibit arbitrarily malicious behavior, provided these malicious participants are less than one third of the total number of the validators. It is well-known that achieving BFT consensus is impossible if at least one third of participants are malicious (cf.~\cite{Byzantine}), so the Catchain Consensus protocol is as good as theoretically possible in this respect. Secondly, when the Catchain Consensus was first implemented (in December 2018) and tested on up to 300 nodes distributed all over the world, it achieved consensus on a new block in 6 seconds for 300 nodes and in 4--5 seconds for 100 nodes (and in 3 seconds for 10 nodes), even if some of these nodes fail to participate or exhibit incorrect behavior.\footnote{When the ratio of the malicious or non-participating or very slow validators grows up to one third, the protocol exhibits graceful degradation, with the block consensus time growing very slowly---say, by at most half a second---until the critical value of one third is almost achieved.} Since the TON Blockchain task groups are not expected to consist of more than a hundred validators (even if a total of a thousand or ten thousand validators are running, only a hundred of them with the largest stakes will generate new masterchain blocks, and the others will participate only in the creation of new shardchain blocks, each shardchain block generated and validated by 10--30 validators; of course, all numbers given here are configuration parameters (cf.\ \cite{TON} and \cite{TBC}) and can be adjusted later by a consensus vote of validators if necessary), this means that the TON Blockchain is able to generate new blocks once every 4--5 seconds, as originally planned. 
This promise has been further tested and found out to be fulfilled with the launch of the Test Network of the TON Blockchain a couple of months later (in March 2019). Therefore, we see that the Catchain Consensus protocol is a new member of the ever growing family of the practical BFT protocols (cf.~\cite{PBFT}), even though it is based on slightly different principles. We would like to point out here that the author of this text, while providing the general guidelines of how this protocol should be designed (on the lines of ``let's create a BFT-hardened group broadcast message system, and run a suitably adapted simple two-phase or three-phase commit protocol on top of this system'') and participating in several discussions during the development and implementation of the protocol, is definitely not the only designer of this protocol and especially of its current implementation. This is the work of several people.
A few words on the efficiency of the combined Catchain Consensus protocol. Firstly, it is a true Byzantine Fault Tolerant (BFT) protocol, in the sense that it eventually achieves consensus on a valid next block of the blockchain even if some participants (validators) exhibit arbitrarily malicious behavior, provided these malicious participants are less than one third of the total number of the validators. It is well-known that achieving BFT consensus is impossible if at least one third of participants are malicious (cf.~\cite{Byzantine}), so the Catchain Consensus protocol is as good as theoretically possible in this respect. Secondly, when the Catchain Consensus was first implemented (in December 2018) and tested on up to 300 nodes distributed all over the world, it achieved consensus on a new block in 6 seconds for 300 nodes and in 4--5 seconds for 100 nodes (and in 3 seconds for 10 nodes), even if some of these nodes fail to participate or exhibit incorrect behavior.\footnote{When the ratio of the malicious or non-participating or very slow validators grows up to one third, the protocol exhibits graceful degradation, with the block consensus time growing very slowly---say, by at most half a second---until the critical value of one third is almost achieved.} Since the TON Blockchain task groups are not expected to consist of more than a hundred validators (even if a total of a thousand or ten thousand validators are running, only a hundred of them with the largest stakes will generate new masterchain blocks, and the others will participate only in the creation of new shardchain blocks, each shardchain block generated and validated by 10--30 validators; of course, all numbers given here are configuration parameters (cf.\ \cite{TON} and \cite{TBC}) and can be adjusted later by a consensus vote of validators if necessary), this means that the TON Blockchain is able to generate new blocks once every 4--5 seconds, as originally planned. 
This promise has been further tested and found out to be fulfilled with the launch of the Test Network of the TON Blockchain a couple of months later (in March 2019). Therefore, we see that the Catchain Consensus protocol is a new member of the ever-growing family of practical BFT protocols (cf.~\cite{PBFT}), even though it is based on slightly different principles.
\clearpage \clearpage
\mysection{Catchain Protocol}\label{sect:catchain} \mysection{Catchain Protocol}\label{sect:catchain}
We have already explained in the Overview (cf.~\ptref{sect:overview}) that the BFT consensus protocol used by the TON Blockchain for achieving consensus on new blockchain blocks consists of two protocols. We provide here a brief description of the {\em Catchain protocol}, the lower-level of these two protocols that could be potentially used for purposes other than BFT consensus for blocks. The source code for the Catchain protocol resides in subdirectory {\tt catchain} of the source tree.
\nxpoint\emb{Prerequisites for running the Catchain protocol}\label{p:cc.prereq} \nxpoint\emb{Prerequisites for running the Catchain protocol}\label{p:cc.prereq}
The main prerequisite for running (an instance of) the Catchain protocol is the ordered list of all nodes that are participating (or allowed to participate) in this specific instance of the protocol. This list consists of public keys and ADNL addresses of all participating nodes. It has to be provided from the outside when an instance of the Catchain protocol is created. The main prerequisite for running (an instance of) the Catchain protocol is the ordered list of all nodes that are participating (or allowed to participate) in this specific instance of the protocol. This list consists of public keys and ADNL addresses of all participating nodes. It has to be provided from the outside when an instance of the Catchain protocol is created.
\nxpoint\emb{Nodes participating in the block consensus protocol}\label{p:cc.nodes} \nxpoint\emb{Nodes participating in the block consensus protocol}\label{p:cc.nodes}
For the specific task of creating new blocks for one of blockchains (i.e., the masterchain or one of the active shardchains) of the TON Blockchain, a special task group consisting of several validators is created. The list of members of this task group is used both to create a private overlay network inside ADNL (this means that the only nodes that can join this overlay network are those explicitly listed during its creation) and to run the corresponding instance of the Catchain protocol. For the specific task of creating new blocks for one of the blockchains (i.e., the masterchain or one of the active shardchains) of the TON Blockchain, a special task group consisting of several validators is created. The list of members of this task group is used both to create a private overlay network inside ADNL (this means that the only nodes that can join this overlay network are those explicitly listed during its creation) and to run the corresponding instance of the Catchain protocol.
The construction of this list of members is the responsibility of the higher levels of the overall protocol stack (the block creation and validation software) and therefore is not the topic of this text (\cite{TBC} would be a more appropriate reference). It is sufficient to know at this point that this list is a deterministic function of the current (most recent) masterchain state (and especially of the current value of the configuration parameters, such as the active list of all validators elected for creating new blocks along with their respective weights). Since the list is computed deterministically, all validators compute the same lists and in particular each validator knows in which task groups (i.e., instances of the Catchain protocol) it participates without any further need for network communication or negotiation.\footnote{If some validators have outdated masterchain state, they may fail to compute correct task group lists and to participate in the corresponding catchains; in this respect, they are treated as if they were malicious or malfunctioning and do not affect the overall validity of the BFT protocol as long as less than one third of all validators fail in this fashion.} The construction of this list of members is the responsibility of the higher levels of the overall protocol stack (the block creation and validation software) and therefore is not the topic of this text (\cite{TBC} would be a more appropriate reference). It is sufficient to know at this point that this list is a deterministic function of the current (most recent) masterchain state (and especially of the current value of the configuration parameters, such as the active list of all validators elected for creating new blocks along with their respective weights). 
Since the list is computed deterministically, all validators compute the same lists and in particular each validator knows in which task groups (i.e., instances of the Catchain protocol) it participates without any further need for network communication or negotiation.\footnote{If some validators have an outdated masterchain state, they may fail to compute correct task group lists and to participate in the corresponding catchains; in this respect, they are treated as if they were malicious or malfunctioning and do not affect the overall validity of the BFT protocol as long as less than one third of all validators fail in this fashion.}
\nxsubpoint\emb{Catchains are created in advance} \nxsubpoint\emb{Catchains are created in advance}
In fact, not only the current values of the lists alluded to above are computed, but also their immediately subsequent (future) values are computed as well, so that the Catchain is usually created in advance. In this way it is already in place when the first block has to be created by the new instance of the validator task group. In fact, not only the current values of the lists alluded to above are computed, but also their immediately subsequent (future) values are computed as well, so that the Catchain is usually created in advance. In this way it is already in place when the first block has to be created by the new instance of the validator task group.
\nxpoint\emb{The genesis block and the identifier of a catchain}\label{sp:cc.ident} \nxpoint\emb{The genesis block and the identifier of a catchain}\label{sp:cc.ident}
A {\em catchain\/} (i.e., an instance of the Catchain protocol) is characterized by its {\em genesis block} or {\em genesis message}. It is a simple data structure containing some magic numbers, the purpose of the catchain (e.g., the identifier of the shardchain, for which the blocks will be generated, and the so-called {\em catchain sequence number}, also obtained from the masterchain configuration and used to distinguish subsequent instances of the catchain generating ``the same'' shardchain, but possibly with different participating validators), and, most importantly, the list of all participating nodes (their ADNL addresses and Ed25519 public keys as explained in~\ptref{p:cc.prereq}). The Catchain protocol itself uses only this list and the $\Sha$ hash of the overall data structure; this hash is used as an internal identifier of the catchain, i.e., of the instance of the Catchain protocol. A {\em catchain\/} (i.e., an instance of the Catchain protocol) is characterized by its {\em genesis block} or {\em genesis message}. It is a simple data structure containing some magic numbers, the purpose of the catchain (e.g., the identifier of the shardchain, for which the blocks will be generated, and the so-called {\em catchain sequence number}, also obtained from the masterchain configuration and used to distinguish subsequent instances of the catchain generating ``the same'' shardchain, but possibly with different participating validators), and, most importantly, the list of all participating nodes (their ADNL addresses and Ed25519 public keys as explained in~\ptref{p:cc.prereq}). The Catchain protocol itself uses only this list and the $\Sha$ hash of the overall data structure; this hash is used as an internal identifier of the catchain, i.e., of this specific instance of the Catchain protocol.
\nxsubpoint\emb{Distribution of the genesis block} \nxsubpoint\emb{Distribution of the genesis block}
Note that the genesis block is not distributed among the participating nodes; it is rather computed independently by each participating node as explained in \ptref{p:cc.nodes}. Since the hash of the genesis block is used as the catchain identifier (i.e., identifier of the specific instance of the Catchain protocol; cf.~\ptref{sp:cc.ident}), if a node (accidentally or intentionally) computes a different genesis block, it will be effectively locked out from participating in the ``correct'' instance of the protocol. Note that the genesis block is not distributed among the participating nodes; it is rather computed independently by each participating node as explained in \ptref{p:cc.nodes}. Since the hash of the genesis block is used as the catchain identifier (i.e., identifier of the specific instance of the Catchain protocol; cf.~\ptref{sp:cc.ident}), if a node (accidentally or intentionally) computes a different genesis block, it will be effectively locked out from participating in the ``correct'' instance of the protocol.
@ -135,12 +137,12 @@ Note that the genesis block is not distributed among the participating nodes; it
Note that the (ordered) list of nodes participating in a catchain is fixed in the genesis block and hence it is known to all the participants and it is unambiguously determined by the hash of the genesis block (i.e., the catchain identifier), provided there are no (known) collisions for $\Sha$. Therefore, we fix the number of participating nodes $N$ in the discussion of one specific catchain below, and assume that the nodes are numbered from $1$ to $N$ (their real identities may be looked up in the list of participants using this index in range $1\ldots N$). The set of all participants will be denoted by $I$; we assume that $I=\{1\ldots N\}$. Note that the (ordered) list of nodes participating in a catchain is fixed in the genesis block and hence it is known to all the participants and it is unambiguously determined by the hash of the genesis block (i.e., the catchain identifier), provided there are no (known) collisions for $\Sha$. Therefore, we fix the number of participating nodes $N$ in the discussion of one specific catchain below, and assume that the nodes are numbered from $1$ to $N$ (their real identities may be looked up in the list of participants using this index in range $1\ldots N$). The set of all participants will be denoted by $I$; we assume that $I=\{1\ldots N\}$.
\nxpoint\emb{Messages in a catchain. Catchain as a process group} \nxpoint\emb{Messages in a catchain. Catchain as a process group}
One perspective is that a catchain is a {\em (distributed) process group\/} consisting of $N$ known and fixed {\em (communicating) processes\/} (or {\em nodes\/} in the preceding terminology), and these processes generate {\em broadcast messages}, that are eventually broadcasted to all members of the process group. The set of all processes is denoted by $I$; we usually assume that $I=\{1\ldots N\}$. The broadcasts generated by each process are numbered starting from one, so the $n$-th broadcast of process $i$ will receive {\em sequence number\/} or {\em height\/} $n$; each broadcast should be uniquely determined by the identity or the index~$i$ of the originating process and its height~$n$, so we can think of the pair $(i,n)$ as the natural identifier of a broadcast message inside a process group.\footnote{In the Byzantine environment of a catchain this is not necessarily true in all situations.} The broadcasts generated by the same process $i$ are expected to be delivered to every other process in exactly the same order they have been created, i.e., in the increasing order of their height. In this respect a catchain is very similar to a process group in the sense of \cite{Birman} or \cite{DistrSys}. The principal difference is that a catchain is a ``hardened'' version of a process group tolerant to possible Byzantine (arbitrarily malicious) behavior of some participants. One perspective is that a catchain is a {\em (distributed) process group\/} consisting of $N$ known and fixed {\em (communicating) processes\/} (or {\em nodes\/} in the preceding terminology), and these processes generate {\em broadcast messages}, that are eventually broadcast to all members of the process group. The set of all processes is denoted by $I$; we usually assume that $I=\{1\ldots N\}$. 
The broadcasts generated by each process are numbered starting from one, so the $n$-th broadcast of process $i$ will receive {\em sequence number\/} or {\em height\/} $n$; each broadcast should be uniquely determined by the identity or the index~$i$ of the originating process and its height~$n$, so we can think of the pair $(i,n)$ as the natural identifier of a broadcast message inside a process group.\footnote{In the Byzantine environment of a catchain this is not necessarily true in all situations.} The broadcasts generated by the same process $i$ are expected to be delivered to every other process in exactly the same order they have been created, i.e., in increasing order of their height. In this respect a catchain is very similar to a process group in the sense of \cite{Birman} or \cite{DistrSys}. The principal difference is that a catchain is a ``hardened'' version of a process group tolerant to possible Byzantine (arbitrarily malicious) behavior of some participants.
\nxsubpoint\emb{Dependence relation on messages} \nxsubpoint\emb{Dependence relation on messages}
One can introduce a {\em dependence relation\/} on all messages broadcasted in a process group. This relation must be a strict partial order $\prec$, with the property that $m_{i,k}\prec m_{i,k+1}$, where $m_{i,k}$ denotes the $k$-th message broadcasted by group member process with index~$i$. The meaning of $m\prec m'$ is that {\em $m'$ depends on $m$}, so that the (broadcast) message $m'$ can be processed (by a member of the process group) only if $m$ has been processed before. For instance, if the message $m'$ represents the reaction of a group member to another message $m$, then it is natural to set $m\prec m'$. If a member of the process group receives a message $m'$ before all its dependencies, i.e., messages $m\prec m'$, have been processed (or {\em delivered\/} to the higher-level protocol), then its processing (or {\em delivery\/}) is delayed until all its dependencies are delivered. One can introduce a {\em dependence relation\/} on all messages broadcast in a process group. This relation must be a strict partial order $\prec$, with the property that $m_{i,k}\prec m_{i,k+1}$, where $m_{i,k}$ denotes the $k$-th message broadcast by group member process with index~$i$. The meaning of $m\prec m'$ is that {\em $m'$ depends on $m$}, so that the (broadcast) message $m'$ can be processed (by a member of the process group) only if $m$ has been processed before. For instance, if the message $m'$ represents the reaction of a group member to another message $m$, then it is natural to set $m\prec m'$. If a member of the process group receives a message $m'$ before all its dependencies, i.e., messages $m\prec m'$, have been processed (or {\em delivered\/} to the higher-level protocol), then its processing (or {\em delivery\/}) is delayed until all its dependencies are delivered.
We have defined the dependence relation to be a strict partial order, so it must be transitive ($m''\prec m'$ and $m'\prec m$ imply $m''\prec m$), antisymmetric (at most one of $m'\prec m$ and $m\prec m'$ can hold for any two messages $m$ and $m'$) and anti-reflexive ($m\prec m$ never holds). If we have a smaller set of ``basic dependencies'' $m'\to m$, we can construct its transitive closure $\to^+$ and put $\prec:=\to^+$. The only other requirement is that every broadcast of a sender depends on all previous broadcasts of the same sender. It is not strictly necessary to assume this; however, this assumption is quite natural and considerably simplifies the design of a messaging system inside a process group, so the Catchain protocol makes this assumption.
\nxsubpoint\label{sp:dep.cone}\emb{Dependence set or cone of a message} \nxsubpoint\label{sp:dep.cone}\emb{Dependence set or cone of a message}
Let $m$ be a (broadcast) message inside a process group as above. We say that the set $D_m:=\{m'\,:\,m'\prec m\}$ is the {\em dependence set\/} or {\em dependence cone\/} of message~$m$. In other words, $D_m$ is the {\em principal ideal\/} generated by $m$ in the partially ordered finite set of all messages. It is precisely the set of all messages that must be delivered before $m$ is delivered. Let $m$ be a (broadcast) message inside a process group as above. We say that the set $D_m:=\{m'\,:\,m'\prec m\}$ is the {\em dependence set\/} or {\em dependence cone\/} of message~$m$. In other words, $D_m$ is the {\em principal ideal\/} generated by $m$ in the partially ordered finite set of all messages. It is precisely the set of all messages that must be delivered before $m$ is delivered.
@ -156,7 +158,7 @@ Recall that we have assumed that any message depends on all preceding messages o
\begin{equation}
\VT(D)_i:=\sup\{s\in\bbN\,:\,m_{i,s}\in D\}=\inf\{s\in\bbN_0\,:\,m_{i,s+1}\not\in D\}
\end{equation}
(if no message $m_{i,s}$ is in $D$, we set $\VT(D)_i:=0$). Indeed, it is clear that
\begin{equation}
m_{i,s}\in D\Leftrightarrow s\leq\VT(D)_i
\end{equation}
@ -177,7 +179,7 @@ If $i$ is the sender of message $m$, and $s$ is the height of message $m$, so th
Note that $m'\preceq m$ iff $D^+_{m'}\subset D^+_m$ iff $\VT^+(m')\leq\VT^+(m)$ in $\bbN_0^I$, where $m'\preceq m$ means ``$m'\prec m$ or $m'=m$''. Similarly, $m'\prec m$ iff $D^+_{m'}\subset D_m$ iff $\VT^+(m')\leq\VT(m)$. In other words, {\em the dependence relation $\prec$ on (some or all) messages is completely determined by the adjusted vector timestamps of these messages.}
\nxsubpoint\emb{Using vector timestamps to correctly deliver broadcast messages}
Vector timestamps can be used (in non-byzantine settings) to correctly deliver messages broadcast in a process group.\footnote{We assume that all broadcast messages in the process group are ``causal broadcasts'' or ``cbcast'' in the terminology of \cite{Birman}, because we only need cbcasts for the implementation of Catchain protocol and Catchain consensus.} Namely, suppose that every broadcast message $m=m_{i,s}$ contains the index of its sender $i$ and the vector timestamp of this message $\VT(m)$. Then each receiver $j$ knows whether the message can be delivered or not. For this, $j$ keeps track of the cone $C_j$ of all messages delivered so far, for example by maintaining a {\em current timestamp} $\VT(j)$ equal to $\VT(C_j)$. In other words, $\VT(j)_k$ is the count of messages of sender $k$ processed by $j$ so far. If $\VT(m)\leq\VT(j)$, then the message $m$ is delivered immediately and $\VT(j)$ is updated to $\sup(\VT(j),\VT^+(m))$ afterwards; this is equivalent to increasing $\VT(j)_i$ by one, where $i$ is the original sender of message~$m$. If this condition is not met, then $m$ may be put into a waiting queue until $\VT(j)$ becomes large enough. Instead of passively waiting for the required broadcasts, $j$ can construct the list of message indices $(i',s')$ that are implicitly mentioned in $\VT(m)$ of some received but not delivered message $m$, and request messages with these indices from the neighbors from which $j$ learned about $m$ and $\VT(m)$; an alternative strategy (actually employed by the current implementation of the Catchain protocol) is to request these messages from randomly chosen neighbors from time to time. The latter strategy is simpler because it does not require remembering the immediate sources of all received messages (which may become unavailable anyway).
\nxpoint\emb{Message structure in a catchain. Catchain as a multi-blockchain}
The message structure in a catchain is a bit more complicated than described above because of the necessity to support a BFT protocol. In particular, vector timestamps are not sufficient in a Byzantine setting. They have to be complemented by descriptions based on maximal elements of a dependence cone (such descriptions are typically used in non-byzantine settings only when the process group is very large, so that vector timestamp sizes become prohibitive).
@ -189,9 +191,9 @@ An alternative way (to using a vector timestamp) of describing a message cone $D
Catchain protocol uses {\em $\Sha$} hashes of (suitably serialized) messages as their unique identifiers. If we assume that there are no collisions for $\Sha$ (computable in reasonable, e.g., polynomial time), then a message $m$ is completely identified within the process group by its hash $\Sha(m)$.
\nxsubpoint\emb{Message headers}\label{sp:msg.hdr}
The header of a message $m=m_{i,s}$ inside a catchain (i.e., an instance of the Catchain protocol) always contains the index $i$ of its sender, the height $s$, the catchain identifier (i.e., the hash of the genesis message, cf.~\ptref{sp:cc.ident}) and the set of hashes of maximal elements of the dependence cone of $m$, i.e., the set $\{\Sha(m')\,:\,m'\in\Max(D_m)\}$. In particular, the hash $\Sha(m_{i,s-1})$ of the previous message of the same sender is always included since $m_{i,s-1}\in\Max(D_m)$ if $s>1$; for performance reasons, there is a separate field in the message header containing $\Sha(m_{i,s-1})$. If $s=1$, then there is no previous message, so the hash of the genesis message (i.e., the catchain identifier, cf.~\ptref{sp:cc.ident}) is used instead.
The vector timestamp $\VT(m)$ is not included in the message header; however, the header implicitly determines $\VT(m)$ since
\begin{equation}
\VT(m)=\sup_{m'\in D_m}\VT^+(m')=\sup_{m'\in\Max(D_m)}\VT^+(m')
\end{equation}
@ -212,43 +214,43 @@ Note that all messages created by the same sender $i$ in a catchain turn out to
\nxpoint\emb{Message propagation in a catchain}\label{sp:cc.msg.prop}
Now we are ready to describe message propagation in a catchain. Namely:
\begin{itemize}
\item The (lower-level) overlay network protocol maintains a list of neighbors in the private overlay network underlying the catchain and provides ADNL channels to each of these neighbors. This private overlay network has the same list of members (processes, nodes) as the catchain, and the neighbors of each node form an (oriented) subgraph on the set of all participating nodes. This (essentially random) subgraph is strongly connected with probability very close to one.
\item Each process generates some new messages from time to time (as needed by the higher-level protocol). These messages are augmented by catchain message headers as outlined in~\ptref{sp:msg.hdr}, signed, and propagated to all known neighbors using the ADNL channels established by the overlay protocol.
\item In contrast with the usual simple overlay broadcast protocol, the messages received from neighbors are not immediately rebroadcast to all other neighbors that are not known yet to have a copy of them. Instead, the signature is checked first, and invalid messages are discarded. Then the message is either delivered (if all its dependent messages have already been delivered), or put into a waiting queue. In the latter case, all the required messages mentioned in its header (i.e., the set $\Max(D_m)$) are pulled from the neighbor that sent this message (apart from that, attempts to download these missing messages from random neighbors are performed from time to time). If necessary, this process is repeated recursively until some messages can be delivered. Once a message is ready for local delivery (i.e., all its dependencies are already present), it is also rebroadcast to all neighbors in the overlay network.
\item Apart from the recursive ``pull'' mechanism described above, a faster vector timestamp-based mechanism is also used, so that messages can be queried from neighbors by their senders and heights (learned from the vector timestamps of received messages). Namely, each process sends a special query containing the current vector timestamp to a randomly chosen neighbor from time to time. This peer-to-peer query leads to its receiver sending back all or some messages unknown to the sender (judging by their vector timestamps).
\item This faster vector timestamp-based mechanism can be disabled for messages originating from certain senders as soon as a ``fork'' is detected, i.e., a second message with the same sender $i$ and height $s$, but with a different hash, is learned from a neighbor, for example, during the fast or slow ``pull'' process. Once a fork created by $i$ is detected, the corresponding component $\VT_i$ of all subsequent vector timestamps is set to a special value $\infty$ to indicate that comparing the values of these components does not make sense anymore.
\item When a message is delivered (to the higher-level protocol), this message is added into the cone $C$ of processed messages of the current process (and the current vector timestamp is updated accordingly), and all subsequent messages generated by the current process will be assumed to depend on all the messages delivered so far (even if this is not logically necessary from the perspective of the higher-level protocol).
\item If the set $\Max(C)$ of the maximal elements of the cone of processed messages becomes too large (contains more elements than a certain amount fixed in advance by the genesis message of the catchain), then the Catchain protocol asks the higher-level protocol to generate a new message (empty if no useful payload is available). After this new message is generated (and immediately delivered to the current process), $C$ is updated and $\Max(C)$ consists of only one element (the new message). In this way the size of $\Max(C)$ and therefore the size of the message header always remain bounded.
\item Once a message~$m$ is delivered and the set $C$ is modified to include this message, a timer is set, and after some small delay the higher-level protocol is asked to create a new message (empty if necessary), so that this new message $m^*$ would refer to the new~$C$, similarly to the procedure described in the previous item. This new message $m^*$ is pushed to all neighbors; since its header contains $\Max(C)$ for the new~$C$, and $m\in C$, the neighbors learn not only about the newly-generated message $m^*$, but also about the original received message $m$. If some neighbors do not have a copy of $m$ yet, they would require one (from the current process or not).
\item All (broadcast) messages received and created in a catchain are stored into a special local database. This is especially important for newly-created messages (cf.~\ptref{sp:new.msg.flush}): if a message is created and sent to neighbors, but not saved into the database (and flushed to disk) before the creating process crashes and is restarted, then another message with the same sender and height can be created after restart, thus effectively leading to an involuntary ``fork''.
\end{itemize}
\nxpoint\emb{Forks and their prevention}
One can see that the multi-blockchain structure of a catchain outlined above (with references to other blocks by their hashes and with signatures) leaves very little possibility for ``cheating'' in a consensus protocol built upon a catchain (i.e., using the catchain as a means for broadcasting messages inside a process group). The only possibility that is not detected immediately consists of creating two (or more) different versions of the same message $m_{i,s}$ (say, $m'_{i,s}$ and $m''_{i,s}$), and sending one version of this message $m'_{i,s}$ to some peers and a different version $m''_{i,s}$ to others. If $s$ is minimal (for a fixed $i$), then this corresponds to a {\em fork\/} in blockchain terminology: two different next blocks $m'_{i,s}$ and $m''_{i,s}$ for the same previous block $m_{i,s-1}$.
Therefore, the Catchain protocol takes care to detect forks as soon as possible and prevent their propagation.
\nxsubpoint\emb{Detection of forks}
The detection of forks is simple: if there are two different blocks $m'_{i,s}$ and $m''_{i,s}$ with the same creator $i\in I$ and the same height $s\geq1$, and with valid signatures of~$i$, then this is a fork.
\nxsubpoint\emb{Fork proofs}\label{sp:fork.proofs}
Block signatures in the Catchain protocol are created in such a way that creating {\em fork proofs\/} (i.e., the proof that a process~$i$ has intentionally created a fork) is especially simple since it is the hash of a very small structure (containing a magic number, the values of $i$ and $s$, and the hash of the remainder of the message) that is actually signed. Therefore, only two such small structures and two signatures are required in a fork proof.
\nxsubpoint\emb{External punishment for creating forks}
Notice that an external punishment for creating catchain forks may be used in the proof-of-stake blockchain generation context. Namely, the fork proofs may be submitted to a special smart contract (such as the elector smart contract of the TON Blockchain), checked automatically, and some part or all of the stake of the offending party may be confiscated.
\nxsubpoint\emb{Internal processing of forks}
Once a fork (created by~$i$) is detected (by another process~$j$), i.e.\ $j$ learns about two different messages $m_{i,s}$ and $m'_{i,s}$ created by $i$ and having same height $s$ (usually this happens while recursively downloading dependencies of some other messages), $j$ starts ignoring~$i$ and all of its subsequent messages. They are not accepted and not broadcast further. However, messages created by~$i$ prior to the fork detection may be still downloaded if they are referred to in messages (blocks) created by processes that did not see this fork before referring to such messages created by~$i$.
\nxsubpoint\emb{Accepting messages from a ``bad'' process is bad}\label{sp:no.bad.accept}
Furthermore, if process $i$ learns about a fork created by process $j$, then $i$ shows this to its neighbors by creating a new service broadcast message that contains the corresponding fork proof (cf.~\ptref{sp:fork.proofs}). Afterwards, this and all subsequent messages of $j$ cannot directly depend on any messages by the known ``bad'' producer $i$ (but they still can refer to messages from another party $k$ that directly or indirectly refer to messages of~$i$ if no fork by~$i$ was known to $k$ at the time when the referring message was created). If $j$ violates this restriction and creates messages with such invalid references, these messages will be discarded by all honest processes in the group.
\nxsubpoint\emb{The set of ``bad'' group members is a part of the intrinsic state}\label{sp:bad.proc.set}
Each process~$i$ keeps its own copy of the set of known ``bad'' processes in the group, i.e., those processes that have created at least one fork or have violated \ptref{sp:no.bad.accept}. This set is updated by adding~$j$ into it as soon as $i$ learns about a fork created by~$j$ (or about a violation of~\ptref{sp:no.bad.accept} by $j$); after that, a callback provided by the higher-level protocol is invoked. This set is used when a new broadcast message arrives: if the sender is bad, then the message is ignored and discarded.
\clearpage
\mysection{Block Consensus Protocol}\label{sect:blk.consensus}
We explain in this section the basic workings of the TON Block Consensus Protocol (cf.~\ptref{sect:overview}), which builds upon the generic Catchain protocol (cf.~\ptref{sect:catchain}) to provide the BFT protocol employed for generating and validating new blocks of the TON Blockchain. The source code for the TON Block Consensus protocol resides in subdirectory {\tt validator-session} of the source tree.
\nxpoint\emb{Internal state of the Block Consensus Protocol}\label{p:cc.state}
The higher-level Block Consensus Protocol introduces a new notion to the catchain: that of an {\em internal state\/} of the Block Consensus Protocol (BCP), sometimes also (not quite correctly) called ``the internal state of the catchain'' or simply {\em catchain state}. Namely, each process $i\in I$ has a well-determined internal state $\sigma_{C_i}$ after a subset of messages (actually always a dependence cone) $C_i$ is delivered by the Catchain protocol to the higher-level protocol (i.e., to the Block Consensus Protocol in this case). Furthermore, this state $\sigma_{C_i}=\sigma(C_i)$ depends only on cone~$C_i$, but not on the identity of the process $i\in I$, and can be defined for any dependence cone~$S$ (not necessarily a cone $C_i$ of delivered messages for some process $i$ at some point).
@ -276,13 +278,13 @@ However, this stronger condition is not required by the update algorithm.
\begin{equation}\label{eq:state.merge}
\sigma_{S\cup T}=g(\sigma_S,\sigma_T)\quad\text{for any cones $S$ and $T$}
\end{equation}
(the union of two cones always is a cone).
This function $\sigma$ is applied by the update algorithm only in the specific case $T=D^+_m$ and $m\not\in S$.
\end{itemize}
\nxsubpoint\emb{Commutativity and associativity of $g$}\label{sp:g.assoc}
Note that \eqref{eq:state.merge} (for arbitrary cones $S$ and $T$) implies associativity and commutativity of $g$, at least when $g$ is applied to possible states (values of form $\sigma_S$ for some cone $S$). In this respect $g$ defines a commutative monoid structure on the set $\Sigma=\{\sigma_S\,:\,S$ is a cone$\}$. Usually $g$ is defined or partially defined on a larger set $\tilde\Sigma$ of state-like values, and it may be commutative and associative on this larger set $\tilde\Sigma$, i.e.,
$g(x,y)=g(y,x)$ and $g(x,g(y,z))=g(g(x,y),z)$ for $x$, $y$, $z\in\tilde\Sigma$ (whenever both sides of the equality are defined), with $\sigma_\emptyset$ as an unit, i.e., $g(x,\sigma_\emptyset)=x=g(\sigma_\emptyset,x)$ for $x\in\tilde S$ (under the same condition). However, this property, useful for the formal analysis of the consensus algorithm, is not strictly required by the state update algorithm, because this algorithm uses $g$ in a deterministic fashion to compute $\sigma_S$.
\nxsubpoint\emb{Commutativity of $f$}
Note that $f$, if it satisfies the stronger condition \eqref{eq:state.rec.x}, must also exhibit a commutativity property
@ -307,7 +309,7 @@ The set $\Max(D_m)$ is explicitly listed in the header of message $m$ in some fi
\sigma_{C'_i}=g(\sigma_{C_i},\sigma_{D^+_m}) \sigma_{C'_i}=g(\sigma_{C_i},\sigma_{D^+_m})
\end{equation} \end{equation}
This state, however, is ``virtual'' in the sense that it can be slightly changed later (especially if $g$ is not commutative). Nevertheless, it is used to make some important decisions by the higher-level algorithm (BCP). This state, however, is ``virtual'' in the sense that it can be slightly changed later (especially if $g$ is not commutative). Nevertheless, it is used to make some important decisions by the higher-level algorithm (BCP).
\item Once a new message $m$ is generated and locally delivered, so that $C_i$ becomes equal to $D^+_m$, the previously computed value of $\sigma_{C_i}$ is discarded and replaced with $\sigma_{D^+_m}$ computed according to the general algorithm described above. If $g$ is not commutative or not associative (for example, it may happen that $g(x,y)$ and $g(y,x)$ are different but equivalent representations of the same state), then this might lead to a slight change of the current ``virtual'' state of process $i$. \item Once a new message $m$ is generated and locally delivered, so that $C_i$ becomes equal to $D^+_m$, the previously computed value of $\sigma_{C_i}$ is discarded and replaced with $\sigma_{D^+_m}$ computed according to the general algorithm described above. If $g$ is not commutative or not associative (for example, it may happen that $g(x,y)$ and $g(y,x)$ are different but equivalent representations of the same state), then this might lead to a slight change of the current ``virtual'' state of process $i$.
\item If the lower-level (catchain) protocol reports to the higher-level protocol that a certain process $j\not\in i$ is ``bad'' (i.e., $j$ is found out to have created a fork, cf.~\ptref{sp:bad.proc.set}, or to have knowingly endorsed a fork by another process, cf.~\ptref{sp:no.bad.accept}), then the current (virtual) state $\sigma_{C_i}$ is recomputed from scratch using the new set $C'_i=\bigcup_{\text{$m\in C_i$, $m$ was created by ``good'' process $k$}}D^+_m$ and the ``merge'' function $g$ applied to the set of $\sigma_{D^+_m}$ where $m$ runs through the set of last messages of the processes known to be good (or through the set of maximal elements of this set). The next created outbound message will depend only on the messages from $C'_i$. \item If the lower-level (catchain) protocol reports to the higher-level protocol that a certain process $j\not\in i$ is ``bad'' (i.e., $j$ is found out to have created a fork, cf.~\ptref{sp:bad.proc.set}, or to have knowingly endorsed a fork by another process, cf.~\ptref{sp:no.bad.accept}), then the current (virtual) state $\sigma_{C_i}$ is recomputed from scratch using the new set $C'_i=\bigcup_{\text{$m\in C_i$, $m$ was created by ``good'' process $k$}}D^+_m$ and the ``merge'' function $g$ applied to the set of $\sigma_{D^+_m}$ where $m$ runs through the set of last messages of the processes known to be good (or through the set of maximal elements of this set). The next created outbound message will depend only on the messages from $C'_i$.
\end{itemize} \end{itemize}
@ -334,7 +336,7 @@ The internal state is {\em persistent}, in the sense that the memory used to all
The consequence of the structure of the buffer used to store the internal states of a catchain explained above is that it is updated only by appending some new data at its end. This means that the internal state (or rather the buffer containing all the required internal states) of a catchain can be flushed to an append-only file, and easily recovered after a restart. The only other data that needs to be stored before restarts is the offset (from the start of the buffer, i.e., of this file) of the current state of the catchain. A simple key-value database can be used for this purpose. The consequence of the structure of the buffer used to store the internal states of a catchain explained above is that it is updated only by appending some new data at its end. This means that the internal state (or rather the buffer containing all the required internal states) of a catchain can be flushed to an append-only file, and easily recovered after a restart. The only other data that needs to be stored before restarts is the offset (from the start of the buffer, i.e., of this file) of the current state of the catchain. A simple key-value database can be used for this purpose.
\nxsubpoint\label{sp:share.substr}\emb{Sharing data between different states} \nxsubpoint\label{sp:share.substr}\emb{Sharing data between different states}
It turns out that the tree (or rather the dag) representing the new state $\sigma_{S\cup\{m\}}=f(\sigma_S,m)$ shares large subtrees with the previous state $\sigma_S$, and, similarly, $\sigma_{S\cup T}=g(\sigma_S,\sigma_T)$ shares large subtrees with $\sigma_S$ and $\sigma_T$. The persistent structure used for representing the states in BCP makes possible reusing the same pointers inside the buffer for representing such shared data structures instead of duplicating them. It turns out that the tree (or rather the dag) representing the new state $\sigma_{S\cup\{m\}}=f(\sigma_S,m)$ shares large subtrees with the previous state $\sigma_S$, and, similarly, $\sigma_{S\cup T}=g(\sigma_S,\sigma_T)$ shares large subtrees with $\sigma_S$ and $\sigma_T$. The persistent structure used for representing the states in BCP makes it possible to reuse the same pointers inside the buffer for representing such shared data structures instead of duplicating them.
\nxsubpoint\label{sp:memoize}\emb{Memoizing nodes} \nxsubpoint\label{sp:memoize}\emb{Memoizing nodes}
Another technique employed while computing new states (i.e., the values of function~$f$) is that of {\em memoizing new nodes}, also borrowed from functional programming languages. Namely, whenever a new node is constructed (inside the huge buffer containing all states for a specific catchain), its hash is computed, and a simple hash table is used to look up the latest node with the same hash. If a node with this hash is found, and it has the same contents, then the newly-constructed node is discarded and a reference to the old node with the same contents is returned instead. On the other hand, if no copy of the new node is found, then the hash table is updated, the end-of-buffer (allocation) pointer is advanced, and the pointer to the new node is returned to the caller. Another technique employed while computing new states (i.e., the values of function~$f$) is that of {\em memoizing new nodes}, also borrowed from functional programming languages. Namely, whenever a new node is constructed (inside the huge buffer containing all states for a specific catchain), its hash is computed, and a simple hash table is used to look up the latest node with the same hash. If a node with this hash is found, and it has the same contents, then the newly-constructed node is discarded and a reference to the old node with the same contents is returned instead. On the other hand, if no copy of the new node is found, then the hash table is updated, the end-of-buffer (allocation) pointer is advanced, and the pointer to the new node is returned to the caller.
@ -342,7 +344,7 @@ Another technique employed while computing new states (i.e., the values of funct
In this way if different processes end up making similar computations and having similar states, large portions of these states will be shared even if they are not directly related by application of function~$f$ as explained in~\ptref{sp:share.substr}. In this way if different processes end up making similar computations and having similar states, large portions of these states will be shared even if they are not directly related by application of function~$f$ as explained in~\ptref{sp:share.substr}.
\nxsubpoint\emb{Importance of optimization techniques} \nxsubpoint\emb{Importance of optimization techniques}
The optimization techniques \ptref{sp:share.substr} and \ptref{sp:memoize} used for sharing parts of different internal states inside the same catchain are drastically important for improving the memory profile and the performance of BCM in a large process group. The improvement is several orders of magnitude in groups of $N\approx100$ processes. Without these optimizations BCM would not be fit for its intended purpose (BFT consensus on new blocks generated by validators in TON Blockchain). The optimization techniques \ptref{sp:share.substr} and \ptref{sp:memoize} used for sharing parts of different internal states inside the same catchain are drastically important for improving the memory profile and the performance of BCM in a large process group. The improvement is several orders of magnitude in groups of $N\approx100$ processes. Without these optimizations BCM would not be fit for its intended purpose (BFT consensus on new blocks generated by validators in the TON Blockchain).
\nxsubpoint\emb{Message $m$ contains a hash of state $\sigma_{D^+_m}$} \nxsubpoint\emb{Message $m$ contains a hash of state $\sigma_{D^+_m}$}
Every message $m$ contains a (Merkle) hash of (the abstract representation of) the corresponding state $\sigma_{D^+_m}$. Very roughly, this hash is computed recursively using the tree of nodes representation of~\ptref{sp:state.node.tree}: all node references inside a node are replaced with (recursively computed) hashes of the referred nodes, and a simple 64-bit hash of the resulting byte sequence is computed. This hash is also used for memoization as described in \ptref{sp:memoize}. Every message $m$ contains a (Merkle) hash of (the abstract representation of) the corresponding state $\sigma_{D^+_m}$. Very roughly, this hash is computed recursively using the tree of nodes representation of~\ptref{sp:state.node.tree}: all node references inside a node are replaced with (recursively computed) hashes of the referred nodes, and a simple 64-bit hash of the resulting byte sequence is computed. This hash is also used for memoization as described in \ptref{sp:memoize}.
@ -350,10 +352,10 @@ Every message $m$ contains a (Merkle) hash of (the abstract representation of) t
The purpose of this field in messages is to provide a sanity check for the computations of $\sigma_{D^+_m}$ performed by different processes (and possibly by different implementations of the state update algorithm): once $\sigma_{D^+_m}$ is computed for a newly-delivered message $m$, the hash of computed $\sigma_{D^+_m}$ is compared to the value stored in the header of~$m$. If these values are not equal, an error message is output into an error log (and no further actions are taken by the software). These error logs can be examined to detect bugs or incompatibilities between different versions of BCP. The purpose of this field in messages is to provide a sanity check for the computations of $\sigma_{D^+_m}$ performed by different processes (and possibly by different implementations of the state update algorithm): once $\sigma_{D^+_m}$ is computed for a newly-delivered message $m$, the hash of computed $\sigma_{D^+_m}$ is compared to the value stored in the header of~$m$. If these values are not equal, an error message is output into an error log (and no further actions are taken by the software). These error logs can be examined to detect bugs or incompatibilities between different versions of BCP.
\nxpoint\emb{State recovery after restart or crashes} \nxpoint\emb{State recovery after restart or crashes}
A catchain is typically used by the BCP for several minutes; during this period, the program (the validator software) running the catchain protocol may be terminated and restarted, either deliberately (e.g., because of a scheduled software update) or unintentionally (the program might crash because of a bug in this or some other subsystem, and be restarted afterwards). One way of dealing with this situation would be to ignore all catchains not created after the last restart. However, this would lead to some validators not participating in creating any blocks for several minutes (until next catchain instances are created), which is undesirable. Therefore, a catchain state recovery protocol is run instead after every restart, so that the validator could continue participating in the same catchain. A catchain is typically used by the BCP for several minutes; during this period, the program (the validator software) running the Catchain protocol may be terminated and restarted, either deliberately (e.g., because of a scheduled software update) or unintentionally (the program might crash because of a bug in this or some other subsystem, and be restarted afterwards). One way of dealing with this situation would be to ignore all catchains not created after the last restart. However, this would lead to some validators not participating in creating any blocks for several minutes (until the next catchain instances are created), which is undesirable. Therefore, a catchain state recovery protocol is run instead after every restart, so that the validator can continue participating in the same catchain.
\nxsubpoint\emb{Database of all delivered messages}\label{sp:msg.db} \nxsubpoint\emb{Database of all delivered messages}\label{sp:msg.db}
To this end, a special database is created for each active catchain. This database contains all known and delivered messages, indexed by their identifiers (hashes). A simple key-value storage suffices for this purpose. The hash of the most recent outbound message $m=m_{i,s}$ generated by the current process $i$ is also stored in this database. After restart, all messages up to $m$ are recursively delivered in proper order (in the same way as if all these messages had been just received from the network in an arbitrary order) and processed by the higher-level protocol, until $m$ finally is delivered, thus recovering the current state. To this end, a special database is created for each active catchain. This database contains all known and delivered messages, indexed by their identifiers (hashes). A simple key-value database suffices for this purpose. The hash of the most recent outbound message $m=m_{i,s}$ generated by the current process $i$ is also stored in this database. After restart, all messages up to $m$ are recursively delivered in proper order (in the same way as if all these messages had been just received from the network in an arbitrary order) and processed by the higher-level protocol, until $m$ finally is delivered, thus recovering the current state.
\nxsubpoint\emb{Flushing new messages to disk}\label{sp:new.msg.flush} \nxsubpoint\emb{Flushing new messages to disk}\label{sp:new.msg.flush}
We have already explained in~\ptref{sp:cc.msg.prop} that newly-created messages are stored in the database of all delivered messages (cf.~\ptref{sp:msg.db}) and the database is flushed to disk before the new message is sent to all network neighbors. In this way we can be sure that the message cannot be lost if the system crashes and is restarted, thus avoiding the creation of involuntary forks. We have already explained in~\ptref{sp:cc.msg.prop} that newly-created messages are stored in the database of all delivered messages (cf.~\ptref{sp:msg.db}) and the database is flushed to disk before the new message is sent to all network neighbors. In this way we can be sure that the message cannot be lost if the system crashes and is restarted, thus avoiding the creation of involuntary forks.
@ -365,7 +367,7 @@ An implementation might use an append-only file containing all previously comput
Now we are ready to present a high-level description of the Block Consensus Protocol employed by TON Blockchain validators to generate and achieve consensus on new blockchain blocks. Essentially, it is a three-phase commit protocol that runs over a catchain (an instance of the Catchain protocol), which is used as a ``hardened'' message broadcast system in a process group. Now we are ready to present a high-level description of the Block Consensus Protocol employed by TON Blockchain validators to generate and achieve consensus on new blockchain blocks. Essentially, it is a three-phase commit protocol that runs over a catchain (an instance of the Catchain protocol), which is used as a ``hardened'' message broadcast system in a process group.
\nxsubpoint\emb{Creation of new catchain messages} \nxsubpoint\emb{Creation of new catchain messages}
Recall that the lower-level Catchain protocol does not create broadcast messages on its own (with the only exception of service broadcasts with fork proofs, cf.~\ptref{sp:no.bad.accept}). Instead, when a new message needs to be created, the higher-level protocol (BCP) is asked to do this by invoking a callback. Apart from that, the creation of new messages may be triggered by changes in the current virtual state and by timer alarms. Recall that the lower-level Catchain protocol does not create broadcast messages on its own (with the only exception being service broadcasts with fork proofs, cf.~\ptref{sp:no.bad.accept}). Instead, when a new message needs to be created, the higher-level protocol (BCP) is asked to do this by invoking a callback. Apart from that, the creation of new messages may be triggered by changes in the current virtual state and by timer alarms.
\nxsubpoint\emb{Payload of catchain messages}\label{sp:payload} \nxsubpoint\emb{Payload of catchain messages}\label{sp:payload}
In this way the payload of catchain messages is always determined by the higher-level protocol, such as BCP. For BCP, this payload consists of In this way the payload of catchain messages is always determined by the higher-level protocol, such as BCP. For BCP, this payload consists of
@ -375,19 +377,19 @@ In this way the payload of catchain messages is always determined by the higher
\end{itemize} \end{itemize}
\nxsubpoint\emb{BCP events} \nxsubpoint\emb{BCP events}
We have just explained that the payload of a catchain message contains several (zero or more) BCP events. Now we list all admissible BCP event types. We have just explained that the payload of a catchain message contains several (possibly zero) BCP events. Now we list all admissible BCP event types.
\begin{itemize} \begin{itemize}
\item $\Submit(\wround,\wcandidate)$ --- suggest a new block candidate \item $\Submit(\wround,\wcandidate)$ --- suggest a new block candidate
\item $\Approve(\wround,\wcandidate,\wsignature)$ --- a block candidate has passed local validation \item $\Approve(\wround,\wcandidate,\wsignature)$ --- a block candidate has passed local validation
\item $\Reject(\wround,\wcandidate)$ --- a block candidate failed local validation \item $\Reject(\wround,\wcandidate)$ --- a block candidate has failed local validation
\item $\CommitSign(\wround,\wcandidate,\wsignature)$ --- block candidate has been accepted and signed \item $\CommitSign(\wround,\wcandidate,\wsignature)$ --- a block candidate has been accepted and signed
\item $\Vote(\wround,\wcandidate)$ --- vote for a block candidate \item $\Vote(\wround,\wcandidate)$ --- a vote for a block candidate
\item $\VoteFor(\wround,\wcandidate)$ --- this block candidate must be voted for in this round (even if the current process has another opinion) \item $\VoteFor(\wround,\wcandidate)$ --- this block candidate must be voted for in this round (even if the current process has another opinion)
\item $\PreCommit(\wround,\wcandidate)$ --- a preliminary commitment to a block candidate (used in three-phase commit scheme) \item $\PreCommit(\wround,\wcandidate)$ --- a preliminary commitment to a block candidate (used in three-phase commit scheme)
\end{itemize} \end{itemize}
\nxsubpoint\emb{Protocol parameters} \nxsubpoint\emb{Protocol parameters}
Several parameters of BCP must be fixed in advance (in the genesis message of the catchain, where they are initialized from the current values of configuration parameters of masterchain state): Several parameters of BCP must be fixed in advance (in the genesis message of the catchain, where they are initialized from the values of the configuration parameters extracted from the current masterchain state):
\begin{itemize} \begin{itemize}
\item $K$ --- duration of one attempt (in seconds). It is an integer amount of seconds in the current implementation; however, this is an implementation detail, not a restriction of the protocol \item $K$ --- duration of one attempt (in seconds). It is an integer amount of seconds in the current implementation; however, this is an implementation detail, not a restriction of the protocol
\item $Y$ --- number of {\em fast\/} attempts to accept a candidate \item $Y$ --- number of {\em fast\/} attempts to accept a candidate
@ -398,7 +400,7 @@ Several parameters of BCP must be fixed in advance (in the genesis message of th
Possible values for these parameters are $K=8$, $Y=3$, $C=2$, $\Delta_i=2(i-1)$, $\Delta_\infty=2C$. Possible values for these parameters are $K=8$, $Y=3$, $C=2$, $\Delta_i=2(i-1)$, $\Delta_\infty=2C$.
\nxsubpoint\emb{Protocol overview} \nxsubpoint\emb{Protocol overview}
The BCP consist of several {\em rounds\/} that are executed inside the same catchain. More than one round may be active at one point of time, because some phases of a round may overlap with other phases of other rounds. Therefore, all BCP events contain an explicit round identifier $\wround$ (a small integer starting from zero). Every round is terminated either by (collectively) accepting a {\em block candidate\/} suggested by one of participating processes, or by accepting a special {\em null candidate\/}---a dummy value indicating that no real block candidate was accepted, for example because no block candidates were suggested at all. After a round is terminated (from the perspective of a participating process), i.e., once a block candidate collects $\CommitSign$ signatures of more that $2/3$ of all validators, only $\CommitSign$ events may be added to that round; the process automatically starts participating in the next round (with the next identifier) and ignores all BCP events with different values of $\wround$.\footnote{This also means that each process implicitly determines the Unixtime of the start of the next round, and computes all delays, e.g., the block candidate submission delays, starting from this time.} The BCP consists of several {\em rounds\/} that are executed inside the same catchain. More than one round may be active at one point of time, because some phases of a round may overlap with other phases of other rounds. Therefore, all BCP events contain an explicit round identifier $\wround$ (a small integer starting from zero). Every round is terminated either by (collectively) accepting a {\em block candidate\/} suggested by one of the participating processes, or by accepting a special {\em null candidate\/}---a dummy value indicating that no real block candidate was accepted, for example because no block candidates were suggested at all. 
After a round is terminated (from the perspective of a participating process), i.e., once a block candidate collects $\CommitSign$ signatures of more than $2/3$ of all validators, only $\CommitSign$ events may be added to that round; the process automatically starts participating in the next round (with the next identifier) and ignores all BCP events with different values of $\wround$.\footnote{This also means that each process implicitly determines the Unixtime of the start of the next round, and computes all delays, e.g., the block candidate submission delays, starting from this time.}
Each round is subdivided into several {\em attempts}. Each attempt lasts a predetermined time period of $K$ seconds (BCP uses clocks to measure time and time intervals and assumes that clocks of ``good'' processes are more or less in agreement with each other; therefore, BCP is not an asynchronous BFT protocol). Each attempt starts at Unixtime exactly divisible by $K$ and lasts for $K$ seconds. The attempt identifier $\wattempt$ is the Unixtime of its start divided by $K$. Therefore, the attempts are numbered more or less consecutively by 32-bit integers, but not starting from zero. The first $Y$ attempts of a round are {\em fast\/}; the remaining attempts are {\em slow}. Each round is subdivided into several {\em attempts}. Each attempt lasts a predetermined time period of $K$ seconds (BCP uses clocks to measure time and time intervals and assumes that clocks of ``good'' processes are more or less in agreement with each other; therefore, BCP is not an asynchronous BFT protocol). Each attempt starts at Unixtime exactly divisible by $K$ and lasts for $K$ seconds. The attempt identifier $\wattempt$ is the Unixtime of its start divided by $K$. Therefore, the attempts are numbered more or less consecutively by 32-bit integers, but not starting from zero. The first $Y$ attempts of a round are {\em fast\/}; the remaining attempts are {\em slow}.
@ -406,38 +408,38 @@ Each round is subdivided into several {\em attempts}. Each attempt lasts a prede
In contrast with rounds, BCP events do not have a parameter to indicate the attempt they belong to. Instead, this attempt is implicitly determined by the Unix time indicated in the payload of the catchain message containing the BCP event (cf.~\ptref{sp:payload}). Furthermore, the attempts are subdivided into {\em fast\/} (the first $Y$ attempts of a round in which a process takes part) and {\em slow\/} (the subsequent attempts of the same round). This subdivision is also implicit: the first BCP event sent by a process in a round belongs to a certain attempt, and $Y$ attempts starting from this one are considered fast by this process. In contrast with rounds, BCP events do not have a parameter to indicate the attempt they belong to. Instead, this attempt is implicitly determined by the Unix time indicated in the payload of the catchain message containing the BCP event (cf.~\ptref{sp:payload}). Furthermore, the attempts are subdivided into {\em fast\/} (the first $Y$ attempts of a round in which a process takes part) and {\em slow\/} (the subsequent attempts of the same round). This subdivision is also implicit: the first BCP event sent by a process in a round belongs to a certain attempt, and $Y$ attempts starting from this one are considered fast by this process.
\nxsubpoint\emb{Block producers and block candidates} \nxsubpoint\emb{Block producers and block candidates}
There are $C$ designated block producers (member processes) in each round. The (ordered) list of these block producers is computed by a deterministic algorithm (in the simplest case, processes $i$, $i+1$, \dots, $i+C-1$ are used in the $i$-th round, with the indices taken modulo $N$, the total number of processes in the catchain) and is known to all participants without any extra communication or negotiation. The processes are ordered in this list by decreasing priority, so the first member of the list has the highest priority (i.e., if it offers a block candidate not too late, this block candidate has very high chances to be accepted by the protocol). There are $C$ designated block producers (member processes) in each round. The (ordered) list of these block producers is computed by a deterministic algorithm (in the simplest case, processes $i$, $i+1$, \dots, $i+C-1$ are used in the $i$-th round, with the indices taken modulo $N$, the total number of processes in the catchain) and is known to all participants without any extra communication or negotiation. The processes are ordered in this list by decreasing priority, so the first member of the list has the highest priority (i.e., if it suggests a block candidate in time, this block candidate has a very high chance to be accepted by the protocol).
The first block producer may suggest a block candidate immediately after the round starts. Other block producers can suggest block candidates only after some delay $\Delta_i$, where $i$ is the index of the producer in the list of designated block producers, with $0=\Delta_1\leq\Delta_2\leq\ldots$. After some predetermined period of time $\Delta_\infty$ elapses from the round start, a special {\em null candidate\/} is assumed automatically suggested (even if there are no explicit BCP events to indicate this). Therefore, at most $C+1$ block candidates (including the null candidate) are suggested in a round. The first block producer may suggest a block candidate immediately after the round starts. Other block producers can suggest block candidates only after some delay $\Delta_i$, where $i$ is the index of the producer in the list of designated block producers, with $0=\Delta_1\leq\Delta_2\leq\ldots$. After some predetermined period of time $\Delta_\infty$ elapses from the round start, a special {\em null candidate\/} is assumed automatically suggested (even if there are no explicit BCP events to indicate this). Therefore, at most $C+1$ block candidates (including the null candidate) are suggested in a round.
\nxsubpoint\emb{Suggesting a block candidate} \nxsubpoint\emb{Suggesting a block candidate}
A block candidate for TON Block\-chain consists of two large ``files'' --- the block and the collated data, along with a small header containing the description of the block being generated (most importantly, the complete {\em block identifier\/} for the block candidate, containing the workchain and the shard identifier, the block sequence number, its file hash and its root hash) and the $\Sha$ hashes of the two large files. Only a part of this small header (including the hashes of the two files and other important data) is used as $\wcandidate$ in BCP events such as $\Submit$ or $\CommitSign$ to refer to a specific block candidate. The bulk of the data (most importantly, the two large files) is propagated in the overlay network associated with the catchain by the streaming broadcast protocol implemented over ADNL for this purpose (cf.~\cite[5]{TON}). This bulk data propagation mechanism is unimportant for the validity of the consensus protocol (the only important point is that the hashes of the large files are part of BCP events and hence of the catchain messages, where they are signed by the sender, and these hashes are checked after the large files are received by any participating nodes; therefore, nobody can replace or corrupt these files). A $\Submit(\wround,\wcandidate)$ BCP event is created in the catchain by the block producer in parallel with the propagation of the block candidate, indicating the committment of this block producer to this specific block candidate. A block candidate for the TON Block\-chain consists of two large ``files'' --- the block and the collated data, along with a small header containing the description of the block being generated (most importantly, the complete {\em block identifier\/} for the block candidate, containing the workchain and the shard identifier, the block sequence number, its file hash and its root hash) and the $\Sha$ hashes of the two large files. 
Only a part of this small header (including the hashes of the two files and other important data) is used as $\wcandidate$ in BCP events such as $\Submit$ or $\CommitSign$ to refer to a specific block candidate. The bulk of the data (most importantly, the two large files) is propagated in the overlay network associated with the catchain by the streaming broadcast protocol implemented over ADNL for this purpose (cf.~\cite[5]{TON}). This bulk data propagation mechanism is unimportant for the validity of the consensus protocol (the only important point is that the hashes of the large files are part of BCP events and hence of the catchain messages, where they are signed by the sender, and these hashes are checked after the large files are received by any participating nodes; therefore, nobody can replace or corrupt these files). A $\Submit(\wround,\wcandidate)$ BCP event is created in the catchain by the block producer in parallel with the propagation of the block candidate, indicating the submission of this specific block candidate by this block producer.
\nxsubpoint\emb{Processing block candidates} \nxsubpoint\emb{Processing block candidates}
Once a process observes a $\Submit$ BCP event in a delivered catchain message, it checks the validity of this event (for instance, its originating process must be in the list of designated producers, and current Unixtime must be at least the start of the round plus the minimum delay $\Delta_i$, where $i$ is the index of this producer in the list of designated producers), and if it is valid, remembers it in the current catchain state (cf.~\ptref{p:cc.state}). After that, when a streaming broadcast containing the files with this block candidates (with correct hash values) is received (or immediately, if these files are already present), the process invokes a validator instance to validate the new block candidate (even if this block candidate was suggested by this process itself!). Depending on the result of this validation, either an $\Approve(\wround,\wcandidate,\wsignature)$ or a $\Reject(\wround,\wcandidate)$ BCP event is created (and embedded into a new catchain message). Note that the $\wsignature$ used in $\Approve$ events uses the same private key that will be ultimately used to sign the accepted block, but the signature itself is different from that used in $\CommitSign$ (the hash of a structure with different magic number is actually signed). Therefore, this interim signature cannot be used to fake the acceptance of this block by this particular validator process to an outside observer. Once a process observes a $\Submit$ BCP event in a delivered catchain message, it checks the validity of this event (for instance, its originating process must be in the list of designated producers, and current Unixtime must be at least the start of the round plus the minimum delay $\Delta_i$, where $i$ is the index of this producer in the list of designated producers), and if it is valid, remembers it in the current catchain state (cf.~\ptref{p:cc.state}). 
After that, when a streaming broadcast containing the files associated with this block candidates (with correct hash values) is received (or immediately, if these files are already present), the process invokes a validator instance to validate the new block candidate (even if this block candidate was suggested by this process itself!). Depending on the result of this validation, either an $\Approve(\wround,\wcandidate,\wsignature)$ or a $\Reject(\wround,\wcandidate)$ BCP event is created (and embedded into a new catchain message). Note that the $\wsignature$ used in $\Approve$ events uses the same private key that will ultimately be used to sign the accepted block, but the signature itself is different from that used in $\CommitSign$ (the hash of a structure with different magic number is actually signed). Therefore, this interim signature cannot be used to fake the acceptance of this block by this particular validator process to an outside observer.
\nxsubpoint\emb{Overview of one round} \nxsubpoint\emb{Overview of one round}
Each round of BCP proceeds as follows: Each round of BCP proceeds as follows:
\begin{itemize} \begin{itemize}
\item At the beginning of a round several processes (from the predetermined list of designated producers) submit their block candidates (with certain delays depending on the producer priority) and reflect this fact by means of $\Submit$ events (incorporated into catchain messages). \item At the beginning of a round, several processes (from the predetermined list of designated producers) submit their block candidates (with certain delays depending on their producer priority) and reflect this fact by means of $\Submit$ events (incorporated into catchain messages).
\item Once a process receives a submitted block candidate (i.e., observes a $\Submit$ events and receives all necessary files by means external with respect to the consensus protocol), it starts the validation of this candidate and eventually creates either an $\Approve$ or a $\Reject$ event for this block candidate. \item Once a process receives a submitted block candidate (i.e., observes a $\Submit$ event and receives all necessary files by means external to the consensus protocol), it starts the validation of this candidate and eventually creates either an $\Approve$ or a $\Reject$ event for this block candidate.
\item During each {\em fast attempt\/} (i.e., one of the first $Y$ attempts) every process votes either for a block candidate that has collected votes of more than $2/3$ of all processes, or, if there are no such candidates yet, for the valid (i.e., $\Approve$d by more than $2/3$ of all processes) block candidate with the highest priority. The voting is performed by means of creating $\Vote$ events (embedded into new catchain messages). \item During each {\em fast attempt\/} (i.e., one of the first $Y$ attempts) every process votes either for a block candidate that has collected the votes of more than $2/3$ of all processes, or, if there are no such candidates yet, for the valid (i.e., $\Approve$d by more than $2/3$ of all processes) block candidate with the highest priority. The voting is performed by means of creating $\Vote$ events (embedded into new catchain messages).
\item During each {\em slow attempt\/} (i.e., any attempt except $Y$ first ones) every process votes either for a candidate that was $\PreCommit$ted before (by the same process), or for a candidate that was suggested by $\VoteFor$. \item During each {\em slow attempt\/} (i.e., any attempt except the first $Y$) every process votes either for a candidate that was $\PreCommit$ted before (by the same process), or for a candidate that was suggested by $\VoteFor$.
\item If a block candidate has received votes from more than $2/3$ of all processes during the current attempt, and the current process observes these votes (which are collected in the catchain state), a $\PreCommit$ event is created, indicating that the process will vote only for this candidate in the future. \item If a block candidate has received votes from more than $2/3$ of all processes during the current attempt, and the current process observes these votes (which are collected in the catchain state), a $\PreCommit$ event is created, indicating that the process will vote only for this candidate in future.
\item If a block candidate collects $\PreCommit$s from more than $2/3$ of all processes inside an attempt, then it is assumed to be accepted (by the group), and each process that observes these $\PreCommit$s creates a $\CommitSign$ event with a valid block signature. These block signatures are registered in the catchain, and are ultimately collected to create a ``block proof'' (containing signatures of more than $2/3$ of validators for this block). This block proof is the external output of the consensus protocol (along with the block itself, but without its collated data); it is ultimately propagated in the overlay network of all full nodes that have subscribed to new blocks of this shard (or of the masterchain). \item If a block candidate collects $\PreCommit$s from more than $2/3$ of all processes inside an attempt, then it is assumed to be accepted (by the group), and each process that observes these $\PreCommit$s creates a $\CommitSign$ event with a valid block signature. These block signatures are registered in the catchain, and are ultimately collected to create a ``block proof'' (containing signatures of more than $2/3$ of the validators for this block). This block proof is the external output of the consensus protocol (along with the block itself, but without its collated data); it is ultimately propagated in the overlay network of all full nodes that have subscribed to new blocks of this shard (or of the masterchain).
\item Once a block candidate collects $\CommitSign$ signatures from more than $2/3$ of all validators, the round is considered finished (at least from the perspective of a process that observes all these signatures). After that, only a $\CommitSign$ can be added to that round by this process, and the process automatically starts participating in the next round (and ignores all events related to all other rounds). \item Once a block candidate collects $\CommitSign$ signatures from more than $2/3$ of all validators, the round is considered finished (at least from the perspective of a process that observes all these signatures). After that, only a $\CommitSign$ can be added to that round by this process, and the process automatically starts participating in the next round (and ignores all events related to other rounds).
\end{itemize} \end{itemize}
Note that the above protocol may lead to a validator signing (in a $\CommitSign$ event) a block candidate that was $\Reject$ed by the same validator before (this is a kind of ``submitting to the will of the majority''). Note that the above protocol may lead to a validator signing (in a $\CommitSign$ event) a block candidate that was $\Reject$ed by the same validator before (this is a kind of ``submitting to the will of the majority'').
\nxsubpoint\emb{$\Vote$ and $\PreCommit$ messages are created deterministically}\label{sp:force.vote} \nxsubpoint\emb{$\Vote$ and $\PreCommit$ messages are created deterministically}\label{sp:force.vote}
Note that each process can create at most one $\Vote$ and at most one $\PreCommit$ event in each attempt. Furthermore, these events are completely determined by the state $\sigma_{D_m}$ of the sender of catchain message~$m$ containing such an event. Therefore, the receiver can detect invalid $\Vote$ or $\PreCommit$ events and ignore them (thus mitigating byzantine behavior of other participants). On the other hand, a message $m$ that should contain a $\Vote$ or a $\PreCommit$ event according to the corresponding state $\sigma_{D_m}$ but does not contain one can be received. In this case, the current implementation automatically creates missing events and proceeds as if $m$ had contained them from the very beginning. However, such instances of byzantine behavior are either corrected or ignored (and a message is output into the error log), but the offending processes are not punished otherwise (because this would require very large misbehavior proofs for outside observers that do not have access to the internal state of the catchain). Note that each process can create at most one $\Vote$ and at most one $\PreCommit$ event in each attempt. Furthermore, these events are completely determined by the state $\sigma_{D_m}$ of the sender of catchain message~$m$ containing such an event. Therefore, the receiver can detect invalid $\Vote$ or $\PreCommit$ events and ignore them (thus mitigating byzantine behavior of other participants). On the other hand, a message $m$ that should contain a $\Vote$ or a $\PreCommit$ event according to the corresponding state $\sigma_{D_m}$ but does not contain one can be received. In this case, the current implementation automatically creates missing events and proceeds as if $m$ had contained them from the very beginning. 
However, such instances of byzantine behavior are either corrected or ignored (and a message is output into the error log), but the offending processes are not otherwise punished (because this would require very large misbehavior proofs for outside observers that do not have access to the internal state of the catchain).
\nxsubpoint\emb{Multiple $\Vote$s and $\PreCommit$s of the same process}\label{sp:vote.fork} \nxsubpoint\emb{Multiple $\Vote$s and $\PreCommit$s of the same process}\label{sp:vote.fork}
Note that a process usually ignores subsequent $\Vote$s and $\PreCommit$s generated by the same originating process inside the same attempt, so normally a process can vote for at most one block candidate. However, it may happen that a ``good'' process indirectly observes a fork created by a byzantine process, with $\Vote$s for different block candidates in different branches of this fork (this can happen if the ``good'' process learns about these two branches from two other ``good'' processes that did not see this fork before). In this case, both $\Vote$s (for different candidates) are taken into account (added into the merged state of the current process). A similar logic applies to $\PreCommit$s. Note that a process usually ignores subsequent $\Vote$s and $\PreCommit$s generated by the same originating process inside the same attempt, so normally a process can vote for at most one block candidate. However, it may happen that a ``good'' process indirectly observes a fork created by a byzantine process, with $\Vote$s for different block candidates in different branches of this fork (this can happen if the ``good'' process learns about these two branches from two other ``good'' processes that did not see this fork before). In this case, both $\Vote$s (for different candidates) are taken into account (added into the merged state of the current process). A similar logic applies to $\PreCommit$s.
\nxsubpoint\emb{Approving or rejecting block candidates} \nxsubpoint\emb{Approving or rejecting block candidates}
Notice that a block candidate cannot be $\Approve$d or $\Reject$ed before it has been $\Submit$ted (i.e., an $\Approve$ event that was not preceded by a corresponding $\Submit$ event will be ignored), and that a candidate cannot be approved before the minimum time of its submission (the round start time plus the priority-dependent delay $\Delta_i$) is reached, i.e., any ``good'' process will postpone the creation of its $\Approve$ until this time. Furthermore, one cannot $\Approve$ more than one candidate of the same producer in the same round (i.e., even if a process $\Submit$s several candidates, only one of them---presumably the first one---will be $\Approve$d by other ``good'' processes; as usual, this means that subsequent $\Approve$ events will be ignored by ``good'' processes on receival). Notice that a block candidate cannot be $\Approve$d or $\Reject$ed before it has been $\Submit$ted (i.e., an $\Approve$ event that was not preceded by a corresponding $\Submit$ event will be ignored), and that a candidate cannot be approved before the minimum time of its submission (the round start time plus the priority-dependent delay $\Delta_i$) is reached, i.e., any ``good'' process will postpone the creation of its $\Approve$ until this time. Furthermore, one cannot $\Approve$ more than one candidate of the same producer in the same round (i.e., even if a process $\Submit$s several candidates, only one of them---presumably the first one---will be $\Approve$d by other ``good'' processes; as usual, this means that subsequent $\Approve$ events will be ignored by ``good'' processes on receipt).
\nxsubpoint\emb{Approving the null block candidate} \nxsubpoint\emb{Approving the null block candidate}
The implicit null block candidate is also explicitly approved (by creating an $\Approve$ event) by all (good) processes, once the delay $\Delta_\infty$ from the start of the round expires. The implicit null block candidate is also explicitly approved (by creating an $\Approve$ event) by all (good) processes, once the delay $\Delta_\infty$ from the start of the round expires.
@ -445,18 +447,18 @@ The implicit null block candidate is also explicitly approved (by creating an $\
\nxsubpoint\emb{Choosing a block candidate for voting}\label{sp:vote.rules} \nxsubpoint\emb{Choosing a block candidate for voting}\label{sp:vote.rules}
Each process chooses one of the available block candidates (including the implicit null candidate) and votes for this candidate (by creating a $\Vote$ event) by applying the following rules (in the order they are presented): Each process chooses one of the available block candidates (including the implicit null candidate) and votes for this candidate (by creating a $\Vote$ event) by applying the following rules (in the order they are presented):
\begin{itemize} \begin{itemize}
\item If the current process created a $\PreCommit$ event for a candidate during one of the previous attempts, and no other candidate has collected votes of more than $2/3$ of all processes since (i.e., inside one of the subsequent attempts, including the current one so far; we say that the $\PreCommit$ event is still {\em active\/} in this case), then the current process votes for this candidate again. \item If the current process created a $\PreCommit$ event for a candidate during one of the previous attempts, and no other candidate has collected votes from more than $2/3$ of all processes since (i.e., inside one of the subsequent attempts, including the current one so far; we say that the $\PreCommit$ event is still {\em active\/} in this case), then the current process votes for this candidate again.
\item If the current attempt is fast (i.e., one of the first $Y$ attempts of a round from the perspective of the current process), and a candidate has collected votes of more than $2/3$ of all processes during the current or one of the previous attempts, the current process votes for this candidate. In the case of a tie, the candidate from the latest of all such attempts is chosen. \item If the current attempt is fast (i.e., one of the first $Y$ attempts of a round from the perspective of the current process), and a candidate has collected votes from more than $2/3$ of all processes during the current or one of the previous attempts, the current process votes for this candidate. In the case of a tie, the candidate from the latest of all such attempts is chosen.
\item If the current attempt is fast, and the previous rules do not apply, then the process votes for the candidate with the highest priority among all {\em eligible candidates}, i.e., candidates that have collected $\Approve$s (observable by the current process) from more than $2/3$ of all processes. \item If the current attempt is fast, and the previous rules do not apply, then the process votes for the candidate with the highest priority among all {\em eligible candidates}, i.e., candidates that have collected $\Approve$s (observable by the current process) from more than $2/3$ of all processes.
\item If the current attempt is slow, then the process votes only after it receives a valid $\VoteFor$ event in the same attempt. If the first rule is applicable, the process votes according to it (i.e., for the previously $\PreCommit$ed candidate). Otherwise it votes for the block candidate that is mentioned in the $\VoteFor$ event. If there are several such valid events (during the current attempt), the candidate with the smallest hash is selected (this may happen in rare situations related to different $\VoteFor$ events created in different branches of a fork, cf.~\ptref{sp:vote.fork}). \item If the current attempt is slow, then the process votes only after it receives a valid $\VoteFor$ event in the same attempt. If the first rule is applicable, the process votes according to it (i.e., for the previously $\PreCommit$ed candidate). Otherwise it votes for the block candidate that is mentioned in the $\VoteFor$ event. If there are several such valid events (during the current attempt), the candidate with the smallest hash is selected (this may happen in rare situations related to different $\VoteFor$ events created in different branches of a fork, cf.~\ptref{sp:vote.fork}).
\end{itemize} \end{itemize}
The ``null candidate'' is considered to have the least priority. It also requires an explicit $\Approve$ before being voted for (with the exception of the first two rules). The ``null candidate'' is considered to have the least priority. It also requires an explicit $\Approve$ before being voted for (with the exception of the first two rules).
\nxsubpoint\emb{Creating $\VoteFor$ events during slow attempts} \nxsubpoint\emb{Creating $\VoteFor$ events during slow attempts}
A $\VoteFor$ event is created at the beginning of a slow attempt by the {\em coordinator\/} --- the process with index $\wattempt\bmod N$ in the ordered list of all processes participating in the catchain (as usual, this means that $\VoteFor$ created by another process will be ignored by all ``good'' processes). This $\VoteFor$ event refers to one of the block candidates (including the null candidate) that have collected $\Approve$s from more than $2/3$ of all processes, usually randomly chosen among all such candidates. Essentially, this is a suggestion to vote for this block candidate directed to all other processes that do not have an active $\PreCommit$. A $\VoteFor$ event is created at the beginning of a slow attempt by the {\em coordinator\/} --- the process with index $\wattempt\bmod N$ in the ordered list of all processes participating in the catchain (as usual, this means that a $\VoteFor$ created by another process will be ignored by all ``good'' processes). This $\VoteFor$ event refers to one of the block candidates (including the null candidate) that have collected $\Approve$s from more than $2/3$ of all processes, usually randomly chosen among all such candidates. Essentially, this is a suggestion to vote for this block candidate directed to all other processes that do not have an active $\PreCommit$.
\nxpoint\emb{Validity of BCP} \nxpoint\emb{Validity of BCP}
Now we present a sketch of the proof of validity of TON Block Consensus Protocol (BCP) described above in~\ptref{p:bcp.descr}, assuming that less than one third of all processes exhibit byzantine (arbitrarily malicious protocol-violating) behavior, as it is customary for Byzantine Fault Tolerant protocols. During this subsection, we consider only one round of BCP protocol, subdivided into several attempts. Now we present a sketch of the proof of validity of TON Block Consensus Protocol (BCP) described above in~\ptref{p:bcp.descr}, assuming that less than one third of all processes exhibit byzantine (arbitrarily malicious, possibly protocol-violating) behavior, as it is customary for Byzantine Fault Tolerant protocols. During this subsection, we consider only one round of BCP, subdivided into several attempts.
\nxsubpoint\emb{Fundamental assumption}\label{sp:fund.ass} \nxsubpoint\emb{Fundamental assumption}\label{sp:fund.ass}
Let us emphasize once again that we assume that {\em less than one third of all processes are byzantine}. All other processes are assumed to be {\em good}, i.e., they follow the protocol. Let us emphasize once again that we assume that {\em less than one third of all processes are byzantine}. All other processes are assumed to be {\em good}, i.e., they follow the protocol.
@ -468,20 +470,20 @@ The reasoning in this subsection is valid for the {\em weighted variant of BCP}
We collect here some useful invariants obeyed by all BCP events during one round of BCP (inside a catchain). These invariants are enforced in two ways. Firstly, any ``good'' (non-byzantine) process will not create events violating these invariants. Secondly, even if a ``bad'' process creates an event violating these invariants, all ``good'' processes will detect this when a catchain message containing this event is delivered to BCP and ignore such events. Some possible issues related to forks (cf.~\ptref{sp:vote.fork}) remain even after these precautions; we indicate how these issues are resolved separately, and ignore them in this list. So: We collect here some useful invariants obeyed by all BCP events during one round of BCP (inside a catchain). These invariants are enforced in two ways. Firstly, any ``good'' (non-byzantine) process will not create events violating these invariants. Secondly, even if a ``bad'' process creates an event violating these invariants, all ``good'' processes will detect this when a catchain message containing this event is delivered to BCP and ignore such events. Some possible issues related to forks (cf.~\ptref{sp:vote.fork}) remain even after these precautions; we indicate how these issues are resolved separately, and ignore them in this list. So:
\begin{itemize} \begin{itemize}
\item There is at most one $\Submit$ event by each process (inside one round of BCP). \item There is at most one $\Submit$ event by each process (inside one round of BCP).
\item There is at most one $\Approve$ or $\Reject$ event by each process related to one candidate (and even to all candidates created by the same designated block producer).\footnote{In fact, $\Reject$s appear only in this restriction, and do not affect anything else. Therefore, any process can abstain from sending $\Reject$s without violating the protocol, and $\Reject$ events could have been removed from the protocol altogether. Instead, the current implementation of the protocol still generates $\Reject$s, but does not check anything on their receipt and does not remember them in the catchain state. Only a message is output into the error log, and the offending candidate is stored into a special directory for future study, because $\Reject$s usually indicate either the presence of a byzantine adversary, or a bug in the collator (block generation) or validator (block verification) software either on the node that suggested the block or on the node that created the $\Reject$ event.} This is achieved by requiring all ``good'' processes to ignore (i.e., not to create $\Approve$s or $\Reject$s for) all candidates suggested by the same producer but the very first one they have learned about. \item There is at most one $\Approve$ or $\Reject$ event by each process related to one candidate (more precisely, even if there are multiple candidates created by the same designated block producer, only one of them can be $\Approve$d by another process).\footnote{In fact, $\Reject$s appear only in this restriction, and do not affect anything else. Therefore, any process can abstain from sending $\Reject$s without violating the protocol, and $\Reject$ events could have been removed from the protocol altogether. Instead, the current implementation of the protocol still generates $\Reject$s, but does not check anything on their receipt and does not remember them in the catchain state. 
Only a message is output into the error log, and the offending candidate is stored into a special directory for future study, because $\Reject$s usually indicate either the presence of a byzantine adversary, or a bug in the collator (block generation) or validator (block verification) software either on the node that suggested the block or on the node that created the $\Reject$ event.} This is achieved by requiring all ``good'' processes to ignore (i.e., not to create $\Approve$s or $\Reject$s for) all candidates suggested by the same producer but the very first one they have learned about.
\item There is at most one $\Vote$ and at most one $\PreCommit$ event by each process during each attempt. \item There is at most one $\Vote$ and at most one $\PreCommit$ event by each process during each attempt.
\item There is at most one $\VoteFor$ event during each (slow) attempt. \item There is at most one $\VoteFor$ event during each (slow) attempt.
\item There is at most one $\CommitSign$ event by each process. \item There is at most one $\CommitSign$ event by each process.
\item During a slow attempt, each process votes either for its previously $\PreCommit$ted candidate, or for the candidate indicated in the $\VoteFor$ event of this attempt. \item During a slow attempt, each process votes either for its previously $\PreCommit$ted candidate, or for the candidate indicated in the $\VoteFor$ event of this attempt.
\end{itemize} \end{itemize}
One might somewhat improve the above statements by adding word ``valid'' where appropriate (e.g., there is at most one {\em valid\/} $\Submit$ event\dots). One might somewhat improve the above statements by adding the word ``valid'' where appropriate (e.g., there is at most one {\em valid\/} $\Submit$ event\dots).
\nxsubpoint\emb{More invariants}\label{sp:more.inv} \nxsubpoint\emb{More invariants}\label{sp:more.inv}
\begin{itemize} \begin{itemize}
\item There is at most one eligible candidate (i.e., candidate that has received $\Approve$s from more than $2/3$ of all processes) from each designated producer, and no eligible candidates from other producers. \item There is at most one eligible candidate (i.e., candidate that has received $\Approve$s from more than $2/3$ of all processes) from each designated producer, and no eligible candidates from other producers.
\item There are at most $C+1$ eligible candidates in total (at most $C$ candidates from $C$ designated producers, plus the empty candidate). \item There are at most $C+1$ eligible candidates in total (at most $C$ candidates from $C$ designated producers, plus the null candidate).
\item A candidate may be accepted only if it has collected more than $2/3$ $\PreCommit$s during the same attempt (more precisely, a candidate is accepted only if there are $\PreCommit$ events created by more than $2/3$ of all processes for this candidate and belonging to the same attempt). \item A candidate may be accepted only if it has collected more than $2/3$ $\PreCommit$s during the same attempt (more precisely, a candidate is accepted only if there are $\PreCommit$ events created by more than $2/3$ of all processes for this candidate and belonging to the same attempt).
\item A candidate may be $\Vote$d for, $\PreCommit$ted, or mentioned in a $\VoteFor$ only if it is an {\em eligible candidate}, meaning that has previously collected $\Approve$s from more than $2/3$ of all validators (i.e., a valid $\Vote$ event may be created for a candidate only if $\Approve$ events for this candidate have been previously created by more than $2/3$ of all processes and registered in catchain messages observable from the message containing the $\Vote$ event, and similarly for $\PreCommit$ and $\VoteFor$ events). \item A candidate may be $\Vote$d for, $\PreCommit$ted, or mentioned in a $\VoteFor$ only if it is an {\em eligible candidate}, meaning that it has previously collected $\Approve$s from more than $2/3$ of all validators (i.e., a valid $\Vote$ event may be created for a candidate only if $\Approve$ events for this candidate have been previously created by more than $2/3$ of all processes and registered in catchain messages observable from the message containing the $\Vote$ event, and similarly for $\PreCommit$ and $\VoteFor$ events).
\end{itemize} \end{itemize}
\nxsubpoint\emb{At most one block candidate is accepted}\label{sp:acc.unique} \nxsubpoint\emb{At most one block candidate is accepted}\label{sp:acc.unique}
@ -490,13 +492,13 @@ Now we claim that {\em at most one block candidate can be accepted (in a round o
\nxsubpoint\emb{At most one block candidate may be $\PreCommit$ted during one attempt}\label{sp:all.precomm.same} \nxsubpoint\emb{At most one block candidate may be $\PreCommit$ted during one attempt}\label{sp:all.precomm.same}
Note that all valid $\PreCommit$ events (if any) created inside the same attempt must refer to the same block candidate, by the same reasoning as in the first part of~\ptref{sp:acc.unique}: since a valid $\PreCommit$ event for a candidate $c$ may be created only after votes from more than $2/3$ of all processes are observed for this candidate inside the same attempt (and invalid $\PreCommit$s are ignored by all good processes), the existence of valid $\PreCommit$ events for different candidates $c_1$ and $c_2$ inside the same attempt would imply that more than one third of all processes have voted both for $c_1$ and $c_2$ inside this attempt, i.e., they have exhibited byzantine behavior. This is impossible in view of our fundamental assumption~\ptref{sp:fund.ass}. Note that all valid $\PreCommit$ events (if any) created inside the same attempt must refer to the same block candidate, by the same reasoning as in the first part of~\ptref{sp:acc.unique}: since a valid $\PreCommit$ event for a candidate $c$ may be created only after votes from more than $2/3$ of all processes are observed for this candidate inside the same attempt (and invalid $\PreCommit$s are ignored by all good processes), the existence of valid $\PreCommit$ events for different candidates $c_1$ and $c_2$ inside the same attempt would imply that more than one third of all processes have voted both for $c_1$ and $c_2$ inside this attempt, i.e., they have exhibited byzantine behavior. This is impossible in view of our fundamental assumption~\ptref{sp:fund.ass}.
\nxsubpoint\emb{Previous $\PreCommit$ is deactivated by the observation of a newer one}\label{sp:new.precomm.deact} \nxsubpoint\emb{A previous $\PreCommit$ is deactivated by the observation of a newer one}\label{sp:new.precomm.deact}
We claim that {\em whenever a process with an active $\PreCommit$ observes a valid $\PreCommit$ created by any process in a later attempt for a different candidate, its previously active $\PreCommit$ is deactivated}. Recall that we say that a process has an {\em active $\PreCommit$} if it has created a $\PreCommit$ for a certain candidate $c$ during a certain attempt $a$, did not create any $\PreCommit$ during any attempts $a'>a$, and did not observe votes of more than $2/3$ of all validators for any candidate $\neq c$ during any attempts $a'>a$. Any process has at most one active $\PreCommit$, and if it has one, it must vote only for the precommitted candidate. We claim that {\em whenever a process with an active $\PreCommit$ observes a valid $\PreCommit$ created by any process in a later attempt for a different candidate, its previously active $\PreCommit$ is deactivated}. Recall that we say that a process has an {\em active $\PreCommit$} if it has created a $\PreCommit$ for a certain candidate $c$ during a certain attempt $a$, did not create any $\PreCommit$ during any attempts $a'>a$, and did not observe votes of more than $2/3$ of all validators for any candidate $\neq c$ during any attempts $a'>a$. Any process has at most one active $\PreCommit$, and if it has one, it must vote only for the precommitted candidate.
Now we see that if a process with an active $\PreCommit$ for a candidate $c$ since attempt $a$ observes a valid $\PreCommit$ (usually by another process) for a candidate $c'$ created during some later attempt $a'>a$, then the first process must also observe all dependencies of the message that contains the newer $\PreCommit$; these dependencies necessarily include valid $\Vote$s from more than $2/3$ of all validators for the same candidate $c'\neq c$ created during the same attempt $a'>a$ (because otherwise the newer $\PreCommit$ would not be valid, and would be ignored by the first process); by definition, the observation of all these $\Vote$s deactivates the original $\PreCommit$. Now we see that if a process with an active $\PreCommit$ for a candidate $c$ since attempt $a$ observes a valid $\PreCommit$ (usually by another process) for a candidate $c'$ created during some later attempt $a'>a$, then the first process must also observe all dependencies of the message that contains the newer $\PreCommit$; these dependencies necessarily include valid $\Vote$s from more than $2/3$ of all validators for the same candidate $c'\neq c$ created during the same attempt $a'>a$ (because otherwise the newer $\PreCommit$ would not be valid, and would be ignored by the other process); by definition, the observation of all these $\Vote$s deactivates the original $\PreCommit$.
\nxsubpoint\emb{Assumptions for proving the convergence of the protocol}\label{sp:conv.ass} \nxsubpoint\emb{Assumptions for proving the convergence of the protocol}\label{sp:conv.ass}
Now we are going to prove that the protocol described above {\em converges\/} (i.e., terminates after accepting a block candidate) with probability one under some assumptions, which essentially tell us that there are enough ``good'' processes (i.e., processes that diligently follow the protocol and do not introduce arbitrary delays before sending their new messages), and that these good processes enjoy good network connectivity at least from time to time. More precisely, these assumptions are as follows: Now we are going to prove that the protocol described above {\em converges\/} (i.e., terminates after accepting a block candidate) with probability one under some assumptions, which essentially tell us that there are enough ``good'' processes (i.e., processes that diligently follow the protocol and do not introduce arbitrary delays before sending their new messages), and that these good processes enjoy good network connectivity at least from time to time. More precisely, our assumptions are as follows:
\begin{itemize} \begin{itemize}
\item There is a subset $I^+\subset I$ consisting of ``good'' processes and containing more than $2/3$ of all processes. \item There is a subset $I^+\subset I$ consisting of ``good'' processes and containing more than $2/3$ of all processes.
\item All processes from $I^+$ have well-synchronized clocks (differing by at most $\tau$, where $\tau$ is a bound for network latency described below). \item All processes from $I^+$ have well-synchronized clocks (differing by at most $\tau$, where $\tau$ is a bound for network latency described below).
@ -507,14 +509,14 @@ Now we are going to prove that the protocol described above {\em converges\/} (i
\end{itemize} \end{itemize}
\nxsubpoint\emb{The protocol terminates under these assumptions} \nxsubpoint\emb{The protocol terminates under these assumptions}
Now we claim that {\em (each round of) the BCP protocol as described above terminates with probability one under the assumptions listed in~\ptref{sp:conv.ass}}. The proof goes as follows. Now we claim that {\em (each round of) the BCP protocol as described above terminates with probability one under the assumptions listed in~\ptref{sp:conv.ass}}. The proof proceeds as follows.
\begin{itemize} \begin{itemize}
\item Let us assume that the protocol does not converge. Then it continues running forever. We are going to ignore the first several attempts, and consider only attempts $a_0$, $a_0+1$, $a_0+2$, \dots\ starting from some $a_0$, to be chosen later. \item Let us assume that the protocol does not converge. Then it continues running forever. We are going to ignore the first several attempts, and consider only attempts $a_0$, $a_0+1$, $a_0+2$, \dots\ starting from some $a_0$, to be chosen later.
\item Since all processes from $I^+$ continue participating in the protocol, they will create at least one message not much later than the start of the round (which may be perceived slightly differently by different processes). For instance, they will create an $\Approve$ for the null candidate not later than $\Delta_\infty$ seconds from the start of the round. Therefore, they will consider all attempts slow at most $KY$ seconds afterwards. By choosing $a_0$ appropriately, we can assume that all attempts we consider are slow from the perspective of all processes from~$I^+$. \item Since all processes from $I^+$ continue participating in the protocol, they will create at least one message not much later than the start of the round (which may be perceived slightly differently by each process). For instance, they will create an $\Approve$ for the null candidate no later than $\Delta_\infty$ seconds from the start of the round. Therefore, they will consider all attempts slow at most $KY$ seconds afterwards. By choosing $a_0$ appropriately, we can assume that all attempts we consider are slow from the perspective of all processes from~$I^+$.
\item After a ``good'' attempt $a\geq a_0$ all processes from $I^+$ will see the $\Approve$s for the null candidate created by all other processes from~$I^+$, and will deem the null candidate eligible henceforth. Since there are infinitely many ``good'' attempts, this will happen sooner or later with probability one. Therefore, we can assume (increasing $a_0$ if necessary) that there is at least one eligible candidate from the perspective of all processes from $I^+$, namely, the null candidate. \item After a ``good'' attempt $a\geq a_0$ all processes from $I^+$ will see the $\Approve$s for the null candidate created by all other processes from~$I^+$, and will deem the null candidate eligible henceforth. Since there are infinitely many ``good'' attempts, this will happen sooner or later with probability one. Therefore, we can assume (increasing $a_0$ if necessary) that there is at least one eligible candidate from the perspective of all processes from $I^+$, namely, the null candidate.
\item Furthermore, there will be infinitely many attempts $a\geq a_0$ that are perceived slow by all processes from $I^+$, that have a coordinator from $I^+$, and that are ``good'' (with respect to the network connectivity) as defined in~\ptref{sp:conv.ass}. Let us call such attempts ``very good''. \item Furthermore, there will be infinitely many attempts $a\geq a_0$ that are perceived slow by all processes from $I^+$, that have a coordinator from $I^+$, and that are ``good'' (with respect to the network connectivity) as defined in~\ptref{sp:conv.ass}. Let us call such attempts ``very good''.
\item Consider one of ``very good'' slow attempts $a$. With probability $q'>0$, its coordinator (which belongs to $I^+$) will wait for $\tau'\in(\tau,K-3\tau)$ seconds before creating its $\VoteFor$ event. Consider the most recent $\PreCommit$ event created by any process from~$I^+$; let us suppose it was created during attempt $a'<a$ for some candidate $c'$. With probability $qq'>0$, the catchain message carrying this $\PreCommit$ will be already delivered to the coordinator at the time of generation of its $\VoteFor$ event. In that case, the catchain message carrying this $\VoteFor$ will depend on this $\PreCommit(c')$ event, and all ``good'' processes that observe this $\VoteFor$ will also observe its dependencies, including this $\PreCommit(c')$. We see that {\em with probability at least $qq'$, all processes from $I^+$ that receive the $\VoteFor$ event during a ``very good'' slow attempt receive also the most recent $\PreCommit$ (if any).} \item Consider one ``very good'' slow attempt $a$. With probability $q'>0$, its coordinator (which belongs to $I^+$) will wait for $\tau'\in(\tau,K-3\tau)$ seconds before creating its $\VoteFor$ event. Consider the most recent $\PreCommit$ event created by any process from~$I^+$; let us suppose it was created during attempt $a'<a$ for some candidate $c'$. With probability $qq'>0$, the catchain message carrying this $\PreCommit$ will be already delivered to the coordinator at the time of generation of its $\VoteFor$ event. In that case, the catchain message carrying this $\VoteFor$ will depend on this $\PreCommit(c')$ event, and all ``good'' processes that observe this $\VoteFor$ will also observe its dependencies, including this $\PreCommit(c')$. We see that {\em with probability at least $qq'$, all processes from $I^+$ that receive the $\VoteFor$ event during a ``very good'' slow attempt receive also the most recent $\PreCommit$ (if any).}
\item Next, consider any process from $I^+$ that receives this $\VoteFor$, for a randomly chosen eligible candidate $c$, and suppose that there are already some $\PreCommit$s, and that the previous statement holds. Since there are at most $C+1$ eligible candidates (cf.~\ptref{sp:more.inv}), with probability at least $1/(C+1)>0$ we'll have $c=c'$, where $c'$ is the most recently $\PreCommit$ted candidate (there is at most one such candidate by~\ptref{sp:all.precomm.same}). In this case, all processes from $I^+$ will vote for $c=c'$ during this attempt immediately after they receive this $\VoteFor$ (which will be delivered to any process $j\in I^+$ less than $K-2\tau$ seconds after the beginning of the attempt with probability $qq'$). Indeed, if a process $j$ from $I^+$ did not have an active $\PreCommit$, it will vote for the value indicated in $\VoteFor$, which is $c$. If $j$ had an active $\PreCommit$, and it is as recent as possible, i.e., also created during attempt $a'$, then it must have been a $\PreCommit$ for the same value $c'=c$ (because we know about at least one valid $\PreCommit$ for $c'$ during attempt $a'$, and all other valid $\PreCommit$s during attempt $a'$ must be for the same $c'$ by~\ptref{sp:all.precomm.same}). Finally, if $j$ had an active $\PreCommit$ from an attempt $<a'$, then it will become inactive once the $\VoteFor$ with all its dependences (including the newer $\PreCommit(c')$) has been delivered to this process~$j$ (cf.~\ptref{sp:new.precomm.deact}), and the process will again vote for the value $c$ indicated in $\VoteFor$. Therefore, all processes from $I^+$ will vote for the same $c=c'$ during this attempt, less than $K-2\tau$ seconds after the beginning of the attempt (with some probability bounded away from zero). \item Next, consider any process from $I^+$ that receives this $\VoteFor$, for a randomly chosen eligible candidate $c$, and suppose that there are already some $\PreCommit$s, and that the previous statement holds. 
Since there are at most $C+1$ eligible candidates (cf.~\ptref{sp:more.inv}), with probability at least $1/(C+1)>0$ we'll have $c=c'$, where $c'$ is the most recently $\PreCommit$ted candidate (there is at most one such candidate by~\ptref{sp:all.precomm.same}). In this case, all processes from $I^+$ will vote for $c=c'$ during this attempt immediately after they receive this $\VoteFor$ (which will be delivered to any process $j\in I^+$ less than $K-2\tau$ seconds after the beginning of the attempt with probability $qq'$). Indeed, if a process $j$ from $I^+$ did not have an active $\PreCommit$, it will vote for the value indicated in $\VoteFor$, which is $c$. If $j$ had an active $\PreCommit$, and it is as recent as possible, i.e., also created during attempt $a'$, then it must have been a $\PreCommit$ for the same value $c'=c$ (because we know about at least one valid $\PreCommit$ for $c'$ during attempt $a'$, and all other valid $\PreCommit$s during attempt $a'$ must be for the same $c'$ by~\ptref{sp:all.precomm.same}). Finally, if $j$ had an active $\PreCommit$ from an attempt $<a'$, then it will become inactive once the $\VoteFor$ with all its dependencies (including the newer $\PreCommit(c')$) has been delivered to this process~$j$ (cf.~\ptref{sp:new.precomm.deact}), and the process will again vote for the value $c$ indicated in $\VoteFor$. Therefore, all processes from $I^+$ will vote for the same $c=c'$ during this attempt, less than $K-2\tau$ seconds after the beginning of the attempt (with some probability bounded away from zero).
\item If there are no $\PreCommit$s yet, then the above reasoning simplifies further: all processes from~$I^+$ that receive this $\VoteFor$ will immediately vote for the candidate $c$ suggested by this $\VoteFor$. \item If there are no $\PreCommit$s yet, then the above reasoning simplifies further: all processes from~$I^+$ that receive this $\VoteFor$ will immediately vote for the candidate $c$ suggested by this $\VoteFor$.
\item In both cases, all processes from $I^+$ will create a $\Vote$ for the same candidate $c$ less than $K-2\tau$ seconds from the beginning of the attempt, and this will happen with a positive probability bounded away from zero. \item In both cases, all processes from $I^+$ will create a $\Vote$ for the same candidate $c$ less than $K-2\tau$ seconds from the beginning of the attempt, and this will happen with a positive probability bounded away from zero.
\item Finally, all processes from $I^+$ will receive these $\Vote$s for $c$ from all processes from~$I^+$, again less than $(K-2\tau)+\tau=K-\tau$ seconds after the beginning of this attempt, i.e., still during the same attempt (even after taking into account the imperfect clock synchronization between processes from $I^+$). This means that they will all create a valid $\PreCommit$ for $c$, i.e., the protocol will accept $c$ during this attempt with probability bounded away from zero. \item Finally, all processes from $I^+$ will receive these $\Vote$s for $c$ from all processes from~$I^+$, again less than $(K-2\tau)+\tau=K-\tau$ seconds after the beginning of this attempt, i.e., still during the same attempt (even after taking into account the imperfect clock synchronization between processes from $I^+$). This means that they will all create a valid $\PreCommit$ for $c$, i.e., the protocol will accept $c$ during this attempt with probability bounded away from zero.

View file

@ -1691,6 +1691,7 @@ All these primitives first check whether there is enough space in the Builder, a
\item {\tt CF29} --- {\tt STULE4} ($x$ $b$ -- $b'$), stores a little-endian unsigned 32-bit integer. \item {\tt CF29} --- {\tt STULE4} ($x$ $b$ -- $b'$), stores a little-endian unsigned 32-bit integer.
\item {\tt CF2A} --- {\tt STILE8} ($x$ $b$ -- $b'$), stores a little-endian signed 64-bit integer. \item {\tt CF2A} --- {\tt STILE8} ($x$ $b$ -- $b'$), stores a little-endian signed 64-bit integer.
\item {\tt CF2B} --- {\tt STULE8} ($x$ $b$ -- $b'$), stores a little-endian unsigned 64-bit integer. \item {\tt CF2B} --- {\tt STULE8} ($x$ $b$ -- $b'$), stores a little-endian unsigned 64-bit integer.
\item {\tt CF30} --- {\tt BDEPTH} ($b$ -- $x$), returns the depth of {\em Builder\/} $b$. If no cell references are stored in~$b$, then $x=0$; otherwise $x$ is one plus the maximum of depths of cells referred to from~$b$.
\item {\tt CF31} --- {\tt BBITS} ($b$ -- $x$), returns the number of data bits already stored in {\em Builder\/} $b$. \item {\tt CF31} --- {\tt BBITS} ($b$ -- $x$), returns the number of data bits already stored in {\em Builder\/} $b$.
\item {\tt CF32} --- {\tt BREFS} ($b$ -- $y$), returns the number of cell references already stored in $b$. \item {\tt CF32} --- {\tt BREFS} ($b$ -- $y$), returns the number of cell references already stored in $b$.
\item {\tt CF33} --- {\tt BBITREFS} ($b$ -- $x$ $y$), returns the numbers of both data bits and cell references in $b$. \item {\tt CF33} --- {\tt BBITREFS} ($b$ -- $x$ $y$), returns the numbers of both data bits and cell references in $b$.
@ -1803,6 +1804,8 @@ All these primitives first check whether there is enough space in the Builder, a
\item {\tt D760} --- {\tt LDZEROES} ($s$ -- $n$ $s'$), returns the count $n$ of leading zero bits in $s$, and removes these bits from $s$. \item {\tt D760} --- {\tt LDZEROES} ($s$ -- $n$ $s'$), returns the count $n$ of leading zero bits in $s$, and removes these bits from $s$.
\item {\tt D761} --- {\tt LDONES} ($s$ -- $n$ $s'$), returns the count $n$ of leading one bits in $s$, and removes these bits from $s$. \item {\tt D761} --- {\tt LDONES} ($s$ -- $n$ $s'$), returns the count $n$ of leading one bits in $s$, and removes these bits from $s$.
\item {\tt D762} --- {\tt LDSAME} ($s$ $x$ -- $n$ $s'$), returns the count $n$ of leading bits equal to $0\leq x\leq 1$ in $s$, and removes these bits from $s$. \item {\tt D762} --- {\tt LDSAME} ($s$ $x$ -- $n$ $s'$), returns the count $n$ of leading bits equal to $0\leq x\leq 1$ in $s$, and removes these bits from $s$.
\item {\tt D764} --- {\tt SDEPTH} ($s$ -- $x$), returns the depth of {\em Slice\/}~$s$. If $s$ has no references, then $x=0$; otherwise $x$ is one plus the maximum of depths of cells referred to from~$s$.
\item {\tt D765} --- {\tt CDEPTH} ($c$ -- $x$), returns the depth of {\em Cell\/}~$c$. If $c$ has no references, then $x=0$; otherwise $x$ is one plus the maximum of depths of cells referred to from~$c$. If $c$ is a {\em Null\/} instead of a {\em Cell}, returns zero.
\end{itemize} \end{itemize}
\mysubsection{Continuation and control flow primitives} \mysubsection{Continuation and control flow primitives}
@ -2297,7 +2300,7 @@ The following primitives, which use the above conventions, are defined:
\nxsubpoint\emb{Outbound message and output action primitives} \nxsubpoint\emb{Outbound message and output action primitives}
\begin{itemize} \begin{itemize}
\item {\tt FB00} --- {\tt SENDRAWMSG} ($c$ $x$ -- ), sends a raw message contained in {\em Cell $c$}, which should contain a correctly serialized object {\tt Message $X$}, with the only exception that the source address is allowed to have dummy value {\tt addr\_none} (to be automatically replaced with the current smart-contract address), and {\tt ihr\_fee}, {\tt fwd\_fee}, {\tt created\_lt} and {\tt created\_at} fields can have arbitrary values (to be rewritten with correct values during the action phase of the current transaction). Integer parameter $x$ contains the flags. Currently $x=0$ is used for ordinary messages; $x=128$ is used for messages that are to carry all the remaining balance of the current smart contract (instead of the value originally indicated in the message); $x=64$ is used for messages that carry all the remaining value of the inbound message in addition to the value initially indicated in the new message (if bit 0 is not set, the gas fees are deducted from this amount); $x'=x+1$ means that the sender wants to pay transfer fees separately; $x'=x+2$ means that any errors arising while processing this message during the action phase should be ignored. \item {\tt FB00} --- {\tt SENDRAWMSG} ($c$ $x$ -- ), sends a raw message contained in {\em Cell $c$}, which should contain a correctly serialized object {\tt Message $X$}, with the only exception that the source address is allowed to have dummy value {\tt addr\_none} (to be automatically replaced with the current smart-contract address), and {\tt ihr\_fee}, {\tt fwd\_fee}, {\tt created\_lt} and {\tt created\_at} fields can have arbitrary values (to be rewritten with correct values during the action phase of the current transaction). Integer parameter $x$ contains the flags. 
Currently $x=0$ is used for ordinary messages; $x=128$ is used for messages that are to carry all the remaining balance of the current smart contract (instead of the value originally indicated in the message); $x=64$ is used for messages that carry all the remaining value of the inbound message in addition to the value initially indicated in the new message (if bit 0 is not set, the gas fees are deducted from this amount); $x'=x+1$ means that the sender wants to pay transfer fees separately; $x'=x+2$ means that any errors arising while processing this message during the action phase should be ignored. Finally, $x'=x+32$ means that the current account must be destroyed if its resulting balance is zero. This flag is usually employed together with $+128$.
\item {\tt FB02} --- {\tt RAWRESERVE} ($x$ $y$ -- ), creates an output action which would reserve exactly $x$ nanograms (if $y=0$), at most $x$ nanograms (if $y=2$), or all but $x$ nanograms (if $y=1$ or $y=3$), from the remaining balance of the account. It is roughly equivalent to creating an outbound message carrying $x$ nanograms (or $b-x$ nanograms, where $b$ is the remaining balance) to oneself, so that the subsequent output actions would not be able to spend more money than the remainder. Bit $+2$ in $y$ means that the external action does not fail if the specified amount cannot be reserved; instead, all remaining balance is reserved. Bit $+8$ in $y$ means $x\leftarrow -x$ before performing any further actions. Bit $+4$ in $y$ means that $x$ is increased by the original balance of the current account (before the compute phase), including all extra currencies, before performing any other checks and actions. Currently $x$ must be a non-negative integer, and $y$ must be in the range $0\ldots 15$. \item {\tt FB02} --- {\tt RAWRESERVE} ($x$ $y$ -- ), creates an output action which would reserve exactly $x$ nanograms (if $y=0$), at most $x$ nanograms (if $y=2$), or all but $x$ nanograms (if $y=1$ or $y=3$), from the remaining balance of the account. It is roughly equivalent to creating an outbound message carrying $x$ nanograms (or $b-x$ nanograms, where $b$ is the remaining balance) to oneself, so that the subsequent output actions would not be able to spend more money than the remainder. Bit $+2$ in $y$ means that the external action does not fail if the specified amount cannot be reserved; instead, all remaining balance is reserved. Bit $+8$ in $y$ means $x\leftarrow -x$ before performing any further actions. Bit $+4$ in $y$ means that $x$ is increased by the original balance of the current account (before the compute phase), including all extra currencies, before performing any other checks and actions. 
Currently $x$ must be a non-negative integer, and $y$ must be in the range $0\ldots 15$.
\item {\tt FB03} --- {\tt RAWRESERVEX} ($x$ $D$ $y$ -- ), similar to {\tt RAWRESERVE}, but also accepts a dictionary~$D$ (represented by a {\em Cell\/} or {\em Null\/}) with extra currencies. In this way currencies other than Grams can be reserved. \item {\tt FB03} --- {\tt RAWRESERVEX} ($x$ $D$ $y$ -- ), similar to {\tt RAWRESERVE}, but also accepts a dictionary~$D$ (represented by a {\em Cell\/} or {\em Null\/}) with extra currencies. In this way currencies other than Grams can be reserved.
\item {\tt FB04} --- {\tt SETCODE} ($c$ -- ), creates an output action that would change this smart contract code to that given by {\em Cell\/}~$c$. Notice that this change will take effect only after the successful termination of the current run of the smart contract. \item {\tt FB04} --- {\tt SETCODE} ($c$ -- ), creates an output action that would change this smart contract code to that given by {\em Cell\/}~$c$. Notice that this change will take effect only after the successful termination of the current run of the smart contract.

View file

@ -15,9 +15,9 @@ Test_Fift_testvm2_default 8a6e35fc0224398be9d2de39d31c86ea96965ef1eca2aa9e0af230
Test_Fift_testvm3_default 3c1b77471c5fd914ed8b5f528b9faed618e278693f5030b953ff150e543864ae Test_Fift_testvm3_default 3c1b77471c5fd914ed8b5f528b9faed618e278693f5030b953ff150e543864ae
Test_Fift_testvm4_default 8a6e35fc0224398be9d2de39d31c86ea96965ef1eca2aa9e0af2303150ed4a7b Test_Fift_testvm4_default 8a6e35fc0224398be9d2de39d31c86ea96965ef1eca2aa9e0af2303150ed4a7b
Test_Fift_testvm4a_default 523b561d6bf2f5ebb26a755e687bfbda8e33462c98e9978119755f79a086cf5e Test_Fift_testvm4a_default 523b561d6bf2f5ebb26a755e687bfbda8e33462c98e9978119755f79a086cf5e
Test_Fift_testvm4b_default daf8567bd58f05c10bb6596cea33b63e1061fa02dd5560db18ff22f96736f0d5 Test_Fift_testvm4b_default e6d16e7217d0b2a3f0b38be0e3af0e28a70c925ccbc1c4d9890e25b8973e565a
Test_Fift_testvm4c_default 2bbd67831d90bceaae29546ee3a58c4d376c2e8fb6a5b8ea2eae3ab8787e063e Test_Fift_testvm4c_default 2bbd67831d90bceaae29546ee3a58c4d376c2e8fb6a5b8ea2eae3ab8787e063e
Test_Fift_testvm4d_default 32eee098378e64c938dea09990287a349d0d8f6aabb7360535c782a958cd5fea Test_Fift_testvm4d_default 9e8de54cfc3676ba4e817a4bd0367655d3514b898fecb3e71178e5aff8e35f83
Test_Fift_testvm5_default bab109acfdf626a192171d74c69c3176d661a8dedf730aea616d4997b98830f1 Test_Fift_testvm5_default bab109acfdf626a192171d74c69c3176d661a8dedf730aea616d4997b98830f1
Test_Fift_testvm6_default dd6353c8f3f21cf62a4769ee1f3daaec46f43fd633ffb84c5d6535b120af9027 Test_Fift_testvm6_default dd6353c8f3f21cf62a4769ee1f3daaec46f43fd633ffb84c5d6535b120af9027
Test_Fift_testvm7_default 77f54b6c8c9a728d262e912efcc347de7014a37d08793c3adeac8b96fe063342 Test_Fift_testvm7_default 77f54b6c8c9a728d262e912efcc347de7014a37d08793c3adeac8b96fe063342

View file

@ -206,7 +206,7 @@ sync = ton.BlockIdExt;
// revision = 0 -- use default revision // revision = 0 -- use default revision
// revision = x (x > 0) -- use revision x // revision = x (x > 0) -- use revision x
getAccountAddress initial_account_state:InitialAccountState revision:int32 = AccountAddress; getAccountAddress initial_account_state:InitialAccountState revision:int32 = AccountAddress;
// guessAccountRevision initial_account_state:InitialAccountState = AccountRevisionList; guessAccountRevision initial_account_state:InitialAccountState = AccountRevisionList;
getAccountState account_address:accountAddress = FullAccountState; getAccountState account_address:accountAddress = FullAccountState;
createQuery private_key:InputKey address:accountAddress timeout:int32 action:Action = query.Info; createQuery private_key:InputKey address:accountAddress timeout:int32 action:Action = query.Info;

Binary file not shown.

View file

@ -1329,11 +1329,36 @@ td::Result<td::Bits256> get_adnl_address(td::Slice adnl_address) {
return address; return address;
} }
// Maps a TL InitialAccountState variant to the corresponding smart-contract
// code type. States that carry no wallet code (raw and test-giver accounts)
// yield an empty optional.
static td::optional<ton::SmartContractCode::Type> get_wallet_type(tonlib_api::InitialAccountState& state) {
  using Code = ton::SmartContractCode;
  using OptType = td::optional<Code::Type>;
  return downcast_call2<OptType>(
      state,
      td::overloaded(
          // No associated wallet code for these two account kinds.
          [](const tonlib_api::raw_initialAccountState&) { return OptType(); },
          [](const tonlib_api::testGiver_initialAccountState&) { return OptType(); },
          [](const tonlib_api::testWallet_initialAccountState&) { return Code::WalletV1; },
          [](const tonlib_api::wallet_initialAccountState&) { return Code::WalletV2; },
          [](const tonlib_api::wallet_v3_initialAccountState&) { return Code::WalletV3; },
          [](const tonlib_api::wallet_highload_v1_initialAccountState&) { return Code::HighloadWalletV1; },
          [](const tonlib_api::wallet_highload_v2_initialAccountState&) { return Code::HighloadWalletV2; },
          [](const tonlib_api::dns_initialAccountState&) { return Code::ManualDns; }));
}
tonlib_api::object_ptr<tonlib_api::Object> TonlibClient::do_static_request( tonlib_api::object_ptr<tonlib_api::Object> TonlibClient::do_static_request(
const tonlib_api::getAccountAddress& request) { const tonlib_api::getAccountAddress& request) {
if (!request.initial_account_state_) { if (!request.initial_account_state_) {
return status_to_tonlib_api(TonlibError::EmptyField("initial_account_state")); return status_to_tonlib_api(TonlibError::EmptyField("initial_account_state"));
} }
auto o_type = get_wallet_type(*request.initial_account_state_);
if (o_type) {
auto status = ton::SmartContractCode::validate_revision(o_type.value(), request.revision_);
if (status.is_error()) {
return status_to_tonlib_api(TonlibError::InvalidRevision());
}
}
auto r_account_address = downcast_call2<td::Result<block::StdAddress>>( auto r_account_address = downcast_call2<td::Result<block::StdAddress>>(
*request.initial_account_state_, *request.initial_account_state_,
[&request](auto&& state) { return get_account_address(state, request.revision_); }); [&request](auto&& state) { return get_account_address(state, request.revision_); });
@ -1343,6 +1368,72 @@ tonlib_api::object_ptr<tonlib_api::Object> TonlibClient::do_static_request(
return tonlib_api::make_object<tonlib_api::accountAddress>(r_account_address.ok().rserialize(true)); return tonlib_api::make_object<tonlib_api::accountAddress>(r_account_address.ok().rserialize(true));
} }
// Handles the `guessAccountRevision` TL request: given an initial account
// state, determines which code revisions of that wallet type correspond to
// accounts that actually exist (are non-empty) in the current blockchain
// state, and returns them as an accountRevisionList via `promise`.
td::Status TonlibClient::do_request(const tonlib_api::guessAccountRevision& request,
                                    td::Promise<object_ptr<tonlib_api::accountRevisionList>>&& promise) {
  if (!request.initial_account_state_) {
    return TonlibError::EmptyField("initial_account_state");
  }
  auto o_type = get_wallet_type(*request.initial_account_state_);
  if (!o_type) {
    // States without wallet code (raw / test-giver) have nothing to guess:
    // answer immediately with the single revision 0.
    promise.set_value(tonlib_api::make_object<tonlib_api::accountRevisionList>(std::vector<td::int32>{0}));
    return td::Status::OK();
  }
  // Precompute the candidate address for every known revision of this
  // wallet type; any address-derivation failure aborts the whole request.
  auto revisions = ton::SmartContractCode::get_revisions(o_type.value());
  std::vector<std::pair<int, block::StdAddress>> addresses;
  TRY_STATUS(downcast_call2<td::Status>(*request.initial_account_state_, [&revisions, &addresses](const auto& state) {
    for (auto revision : revisions) {
      TRY_RESULT(address, get_account_address(state, revision));
      addresses.push_back(std::make_pair(revision, address));
    }
    return td::Status::OK();
  }));

  auto actor_id = actor_id_++;
  // Helper actor: queries the account state of every candidate address and
  // collects the revisions whose accounts turn out to be non-empty.
  class GuessRevisions : public TonlibQueryActor {
   public:
    GuessRevisions(td::actor::ActorShared<TonlibClient> client, td::optional<ton::BlockIdExt> block_id,
                   std::vector<std::pair<int, block::StdAddress>> addresses, td::Promise<std::vector<int>> promise)
        : TonlibQueryActor(std::move(client))
        , block_id_(std::move(block_id))
        , addresses_(std::move(addresses))
        , promise_(std::move(promise)) {
    }

   private:
    td::optional<ton::BlockIdExt> block_id_;                // block to query at (empty = latest)
    std::vector<std::pair<int, block::StdAddress>> addresses_;  // (revision, derived address) pairs
    td::Promise<std::vector<int>> promise_;
    size_t left_{0};        // number of outstanding GetAccountState queries
    std::vector<int> res;   // revisions confirmed to have a non-empty account

    void start_up() {
      // Fire all state queries in parallel; each reply decrements `left_`.
      left_ += addresses_.size();
      for (auto& p : addresses_) {
        send_query(int_api::GetAccountState{p.second, block_id_.copy()},
                   promise_send_closure(td::actor::actor_id(this), &GuessRevisions::on_account_state, p.first));
      }
    }
    void on_account_state(int revision, td::Result<td::unique_ptr<AccountState>> r_state) {
      // A failed lookup or an empty account simply excludes this revision.
      if (r_state.is_ok() && r_state.ok()->get_wallet_type() != AccountState::WalletType::Empty) {
        res.push_back(revision);
      }
      left_--;
      if (left_ == 0) {
        // Last reply: deliver the collected revisions and shut down.
        promise_.set_value(std::move(res));
        stop();
      }
    }
  };

  // Register the helper actor so its lifetime is tied to this client; the
  // wrap() converts the raw revision vector into the TL response object.
  actors_[actor_id] = td::actor::create_actor<GuessRevisions>(
      "GuessRevisions", actor_shared(this, actor_id), query_context_.block_id.copy(), std::move(addresses),
      promise.wrap(
          [](auto&& x) mutable { return tonlib_api::make_object<tonlib_api::accountRevisionList>(std::move(x)); }));
  return td::Status::OK();
}
tonlib_api::object_ptr<tonlib_api::Object> TonlibClient::do_static_request( tonlib_api::object_ptr<tonlib_api::Object> TonlibClient::do_static_request(
const tonlib_api::unpackAccountAddress& request) { const tonlib_api::unpackAccountAddress& request) {
auto r_account_address = get_account_address(request.account_address_); auto r_account_address = get_account_address(request.account_address_);

View file

@ -226,6 +226,8 @@ class TonlibClient : public td::actor::Actor {
td::Status do_request(const tonlib_api::getAccountState& request, td::Status do_request(const tonlib_api::getAccountState& request,
td::Promise<object_ptr<tonlib_api::fullAccountState>>&& promise); td::Promise<object_ptr<tonlib_api::fullAccountState>>&& promise);
td::Status do_request(const tonlib_api::guessAccountRevision& request,
td::Promise<object_ptr<tonlib_api::accountRevisionList>>&& promise);
td::Status do_request(tonlib_api::sync& request, td::Promise<object_ptr<tonlib_api::ton_blockIdExt>>&& promise); td::Status do_request(tonlib_api::sync& request, td::Promise<object_ptr<tonlib_api::ton_blockIdExt>>&& promise);

View file

@ -32,6 +32,7 @@
// INVALID_ACCOUNT_ADDRESS // INVALID_ACCOUNT_ADDRESS
// INVALID_CONFIG // INVALID_CONFIG
// INVALID_PEM_KEY // INVALID_PEM_KEY
// INVALID_REVISION
// MESSAGE_TOO_LONG // MESSAGE_TOO_LONG
// EMPTY_FIELD // EMPTY_FIELD
// INVALID_FIELD // INVALID_FIELD
@ -80,6 +81,9 @@ struct TonlibError {
static td::Status InvalidPemKey() { static td::Status InvalidPemKey() {
return td::Status::Error(400, "INVALID_PEM_KEY"); return td::Status::Error(400, "INVALID_PEM_KEY");
} }
static td::Status InvalidRevision() {
return td::Status::Error(400, "INVALID_REVISION");
}
static td::Status NeedConfig() { static td::Status NeedConfig() {
return td::Status::Error(400, "NeedConfig"); return td::Status::Error(400, "NeedConfig");
} }

View file

@ -311,6 +311,7 @@ class TonlibCli : public td::actor::Actor {
td::TerminalIO::out() << "runmethod <addr> <method-id> <params>...\tRuns GET method <method-id> of account " td::TerminalIO::out() << "runmethod <addr> <method-id> <params>...\tRuns GET method <method-id> of account "
"<addr> with specified parameters\n"; "<addr> with specified parameters\n";
td::TerminalIO::out() << "getstate <key_id>\tget state of wallet with requested key\n"; td::TerminalIO::out() << "getstate <key_id>\tget state of wallet with requested key\n";
td::TerminalIO::out() << "guessrevision <key_id>\tsearch of existing accounts corresponding to the given key\n";
td::TerminalIO::out() << "getaddress <key_id>\tget address of wallet with requested key\n"; td::TerminalIO::out() << "getaddress <key_id>\tget address of wallet with requested key\n";
td::TerminalIO::out() << "dns resolve (<addr> | root) <name> <category>\n"; td::TerminalIO::out() << "dns resolve (<addr> | root) <name> <category>\n";
td::TerminalIO::out() << "dns cmd <key_id> <dns_cmd>\n"; td::TerminalIO::out() << "dns cmd <key_id> <dns_cmd>\n";
@ -416,6 +417,8 @@ class TonlibCli : public td::actor::Actor {
run_dns_cmd(parser, std::move(cmd_promise)); run_dns_cmd(parser, std::move(cmd_promise));
} else if (cmd == "gethistory") { } else if (cmd == "gethistory") {
get_history(parser.read_word(), std::move(cmd_promise)); get_history(parser.read_word(), std::move(cmd_promise));
} else if (cmd == "guessrevision") {
guess_revision(parser.read_word(), std::move(cmd_promise));
} else { } else {
cmd_promise.set_error(td::Status::Error(PSLICE() << "Unkwnown query `" << cmd << "`")); cmd_promise.set_error(td::Status::Error(PSLICE() << "Unkwnown query `" << cmd << "`"));
} }
@ -1091,6 +1094,27 @@ class TonlibCli : public td::actor::Actor {
td::SecureString secret; td::SecureString secret;
}; };
// Builds the TL initial-account-state object for the given wallet `version`
// (the numbering used by options_.wallet_version) and invokes `f` with it,
// returning whatever `f` returns. Any version not listed below falls back
// to the v3 wallet, exactly as the original if-chain did.
template <class F>
auto with_account_state(int version, std::string public_key, td::uint32 wallet_id, F&& f) {
  using tonlib_api::make_object;
  switch (version) {
    case 1:
      return f(make_object<tonlib_api::testWallet_initialAccountState>(public_key));
    case 2:
      return f(make_object<tonlib_api::wallet_initialAccountState>(public_key));
    case 4:
      return f(make_object<tonlib_api::wallet_highload_v1_initialAccountState>(public_key, wallet_id));
    case 5:
      return f(make_object<tonlib_api::wallet_highload_v2_initialAccountState>(public_key, wallet_id));
    case 6:
      return f(make_object<tonlib_api::dns_initialAccountState>(public_key, wallet_id));
    default:
      return f(make_object<tonlib_api::wallet_v3_initialAccountState>(public_key, wallet_id));
  }
}
td::Result<Address> to_account_address(td::Slice key, bool need_private_key) { td::Result<Address> to_account_address(td::Slice key, bool need_private_key) {
if (key.empty()) { if (key.empty()) {
return td::Status::Error("account address is empty"); return td::Status::Error("account address is empty");
@ -1106,27 +1130,7 @@ class TonlibCli : public td::actor::Actor {
return tonlib::TonlibClient::static_request( return tonlib::TonlibClient::static_request(
make_object<tonlib_api::getAccountAddress>(std::move(x), revision)); make_object<tonlib_api::getAccountAddress>(std::move(x), revision));
}; };
if (version == 1) { return with_account_state(version, keys_[r_key_i.ok()].public_key, wallet_id_, do_request);
return do_request(make_object<tonlib_api::testWallet_initialAccountState>(keys_[r_key_i.ok()].public_key));
}
if (version == 2) {
return do_request(make_object<tonlib_api::wallet_initialAccountState>(keys_[r_key_i.ok()].public_key));
}
if (version == 4) {
return do_request(make_object<tonlib_api::wallet_highload_v1_initialAccountState>(
keys_[r_key_i.ok()].public_key, wallet_id_));
}
if (version == 5) {
return do_request(make_object<tonlib_api::wallet_highload_v2_initialAccountState>(
keys_[r_key_i.ok()].public_key, wallet_id_));
}
if (version == 6) {
return do_request(
make_object<tonlib_api::dns_initialAccountState>(keys_[r_key_i.ok()].public_key, wallet_id_));
}
return do_request(
make_object<tonlib_api::wallet_v3_initialAccountState>(keys_[r_key_i.ok()].public_key, wallet_id_));
UNREACHABLE();
}(options_.wallet_version, options_.wallet_revision); }(options_.wallet_version, options_.wallet_revision);
if (obj->get_id() != tonlib_api::error::ID) { if (obj->get_id() != tonlib_api::error::ID) {
Address res; Address res;
@ -1336,6 +1340,17 @@ class TonlibCli : public td::actor::Actor {
promise.send_closure(td::actor::actor_id(this), &TonlibCli::get_history2, key.str())); promise.send_closure(td::actor::actor_id(this), &TonlibCli::get_history2, key.str()));
} }
// CLI `guessrevision <key_id>` command: asks tonlib which revisions of the
// currently configured wallet version (for the key's public key) correspond
// to existing accounts, and prints the resulting revision list.
void guess_revision(td::Slice key, td::Promise<td::Unit> promise) {
  TRY_RESULT_PROMISE(promise, key_i, to_key_i(key));
  // with_account_state invokes the callback synchronously, so capturing
  // the local `promise` by reference here is safe.
  with_account_state(options_.wallet_version, keys_[key_i].public_key, wallet_id_, [&](auto state) {
    using tonlib_api::make_object;
    send_query(make_object<tonlib_api::guessAccountRevision>(std::move(state)), promise.wrap([](auto revisions) {
      td::TerminalIO::out() << to_string(revisions);
      return td::Unit();
    }));
  });
}
void get_history2(td::Slice key, td::Result<tonlib_api::object_ptr<tonlib_api::fullAccountState>> r_state, void get_history2(td::Slice key, td::Result<tonlib_api::object_ptr<tonlib_api::fullAccountState>> r_state,
td::Promise<td::Unit> promise) { td::Promise<td::Unit> promise) {
TRY_RESULT_PROMISE(promise, state, std::move(r_state)); TRY_RESULT_PROMISE(promise, state, std::move(r_state));

View file

@ -97,26 +97,12 @@ void ApplyBlock::got_block_handle(BlockHandle handle) {
written_block_data(); written_block_data();
return; return;
} }
if (handle_->id().is_masterchain() && !handle_->inited_proof()) {
abort_query(td::Status::Error(ErrorCode::notready, "proof is absent"));
return;
}
if (!handle_->id().is_masterchain() && !handle_->inited_proof_link()) {
abort_query(td::Status::Error(ErrorCode::notready, "proof link is absent"));
return;
}
if (handle_->is_archived()) { if (handle_->is_archived()) {
finish_query(); finish_query();
return; return;
} }
CHECK(handle_->inited_merge_before());
CHECK(handle_->inited_split_after());
CHECK(handle_->inited_prev());
CHECK(handle_->inited_state_root_hash());
CHECK(handle_->inited_logical_time());
if (handle_->received()) { if (handle_->received()) {
written_block_data(); written_block_data();
return; return;
@ -149,6 +135,19 @@ void ApplyBlock::got_block_handle(BlockHandle handle) {
void ApplyBlock::written_block_data() { void ApplyBlock::written_block_data() {
VLOG(VALIDATOR_DEBUG) << "apply block: written block data for " << id_; VLOG(VALIDATOR_DEBUG) << "apply block: written block data for " << id_;
if (handle_->id().is_masterchain() && !handle_->inited_proof()) {
abort_query(td::Status::Error(ErrorCode::notready, "proof is absent"));
return;
}
if (!handle_->id().is_masterchain() && !handle_->inited_proof_link()) {
abort_query(td::Status::Error(ErrorCode::notready, "proof link is absent"));
return;
}
CHECK(handle_->inited_merge_before());
CHECK(handle_->inited_split_after());
CHECK(handle_->inited_prev());
CHECK(handle_->inited_state_root_hash());
CHECK(handle_->inited_logical_time());
if (handle_->is_applied() && handle_->processed()) { if (handle_->is_applied() && handle_->processed()) {
finish_query(); finish_query();
} else { } else {

View file

@ -14,7 +14,7 @@
You should have received a copy of the GNU Lesser General Public License You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>. along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
Copyright 2017-2019 Telegram Systems LLP Copyright 2017-2020 Telegram Systems LLP
*/ */
#include "external-message.hpp" #include "external-message.hpp"
#include "vm/boc.h" #include "vm/boc.h"
@ -48,6 +48,9 @@ td::Result<Ref<ExtMessageQ>> ExtMessageQ::create_ext_message(td::BufferSlice dat
if (ext_msg->get_level() != 0) { if (ext_msg->get_level() != 0) {
return td::Status::Error("external message must have zero level"); return td::Status::Error("external message must have zero level");
} }
if (ext_msg->get_depth() >= max_ext_msg_depth) {
return td::Status::Error("external message is too deep");
}
vm::CellSlice cs{vm::NoVmOrd{}, ext_msg}; vm::CellSlice cs{vm::NoVmOrd{}, ext_msg};
if (cs.prefetch_ulong(2) != 2) { // ext_in_msg_info$10 if (cs.prefetch_ulong(2) != 2) { // ext_in_msg_info$10
return td::Status::Error("external message must begin with ext_in_msg_info$10"); return td::Status::Error("external message must begin with ext_in_msg_info$10");

View file

@ -14,7 +14,7 @@
You should have received a copy of the GNU Lesser General Public License You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>. along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
Copyright 2017-2019 Telegram Systems LLP Copyright 2017-2020 Telegram Systems LLP
*/ */
#pragma once #pragma once
@ -34,6 +34,7 @@ class ExtMessageQ : public ExtMessage {
public: public:
static constexpr unsigned max_ext_msg_size = 65535; static constexpr unsigned max_ext_msg_size = 65535;
static constexpr unsigned max_ext_msg_depth = 512;
AccountIdPrefixFull shard() const override { AccountIdPrefixFull shard() const override {
return addr_prefix_; return addr_prefix_;
} }