mirror of https://github.com/ton-blockchain/ton synced 2025-03-09 15:40:10 +00:00

updated submodules, bugfixes

- added new fift/func code for validator complaint creation
- bugfixes in validator
- updates in tonlib
- new versions of rocksdb/abseil
- hardfork support
ton 2020-04-27 16:01:46 +04:00
parent 16a4566091
commit 9f008b129f
129 changed files with 8438 additions and 879 deletions

@@ -163,12 +163,12 @@ struct MsgProcessedUpto {
   MsgProcessedUpto(ton::ShardId _shard, ton::BlockSeqno _mcseqno, ton::LogicalTime _lt, td::ConstBitPtr _hash)
       : shard(_shard), mc_seqno(_mcseqno), last_inmsg_lt(_lt), last_inmsg_hash(_hash) {
   }
-  bool operator<(const MsgProcessedUpto& other) const & {
+  bool operator<(const MsgProcessedUpto& other) const& {
     return shard < other.shard || (shard == other.shard && mc_seqno < other.mc_seqno);
   }
-  bool contains(const MsgProcessedUpto& other) const &;
+  bool contains(const MsgProcessedUpto& other) const&;
   bool contains(ton::ShardId other_shard, ton::LogicalTime other_lt, td::ConstBitPtr other_hash,
-                ton::BlockSeqno other_mc_seqno) const &;
+                ton::BlockSeqno other_mc_seqno) const&;
   // NB: this is for checking whether we have already imported an internal message
   bool already_processed(const EnqueuedMsgDescr& msg) const;
   bool can_check_processed() const {
@@ -596,6 +596,62 @@
  td::Status validate(td::CancellationToken cancellation_token = {});
};

// compute the share of shardchain blocks generated by each validator using Monte Carlo method
class MtCarloComputeShare {
  int K, N;
  long long iterations;
  std::vector<double> W;
  std::vector<double> CW, RW;
  std::vector<std::pair<double, double>> H;
  std::vector<int> A;
  double R0;
  bool ok;

 public:
  MtCarloComputeShare(int subset_size, const std::vector<double>& weights, long long iteration_count = 1000000)
      : K(subset_size), N((int)weights.size()), iterations(iteration_count), W(weights), ok(false) {
    compute();
  }
  MtCarloComputeShare(int subset_size, int set_size, const double* weights, long long iteration_count = 1000000)
      : K(subset_size), N(set_size), iterations(iteration_count), W(weights, weights + set_size), ok(false) {
    compute();
  }
  bool is_ok() const {
    return ok;
  }
  const double* share_array() const {
    return ok ? RW.data() : nullptr;
  }
  const double* weights_array() const {
    return ok ? W.data() : nullptr;
  }
  double operator[](int i) const {
    return ok ? RW.at(i) : -1.;
  }
  double share(int i) const {
    return ok ? RW.at(i) : -1.;
  }
  double weight(int i) const {
    return ok ? W.at(i) : -1.;
  }
  int size() const {
    return N;
  }
  int subset_size() const {
    return K;
  }
  long long performed_iterations() const {
    return iterations;
  }

 private:
  bool set_error() {
    return ok = false;
  }
  bool compute();
  void gen_vset();
};
int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, ton::ShardIdFull subshard);
std::ostream& operator<<(std::ostream& os, const ShardId& shard_id);
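
The header only declares the heavy lifting (compute() and gen_vset() are defined elsewhere), but the public interface suggests the intended call pattern: construct the object with the subset size and the validators' weights (the Monte Carlo run happens in the constructor), check is_ok(), then read the estimated per-validator share of generated blocks. Below is a minimal usage sketch under those assumptions; the block_share_estimate helper, the weight values, the include path, and the block namespace qualification are illustrative guesses, not part of the commit.

#include <cstddef>
#include <iostream>
#include <vector>

#include "block/block.h"  // assumed header location for MtCarloComputeShare

// Hypothetical helper: estimate the fraction of shardchain blocks each validator
// would generate, given per-validator weights, when validator subsets of size
// `subset_size` are drawn according to those weights.
std::vector<double> block_share_estimate(const std::vector<double>& weights, int subset_size) {
  // Constructor runs the Monte Carlo simulation (default 1000000 iterations).
  block::MtCarloComputeShare shares(subset_size, weights);
  std::vector<double> result;
  if (!shares.is_ok()) {
    return result;  // empty on failure, mirroring the class's nullptr / -1. error convention
  }
  for (int i = 0; i < shares.size(); i++) {
    result.push_back(shares.share(i));
  }
  return result;
}

int main() {
  // Hypothetical validator weights (e.g. proportional to stake).
  std::vector<double> weights{10.0, 5.0, 2.5, 1.0};
  auto shares = block_share_estimate(weights, /* subset_size = */ 2);
  for (std::size_t i = 0; i < shares.size(); i++) {
    std::cout << "validator " << i << ": estimated share " << shares[i] << "\n";
  }
  return 0;
}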