mirror of
https://github.com/ton-blockchain/ton
synced 2025-03-09 15:40:10 +00:00
integrating the existing state of TON Storage / TON Payments / CPS Fift development branches
This commit is contained in:
parent 040df63c98
commit 4e2624459b

153 changed files with 10760 additions and 1695 deletions

@@ -106,7 +106,8 @@ class Scheduler {
   };
 
   enum Mode { Running, Paused };
-  Scheduler(std::vector<NodeInfo> infos, Mode mode = Paused) : infos_(std::move(infos)) {
+  Scheduler(std::vector<NodeInfo> infos, bool skip_timeouts = false, Mode mode = Paused)
+      : infos_(std::move(infos)), skip_timeouts_(skip_timeouts) {
     init();
     if (mode == Running) {
       start();

@@ -184,6 +185,7 @@ class Scheduler {
   std::shared_ptr<core::SchedulerGroupInfo> group_info_;
   std::vector<td::unique_ptr<core::Scheduler>> schedulers_;
   bool is_started_{false};
+  bool skip_timeouts_{false};
 
   void init() {
     CHECK(infos_.size() < 256);

@@ -191,7 +193,8 @@ class Scheduler {
     group_info_ = std::make_shared<core::SchedulerGroupInfo>(infos_.size());
     td::uint8 id = 0;
     for (const auto &info : infos_) {
-      schedulers_.emplace_back(td::make_unique<core::Scheduler>(group_info_, core::SchedulerId{id}, info.cpu_threads_));
+      schedulers_.emplace_back(
+          td::make_unique<core::Scheduler>(group_info_, core::SchedulerId{id}, info.cpu_threads_, skip_timeouts_));
       id++;
     }
   }

@@ -250,7 +253,7 @@ T &current_actor() {
 inline void send_message(core::ActorInfo &actor_info, core::ActorMessage message) {
   auto scheduler_context_ptr = core::SchedulerContext::get();
   if (scheduler_context_ptr == nullptr) {
-    LOG(ERROR) << "send to actor is silently ignored";
+    //LOG(ERROR) << "send to actor is silently ignored";
     return;
   }
   auto &scheduler_context = *scheduler_context_ptr;

@@ -263,10 +266,11 @@ inline void send_message(ActorRef actor_ref, core::ActorMessage message) {
   message.set_link_token(actor_ref.link_token);
   send_message(actor_ref.actor_info, std::move(message));
 }
+
 inline void send_message_later(core::ActorInfo &actor_info, core::ActorMessage message) {
   auto scheduler_context_ptr = core::SchedulerContext::get();
   if (scheduler_context_ptr == nullptr) {
-    LOG(ERROR) << "send to actor is silently ignored";
+    //LOG(ERROR) << "send to actor is silently ignored";
     return;
   }
   auto &scheduler_context = *scheduler_context_ptr;

@@ -285,7 +289,7 @@ template <class ExecuteF, class ToMessageF>
 void send_immediate(ActorRef actor_ref, ExecuteF &&execute, ToMessageF &&to_message) {
   auto scheduler_context_ptr = core::SchedulerContext::get();
   if (scheduler_context_ptr == nullptr) {
-    LOG(ERROR) << "send to actor is silently ignored";
+    //LOG(ERROR) << "send to actor is silently ignored";
     return;
   }
   auto &scheduler_context = *scheduler_context_ptr;
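
The same edit recurs in every send_* helper in this header: the LOG(ERROR) diagnostic is commented out, so a send attempted without a surrounding scheduler context is now dropped with no trace at all. A caller that cares can test for the context itself. A small sketch: the helper name is hypothetical, the accessor is the one used in the guards above.

#include "td/actor/actor.h"

// Hypothetical helper: true iff the current thread runs inside a scheduler
// context, i.e. sends from here will be delivered rather than ignored.
inline bool can_send_to_actors() {
  return td::actor::core::SchedulerContext::get() != nullptr;
}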

@@ -368,28 +372,26 @@ void send_closure_later(ActorRef actor_ref, ArgsT &&... args) {
 inline void send_signals(ActorRef actor_ref, ActorSignals signals) {
   auto scheduler_context_ptr = core::SchedulerContext::get();
   if (scheduler_context_ptr == nullptr) {
-    LOG(ERROR) << "send to actor is silently ignored";
+    //LOG(ERROR) << "send to actor is silently ignored";
     return;
   }
   auto &scheduler_context = *scheduler_context_ptr;
-  core::ActorExecutor executor(actor_ref.actor_info, scheduler_context,
-                               core::ActorExecutor::Options().with_has_poll(scheduler_context.has_poll()));
-  if (executor.can_send_immediate()) {
-    return executor.send_immediate(signals.raw());
-  }
-  executor.send(signals.raw());
+  core::ActorExecutor executor(
+      actor_ref.actor_info, scheduler_context,
+      core::ActorExecutor::Options().with_has_poll(scheduler_context.has_poll()).with_signals(signals.raw()));
 }
 
 inline void send_signals_later(ActorRef actor_ref, ActorSignals signals) {
   auto scheduler_context_ptr = core::SchedulerContext::get();
   if (scheduler_context_ptr == nullptr) {
-    LOG(ERROR) << "send to actor is silently ignored";
+    //LOG(ERROR) << "send to actor is silently ignored";
     return;
   }
   auto &scheduler_context = *scheduler_context_ptr;
   core::ActorExecutor executor(actor_ref.actor_info, scheduler_context,
-                               core::ActorExecutor::Options().with_has_poll(scheduler_context.has_poll()));
-  executor.send((signals | ActorSignals::pause()).raw());
+                               core::ActorExecutor::Options()
+                                   .with_has_poll(scheduler_context.has_poll())
+                                   .with_signals((signals | ActorSignals::pause()).raw()));
 }
 
 inline void register_actor_info_ptr(core::ActorInfoPtr actor_info_ptr) {
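
Both send_signals variants previously constructed an executor and then pushed the raw signals through a second call (send_immediate on the fast path, send otherwise). After this change the signals are handed to the executor up front via the new Options::with_signals, so delivery rides on ActorExecutor's own start/flush path (see the ActorExecutor hunks below). A standalone sketch of that fluent-options shape, with simplified names rather than the real tdactor types:

#include <cstdint>

using RawSignals = std::uint32_t;

struct Options {
  bool has_poll{false};
  RawSignals signals{0};
  Options &with_has_poll(bool value) {
    has_poll = value;
    return *this;
  }
  Options &with_signals(RawSignals raw) {
    signals = raw;
    return *this;
  }
};

struct Executor {
  // Signals arrive with the constructor, so applying them can reuse the
  // executor's startup logic instead of requiring a separate send() call.
  explicit Executor(Options options) : options_(options) {
  }
  Options options_;
};

int main() {
  Executor executor(Options().with_has_poll(true).with_signals(1u << 2));
}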

@@ -81,7 +81,7 @@ void ActorExecutor::start() noexcept {
     return;
   }
 
-  ActorSignals signals;
+  ActorSignals signals{options_.signals};
   SCOPE_EXIT {
     pending_signals_.add_signals(signals);
   };

@@ -209,6 +209,7 @@ bool ActorExecutor::flush_one_signal(ActorSignals &signals) {
     case ActorSignals::Alarm:
       if (actor_execute_context_.get_alarm_timestamp() && actor_execute_context_.get_alarm_timestamp().is_in_past()) {
         actor_execute_context_.alarm_timestamp() = Timestamp::never();
+        actor_info_.set_alarm_timestamp(Timestamp::never());
         actor_info_.actor().alarm();
       }
       break;
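
The flush_one_signal change keeps the two copies of the alarm timestamp in sync: besides clearing the execute context's alarm, the actor's stored timestamp in ActorInfo is reset to never, so an alarm that has already fired is not treated as still pending. For context, the usual alarm pattern in tdactor-based code looks like this (an illustrative actor, not part of this commit):

#include "td/actor/actor.h"
#include "td/utils/Time.h"

class Ticker : public td::actor::Actor {
  void start_up() override {
    alarm_timestamp() = td::Timestamp::in(1.0);  // fire in one second
  }
  void alarm() override {
    // Re-arm explicitly; a fired alarm stays cleared in ActorInfo unless
    // the actor sets a fresh timestamp.
    alarm_timestamp() = td::Timestamp::in(1.0);
  }
};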

@@ -42,8 +42,14 @@ class ActorExecutor {
       this->has_poll = new_has_poll;
       return *this;
     }
+    Options &with_signals(ActorSignals signals) {
+      this->signals = signals;
+      return *this;
+    }
+
     bool from_queue{false};
     bool has_poll{false};
+    ActorSignals signals;
   };
 
   ActorExecutor(ActorInfo &actor_info, SchedulerDispatcher &dispatcher, Options options)

@@ -37,7 +37,7 @@ void IoWorker::tear_down() {
 #endif
 }
 
-bool IoWorker::run_once(double timeout) {
+bool IoWorker::run_once(double timeout, bool skip_timeouts) {
   auto &dispatcher = *SchedulerContext::get();
 #if TD_PORT_POSIX
   auto &poll = SchedulerContext::get()->get_poll();

@@ -87,6 +87,14 @@ bool IoWorker::run_once(double timeout) {
     if (timeout_ms < 0) {
       timeout_ms = 0;
     }
+
+    if (timeout_ms > 0 && skip_timeouts) {
+      timeout_ms = 0;
+      //auto *heap_node = heap.top();
+      //auto *actor_info = ActorInfo::from_heap_node(heap_node);
+      //LOG(ERROR) << "Jump: " << wakeup_timestamp.at() << " " << actor_info->get_name();
+      Time::jump_in_future(wakeup_timestamp.at() + 1e-9);
+    }
     //const int thirty_seconds = 30 * 1000;
     //if (timeout_ms > thirty_seconds) {
     //timeout_ms = thirty_seconds;
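
This hunk is the heart of skip_timeouts mode: when the poll would otherwise sleep until the nearest timer (timeout_ms > 0), the wait is collapsed to zero and the process-local clock is jumped just past that timer's wakeup point (the + 1e-9 moves it strictly beyond), making the alarm due immediately. Sketched as a test-style actor; everything here that is not visible in the diff is an assumption:

#include "td/actor/actor.h"
#include "td/utils/Time.h"

// Under a scheduler built with skip_timeouts=true, this hour-long alarm
// fires on the next scheduler iteration: run_once() zeroes the poll timeout
// and jumps virtual time past the wakeup timestamp instead of sleeping.
class SlowTimer : public td::actor::Actor {
  void start_up() override {
    alarm_timestamp() = td::Timestamp::in(3600.0);
  }
  void alarm() override {
    td::actor::core::SchedulerContext::get()->stop();  // assumed way to stop the group
  }
};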

@@ -33,7 +33,7 @@ class IoWorker {
   void start_up();
   void tear_down();
 
-  bool run_once(double timeout);
+  bool run_once(double timeout, bool skip_timeouts = false);
 
  private:
   MpscPollableQueue<SchedulerMessage> &queue_;

@@ -24,7 +24,6 @@
 namespace td {
 namespace actor {
 namespace core {
-
 std::atomic<bool> debug;
 void set_debug(bool flag) {
   debug = flag;

@@ -34,8 +33,11 @@ bool need_debug() {
   return debug.load(std::memory_order_relaxed);
 }
 
-Scheduler::Scheduler(std::shared_ptr<SchedulerGroupInfo> scheduler_group_info, SchedulerId id, size_t cpu_threads_count)
-    : scheduler_group_info_(std::move(scheduler_group_info)), cpu_threads_(cpu_threads_count) {
+Scheduler::Scheduler(std::shared_ptr<SchedulerGroupInfo> scheduler_group_info, SchedulerId id, size_t cpu_threads_count,
+                     bool skip_timeouts)
+    : scheduler_group_info_(std::move(scheduler_group_info))
+    , cpu_threads_(cpu_threads_count)
+    , skip_timeouts_(skip_timeouts) {
   scheduler_group_info_->active_scheduler_count++;
   info_ = &scheduler_group_info_->schedulers.at(id.value());
   info_->id = id;

@@ -101,7 +103,7 @@ bool Scheduler::run(double timeout) {
   if (SchedulerContext::get()->is_stop_requested()) {
     res = false;
   } else {
-    res = io_worker_->run_once(timeout);
+    res = io_worker_->run_once(timeout, skip_timeouts_);
   }
   if (!res) {
     if (!is_stopped_) {

@@ -331,6 +333,7 @@ void Scheduler::close_scheduler_group(SchedulerGroupInfo &group_info) {
       worker->actor_info_creator.clear();
     }
   }
+
   //for (auto &scheduler : group_info.schedulers) {
   //scheduler.io_worker->actor_info_creator.ensure_empty();
   //for (auto &worker : scheduler.cpu_workers) {

@@ -200,7 +200,8 @@ class Scheduler {
     return thread_id;
   }
 
-  Scheduler(std::shared_ptr<SchedulerGroupInfo> scheduler_group_info, SchedulerId id, size_t cpu_threads_count);
+  Scheduler(std::shared_ptr<SchedulerGroupInfo> scheduler_group_info, SchedulerId id, size_t cpu_threads_count,
+            bool skip_timeouts = false);
 
   Scheduler(const Scheduler &) = delete;
   Scheduler &operator=(const Scheduler &) = delete;

@@ -241,6 +242,7 @@ class Scheduler {
   Poll poll_;
   KHeap<double> heap_;
   std::unique_ptr<IoWorker> io_worker_;
+  bool skip_timeouts_{false};
 
   class ContextImpl : public SchedulerContext {
    public: