Mirror of https://github.com/ton-blockchain/ton, synced 2025-03-09 15:40:10 +00:00
integrating the existing state of TON Storage / TON Payments / CPS Fift development branches
Commit 4e2624459b (parent 040df63c98): 153 changed files with 10760 additions and 1695 deletions. The excerpt below covers only the actor-scheduler (tdactor) hunks.
@@ -81,7 +81,7 @@ void ActorExecutor::start() noexcept {
     return;
   }
 
-  ActorSignals signals;
+  ActorSignals signals{options_.signals};
   SCOPE_EXIT {
     pending_signals_.add_signals(signals);
   };

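For context (the scraped page lost the per-file headers, so file attribution here is inferred from the class names, presumably td/actor/core/ActorExecutor.cpp): the hunk above seeds the executor's working signal set from the new Options::signals field instead of starting empty, and the SCOPE_EXIT block hands any unprocessed signals back to pending_signals_ on every path out of start(). SCOPE_EXIT is td's scope-guard macro; a minimal usage sketch, assuming td/utils/ScopeGuard.h and td's logging:

    #include "td/utils/ScopeGuard.h"
    #include "td/utils/logging.h"

    void demo(bool ready) {
      SCOPE_EXIT {
        // Runs on every path out of demo(), including the early return,
        // mirroring how start() returns leftover signals above.
        LOG(INFO) << "cleanup ran";
      };
      if (!ready) {
        return;  // the guard still fires here
      }
      // ... normal work ...
    }
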
@@ -209,6 +209,7 @@ bool ActorExecutor::flush_one_signal(ActorSignals &signals) {
     case ActorSignals::Alarm:
       if (actor_execute_context_.get_alarm_timestamp() && actor_execute_context_.get_alarm_timestamp().is_in_past()) {
         actor_execute_context_.alarm_timestamp() = Timestamp::never();
+        actor_info_.set_alarm_timestamp(Timestamp::never());
         actor_info_.actor().alarm();
       }
       break;

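This second hunk is the alarm-delivery path: when the Alarm signal is flushed and the stored deadline is already in the past, the commit now also resets ActorInfo's cached alarm timestamp (the new set_alarm_timestamp line), keeping it in sync with the execute context before alarm() runs. From an actor's point of view this is the usual td timer pattern; a minimal sketch using the public td::actor wrapper (Actor, alarm_timestamp(), alarm()), which is assumed from td's API rather than shown in this diff:

    #include "td/actor/actor.h"
    #include "td/utils/logging.h"

    class Ticker final : public td::actor::Actor {
      void start_up() override {
        // Ask the scheduler to deliver ActorSignals::Alarm in one second.
        alarm_timestamp() = td::Timestamp::in(1.0);
      }
      void alarm() override {
        // Invoked via flush_one_signal() once the deadline has passed.
        LOG(INFO) << "tick";
        alarm_timestamp() = td::Timestamp::in(1.0);  // re-arm
      }
    };
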
@@ -42,8 +42,14 @@ class ActorExecutor {
       this->has_poll = new_has_poll;
       return *this;
     }
+    Options &with_signals(ActorSignals signals) {
+      this->signals = signals;
+      return *this;
+    }
+
     bool from_queue{false};
     bool has_poll{false};
+    ActorSignals signals;
   };
 
   ActorExecutor(ActorInfo &actor_info, SchedulerDispatcher &dispatcher, Options options)

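This header hunk (ActorExecutor.h, by the same inference) adds a signals field to the small builder-style Options struct, plus a with_signals() setter in the style of the existing ones; its value is exactly what the first hunk consumes through options_.signals. A hypothetical call site, where actor_info and dispatcher come from the surrounding scheduler code, with_has_poll() is inferred from the has_poll lines above, and ActorSignals::one() is assumed from td's ActorSignals API:

    auto options = ActorExecutor::Options()
                       .with_has_poll(true)
                       .with_signals(ActorSignals::one(ActorSignals::Alarm));
    // The executor starts with the Alarm signal already pending, so the
    // actor's alarm is checked on this very execution.
    ActorExecutor executor(actor_info, dispatcher, options);
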
@@ -37,7 +37,7 @@ void IoWorker::tear_down() {
 #endif
 }
 
-bool IoWorker::run_once(double timeout) {
+bool IoWorker::run_once(double timeout, bool skip_timeouts) {
   auto &dispatcher = *SchedulerContext::get();
 #if TD_PORT_POSIX
   auto &poll = SchedulerContext::get()->get_poll();

@@ -87,6 +87,14 @@ bool IoWorker::run_once(double timeout) {
   if (timeout_ms < 0) {
     timeout_ms = 0;
   }
+
+  if (timeout_ms > 0 && skip_timeouts) {
+    timeout_ms = 0;
+    //auto *heap_node = heap.top();
+    //auto *actor_info = ActorInfo::from_heap_node(heap_node);
+    //LOG(ERROR) << "Jump: " << wakeup_timestamp.at() << " " << actor_info->get_name();
+    Time::jump_in_future(wakeup_timestamp.at() + 1e-9);
+  }
   //const int thirty_seconds = 30 * 1000;
   //if (timeout_ms > thirty_seconds) {
   //timeout_ms = thirty_seconds;

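These two hunks (presumably IoWorker.cpp) are the heart of the change: when skip_timeouts is set and poll would otherwise sleep for timeout_ms, run_once() instead polls with a zero timeout and jumps the clock just past the earliest pending wakeup; the + 1e-9 nudge lands strictly after the deadline, so the timer fires on the next heap check instead of in real time, which is useful for accelerated tests or simulation. That only works if td's clock supports a forward-only virtual offset, which is what Time::jump_in_future suggests. A self-contained sketch of those semantics (illustrative only, not td's actual td::Time implementation):

    #include <algorithm>
    #include <chrono>

    struct VirtualClock {
      // Wall time in seconds as a double, the unit Timestamp::at() uses.
      static double wall_now() {
        using namespace std::chrono;
        return duration<double>(steady_clock::now().time_since_epoch()).count();
      }
      static double now() {
        return wall_now() + offset_;
      }
      // Advance "now" so that now() >= at; the offset only grows, so
      // time never jumps backwards.
      static void jump_in_future(double at) {
        offset_ = std::max(offset_, at - wall_now());
      }
      static inline double offset_ = 0;
    };
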
@@ -33,7 +33,7 @@ class IoWorker {
   void start_up();
   void tear_down();
 
-  bool run_once(double timeout);
+  bool run_once(double timeout, bool skip_timeouts = false);
 
  private:
   MpscPollableQueue<SchedulerMessage> &queue_;

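The declaration gets the default (skip_timeouts = false) while the definition in IoWorker.cpp takes the parameter without one; C++ allows the default argument in only one of the two, and putting it in the header keeps every existing run_once(timeout) call compiling unchanged. A standalone illustration of that split:

    // header-style declaration: the default argument lives here
    struct Worker {
      bool run_once(double timeout, bool skip_timeouts = false);
    };

    // source-style definition: the default is not repeated here
    bool Worker::run_once(double timeout, bool skip_timeouts) {
      return timeout > 0 && !skip_timeouts;  // placeholder body
    }

    int main() {
      Worker w;
      w.run_once(10.0);        // old call shape, skip_timeouts == false
      w.run_once(10.0, true);  // new opt-in call shape
    }
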
@@ -24,7 +24,6 @@
 namespace td {
 namespace actor {
 namespace core {
-
 std::atomic<bool> debug;
 void set_debug(bool flag) {
   debug = flag;

@@ -34,8 +33,11 @@ bool need_debug() {
   return debug.load(std::memory_order_relaxed);
 }
 
-Scheduler::Scheduler(std::shared_ptr<SchedulerGroupInfo> scheduler_group_info, SchedulerId id, size_t cpu_threads_count)
-    : scheduler_group_info_(std::move(scheduler_group_info)), cpu_threads_(cpu_threads_count) {
+Scheduler::Scheduler(std::shared_ptr<SchedulerGroupInfo> scheduler_group_info, SchedulerId id, size_t cpu_threads_count,
+                     bool skip_timeouts)
+    : scheduler_group_info_(std::move(scheduler_group_info))
+    , cpu_threads_(cpu_threads_count)
+    , skip_timeouts_(skip_timeouts) {
   scheduler_group_info_->active_scheduler_count++;
   info_ = &scheduler_group_info_->schedulers.at(id.value());
   info_->id = id;

@@ -101,7 +103,7 @@ bool Scheduler::run(double timeout) {
     if (SchedulerContext::get()->is_stop_requested()) {
       res = false;
     } else {
-      res = io_worker_->run_once(timeout);
+      res = io_worker_->run_once(timeout, skip_timeouts_);
     }
     if (!res) {
       if (!is_stopped_) {

@@ -331,6 +333,7 @@ void Scheduler::close_scheduler_group(SchedulerGroupInfo &group_info) {
       worker->actor_info_creator.clear();
     }
   }
+
   //for (auto &scheduler : group_info.schedulers) {
   //scheduler.io_worker->actor_info_creator.ensure_empty();
   //for (auto &worker : scheduler.cpu_workers) {

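Across the Scheduler.cpp hunks (the namespace and close_scheduler_group hunks only remove or add blank lines) the flag makes a single hop: the constructor stores it in skip_timeouts_, and run() forwards it to the IoWorker on every I/O iteration. A minimal standalone rendering of that store-once, forward-every-iteration pattern, with illustrative names rather than the commit's classes:

    struct Worker {
      bool run_once(double /*timeout*/, bool /*skip_timeouts*/) {
        return false;  // pretend a stop was requested
      }
    };

    class Loop {
     public:
      explicit Loop(bool skip_timeouts) : skip_timeouts_(skip_timeouts) {
      }
      bool run(double timeout) {
        // Fixed at construction, forwarded on every iteration.
        return worker_.run_once(timeout, skip_timeouts_);
      }

     private:
      Worker worker_;
      bool skip_timeouts_{false};
    };
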
@@ -200,7 +200,8 @@ class Scheduler {
     return thread_id;
   }
 
-  Scheduler(std::shared_ptr<SchedulerGroupInfo> scheduler_group_info, SchedulerId id, size_t cpu_threads_count);
+  Scheduler(std::shared_ptr<SchedulerGroupInfo> scheduler_group_info, SchedulerId id, size_t cpu_threads_count,
+            bool skip_timeouts = false);
 
   Scheduler(const Scheduler &) = delete;
   Scheduler &operator=(const Scheduler &) = delete;

@@ -241,6 +242,7 @@ class Scheduler {
   Poll poll_;
   KHeap<double> heap_;
   std::unique_ptr<IoWorker> io_worker_;
+  bool skip_timeouts_{false};
 
   class ContextImpl : public SchedulerContext {
    public:

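With the default in the header, existing three-argument constructions keep their old behaviour while a test harness can opt in to time skipping. A hypothetical harness; the include path, the SchedulerGroupInfo constructor argument, SchedulerId{0}, and the run-loop shape are sketched from context and from the run() hunk above, not taken verbatim from this commit:

    #include "td/actor/core/Scheduler.h"

    #include <memory>

    int main() {
      using namespace td::actor::core;

      auto group_info = std::make_shared<SchedulerGroupInfo>(1);
      Scheduler scheduler(group_info, SchedulerId{0}, /*cpu_threads_count=*/0,
                          /*skip_timeouts=*/true);

      // An actor that arms alarm_timestamp() = Timestamp::in(30.0) now hears
      // alarm() almost immediately: run_once() zeroes the poll timeout and
      // jumps virtual time past the deadline instead of sleeping 30 seconds.
      while (scheduler.run(/*timeout=*/10)) {
        // keep running until a stop is requested
      }
    }
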