rcu_sched_clock_irq(user_tick); #ifdef CONFIG_IRQ_WORK if (in_irq()) irq_work_tick(); #endif scheduler_tick(); if (IS_ENABLED(CONFIG_POSIX_TIMERS)) run_posix_cpu_timers(); } 这段代码是从定时器中断处理程序调用的函数,用于将一个时钟滴答计入当前进程的时间。user_tick参数表示该滴答是否...
等到一定时间后rcu检测机制调用print_cpu_stall_info->touch_nmi_watchdog->touch_softlockup_watchdog->touch_softlockup_watchdog_sched将时间戳设置为 SOFTLOCKUP_RESET(即 ULONG_MAX)。 然后watchdog_timer_fn 会判断touch_ts == SOFTLOCKUP_RESET。然后__touch_watchdog()。重置时间戳。走下一轮时间判断。下...
The CONFIG_NO_HZ_IDLE=y Kconfig option causes the kernel to avoid sending scheduling-clock interrupts to idle CPUs, which is critically important both to battery-powered devices and to highly virtualized mainframes. Therefore, systems with aggressive real-time response constraints often run CONFIG_HZ...
void rcu_sched_clock_irq(int user); void rcu_report_dead(unsigned int cpu); void rcutree_migrate_callbacks(int cpu); #ifdef CONFIG_TASKS_RCU_GENERIC void rcu_init_tasks_generic(void); Expand Down 2 changes: 1 addition & 1 deletion 2 include/linux/rcutiny.h Show comments View file ...
This leads to the logging of rcu_sched detected stalls on CPUs/tasks messages. In order to get a vmcore to analyse the cause of the rcu_stall messages, it is necessary to configure the system to panic on rcu_stall events. To configure this temporarily on the command line:...
Soft lockups and RCU sched CPU stalls are detected while many CPUs are looping in a spinlock. Eventually a hard lockup is encountered, and the kernel ultimately crashes. Raw soft lockups: ... [56376.979162] NMI watchdog: BUG: soft lockup - CPU#26 stuck for 23s! [ptlrpcd_00_00:12056] ...
/*
 * Panic-notifier callback: sets rcu_cpu_stall_suppress so that no further
 * RCU CPU stall warnings are emitted once a panic is in progress (stall
 * messages would only obscure the panic output).  Always returns NOTIFY_DONE.
 */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

/*
 * Notifier block wrapping rcu_panic; presumably registered on the panic
 * notifier chain at boot — the registration itself is not visible in this
 * excerpt (TODO confirm against check_cpu_stall_init below).
 */
static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

/* Boot-time init; definition continues beyond this excerpt. */
static int __init check_cpu_stall_init(void)
[<ffffffff810253a9>] ? sched_clock+0x9/0x10 [<ffffffff814c3c96>] do_setlink+0x2e6/0xcc0 [<ffffffff810b9b64>] ? __lock_acquire+0x454/0x1b00 [<ffffffff813081c1>] ? nla_parse+0x31/0x120 [<ffffffff814c6750>] rtnl_newlink+0x5c0/0x860 ...
*/ if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) { local_irq_save(flags); rcu_momentary_dyntick_idle(); local_irq_restore(flags); } if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))) { /* * Yes, we just checked a per-CPU variable with preemption * enabled, ...
void__initrcu_init(void){intcpu;rcu_bootup_announce();rcu_init_geometry();rcu_init_one(&rcu_bh_state,&rcu_bh_data);rcu_init_one(&rcu_sched_state,&rcu_sched_data);__rcu_init_preempt();open_softirq(RCU_SOFTIRQ,rcu_process_callbacks);/* ...