The core of sched_info_arrive(), annotated:

	delta = 0;
	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);        /* clears sched_info.last_queued back to 0 */
	t->sched_info.run_delay += delta;    /* accumulate the time spent waiting on the runqueue */
	t->sched_info.last_arrival = now;    /* record "now" in last_arrival: the start of this latest stint on the CPU */
	t->sched_info.pcount++;              /* count one more time the task was scheduled onto a CPU */
	rq_sched_info_arrive(rq, delta);     /* fold the delay into the per-runqueue statistics */
}
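These counters are easy to observe from user space. The sketch below assumes a kernel built with CONFIG_SCHED_INFO/CONFIG_SCHEDSTATS so that /proc/<pid>/schedstat exists; its three fields map onto the bookkeeping above: time spent on the CPU, sched_info.run_delay (time waiting on a runqueue), and sched_info.pcount (how many times the task got the CPU).

#include <stdio.h>

int main(void)
{
	unsigned long long on_cpu_ns, run_delay_ns, pcount;
	FILE *f = fopen("/proc/self/schedstat", "r");   /* per-task schedstats file */

	if (!f) {
		perror("open /proc/self/schedstat");
		return 1;
	}
	if (fscanf(f, "%llu %llu %llu", &on_cpu_ns, &run_delay_ns, &pcount) == 3)
		printf("on_cpu=%lluns run_delay=%lluns pcount=%llu\n",
		       on_cpu_ns, run_delay_ns, pcount);
	fclose(f);
	return 0;
}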
	if (likely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));	/* the child starts with clean scheduling statistics */
#endif
#if defined(CONFIG_SMP)
	p->on_cpu = 0;
#endif
	init_task_preempt_count(p);
#ifdef CONFIG_SMP
	plist_node_init(&p->pushable_tasks, MAX_PRIO);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
#endif
【1】 sched.h

The first data structure is task_struct, which the kernel uses to represent a process and which holds all of its information. It is defined in include/linux/sched.h; let's first take a look at its definition:

struct task_struct {
	volatile long		state;		/* -1 unrunnable, 0 runnable, >0 stopped */
	void			*stack;
	atomic_t		usage;
	unsigned int		flags;		/* per process flags ... */
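As a quick way to see task_struct in action, here is a minimal kernel-module sketch (illustrative only; the module name "taskwalk" is made up) that walks every process on the system with for_each_process() and prints the pid and comm fields of each task_struct:

#include <linux/module.h>
#include <linux/sched/signal.h>		/* for_each_process() (declared in linux/sched.h on older kernels) */
#include <linux/rcupdate.h>

static int __init taskwalk_init(void)
{
	struct task_struct *p;

	rcu_read_lock();			/* the global task list is RCU-protected */
	for_each_process(p)
		pr_info("pid=%d comm=%s\n", p->pid, p->comm);
	rcu_read_unlock();
	return 0;
}

static void __exit taskwalk_exit(void)
{
}

module_init(taskwalk_init);
module_exit(taskwalk_exit);
MODULE_LICENSE("GPL");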
/*
 * task_struct.thread_info.preempt_count
 *
 *	PREEMPT_MASK:		0x000000ff
 *	SOFTIRQ_MASK:		0x0000ff00
 *	HARDIRQ_MASK:		0x000f0000
 *	NMI_MASK:		0x00100000
 *	PREEMPT_NEED_RESCHED:	0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	...
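The masks in the comment follow directly from these BITS values. A small userspace sketch that reconstructs the remaining shifts from the bit widths above and decodes a raw preempt_count value (the sample value is arbitrary):

#include <stdio.h>

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)	/* 8  */
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)	/* 16 */
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)	/* 20 */

/* build a mask of `bits` ones starting at `shift` */
#define MASK(bits, shift)	(((1UL << (bits)) - 1) << (shift))

int main(void)
{
	unsigned long pc = 0x00010102;	/* arbitrary example value */

	printf("preempt disables: %lu\n", (pc & MASK(PREEMPT_BITS, PREEMPT_SHIFT)) >> PREEMPT_SHIFT);
	printf("softirq nesting : %lu\n", (pc & MASK(SOFTIRQ_BITS, SOFTIRQ_SHIFT)) >> SOFTIRQ_SHIFT);
	printf("hardirq nesting : %lu\n", (pc & MASK(HARDIRQ_BITS, HARDIRQ_SHIFT)) >> HARDIRQ_SHIFT);
	printf("in NMI          : %lu\n", (pc & MASK(NMI_BITS, NMI_SHIFT)) >> NMI_SHIFT);
	return 0;
}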
	unsigned short			migration_flags;

	struct sched_info		sched_info;

	struct list_head		tasks;		/* links this task_struct into the kernel's global task list */

	struct mm_struct		*mm;
	struct mm_struct		*active_mm;

	int				exit_state;
	int				exit_code;
	int				exit_signal;
	/* The signal sent when the parent dies: */
	int				pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long			jobctl;
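A word on the mm/active_mm pair above: kernel threads own no user address space, so their mm is NULL and they run on a borrowed active_mm. A tiny illustrative helper (hypothetical, not a kernel API) expressing the usual check:

#include <linux/sched.h>	/* struct task_struct, PF_KTHREAD */

/* Hypothetical helper: kernel threads are marked PF_KTHREAD and have no user
 * mm of their own (p->mm == NULL); they borrow p->active_mm while running. */
static inline bool is_kernel_thread(const struct task_struct *p)
{
	return (p->flags & PF_KTHREAD) || !p->mm;
}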
As shown in Table 26-2, the arguments to many sched probes consist of a pointer to an lwpsinfo_t and a pointer to a psinfo_t, indicating, respectively, the thread and the process that contains that thread. These structures are described in detail in the lwpsinfo_t and psinfo_t sections.

cpuinfo_t

The cpuinfo_t structure defines a CPU. As shown in Table 26-2, the arguments to both the enqueue and dequeue probes include a pointer to a cpuinfo_t ...
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
	p->wake_cpu = cpu;
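The smp_wmb() here pairs with a read barrier on whichever CPU later observes the new ->cpu value, so the earlier per-task updates are guaranteed to be visible first. Below is a userspace analogue of that publish pattern using C11 atomics (release/acquire fences standing in for smp_wmb()/smp_rmb(); the payload/published names are illustrative, not kernel symbols):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int payload;			/* stands in for the "per-task data" */
static atomic_int published;		/* stands in for the published ->cpu value */

static void *writer(void *arg)
{
	payload = 42;						/* update the data first */
	atomic_thread_fence(memory_order_release);		/* ~ smp_wmb() */
	atomic_store_explicit(&published, 1, memory_order_relaxed);	/* ~ WRITE_ONCE() */
	return NULL;
}

static void *reader(void *arg)
{
	while (!atomic_load_explicit(&published, memory_order_relaxed))
		;						/* spin until the flag is seen */
	atomic_thread_fence(memory_order_acquire);		/* ~ the pairing read barrier */
	printf("payload = %d\n", payload);			/* guaranteed to print 42 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}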
	uclamp_rq_inc(rq, p);

	if (!(flags & ENQUEUE_RESTORE)) {
		sched_info_enqueue(rq, p);
		psi_enqueue(p, flags & ENQUEUE_MIGRATED);
	}

	if (sched_core_enabled(rq))
		sched_core_enqueue(rq, p);
}

@@ -2041,7 +2046,7 @@ inline bool dequeue_task(struct rq *rq, struct task_struct ...
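For context, the sched_info_enqueue() called above is the counterpart of sched_info_arrive(): it stamps the task with the runqueue clock when the task is queued, which is exactly the last_queued value that sched_info_arrive() later turns into run_delay. Roughly, as a sketch of recent kernels (older ones spell it sched_info_queued() and details vary):

static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
{
	if (!t->sched_info.last_queued)
		t->sched_info.last_queued = rq_clock(rq);	/* remember when waiting started */
}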
		   run_delay, rq->rq_sched_info.pcount);

	seq_printf(seq, "\n");

#ifdef CONFIG_SMP
	/* domain-specific stats */
	rcu_read_lock();
	for_each_domain(cpu, sd) {
		enum cpu_idle_type itype;

		cpumask_scnprintf(mask_str, mask_len, sched_domain_span(sd));
		seq_printf(seq, "domain%d %s", ...
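The seq_printf() calls above are what produce /proc/schedstat. A small userspace sketch that reads it back; it only assumes what the snippet itself shows (per-CPU "cpu..." lines whose last printed values are rq_sched_info.run_delay and rq_sched_info.pcount, followed by "domainN <cpumask>" lines under CONFIG_SMP), and field positions can differ between kernel versions:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[1024];
	FILE *f = fopen("/proc/schedstat", "r");

	if (!f) {
		perror("open /proc/schedstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "cpu", 3))
			printf("runqueue stats : %s", line);	/* per-CPU counters incl. run_delay, pcount */
		else if (!strncmp(line, "domain", 6))
			printf("domain stats   : %s", line);	/* per-sched-domain counters (SMP only) */
	}
	fclose(f);
	return 0;
}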