进程调度所使用到的数据结构:
1.就绪队列
内核为每一个cpu创建一个进程就绪队列,该队列上的进程均由该cpu执行,代码如下(kernel/sched/core.c)。
/*
 * One runqueue per CPU (kernel/sched/core.c): each CPU executes only the
 * tasks queued on its own struct rq instance, avoiding cross-CPU lock
 * contention on a single global queue.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
这里通过DEFINE_PER_CPU_SHARED_ALIGNED宏定义了一个per-CPU变量runqueues,每个cpu都拥有一个自己的struct rq实例,即该cpu的就绪队列。下面看下struct rq结构体(kernel/sched/sched.h):
/*
 * Per-CPU runqueue (quoted from kernel/sched/sched.h).  One instance exists
 * per CPU (see the DEFINE_PER_CPU_SHARED_ALIGNED definition above); it holds
 * the tasks runnable on that CPU, split across per-scheduling-class
 * sub-runqueues (cfs/rt/dl), plus load-tracking and balancing state.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
#define CPU_LOAD_IDX_MAX 5
	/* per-rq load history at several decay indices, used by load balancing */
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ_COMMON
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	/* total number of context switches performed on this rq */
	u64 nr_switches;

	/* per-scheduling-class sub-runqueues: CFS, real-time, deadline */
	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;

	struct sched_avg avg;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	/*
	 * curr/idle/stop: the current, idle and stop tasks of this cpu.
	 * NOTE(review): roles inferred from the field names — confirm
	 * against the scheduler core before relying on this.
	 */
	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	/* per-rq clocks; clock_task presumably excludes time not charged
	 * to tasks (e.g. IRQ time) — verify against update_rq_clock_task() */
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	/* load-balancing topology: root domain and scheduling domain */
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;

	unsigned char idle_balance;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	/* high-resolution timer used to drive fine-grained preemption ticks */
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	/* lock-free list of tasks woken remotely, drained by this cpu */
	struct llist_head wake_list;
#endif
};