#ifndef _COBALT_KERNEL_SCHED_H
#define _COBALT_KERNEL_SCHED_H

#include <linux/percpu.h>
#include <cobalt/kernel/lock.h>
#include <cobalt/kernel/thread.h>
#include <cobalt/kernel/schedqueue.h>
#include <cobalt/kernel/sched-tp.h>
#include <cobalt/kernel/sched-weak.h>
#include <cobalt/kernel/sched-sporadic.h>
#include <cobalt/kernel/sched-quota.h>
#include <cobalt/kernel/vfile.h>
#include <cobalt/kernel/assert.h>
#include <asm/xenomai/machine.h>

/* Sched status flags */
#define XNRESCHED	0x10000000	/* Needs rescheduling */
#define XNINSW		0x20000000	/* In context switch */
#define XNINTCK		0x40000000	/* In master tick handler context */

/* Sched local flags */
#define XNHTICK		0x00008000	/* Host tick pending */
#define XNINIRQ		0x00004000	/* In IRQ handling context */
#define XNHDEFER	0x00002000	/* Host tick deferred */

struct xnsched_rt {
	xnsched_queue_t runnable;	/*!< Runnable thread queue. */
};

/*!
 * \brief Scheduling information structure.
 */
struct xnsched {
	unsigned long status;		/*!< Scheduler specific status bitmask. */
	unsigned long lflags;		/*!< Scheduler specific local flags bitmask. */
	struct xnthread *curr;		/*!< Current thread. */
#ifdef CONFIG_SMP
	int cpu;			/*!< Owner CPU id. */
	cpumask_t resched;		/*!< Mask of CPUs needing rescheduling. */
#endif
	struct xnsched_rt rt;		/*!< Context of built-in real-time class. */
#ifdef CONFIG_XENO_OPT_SCHED_WEAK
	struct xnsched_weak weak;	/*!< Context of weak scheduling class. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_TP
	struct xnsched_tp tp;		/*!< Context of TP class. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	struct xnsched_sporadic pss;	/*!< Context of sporadic scheduling class. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	struct xnsched_quota quota;	/*!< Context of runtime quota scheduling. */
#endif
	volatile unsigned inesting;	/*!< Interrupt nesting level. */
	struct xntimer htimer;		/*!< Host timer. */
	struct xntimer rrbtimer;	/*!< Round-robin timer. */
	struct xnthread rootcb;		/*!< Root thread control block. */
#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
	struct xnthread *last;
#endif
#ifdef CONFIG_XENO_ARCH_FPU
	struct xnthread *fpuholder;	/*!< Thread owning the current FPU context. */
#endif
#ifdef CONFIG_XENO_OPT_WATCHDOG
	struct xntimer wdtimer;		/*!< Watchdog timer object. */
	int wdcount;			/*!< Watchdog tick count. */
#endif
#ifdef CONFIG_XENO_OPT_STATS
	xnticks_t last_account_switch;	/*!< Last account switch date (ticks). */
	xnstat_exectime_t *current_account;	/*!< Currently active account. */
#endif
};
DECLARE_PER_CPU(struct xnsched, nksched);
extern cpumask_t cobalt_cpu_affinity;

extern struct list_head nkthreadq;

extern int cobalt_nrthreads;

#ifdef CONFIG_XENO_OPT_VFILE
extern struct xnvfile_rev_tag nkthreadlist_tag;	/* Snapshot revision tag. */
#endif

union xnsched_policy_param;
struct xnsched_class {
	void (*sched_init)(struct xnsched *sched);
	void (*sched_enqueue)(struct xnthread *thread);
	void (*sched_dequeue)(struct xnthread *thread);
	void (*sched_requeue)(struct xnthread *thread);
	struct xnthread *(*sched_pick)(struct xnsched *sched);
	void (*sched_tick)(struct xnsched *sched);
	void (*sched_rotate)(struct xnsched *sched,
			     const union xnsched_policy_param *p);
	void (*sched_migrate)(struct xnthread *thread,
			      struct xnsched *sched);
	void (*sched_setparam)(struct xnthread *thread,
			       const union xnsched_policy_param *p);
	void (*sched_getparam)(struct xnthread *thread,
			       union xnsched_policy_param *p);
	void (*sched_trackprio)(struct xnthread *thread,
				const union xnsched_policy_param *p);
	int (*sched_declare)(struct xnthread *thread,
			     const union xnsched_policy_param *p);
	void (*sched_forget)(struct xnthread *thread);
	void (*sched_kick)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_VFILE
	int (*sched_init_vfile)(struct xnsched_class *schedclass,
				struct xnvfile_directory *vfroot);
	void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
#endif
	int nthreads;
	struct xnsched_class *next;
	int weight;
	int policy;
	const char *name;
};
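/*
 * Usage sketch (illustrative only, not part of this header): a
 * scheduling class fills in the handlers it implements and leaves
 * optional ones NULL; the core only invokes handlers such as
 * sched_declare, sched_forget or sched_kick when they are non-NULL.
 * All "example_" identifiers below are hypothetical.
 *
 *	static struct xnthread *example_pick(struct xnsched *sched)
 *	{
 *		return NULL;	// nothing runnable in this class
 *	}
 *
 *	static struct xnsched_class example_class = {
 *		.sched_init	= example_init,
 *		.sched_enqueue	= example_enqueue,
 *		.sched_dequeue	= example_dequeue,
 *		.sched_requeue	= example_requeue,
 *		.sched_pick	= example_pick,
 *		.weight		= XNSCHED_CLASS_WEIGHT(1),
 *		.name		= "example",
 *	};
 */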
#define XNSCHED_CLASS_WEIGHT(n)		(n * XNSCHED_CLASS_WEIGHT_FACTOR)

/* Placeholder for current thread priority */
#define XNSCHED_RUNPRIO		0x80000000

#define xnsched_for_each_thread(__thread)	\
	list_for_each_entry(__thread, &nkthreadq, glink)

#ifdef CONFIG_SMP
static inline int xnsched_cpu(struct xnsched *sched)
{
	return sched->cpu;
}
#else /* !CONFIG_SMP */
static inline int xnsched_cpu(struct xnsched *sched)
{
	return 0;
}
#endif /* CONFIG_SMP */
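/*
 * Usage sketch (illustrative only): xnsched_for_each_thread() walks
 * the global nkthreadq list; the caller is assumed to hold nklock to
 * keep the list stable.
 *
 *	struct xnthread *thread;
 *	spl_t s;
 *
 *	xnlock_get_irqsave(&nklock, s);
 *	xnsched_for_each_thread(thread) {
 *		if (xnthread_test_state(thread, XNROOT))
 *			continue;	// skip the per-CPU root threads
 *		// inspect thread here
 *	}
 *	xnlock_put_irqrestore(&nklock, s);
 */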
static inline struct xnsched *xnsched_struct(int cpu)
{
	return &per_cpu(nksched, cpu);
}

static inline struct xnsched *xnsched_current(void)
{
	/* IRQs off or nklock held. */
	return raw_cpu_ptr(&nksched);
}

static inline struct xnthread *xnsched_current_thread(void)
{
	return xnsched_current()->curr;
}
/* Test resched flag of given sched. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
	return sched->status & XNRESCHED;
}

/* Set self resched flag for the current scheduler. */
static inline void xnsched_set_self_resched(struct xnsched *sched)
{
	sched->status |= XNRESCHED;
}
#define xnsched_realtime_domain  cobalt_pipeline.domain

/* Set resched flag for the given scheduler. */
#ifdef CONFIG_SMP

static inline void xnsched_set_resched(struct xnsched *sched)
{
	struct xnsched *current_sched = xnsched_current();

	if (current_sched == sched)
		current_sched->status |= XNRESCHED;
	else if (!xnsched_resched_p(sched)) {
		cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched);
		sched->status |= XNRESCHED;
		current_sched->status |= XNRESCHED;
	}
}

#define xnsched_realtime_cpus    cobalt_pipeline.supported_cpus

static inline int xnsched_supported_cpu(int cpu)
{
	return cpumask_test_cpu(cpu, &xnsched_realtime_cpus);
}
#else /* !CONFIG_SMP */

static inline void xnsched_set_resched(struct xnsched *sched)
{
	xnsched_set_self_resched(sched);
}

#define xnsched_realtime_cpus CPU_MASK_ALL

static inline int xnsched_supported_cpu(int cpu)
{
	return 1;
}

#endif /* !CONFIG_SMP */
#define for_each_realtime_cpu(cpu)		\
	for_each_online_cpu(cpu)		\
		if (xnsched_supported_cpu(cpu))	\

int ___xnsched_run(struct xnsched *sched);

void __xnsched_run_handler(void);
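/*
 * Usage sketch (illustrative only): for_each_realtime_cpu() narrows
 * the online CPU map down to the CPUs Cobalt schedules on, so per-CPU
 * scheduler state can be visited as follows:
 *
 *	int cpu;
 *
 *	for_each_realtime_cpu(cpu) {
 *		struct xnsched *sched = xnsched_struct(cpu);
 *		// e.g. test sched->status & XNRESCHED per CPU
 *	}
 */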
static inline int __xnsched_run(struct xnsched *sched)
{
	/*
	 * Reschedule only when XNRESCHED is pending, and neither an
	 * interrupt handler nor an unlocked context switch is in
	 * progress on this CPU.
	 */
	if (((sched->status|sched->lflags) &
	     (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
		return 0;

	return ___xnsched_run(sched);
}
/**
 * @brief The rescheduling procedure.
 */
static inline int xnsched_run(void)
{
	struct xnsched *sched = xnsched_current();
	/*
	 * No rescheduling takes place from this point if the current
	 * thread holds the scheduler lock.
	 */
	if (unlikely(sched->curr->lock_count > 0))
		return 0;

	return __xnsched_run(sched);
}
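/*
 * Usage sketch (illustrative only): callers typically change thread
 * states under nklock, then run the rescheduling procedure once all
 * updates are in. xnthread_resume() is assumed to raise XNRESCHED on
 * the target scheduler when a switch is required.
 *
 *	xnlock_get_irqsave(&nklock, s);
 *	xnthread_resume(thread, XNDELAY);
 *	xnlock_put_irqrestore(&nklock, s);
 *	xnsched_run();
 */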
void xnsched_lock(void);

void xnsched_unlock(void);
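/*
 * Usage sketch (illustrative only): the scheduler lock keeps the
 * current thread running across the locked section; xnsched_run()
 * defers to the matching xnsched_unlock() while curr->lock_count > 0.
 *
 *	xnsched_lock();
 *	// preemption-free section for the current thread
 *	xnsched_unlock();	// may reschedule if XNRESCHED got raised
 */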
static inline int xnsched_interrupt_p(void)
{
	return xnsched_current()->lflags & XNINIRQ;
}

static inline int xnsched_root_p(void)
{
	return xnthread_test_state(xnsched_current_thread(), XNROOT);
}

static inline int xnsched_unblockable_p(void)
{
	return xnsched_interrupt_p() || xnsched_root_p();
}

static inline int xnsched_primary_p(void)
{
	return !xnsched_unblockable_p();
}
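/*
 * Usage sketch (illustrative only): blocking services must run on
 * behalf of a primary mode Cobalt thread, neither over an IRQ handler
 * nor over the root (Linux) thread, which is what these predicates
 * encode.
 *
 *	if (!xnsched_primary_p())
 *		return -EPERM;	// may not block from this context
 *	// ...block on some synchronization object...
 */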
#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH

struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

#define xnsched_resched_after_unlocked_switch() xnsched_run()

static inline int
xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
	return sched->status & XNRESCHED;
}
#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */

static inline struct xnsched *
xnsched_finish_unlocked_switch(struct xnsched *sched)
{
	XENO_BUG_ON(COBALT, !hard_irqs_disabled());
	return xnsched_current();
}

static inline void xnsched_resched_after_unlocked_switch(void) { }

static inline int
xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
	return 0;
}

#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
#ifdef CONFIG_XENO_OPT_WATCHDOG
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
	sched->wdcount = 0;
}
#else /* !CONFIG_XENO_OPT_WATCHDOG */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
}
#endif /* CONFIG_XENO_OPT_WATCHDOG */
#include <cobalt/kernel/sched-idle.h>
#include <cobalt/kernel/sched-rt.h>

int xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);
void xnsched_init(struct xnsched *sched, int cpu);

void xnsched_destroy(struct xnsched *sched);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);

int xnsched_set_policy(struct xnthread *thread,
		       struct xnsched_class *sched_class,
		       const union xnsched_policy_param *p);

void xnsched_track_policy(struct xnthread *thread,
			  struct xnthread *target);

void xnsched_migrate(struct xnthread *thread,
		     struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
			     struct xnsched *sched);
/**
 * @brief Rotate a scheduler runqueue.
 */
static inline void xnsched_rotate(struct xnsched *sched,
				  struct xnsched_class *sched_class,
				  const union xnsched_policy_param *sched_param)
{
	sched_class->sched_rotate(sched, sched_param);
}
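/*
 * Usage sketch (illustrative only): a round-robin tick could rotate
 * the RT runqueue at a given priority level. The parameter layout is
 * assumed from sched-rt.h (struct xnsched_rt_param).
 *
 *	union xnsched_policy_param param = { .rt = { .prio = 10 } };
 *
 *	xnsched_rotate(sched, &xnsched_class_rt, &param);
 */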
static inline int xnsched_init_thread(struct xnthread *thread)
{
	int ret = 0;

	xnsched_idle_init_thread(thread);
	xnsched_rt_init_thread(thread);

#ifdef CONFIG_XENO_OPT_SCHED_TP
	ret = xnsched_tp_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	ret = xnsched_sporadic_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	ret = xnsched_quota_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */

	return ret;
}
static inline int xnsched_root_priority(struct xnsched *sched)
{
	return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
	return sched->rootcb.sched_class;
}
static inline void xnsched_tick(struct xnsched *sched)
{
	struct xnthread *curr = sched->curr;
	struct xnsched_class *sched_class = curr->sched_class;
	/*
	 * A thread that undergoes round-robin scheduling only
	 * consumes its time slice when it runs within its own
	 * scheduling class, which excludes temporary PI boosts, and
	 * does not hold the scheduler lock.
	 */
	if (sched_class == curr->base_class &&
	    sched_class->sched_tick &&
	    xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB &&
	    curr->lock_count == 0)
		sched_class->sched_tick(sched);
}
static inline int xnsched_declare(struct xnsched_class *sched_class,
				  struct xnthread *thread,
				  const union xnsched_policy_param *p)
{
	int ret;

	if (sched_class->sched_declare) {
		ret = sched_class->sched_declare(thread, p);
		if (ret)
			return ret;
	}
	if (sched_class != thread->base_class)
		sched_class->nthreads++;

	return 0;
}
#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_requeue(thread);
}

static inline void xnsched_setparam(struct xnthread *thread,
				    const union xnsched_policy_param *p)
{
	thread->sched_class->sched_setparam(thread, p);
	thread->wprio = thread->cprio + thread->sched_class->weight;
}

static inline void xnsched_getparam(struct xnthread *thread,
				    union xnsched_policy_param *p)
{
	thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
				     const union xnsched_policy_param *p)
{
	thread->sched_class->sched_trackprio(thread, p);
	thread->wprio = thread->cprio + thread->sched_class->weight;
}

static inline void xnsched_forget(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->base_class;

	--sched_class->nthreads;

	if (sched_class->sched_forget)
		sched_class->sched_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->base_class;

	xnthread_set_info(thread, XNKICKED);

	if (sched_class->sched_kick)
		sched_class->sched_kick(thread);

	xnsched_set_resched(thread->sched);
}
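/*
 * Note on weighted priorities (illustrative): wprio merges threads
 * from different classes on a single scale by adding the class weight
 * to the thread's current priority. Assuming
 * XNSCHED_CLASS_WEIGHT_FACTOR is 1024, a thread at cprio 10 in a
 * class declared with XNSCHED_CLASS_WEIGHT(3) gets
 * wprio = 10 + 3072 = 3082, outranking every thread of any
 * lighter-weight class.
 */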
#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */

static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_requeue(thread);
}

static inline void xnsched_setparam(struct xnthread *thread,
				    const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_setparam(thread, p);
	else
		__xnsched_idle_setparam(thread, p);

	thread->wprio = thread->cprio + sched_class->weight;
}

static inline void xnsched_getparam(struct xnthread *thread,
				    union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_getparam(thread, p);
	else
		__xnsched_idle_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
				     const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_trackprio(thread, p);
	else
		__xnsched_idle_trackprio(thread, p);

	thread->wprio = thread->cprio + sched_class->weight;
}

static inline void xnsched_forget(struct xnthread *thread)
{
	--thread->base_class->nthreads;
	__xnsched_rt_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
	xnthread_set_info(thread, XNKICKED);
	xnsched_set_resched(thread->sched);
}

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */

#endif /* !_COBALT_KERNEL_SCHED_H */