Xenomai  3.0.5
sched.h
/*
 * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
 *
 * Xenomai is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version.
 *
 * Xenomai is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Xenomai; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#ifndef _COBALT_KERNEL_SCHED_H
#define _COBALT_KERNEL_SCHED_H

#include <linux/percpu.h>
#include <cobalt/kernel/lock.h>
#include <cobalt/kernel/thread.h>
#include <cobalt/kernel/schedqueue.h>
#include <cobalt/kernel/sched-tp.h>
#include <cobalt/kernel/sched-weak.h>
#include <cobalt/kernel/sched-sporadic.h>
#include <cobalt/kernel/sched-quota.h>
#include <cobalt/kernel/vfile.h>
#include <cobalt/kernel/assert.h>
#include <asm/xenomai/machine.h>
/* Sched status flags */
#define XNRESCHED	0x10000000	/* Needs rescheduling */
#define XNINSW		0x20000000	/* In context switch */
#define XNINTCK	0x40000000	/* In master tick handler context */

/* Sched local flags */
#define XNHTICK	0x00008000	/* Host tick pending */
#define XNINIRQ	0x00004000	/* In IRQ handling context */
#define XNHDEFER	0x00002000	/* Host tick deferred */

/* Per-CPU run queue context of the built-in real-time class. */
struct xnsched_rt {
	xnsched_queue_t runnable;	/* Runnable thread queue. */
};

/*
 * Scheduling information structure (one instance per CPU).
 */
struct xnsched {
	/* Scheduler specific status bitmask. */
	unsigned long status;
	/* Scheduler specific local flags bitmask. */
	unsigned long lflags;
	/* Current thread. */
	struct xnthread *curr;
#ifdef CONFIG_SMP
	/* Owner CPU id. */
	int cpu;
	/* Mask of CPUs needing rescheduling. */
	cpumask_t resched;
#endif
	/* Context of the built-in real-time class. */
	struct xnsched_rt rt;
#ifdef CONFIG_XENO_OPT_SCHED_WEAK
	/* Context of the weak scheduling class. */
	struct xnsched_weak weak;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_TP
	/* Context of the TP (time partitioning) class. */
	struct xnsched_tp tp;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	/* Context of the sporadic scheduling class. */
	struct xnsched_sporadic pss;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	/* Context of the runtime quota scheduling class. */
	struct xnsched_quota quota;
#endif
	/* Interrupt nesting level. */
	volatile unsigned inesting;
	/* Host timer. */
	struct xntimer htimer;
	/* Round-robin timer. */
	struct xntimer rrbtimer;
	/* Root thread control block. */
	struct xnthread rootcb;
#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
	struct xnthread *last;
#endif
#ifdef CONFIG_XENO_ARCH_FPU
	/* Thread owning the current FPU context. */
	struct xnthread *fpuholder;
#endif
#ifdef CONFIG_XENO_OPT_WATCHDOG
	/* Watchdog timer object. */
	struct xntimer wdtimer;
	/* Watchdog tick count. */
	int wdcount;
#endif
#ifdef CONFIG_XENO_OPT_STATS
	/* Last account switch date (ticks). */
	xnticks_t last_account_switch;
	/* Currently active account. */
	xnstat_exectime_t *current_account;
#endif
};

DECLARE_PER_CPU(struct xnsched, nksched);

extern cpumask_t cobalt_cpu_affinity;

extern struct list_head nkthreadq;

extern int cobalt_nrthreads;

#ifdef CONFIG_XENO_OPT_VFILE
extern struct xnvfile_rev_tag nkthreadlist_tag;
#endif

union xnsched_policy_param;

struct xnsched_class {
	void (*sched_init)(struct xnsched *sched);
	void (*sched_enqueue)(struct xnthread *thread);
	void (*sched_dequeue)(struct xnthread *thread);
	void (*sched_requeue)(struct xnthread *thread);
	struct xnthread *(*sched_pick)(struct xnsched *sched);
	void (*sched_tick)(struct xnsched *sched);
	void (*sched_rotate)(struct xnsched *sched,
			const union xnsched_policy_param *p);
	void (*sched_migrate)(struct xnthread *thread,
			struct xnsched *sched);
	void (*sched_setparam)(struct xnthread *thread,
			const union xnsched_policy_param *p);
	void (*sched_getparam)(struct xnthread *thread,
			union xnsched_policy_param *p);
	void (*sched_trackprio)(struct xnthread *thread,
			const union xnsched_policy_param *p);
	int (*sched_declare)(struct xnthread *thread,
			const union xnsched_policy_param *p);
	void (*sched_forget)(struct xnthread *thread);
	void (*sched_kick)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_VFILE
	int (*sched_init_vfile)(struct xnsched_class *schedclass,
			struct xnvfile_directory *vfroot);
	void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
#endif
	int nthreads;
	struct xnsched_class *next;
	int weight;
	int policy;
	const char *name;
};

#define XNSCHED_CLASS_WEIGHT(n)	(n * XNSCHED_CLASS_WEIGHT_FACTOR)

/* Placeholder for current thread priority */
#define XNSCHED_RUNPRIO	0x80000000
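
Each scheduling policy is implemented as one statically allocated descriptor of this type, wired with the hooks above and visited by decreasing weight when the core looks for the next thread to run. A minimal sketch follows; the names and the weight are hypothetical, and the queueing hooks are deliberately left out, so this is illustrative only, not a functional class:

/* Hypothetical pick hook: return the next runnable thread owned by
 * this class, or NULL to let a lower-weight class (ultimately the
 * idle class) run instead. */
static struct xnthread *example_sched_pick(struct xnsched *sched)
{
	return NULL;	/* nothing runnable in this class */
}

static struct xnsched_class example_sched_class = {
	/* The queueing hooks (sched_enqueue/sched_dequeue/sched_requeue)
	 * and the parameter hooks are elided; a real class must provide
	 * them as well. */
	.sched_pick	= example_sched_pick,
	.weight		= XNSCHED_CLASS_WEIGHT(1),
	.name		= "example",
};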

#define xnsched_for_each_thread(__thread)	\
	list_for_each_entry(__thread, &nkthreadq, glink)
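
The traversal walks the global thread queue (nkthreadq), so callers are expected to hold the nklock across it to keep the list stable. An illustrative sketch (the dump function itself is hypothetical):

static void dump_thread_names(void)
{
	struct xnthread *thread;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);	/* freeze nkthreadq */
	xnsched_for_each_thread(thread)
		printk("cobalt thread: %s\n", xnthread_name(thread));
	xnlock_put_irqrestore(&nklock, s);
}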

#ifdef CONFIG_SMP
static inline int xnsched_cpu(struct xnsched *sched)
{
	return sched->cpu;
}
#else /* !CONFIG_SMP */
static inline int xnsched_cpu(struct xnsched *sched)
{
	return 0;
}
#endif /* CONFIG_SMP */

static inline struct xnsched *xnsched_struct(int cpu)
{
	return &per_cpu(nksched, cpu);
}

static inline struct xnsched *xnsched_current(void)
{
	/* IRQs off */
	return raw_cpu_ptr(&nksched);
}

static inline struct xnthread *xnsched_current_thread(void)
{
	return xnsched_current()->curr;
}

/* Test resched flag of given sched. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
	return sched->status & XNRESCHED;
}

/* Set self resched flag for the current scheduler. */
static inline void xnsched_set_self_resched(struct xnsched *sched)
{
	sched->status |= XNRESCHED;
}

#define xnsched_realtime_domain	cobalt_pipeline.domain

/* Set resched flag for the given scheduler. */
#ifdef CONFIG_SMP

static inline void xnsched_set_resched(struct xnsched *sched)
{
	struct xnsched *current_sched = xnsched_current();

	if (current_sched == sched)
		current_sched->status |= XNRESCHED;
	else if (!xnsched_resched_p(sched)) {
		cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched);
		sched->status |= XNRESCHED;
		current_sched->status |= XNRESCHED;
	}
}

#define xnsched_realtime_cpus	cobalt_pipeline.supported_cpus

static inline int xnsched_supported_cpu(int cpu)
{
	return cpumask_test_cpu(cpu, &xnsched_realtime_cpus);
}

#else /* !CONFIG_SMP */

static inline void xnsched_set_resched(struct xnsched *sched)
{
	xnsched_set_self_resched(sched);
}

#define xnsched_realtime_cpus	CPU_MASK_ALL

static inline int xnsched_supported_cpu(int cpu)
{
	return 1;
}

#endif /* !CONFIG_SMP */

#define for_each_realtime_cpu(cpu)		\
	for_each_online_cpu(cpu)		\
		if (xnsched_supported_cpu(cpu))	\

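Since the last line of the macro ends with a line continuation, the statement written right after for_each_realtime_cpu() becomes the body of the inner if, i.e. it runs once per online real-time CPU. A small illustrative sketch:

static void count_realtime_cpus(void)
{
	int cpu, count = 0;

	for_each_realtime_cpu(cpu)
		count++;	/* body of the if () above */

	printk("Cobalt runs on %d CPU(s)\n", count);
}
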
int ___xnsched_run(struct xnsched *sched);

void __xnsched_run_handler(void);

static inline int __xnsched_run(struct xnsched *sched)
{
	/*
	 * NOTE: Since ___xnsched_run() won't run immediately if an
	 * escalation to primary domain is needed, we won't use
	 * critical scheduler information before we actually run in
	 * primary mode; therefore we can first test the scheduler
	 * status then escalate.
	 *
	 * Running in the primary domain means that no Linux-triggered
	 * CPU migration may occur from that point either. Finally,
	 * since migration is always a self-directed operation for
	 * Xenomai threads, we can safely read the scheduler state
	 * bits without holding the nklock.
	 *
	 * Said differently, if we race here because of a CPU
	 * migration, it must have been Linux-triggered because we run
	 * in secondary mode; in which case we will escalate to the
	 * primary domain, then unwind the current call frame without
	 * running the rescheduling procedure in
	 * ___xnsched_run(). Therefore, the scheduler slot
	 * (i.e. "sched") will be either valid, or unused.
	 */
	if (((sched->status|sched->lflags) &
	     (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
		return 0;

	return ___xnsched_run(sched);
}

/* The rescheduling procedure. */
static inline int xnsched_run(void)
{
	struct xnsched *sched = xnsched_current();
	/*
	 * No rescheduling is possible if:
	 *
	 * - the current thread holds the scheduler lock,
	 * - an ISR context is active, or
	 * - we are caught in the middle of an unlocked context switch.
	 */
	smp_rmb();
	if (unlikely(sched->curr->lock_count > 0))
		return 0;

	return __xnsched_run(sched);
}
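
Callers normally use xnsched_run() to commit a pending scheduling change: update the target thread while holding the nklock, then trigger the procedure before dropping the lock. A hedged sketch of that calling pattern (the worker thread and the XNSUSP condition are illustrative):

static void wake_up_worker(struct xnthread *worker)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	xnthread_resume(worker, XNSUSP);	/* raises XNRESCHED if a switch is due */
	xnsched_run();				/* run the rescheduling procedure */
	xnlock_put_irqrestore(&nklock, s);
}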

void xnsched_lock(void);

void xnsched_unlock(void);

static inline int xnsched_interrupt_p(void)
{
	return xnsched_current()->lflags & XNINIRQ;
}

static inline int xnsched_root_p(void)
{
	return xnthread_test_state(xnsched_current_thread(), XNROOT);
}

static inline int xnsched_unblockable_p(void)
{
	return xnsched_interrupt_p() || xnsched_root_p();
}

static inline int xnsched_primary_p(void)
{
	return !xnsched_unblockable_p();
}
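
These predicates let blocking services validate their calling context: a caller that may suspend in primary mode must be neither in interrupt context nor running over the root (Linux) thread. A typical guard, sketched for illustration:

static int enter_blocking_service(void)
{
	if (!xnsched_primary_p())
		return -EPERM;	/* cannot block from IRQ or root context */

	/* ... safe to call a service that may suspend the caller ... */
	return 0;
}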

#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH

struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

#define xnsched_resched_after_unlocked_switch()	xnsched_run()

static inline
int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
	return sched->status & XNRESCHED;
}

#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */

static inline struct xnsched *
xnsched_finish_unlocked_switch(struct xnsched *sched)
{
	XENO_BUG_ON(COBALT, !hard_irqs_disabled());
	return xnsched_current();
}

static inline void xnsched_resched_after_unlocked_switch(void) { }

static inline int
xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
	return 0;
}

#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */

#ifdef CONFIG_XENO_OPT_WATCHDOG
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
	sched->wdcount = 0;
}
#else /* !CONFIG_XENO_OPT_WATCHDOG */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
}
#endif /* CONFIG_XENO_OPT_WATCHDOG */

#include <cobalt/kernel/sched-idle.h>
#include <cobalt/kernel/sched-rt.h>

int xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init(struct xnsched *sched, int cpu);

void xnsched_destroy(struct xnsched *sched);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);

int xnsched_set_policy(struct xnthread *thread,
		struct xnsched_class *sched_class,
		const union xnsched_policy_param *p);

void xnsched_track_policy(struct xnthread *thread,
		struct xnthread *target);

void xnsched_migrate(struct xnthread *thread,
		struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
		struct xnsched *sched);

/* Rotate a scheduler runqueue. */
static inline void xnsched_rotate(struct xnsched *sched,
		struct xnsched_class *sched_class,
		const union xnsched_policy_param *sched_param)
{
	sched_class->sched_rotate(sched, sched_param);
}

static inline int xnsched_init_thread(struct xnthread *thread)
{
	int ret = 0;

	xnsched_idle_init_thread(thread);
	xnsched_rt_init_thread(thread);

#ifdef CONFIG_XENO_OPT_SCHED_TP
	ret = xnsched_tp_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	ret = xnsched_sporadic_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	ret = xnsched_quota_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */

	return ret;
}

static inline int xnsched_root_priority(struct xnsched *sched)
{
	return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
	return sched->rootcb.sched_class;
}

static inline void xnsched_tick(struct xnsched *sched)
{
	struct xnthread *curr = sched->curr;
	struct xnsched_class *sched_class = curr->sched_class;
	/*
	 * A thread that undergoes round-robin scheduling only
	 * consumes its time slice when it runs within its own
	 * scheduling class, which excludes temporary PIP boosts, and
	 * does not hold the scheduler lock.
	 */
	if (sched_class == curr->base_class &&
	    sched_class->sched_tick &&
	    xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB &&
	    curr->lock_count == 0)
		sched_class->sched_tick(sched);
}

static inline int xnsched_declare(struct xnsched_class *sched_class,
		struct xnthread *thread,
		const union xnsched_policy_param *p)
{
	int ret;

	if (sched_class->sched_declare) {
		ret = sched_class->sched_declare(thread, p);
		if (ret)
			return ret;
	}
	if (sched_class != thread->base_class)
		sched_class->nthreads++;

	return 0;
}

#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_requeue(thread);
}

static inline void xnsched_setparam(struct xnthread *thread,
		const union xnsched_policy_param *p)
{
	thread->sched_class->sched_setparam(thread, p);
	thread->wprio = thread->cprio + thread->sched_class->weight;
}
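
The weighted priority stacks the class weight on top of the thread's current priority, so any thread of a higher-weight class outranks every thread of a lower-weight one. A worked illustration with hypothetical figures, assuming XNSCHED_CLASS_WEIGHT_FACTOR is 1024 (check schedqueue.h for the actual value in your tree):

	class A: weight = XNSCHED_CLASS_WEIGHT(3) = 3072, cprio = 10  ->  wprio = 3082
	class B: weight = XNSCHED_CLASS_WEIGHT(2) = 2048, cprio = 99  ->  wprio = 2147

Since 3082 > 2147, the class A thread always ranks ahead of the class B thread, regardless of their respective cprio values.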

static inline void xnsched_getparam(struct xnthread *thread,
		union xnsched_policy_param *p)
{
	thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
		const union xnsched_policy_param *p)
{
	thread->sched_class->sched_trackprio(thread, p);
	thread->wprio = thread->cprio + thread->sched_class->weight;
}

static inline void xnsched_forget(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->base_class;

	--sched_class->nthreads;

	if (sched_class->sched_forget)
		sched_class->sched_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->base_class;

	xnthread_set_info(thread, XNKICKED);

	if (sched_class->sched_kick)
		sched_class->sched_kick(thread);

	xnsched_set_resched(thread->sched);
}

#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */

/*
 * If only the RT and IDLE scheduling classes are compiled in, we can
 * fully inline common helpers for dealing with those.
 */

static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_requeue(thread);
}

static inline void xnsched_setparam(struct xnthread *thread,
		const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_setparam(thread, p);
	else
		__xnsched_idle_setparam(thread, p);

	thread->wprio = thread->cprio + sched_class->weight;
}

static inline void xnsched_getparam(struct xnthread *thread,
		union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_getparam(thread, p);
	else
		__xnsched_idle_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
		const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_trackprio(thread, p);
	else
		__xnsched_idle_trackprio(thread, p);

	thread->wprio = thread->cprio + sched_class->weight;
}

static inline void xnsched_forget(struct xnthread *thread)
{
	--thread->base_class->nthreads;
	__xnsched_rt_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
	xnthread_set_info(thread, XNKICKED);
	xnsched_set_resched(thread->sched);
}

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */

#endif /* !_COBALT_KERNEL_SCHED_H */