/* Xenomai 3.0.5 -- include/cobalt/kernel/schedqueue.h */
1 /*
2  * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
3  *
4  * Xenomai is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published
6  * by the Free Software Foundation; either version 2 of the License,
7  * or (at your option) any later version.
8  *
9  * Xenomai is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with Xenomai; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
17  * 02111-1307, USA.
18  */
19 #ifndef _COBALT_KERNEL_SCHEDQUEUE_H
20 #define _COBALT_KERNEL_SCHEDQUEUE_H
21 
22 #include <cobalt/kernel/list.h>
23 
29 #define XNSCHED_CLASS_WEIGHT_FACTOR 1024
30 
31 #ifdef CONFIG_XENO_OPT_SCALABLE_SCHED
32 
33 #include <linux/bitmap.h>
34 
35 /*
36  * Multi-level priority queue, suitable for handling the runnable
37  * thread queue of the core scheduling class with O(1) property. We
38  * only manage a descending queuing order, i.e. highest numbered
39  * priorities come first.
40  */
41 #define XNSCHED_MLQ_LEVELS 260 /* i.e. XNSCHED_CORE_NR_PRIO */
42 
43 struct xnsched_mlq {
44  int elems;		/* Total number of threads currently linked to the queue. */
45  DECLARE_BITMAP(prio_map, XNSCHED_MLQ_LEVELS);	/* One bit per non-empty priority level. */
46  struct list_head heads[XNSCHED_MLQ_LEVELS];	/* Per-priority-level FIFO of runnable threads. */
47 };
48 
49 struct xnthread;
50 
51 /* Reset @q to the empty state (no elements, clear bitmap/list heads). */
52 void xnsched_initq(struct xnsched_mlq *q);
53 
54 /*
55  * Link @thread into @q; presumably queues at the head of its priority
56  * level (see the _tail variant below) -- semantics defined in the
57  * implementation file, not visible here.
58  */
53 void xnsched_addq(struct xnsched_mlq *q,
54  struct xnthread *thread);
55 
56 /* Link @thread into @q at the tail of its priority level. */
56 void xnsched_addq_tail(struct xnsched_mlq *q,
57  struct xnthread *thread);
58 
59 /* Unlink @thread from @q. */
59 void xnsched_delq(struct xnsched_mlq *q,
60  struct xnthread *thread);
61 
62 /* Remove and return the next thread to schedule from @q, or NULL if empty. */
62 struct xnthread *xnsched_getq(struct xnsched_mlq *q);
63 
64 /* Tell whether @q holds no runnable thread. */
64 static inline int xnsched_emptyq_p(struct xnsched_mlq *q)
65 {
66  return !q->elems;
67 }
68 
69 /*
69  * Return the index of the first set bit in the priority map, i.e. the
69  * topmost occupied level of @q (lower index ranks first in this
69  * descending-order queue).
69  */
69 static inline int xnsched_weightq(struct xnsched_mlq *q)
70 {
71  int level = find_first_bit(q->prio_map, XNSCHED_MLQ_LEVELS);
71  return level;
72 }
73 
74 /* O(1) multi-level queue backs the run-queue in the scalable configuration. */
74 typedef struct xnsched_mlq xnsched_queue_t;
75 
76 #else /* ! CONFIG_XENO_OPT_SCALABLE_SCHED */
77 
78 /*
78  * Simple configuration: the run-queue is a single priority-ordered
78  * linked list of threads, chained through xnthread.rlink and ordered
78  * on xnthread.cprio. Insertion is O(n), which is acceptable for small
78  * thread counts.
78  */
78 typedef struct list_head xnsched_queue_t;
79 
80 #define xnsched_initq(__q) INIT_LIST_HEAD(__q)
81 #define xnsched_emptyq_p(__q) list_empty(__q)
82 /* _prilf / _priff: priority-ordered insert; presumably head vs. tail
82  * of the same-priority run -- see cobalt/kernel/list.h for the exact
82  * ordering contract. */
82 #define xnsched_addq(__q, __t) list_add_prilf(__t, __q, cprio, rlink)
83 #define xnsched_addq_tail(__q, __t) list_add_priff(__t, __q, cprio, rlink)
84 /* (void)(__q): the queue argument is unused here; kept so both
84  * configurations share one call signature. */
84 #define xnsched_delq(__q, __t) (void)(__q), list_del(&(__t)->rlink)
85 /* Pop the front thread, or yield NULL when the queue is empty. */
85 #define xnsched_getq(__q) \
86  ({ \
87  struct xnthread *__t = NULL; \
88  if (!list_empty(__q)) \
89  __t = list_get_entry(__q, struct xnthread, rlink); \
90  __t; \
91  })
92 /* Current priority of the front thread; must not be used on an empty
92  * queue (no emptiness check is performed). */
92 #define xnsched_weightq(__q) \
93  ({ \
94  struct xnthread *__t; \
95  __t = list_first_entry(__q, struct xnthread, rlink); \
96  __t->cprio; \
97  })
98 
99 
100 #endif /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
101 
102 /* Find a thread queued at priority level @prio in @q, or NULL if none. */
102 struct xnthread *xnsched_findq(xnsched_queue_t *q, int prio);
103 
106 #endif /* !_COBALT_KERNEL_SCHEDQUEUE_H */