Xenomai 3.0.5
lock.h
1 /*
2  * Copyright (C) 2001-2008,2012 Philippe Gerum <rpm@xenomai.org>.
3  * Copyright (C) 2004,2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
4  *
5  * Xenomai is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published
7  * by the Free Software Foundation; either version 2 of the License,
8  * or (at your option) any later version.
9  *
10  * Xenomai is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with Xenomai; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
18  * 02111-1307, USA.
19  */
20 #ifndef _COBALT_KERNEL_LOCK_H
21 #define _COBALT_KERNEL_LOCK_H
22 
23 #include <linux/ipipe.h>
24 #include <linux/percpu.h>
25 #include <cobalt/kernel/assert.h>
26 
32 typedef unsigned long spl_t;
33 
39 #define splhigh(x) ((x) = ipipe_test_and_stall_head() & 1)
40 #ifdef CONFIG_SMP
41 
46 #define splexit(x) ipipe_restore_head(x & 1)
47 #else /* !CONFIG_SMP */
48 #define splexit(x) ipipe_restore_head(x)
49 #endif /* !CONFIG_SMP */
50 
53 #define splmax() ipipe_stall_head()
54 
57 #define splnone() ipipe_unstall_head()
58 
64 #define spltest() ipipe_test_head()
65 
66 #if XENO_DEBUG(LOCKING)
67 
68 struct xnlock {
69  unsigned owner;
70  arch_spinlock_t alock;
71  const char *file;
72  const char *function;
73  unsigned int line;
74  int cpu;
75  unsigned long long spin_time;
76  unsigned long long lock_date;
77 };
78 
79 struct xnlockinfo {
80  unsigned long long spin_time;
81  unsigned long long lock_time;
82  const char *file;
83  const char *function;
84  unsigned int line;
85 };
86 
87 #define XNARCH_LOCK_UNLOCKED (struct xnlock) { \
88  ~0, \
89  __ARCH_SPIN_LOCK_UNLOCKED, \
90  NULL, \
91  NULL, \
92  0, \
93  -1, \
94  0LL, \
95  0LL, \
96 }
97 
98 #define XNLOCK_DBG_CONTEXT , __FILE__, __LINE__, __FUNCTION__
99 #define XNLOCK_DBG_CONTEXT_ARGS \
100  , const char *file, int line, const char *function
101 #define XNLOCK_DBG_PASS_CONTEXT , file, line, function
102 
103 void xnlock_dbg_prepare_acquire(unsigned long long *start);
104 void xnlock_dbg_prepare_spin(unsigned int *spin_limit);
105 void xnlock_dbg_acquired(struct xnlock *lock, int cpu,
106  unsigned long long *start,
107  const char *file, int line,
108  const char *function);
109 int xnlock_dbg_release(struct xnlock *lock,
110  const char *file, int line,
111  const char *function);
112 
113 DECLARE_PER_CPU(struct xnlockinfo, xnlock_stats);
114 
115 #else /* !XENO_DEBUG(LOCKING) */
116 
117 struct xnlock {
118  unsigned owner;
119  arch_spinlock_t alock;
120 };
121 
122 #define XNARCH_LOCK_UNLOCKED \
123  (struct xnlock) { \
124  ~0, \
125  __ARCH_SPIN_LOCK_UNLOCKED, \
126  }
127 
128 #define XNLOCK_DBG_CONTEXT
129 #define XNLOCK_DBG_CONTEXT_ARGS
130 #define XNLOCK_DBG_PASS_CONTEXT
131 
132 static inline
133 void xnlock_dbg_prepare_acquire(unsigned long long *start)
134 {
135 }
136 
137 static inline
138 void xnlock_dbg_prepare_spin(unsigned int *spin_limit)
139 {
140 }
141 
142 static inline void
143 xnlock_dbg_acquired(struct xnlock *lock, int cpu,
144  unsigned long long *start)
145 {
146 }
147 
148 static inline int xnlock_dbg_release(struct xnlock *lock)
149 {
150  return 0;
151 }
152 
153 #endif /* !XENO_DEBUG(LOCKING) */
154 
155 #if defined(CONFIG_SMP) || XENO_DEBUG(LOCKING)
156 
157 #define xnlock_get(lock) __xnlock_get(lock XNLOCK_DBG_CONTEXT)
158 #define xnlock_put(lock) __xnlock_put(lock XNLOCK_DBG_CONTEXT)
159 #define xnlock_get_irqsave(lock,x) \
160  ((x) = __xnlock_get_irqsave(lock XNLOCK_DBG_CONTEXT))
161 #define xnlock_put_irqrestore(lock,x) \
162  __xnlock_put_irqrestore(lock,x XNLOCK_DBG_CONTEXT)
163 #define xnlock_clear_irqoff(lock) xnlock_put_irqrestore(lock, 1)
164 #define xnlock_clear_irqon(lock) xnlock_put_irqrestore(lock, 0)
165 
166 static inline void xnlock_init (struct xnlock *lock)
167 {
168  *lock = XNARCH_LOCK_UNLOCKED;
169 }
170 
171 #define DECLARE_XNLOCK(lock) struct xnlock lock
172 #define DECLARE_EXTERN_XNLOCK(lock) extern struct xnlock lock
173 #define DEFINE_XNLOCK(lock) struct xnlock lock = XNARCH_LOCK_UNLOCKED
174 #define DEFINE_PRIVATE_XNLOCK(lock) static DEFINE_XNLOCK(lock)
175 
176 static inline int ____xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
177 {
178  int cpu = ipipe_processor_id();
179  unsigned long long start;
180 
181  if (lock->owner == cpu)
182  return 2;
183 
184  xnlock_dbg_prepare_acquire(&start);
185 
186  arch_spin_lock(&lock->alock);
187  lock->owner = cpu;
188 
189  xnlock_dbg_acquired(lock, cpu, &start /*, */ XNLOCK_DBG_PASS_CONTEXT);
190 
191  return 0;
192 }
193 
194 static inline void ____xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
195 {
196  if (xnlock_dbg_release(lock /*, */ XNLOCK_DBG_PASS_CONTEXT))
197  return;
198 
199  lock->owner = ~0U;
200  arch_spin_unlock(&lock->alock);
201 }
202 
203 #ifndef CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK
204 #define ___xnlock_get ____xnlock_get
205 #define ___xnlock_put ____xnlock_put
206 #else /* out of line xnlock */
207 int ___xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS);
208 
209 void ___xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS);
210 #endif /* out of line xnlock */
211 
212 #if XENO_DEBUG(LOCKING)
213 /* Disable UP-over-SMP kernel optimization in debug mode. */
214 #define __locking_active__ 1
215 #else
216 #define __locking_active__ ipipe_smp_p
217 #endif
218 
219 static inline spl_t
220 __xnlock_get_irqsave(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
221 {
222  unsigned long flags;
223 
224  splhigh(flags);
225 
226  if (__locking_active__)
227  flags |= ___xnlock_get(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
228 
229  return flags;
230 }
231 
232 static inline void __xnlock_put_irqrestore(struct xnlock *lock, spl_t flags
233  /*, */ XNLOCK_DBG_CONTEXT_ARGS)
234 {
235  /* Only release the lock if we didn't take it recursively. */
236  if (__locking_active__ && !(flags & 2))
237  ___xnlock_put(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
238 
239  splexit(flags & 1);
240 }
241 
242 static inline int xnlock_is_owner(struct xnlock *lock)
243 {
244  if (__locking_active__)
245  return lock->owner == ipipe_processor_id();
246 
247  return 1;
248 }
249 
250 static inline int __xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
251 {
252  if (__locking_active__)
253  return ___xnlock_get(lock /* , */ XNLOCK_DBG_PASS_CONTEXT);
254 
255  return 0;
256 }
257 
258 static inline void __xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
259 {
260  if (__locking_active__)
261  ___xnlock_put(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
262 }
263 
264 #undef __locking_active__
265 
266 #else /* !(CONFIG_SMP || XENO_DEBUG(LOCKING)) */
267 
268 #define xnlock_init(lock) do { } while(0)
269 #define xnlock_get(lock) do { } while(0)
270 #define xnlock_put(lock) do { } while(0)
271 #define xnlock_get_irqsave(lock,x) splhigh(x)
272 #define xnlock_put_irqrestore(lock,x) splexit(x)
273 #define xnlock_clear_irqoff(lock) splmax()
274 #define xnlock_clear_irqon(lock) splnone()
275 #define xnlock_is_owner(lock) 1
276 
277 #define DECLARE_XNLOCK(lock)
278 #define DECLARE_EXTERN_XNLOCK(lock)
279 #define DEFINE_XNLOCK(lock)
280 #define DEFINE_PRIVATE_XNLOCK(lock)
281 
282 #endif /* !(CONFIG_SMP || XENO_DEBUG(LOCKING)) */
283 
284 DECLARE_EXTERN_XNLOCK(nklock);
285 
288 #endif /* !_COBALT_KERNEL_LOCK_H */
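
The listing above is the complete nucleus locking API. The sketch below is not part of lock.h; it only illustrates how a caller would typically guard shared state with this API, and the lock name, counter and helper function are hypothetical:

#include <cobalt/kernel/lock.h>

/* Hypothetical lock and shared data, for illustration only. */
DEFINE_PRIVATE_XNLOCK(example_lock);
static int example_counter;

static void example_update(void)
{
	spl_t s;

	/* Hard-disable interrupts and grab the lock (the lock part is a no-op on UP). */
	xnlock_get_irqsave(&example_lock, s);
	example_counter++;
	/* Drop the lock and restore the interrupt state saved above. */
	xnlock_put_irqrestore(&example_lock, s);
}

On SMP builds, or with lock debugging enabled (XENO_DEBUG(LOCKING)), xnlock_get_irqsave() hard-disables interrupts via splhigh() and then spins on the per-lock arch_spinlock_t; on UP builds it reduces to splhigh()/splexit() alone, as the second branch of the #if above shows.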
#define splexit(x)
Restore the saved hard interrupt state on the local processor.
Definition: lock.h:46
#define splhigh(x)
Hard disable interrupts on the local processor, saving previous state.
Definition: lock.h:39
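
As these brief descriptions indicate, splhigh() and splexit() form a save/restore pair bracketing a hard-interrupts-off region. A minimal sketch follows; the body of the critical section is hypothetical:

#include <cobalt/kernel/lock.h>

static void example_irqoff(void)
{
	spl_t s;

	splhigh(s);	/* hard-disable interrupts, saving the previous state in bit 0 of s */
	/* ... code that must run with hard interrupts disabled ... */
	splexit(s);	/* restore the interrupt state saved by splhigh() */
}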