/* Xenomai 3.0.5 — boilerplate/lock.h */
1 /*
2  * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
3  *
4  * This library is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2 of the License, or (at your option) any later version.
8  *
9  * This library is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
 *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with this library; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
17  */
18 
19 #ifndef _BOILERPLATE_LOCK_H
20 #define _BOILERPLATE_LOCK_H
21 
22 #include <pthread.h>
23 #include <boilerplate/atomic.h>
24 #include <boilerplate/wrappers.h>
25 #include <boilerplate/debug.h>
26 
27 /*
28  * CANCEL_DEFER/RESTORE() should enclose any emulator code prior to
29  * holding a lock, or invoking inner boilerplate/copperplate services
30  * (which usually do so), to change the system state. A proper cleanup
31  * handler should be pushed prior to acquire such lock.
32  *
33  * Those macros ensure that cancellation type is switched to deferred
34  * mode while the section is traversed, then restored to its original
35  * value upon exit.
36  *
37  * WARNING: inner services MAY ASSUME that cancellability is deferred
38  * for the caller, so you really want to define protected sections as
39  * required in the higher interface layers.
40  */
/*
 * Per-call state saved by CANCEL_DEFER() and consumed by
 * CANCEL_RESTORE(), holding the caller's original cancellation type
 * across a protected section.
 */
struct service {
	int cancel_type;	/* Saved PTHREAD_CANCEL_{DEFERRED,ASYNCHRONOUS}. */
};
44 
#ifdef CONFIG_XENO_ASYNC_CANCEL

/*
 * Force deferred cancellation mode for the caller, saving the
 * previous cancellation type into (__s).cancel_type so that
 * CANCEL_RESTORE() can reinstate it on section exit.
 */
#define CANCEL_DEFER(__s)					\
	do {							\
		pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED,	\
				      &(__s).cancel_type);	\
	} while (0)

/*
 * Restore the cancellation type saved by CANCEL_DEFER().
 * backtrace_check() comes from boilerplate/debug.h — presumably a
 * debug checkpoint; confirm its exact semantics there.
 */
#define CANCEL_RESTORE(__s)					\
	do {							\
		pthread_setcanceltype((__s).cancel_type, NULL);	\
		backtrace_check();				\
	} while (0)

#else /* !CONFIG_XENO_ASYNC_CANCEL */

/*
 * Asynchronous cancellation support is compiled out, so threads are
 * always in deferred mode and both macros collapse to no-ops. The
 * (void)(__s) cast keeps the argument referenced, avoiding "unused
 * variable" warnings at call sites.
 */
#define CANCEL_DEFER(__s)	do { (void)(__s); } while (0)

#define CANCEL_RESTORE(__s)	do { } while (0)

#endif /* !CONFIG_XENO_ASYNC_CANCEL */
66 
/*
 * Argument block handed to __run_cleanup_block() when a cancellation
 * cleanup handler installed by push_cleanup_handler() fires.
 * __run_cleanup_block() is defined elsewhere; presumably it releases
 * @lock then invokes @handler(@arg) — confirm ordering at its
 * definition site.
 */
struct cleanup_block {
	pthread_mutex_t *lock;		/* Mutex to release on cleanup. */
	void (*handler)(void *arg);	/* User-provided cleanup routine. */
	void *arg;			/* Opaque cookie passed to @handler. */
};
72 
/*
 * Fill in @__cb with the lock/handler/argument triplet. Expands to a
 * comma expression so it can be embedded inside the argument of
 * pthread_cleanup_push() below.
 */
#define __push_cleanup_args(__cb, __lock, __fn, __arg)		\
	((__cb)->lock = (__lock)),				\
	((__cb)->handler = (void (*)(void *))(__fn)),		\
	((__cb)->arg = (__arg))

/*
 * Install __run_cleanup_block() as a cancellation cleanup handler,
 * passing it @__cb initialized from the remaining arguments. Must be
 * lexically paired with pop_cleanup_handler() in the same scope, as
 * required by POSIX pthread_cleanup_push/pop.
 */
#define push_cleanup_handler(__cb, __lock, __fn, __arg)		\
	pthread_cleanup_push((void (*)(void *))__run_cleanup_block, \
	(__push_cleanup_args(__cb, __lock, __fn, __arg), (__cb)))

/* Remove the handler without executing it (execute arg == 0). */
#define pop_cleanup_handler(__cb)	\
	pthread_cleanup_pop(0)

/*
 * Shorthand pushing pthread_mutex_unlock(@__lock) as the cleanup
 * handler, so the lock is released should the protected section be
 * cancelled.
 */
#define push_cleanup_lock(__lock)	\
	pthread_cleanup_push((void (*)(void *))__RT(pthread_mutex_unlock), (__lock))

/* Remove the handler without unlocking; the caller unlocks explicitly. */
#define pop_cleanup_lock(__lock)	\
	pthread_cleanup_pop(0)
90 
#ifdef CONFIG_XENO_DEBUG
/*
 * Debug-only check that the caller runs with deferred cancellability
 * when acquiring a _nocancel lock; @locktype names the offending call
 * in the diagnostic. Defined elsewhere — presumably returns 0 on
 * success; confirm the error convention at the definition site.
 */
int __check_cancel_type(const char *locktype);
#else
/* No-op in non-debug builds; evaluates to 0 (success). */
#define __check_cancel_type(__locktype)	\
	({ (void)__locktype; 0; })
#endif
97 
/*
 * Acquire @__lock through pthread_mutex_<__op>() (__op is "lock" or
 * "trylock"), routed through the __RT() wrapper. Evaluates to 0 on
 * success or the negated errno value on failure.
 */
#define __do_lock(__lock, __op)						\
	({								\
		int __status = -__RT(pthread_mutex_##__op(__lock));	\
		__status;						\
	})
104 
/*
 * Like __do_lock(), but first verifies — in debug builds only — that
 * the caller's cancellability is deferred, since no cleanup handler
 * protects a _nocancel section. @__type is not expanded here; it only
 * documents which public macro the call originated from.
 */
#define __do_lock_nocancel(__lock, __type, __op)		\
	({							\
		__bt(__check_cancel_type(#__op "_nocancel"));	\
		__do_lock(__lock, __op);			\
	})
110 
/*
 * Release @__lock. Evaluates to 0 on success or the negated errno
 * value on failure.
 */
#define __do_unlock(__lock)						\
	({								\
		int __status = -__RT(pthread_mutex_unlock(__lock));	\
		__status;						\
	})
117 /*
118  * Macros to enter/leave critical sections within inner
119  * routines. Actually, they are mainly aimed at self-documenting the
120  * code, by specifying basic assumption(s) about the code being
121  * traversed. In effect, they are currently aliases to the standard
122  * pthread_mutex_* API, except for the _safe form.
123  *
124  * The _nocancel suffix indicates that no cancellation point is
125  * traversed by the protected code, therefore we don't need any
126  * cleanup handler since we are guaranteed to run in deferred cancel
127  * mode after CANCEL_DEFER(). A runtime check is inserted in
128  * debug mode, which triggers when cancellability is not in deferred
129  * mode while an attempt is made to acquire a _nocancel lock.
130  *
131  * read/write_lock() forms must be enclosed within the scope of a
132  * cleanup handler since the protected code may reach cancellation
133  * points. push_cleanup_lock() is a simple shorthand to push
134  * pthread_mutex_unlock as the cleanup handler.
135  */
/* Read-side acquisition; enclose in a cleanup-handler scope (see
 * push_cleanup_lock()), as the protected code may hit cancellation
 * points. Evaluates to 0 or a negated errno value. */
#define read_lock(__lock)		\
	__do_lock(__lock, lock)

#define read_trylock(__lock)		\
	__do_lock(__lock, trylock)

/* Read-side acquisition for sections crossing no cancellation point;
 * debug builds verify the caller is in deferred-cancel mode. */
#define read_lock_nocancel(__lock)	\
	__do_lock_nocancel(__lock, read_lock, lock)

#define read_trylock_nocancel(__lock)	\
	__do_lock_nocancel(__lock, read_trylock, trylock)

#define read_unlock(__lock)		\
	__do_unlock(__lock)

/* Write-side variants; currently identical to the read-side forms,
 * since the underlying primitive is a plain pthread mutex — the
 * distinction is self-documentation only. */
#define write_lock(__lock)		\
	__do_lock(__lock, lock)

#define write_trylock(__lock)		\
	__do_lock(__lock, trylock)

#define write_lock_nocancel(__lock)	\
	__do_lock_nocancel(__lock, write_lock, lock)

#define write_trylock_nocancel(__lock)	\
	__do_lock_nocancel(__lock, write_trylock, trylock)

#define write_unlock(__lock)		\
	__do_unlock(__lock)
165 
/*
 * Acquire @__lock with cancellation disabled for the whole critical
 * section. The previous cancel state is saved into @__state for
 * __do_unlock_safe() to restore later. If locking fails, the state is
 * restored immediately (the section is never entered); @__state is
 * still written, so a redundant restore by an unlock on that path is
 * harmless. Evaluates to 0 or a negated errno value.
 */
#define __do_lock_safe(__lock, __state, __op)			\
	({							\
		int __ret, __oldstate;				\
		__bt(__check_cancel_type(#__op "_safe"));	\
		pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &__oldstate); \
		__ret = -__RT(pthread_mutex_##__op(__lock));	\
		if (__ret)					\
			pthread_setcancelstate(__oldstate, NULL); \
		__state = __oldstate;				\
		__ret;						\
	})

/*
 * Release @__lock, then restore the cancel state saved by
 * __do_lock_safe(). @__state is copied into a local first so the
 * macro argument is evaluated exactly once.
 */
#define __do_unlock_safe(__lock, __state)			\
	({							\
		int __ret, __restored_state = __state;		\
		__ret = -__RT(pthread_mutex_unlock(__lock));	\
		pthread_setcancelstate(__restored_state, NULL);	\
		__ret;						\
	})
185 
186 /*
187  * The _safe call form is available when undoing the changes from an
188  * update section upon cancellation using a cleanup handler is not an
189  * option (e.g. too complex), or in situations where the protected
190  * code shall fully run; in such cases, cancellation is disabled
191  * throughout the section.
192  */
193 
/* Acquire with cancellation disabled; pair with write_unlock_safe(),
 * passing the same @__state lvalue. */
#define write_lock_safe(__lock, __state)	\
	__do_lock_safe(__lock, __state, lock)

#define write_trylock_safe(__lock, __state)	\
	__do_lock_safe(__lock, __state, trylock)

#define write_unlock_safe(__lock, __state)	\
	__do_unlock_safe(__lock, __state)

/* Read-side _safe forms; identical to the write-side ones, kept for
 * self-documentation (plain mutex underneath). */
#define read_lock_safe(__lock, __state)	\
	__do_lock_safe(__lock, __state, lock)

#define read_unlock_safe(__lock, __state)	\
	__do_unlock_safe(__lock, __state)
208 
/*
 * Default mutex type for locks of this layer: error-checking in debug
 * builds (per POSIX, relock by the owner and unlock by a non-owner
 * return errors instead of deadlocking/UB), plain mutex otherwise.
 */
#ifdef CONFIG_XENO_DEBUG
#define mutex_type_attribute PTHREAD_MUTEX_ERRORCHECK
#else
#define mutex_type_attribute PTHREAD_MUTEX_NORMAL
#endif
214 
215 #endif /* _BOILERPLATE_LOCK_H */