Xenomai 3.0.5
cobalt-core.h
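Tracepoint definitions for the Cobalt core: thread, scheduler, IRQ, clock, timer and synchronization-object events emitted by the Xenomai real-time kernel.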
/*
 * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>.
 * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
 *
 * Xenomai is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version.
 *
 * Xenomai is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Xenomai; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cobalt_core

#if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_COBALT_CORE_H

#include <linux/tracepoint.h>

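/*
 * The event classes below are shared templates: each DEFINE_EVENT further
 * down instantiates one of them as a concrete cobalt_* tracepoint with the
 * same record layout and format string.
 */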
DECLARE_EVENT_CLASS(thread_event,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(pid_t, pid)
		__field(unsigned long, state)
		__field(unsigned long, info)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->state = thread->state;
		__entry->info = thread->info;
		__entry->pid = xnthread_host_pid(thread);
	),

	TP_printk("thread=%p(%s) pid=%d state=0x%lx info=0x%lx",
		  __entry->thread, __get_str(name), __entry->pid,
		  __entry->state, __entry->info)
);

DECLARE_EVENT_CLASS(synch_wait_event,
	TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
	TP_ARGS(synch, thread),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(struct xnsynch *, synch)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->synch = synch;
	),

	TP_printk("synch=%p thread=%p(%s)",
		  __entry->synch, __entry->thread, __get_str(name))
);

DECLARE_EVENT_CLASS(synch_post_event,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch),

	TP_STRUCT__entry(
		__field(struct xnsynch *, synch)
	),

	TP_fast_assign(
		__entry->synch = synch;
	),

	TP_printk("synch=%p", __entry->synch)
);

DECLARE_EVENT_CLASS(irq_event,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(unsigned int, irq)
	),

	TP_fast_assign(
		__entry->irq = irq;
	),

	TP_printk("irq=%u", __entry->irq)
);

DECLARE_EVENT_CLASS(clock_event,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(unsigned int, irq)
	),

	TP_fast_assign(
		__entry->irq = irq;
	),

	TP_printk("clock_irq=%u", __entry->irq)
);

DECLARE_EVENT_CLASS(thread_migrate,
	TP_PROTO(struct xnthread *thread, unsigned int cpu),
	TP_ARGS(thread, cpu),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(unsigned int, cpu)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->cpu = cpu;
	),

	TP_printk("thread=%p(%s) cpu=%u",
		  __entry->thread, __get_str(name), __entry->cpu)
);

DECLARE_EVENT_CLASS(timer_event,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
	),

	TP_fast_assign(
		__entry->timer = timer;
	),

	TP_printk("timer=%p", __entry->timer)
);

TRACE_EVENT(cobalt_schedule,
	TP_PROTO(struct xnsched *sched),
	TP_ARGS(sched),

	TP_STRUCT__entry(
		__field(unsigned long, status)
	),

	TP_fast_assign(
		__entry->status = sched->status;
	),

	TP_printk("status=0x%lx", __entry->status)
);

TRACE_EVENT(cobalt_schedule_remote,
	TP_PROTO(struct xnsched *sched),
	TP_ARGS(sched),

	TP_STRUCT__entry(
		__field(unsigned long, status)
	),

	TP_fast_assign(
		__entry->status = sched->status;
	),

	TP_printk("status=0x%lx", __entry->status)
);

TRACE_EVENT(cobalt_switch_context,
	TP_PROTO(struct xnthread *prev, struct xnthread *next),
	TP_ARGS(prev, next),

	TP_STRUCT__entry(
		__field(struct xnthread *, prev)
		__field(struct xnthread *, next)
		__string(prev_name, prev->name)
		__string(next_name, next->name)
	),

	TP_fast_assign(
		__entry->prev = prev;
		__entry->next = next;
		__assign_str(prev_name, prev->name);
		__assign_str(next_name, next->name);
	),

	TP_printk("prev=%p(%s) next=%p(%s)",
		  __entry->prev, __get_str(prev_name),
		  __entry->next, __get_str(next_name))
);

TRACE_EVENT(cobalt_thread_init,
	TP_PROTO(struct xnthread *thread,
		 const struct xnthread_init_attr *attr,
		 struct xnsched_class *sched_class),
	TP_ARGS(thread, attr, sched_class),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(thread_name, thread->name)
		__string(class_name, sched_class->name)
		__field(unsigned long, flags)
		__field(int, cprio)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(thread_name, thread->name);
		__entry->flags = attr->flags;
		__assign_str(class_name, sched_class->name);
		__entry->cprio = thread->cprio;
	),

	TP_printk("thread=%p(%s) flags=0x%lx class=%s prio=%d",
		  __entry->thread, __get_str(thread_name), __entry->flags,
		  __get_str(class_name), __entry->cprio)
);

TRACE_EVENT(cobalt_thread_suspend,
	TP_PROTO(struct xnthread *thread, unsigned long mask, xnticks_t timeout,
		 xntmode_t timeout_mode, struct xnsynch *wchan),
	TP_ARGS(thread, mask, timeout, timeout_mode, wchan),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__field(unsigned long, mask)
		__field(xnticks_t, timeout)
		__field(xntmode_t, timeout_mode)
		__field(struct xnsynch *, wchan)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__entry->mask = mask;
		__entry->timeout = timeout;
		__entry->timeout_mode = timeout_mode;
		__entry->wchan = wchan;
	),

	TP_printk("thread=%p mask=0x%lx timeout=%Lu timeout_mode=%d wchan=%p",
		  __entry->thread, __entry->mask,
		  __entry->timeout, __entry->timeout_mode, __entry->wchan)
);

TRACE_EVENT(cobalt_thread_resume,
	TP_PROTO(struct xnthread *thread, unsigned long mask),
	TP_ARGS(thread, mask),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__field(unsigned long, mask)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__entry->mask = mask;
	),

	TP_printk("thread=%p mask=0x%lx",
		  __entry->thread, __entry->mask)
);

TRACE_EVENT(cobalt_thread_fault,
	TP_PROTO(struct xnthread *thread, struct ipipe_trap_data *td),
	TP_ARGS(thread, td),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(void *, ip)
		__field(unsigned int, type)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->ip = (void *)xnarch_fault_pc(td);
		__entry->type = xnarch_fault_trap(td);
	),

	TP_printk("thread=%p(%s) ip=%p type=%x",
		  __entry->thread, __get_str(name), __entry->ip,
		  __entry->type)
);

DEFINE_EVENT(thread_event, cobalt_thread_start,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_cancel,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_join,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_unblock,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_wait_period,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_missed_period,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_set_mode,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_migrate, cobalt_thread_migrate,
	TP_PROTO(struct xnthread *thread, unsigned int cpu),
	TP_ARGS(thread, cpu)
);

DEFINE_EVENT(thread_migrate, cobalt_thread_migrate_passive,
	TP_PROTO(struct xnthread *thread, unsigned int cpu),
	TP_ARGS(thread, cpu)
);

DEFINE_EVENT(thread_event, cobalt_shadow_gohard,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_watchdog_signal,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_shadow_hardened,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

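/*
 * __print_symbolic() maps the numeric SIGDEBUG_* relax reason to a
 * human-readable label when the trace buffer is read.
 */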
#define cobalt_print_relax_reason(reason)			\
	__print_symbolic(reason,				\
		{ SIGDEBUG_UNDEFINED, "undefined" },		\
		{ SIGDEBUG_MIGRATE_SIGNAL, "signal" },		\
		{ SIGDEBUG_MIGRATE_SYSCALL, "syscall" },	\
		{ SIGDEBUG_MIGRATE_FAULT, "fault" })

TRACE_EVENT(cobalt_shadow_gorelax,
	TP_PROTO(struct xnthread *thread, int reason),
	TP_ARGS(thread, reason),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__field(int, reason)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__entry->reason = reason;
	),

	TP_printk("thread=%p reason=%s",
		  __entry->thread, cobalt_print_relax_reason(__entry->reason))
);

DEFINE_EVENT(thread_event, cobalt_shadow_relaxed,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_shadow_entry,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

TRACE_EVENT(cobalt_shadow_map,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(int, prio)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->prio = xnthread_base_priority(thread);
	),

	TP_printk("thread=%p(%s) prio=%d",
		  __entry->thread, __get_str(name), __entry->prio)
);

DEFINE_EVENT(thread_event, cobalt_shadow_unmap,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

TRACE_EVENT(cobalt_lostage_request,
	TP_PROTO(const char *type, struct task_struct *task),
	TP_ARGS(type, task),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
		__field(const char *, type)
	),

	TP_fast_assign(
		__entry->type = type;
		__entry->pid = task_pid_nr(task);
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("request=%s pid=%d comm=%s",
		  __entry->type, __entry->pid, __entry->comm)
);

TRACE_EVENT(cobalt_lostage_wakeup,
	TP_PROTO(struct task_struct *task),
	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
	),

	TP_fast_assign(
		__entry->pid = task_pid_nr(task);
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("pid=%d comm=%s",
		  __entry->pid, __entry->comm)
);

TRACE_EVENT(cobalt_lostage_signal,
	TP_PROTO(struct task_struct *task, int sig),
	TP_ARGS(task, sig),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
		__field(int, sig)
	),

	TP_fast_assign(
		__entry->pid = task_pid_nr(task);
		__entry->sig = sig;
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("pid=%d comm=%s sig=%d",
		  __entry->pid, __entry->comm, __entry->sig)
);

DEFINE_EVENT(irq_event, cobalt_irq_entry,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_exit,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_attach,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_detach,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_enable,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_disable,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(clock_event, cobalt_clock_entry,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(clock_event, cobalt_clock_exit,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(timer_event, cobalt_timer_stop,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer)
);

DEFINE_EVENT(timer_event, cobalt_timer_expire,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer)
);

#define cobalt_print_timer_mode(mode)		\
	__print_symbolic(mode,			\
		{ XN_RELATIVE, "rel" },		\
		{ XN_ABSOLUTE, "abs" },		\
		{ XN_REALTIME, "rt" })

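/*
 * Timer names are only maintained when CONFIG_XENO_OPT_STATS is enabled,
 * so the event below falls back to printing "(anon)" otherwise.
 */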
TRACE_EVENT(cobalt_timer_start,
	TP_PROTO(struct xntimer *timer, xnticks_t value, xnticks_t interval,
		 xntmode_t mode),
	TP_ARGS(timer, value, interval, mode),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
#ifdef CONFIG_XENO_OPT_STATS
		__string(name, timer->name)
#endif
		__field(xnticks_t, value)
		__field(xnticks_t, interval)
		__field(xntmode_t, mode)
	),

	TP_fast_assign(
		__entry->timer = timer;
#ifdef CONFIG_XENO_OPT_STATS
		__assign_str(name, timer->name);
#endif
		__entry->value = value;
		__entry->interval = interval;
		__entry->mode = mode;
	),

	TP_printk("timer=%p(%s) value=%Lu interval=%Lu mode=%s",
		  __entry->timer,
#ifdef CONFIG_XENO_OPT_STATS
		  __get_str(name),
#else
		  "(anon)",
#endif
		  __entry->value, __entry->interval,
		  cobalt_print_timer_mode(__entry->mode))
);

#ifdef CONFIG_SMP

TRACE_EVENT(cobalt_timer_migrate,
	TP_PROTO(struct xntimer *timer, unsigned int cpu),
	TP_ARGS(timer, cpu),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
		__field(unsigned int, cpu)
	),

	TP_fast_assign(
		__entry->timer = timer;
		__entry->cpu = cpu;
	),

	TP_printk("timer=%p cpu=%u",
		  __entry->timer, __entry->cpu)
);

#endif /* CONFIG_SMP */

DEFINE_EVENT(synch_wait_event, cobalt_synch_sleepon,
	TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
	TP_ARGS(synch, thread)
);

DEFINE_EVENT(synch_wait_event, cobalt_synch_try_acquire,
	TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
	TP_ARGS(synch, thread)
);

DEFINE_EVENT(synch_wait_event, cobalt_synch_acquire,
	TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
	TP_ARGS(synch, thread)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_release,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup_many,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_flush,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_forget,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

#endif /* _TRACE_COBALT_CORE_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE cobalt-core
#include <trace/define_trace.h>
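For reference, every TRACE_EVENT and DEFINE_EVENT above expands to a trace_<name>() helper that the Cobalt core invokes at the matching code path, and all of them are grouped under the cobalt_core trace system named by TRACE_SYSTEM. A minimal sketch of a call site follows; the surrounding function and the thread variable are illustrative only, not part of this header:

	/* Hypothetical caller: emits the cobalt_thread_start event defined above. */
	static inline void example_emit_thread_start(struct xnthread *thread)
	{
		trace_cobalt_thread_start(thread);
	}

With kernel tracing support enabled, these events are typically exposed through the ftrace interface, e.g. under /sys/kernel/debug/tracing/events/cobalt_core/, where they can be switched on individually or as a group.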