Xenomai 3.0.5
rtskb.h
1 /***
2  *
3  * include/rtskb.h
4  *
5  * RTnet - real-time networking subsystem
6  * Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>,
7  * 2003-2005 Jan Kiszka <jan.kiszka@web.de>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  *
23  */
24 
25 #ifndef __RTSKB_H_
26 #define __RTSKB_H_
27 
28 #ifdef __KERNEL__
29 
30 #include <linux/skbuff.h>
31 
32 #include <rtnet.h>
33 #include <rtnet_internal.h>
34 
35 
36 /***
37 
38 rtskb Management - A Short Introduction
39 ---------------------------------------
40 
41 1. rtskbs (Real-Time Socket Buffers)
42 
43 A rtskb consists of a management structure (struct rtskb) and a fixed-size
44 (RTSKB_SIZE) data buffer. It is used to store network packets on their way from
45 the API routines through the stack to the NICs or vice versa. rtskbs are
46 allocated as one chunk of memory which contains both the management structure
47 and the buffer memory itself.
48 
49 
50 2. rtskb Queues
51 
52 A rtskb queue is described by struct rtskb_queue. A queue can contain an
53 unlimited number of rtskbs in an ordered way. A rtskb can either be added to
54 the head (rtskb_queue_head()) or the tail of a queue (rtskb_queue_tail()). When
55 a rtskb is removed from a queue (rtskb_dequeue()), it is always taken from the
56 head. Queues are normally spin-lock protected unless the __ variants of the
57 queuing functions are used.
58 
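As an illustration, a minimal sketch of the locked queue calls (skb is assumed
to have been obtained elsewhere, e.g. from a pool):

    struct rtskb_queue rxq;

    rtskb_queue_init(&rxq);        // first = last = NULL, lock initialized
    rtskb_queue_tail(&rxq, skb);   // append at the tail, IRQ-safe
    skb = rtskb_dequeue(&rxq);     // always removes from the head (or NULL)

The __ variants perform the same operations without taking the queue lock; the
caller then has to provide the necessary serialization itself.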
59 
60 3. Prioritized rtskb Queues
61 
62 A prioritized queue contains a number of normal rtskb queues within an array.
63 The array index of a sub-queue corresponds to the priority of the rtskbs within
64 this queue. For enqueuing a rtskb (rtskb_prio_queue_head()), its priority field
65 is evaluated and the rtskb is then placed into the appropriate sub-queue. When
66 dequeuing a rtskb, the first rtskb of the non-empty sub-queue with the
67 highest priority is returned. The current implementation supports 32 different
68 priority levels; the lowest is defined by QUEUE_MIN_PRIO, the highest by
69 QUEUE_MAX_PRIO.
70 
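A short sketch of the prioritized variants; note that the numeric value is
inverted in meaning, i.e. sub-queue 0 (QUEUE_MAX_PRIO) is served first:

    struct rtskb_prio_queue txq;

    rtskb_prio_queue_init(&txq);        // all 32 sub-queues empty
    skb->priority = QUEUE_MIN_PRIO;     // lowest priority, sub-queue 31
    rtskb_prio_queue_tail(&txq, skb);   // queued by bits 0..15 of priority
    skb = rtskb_prio_dequeue(&txq);     // returns the highest-priority rtskb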
71 
72 4. rtskb Pools
73 
74 As rtskbs must not be allocated by a normal memory manager during runtime,
75 preallocated rtskbs are kept ready in several pools. Most packet producers
76 (NICs, sockets, etc.) have their own pools in order to be independent of the
77 load situation of other parts of the stack.
78 
79 When a pool is created (rtskb_pool_init()), the required rtskbs are allocated
80 from a Linux slab cache. Pools can be extended (rtskb_pool_extend()) or
81 shrunk (rtskb_pool_shrink()) during runtime. When shutting down the
82 program/module, every pool has to be released (rtskb_pool_release()). All these
83 operations must be executed in a non-real-time context.
84 
85 Pools are organized as normal rtskb queues (struct rtskb_queue). When a rtskb
86 is allocated (alloc_rtskb()), it is actually dequeued from the pool's queue.
87 When freeing a rtskb (kfree_rtskb()), the rtskb is enqueued to its owning pool.
88 rtskbs can be exchanged between pools (rtskb_acquire()). In this case, the
89 passed rtskb switches over from its owning pool to a given pool, but only if
90 this pool can pass an empty rtskb from its own queue back.
91 
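The typical life cycle, sketched here for a module-owned pool (my_pool, the
sizes and the error handling are placeholders; the init call is assumed to
return the number of rtskbs it could actually preallocate):

    static struct rtskb_pool my_pool;

    // non real-time context, e.g. module init: preallocate 16 rtskbs
    if (rtskb_module_pool_init(&my_pool, 16) < 16)
        goto err;

    // real-time context: take a buffer from the pool and return it later
    skb = alloc_rtskb(RTSKB_SIZE, &my_pool);
    if (skb != NULL)
        kfree_rtskb(skb);               // goes back to my_pool

    // non real-time context, e.g. module cleanup
    rtskb_pool_release(&my_pool);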
92 
93 5. rtskb Chains
94 
95 To ease the defragmentation of larger IP packets, several rtskbs can form a
96 chain. For these purposes, the first rtskb (and only the first!) provides a
97 pointer to the last rtskb in the chain. When enqueuing the first rtskb of a
98 chain, the whole chain is automatically placed into the destination queue. But
99 to dequeue a complete chain, specialized calls are required (suffix: _chain).
100 While chains also get freed en bloc (kfree_rtskb()) when passing the first
101 rtskb, it is not possible to allocate a chain from a pool (alloc_rtskb()); a
102 newly allocated rtskb is always reset to a "single rtskb chain". Furthermore,
103 the acquisition of complete chains is NOT supported (rtskb_acquire()).
104 
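For example, draining and releasing one complete chain could look like this
(rxq is a placeholder for some existing queue):

    struct rtskb *first = rtskb_dequeue_chain(&rxq);   // whole chain or NULL

    if (first != NULL)
        kfree_rtskb(first);     // releases every rtskb of the chain en bloc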
105 
106 6. Capturing Support (Optional)
107 
108 When incoming or outgoing packets are captured, the assigned rtskb needs to be
109 shared between the stack, the driver, and the capturing service. In contrast to
110 many other network stacks, RTnet does not create a new rtskb head and
111 re-reference the payload. Instead, additional fields at the end of the rtskb
112 structure are used for sharing a rtskb with a capturing service. If the sharing
113 bit (RTSKB_CAP_SHARED) in cap_flags is set, the rtskb will not be returned to
114 the owning pool upon the call of kfree_rtskb. Instead, this bit will be reset,
115 and a compensation rtskb stored in cap_comp_skb will be returned to the owning
116 pool. cap_start and cap_len can be used to mirror the dimensions of the full
117 packet. This is required because the data and len fields will be modified while
118 walking through the stack. cap_next allows a rtskb to be added to a separate
119 queue which is independent of any queue described in section 2.
120 
121 Certain setup tasks for capturing packets cannot become part of a capturing
122 module; they have to be embedded into the stack. For this purpose, several
123 inline functions are provided. rtcap_mark_incoming() is used to save the packet
124 dimensions right before they are modified by the stack. rtcap_report_incoming()
125 calls the capturing handler, if present, in order to let it process the
126 received rtskb (e.g. allocate compensation rtskb, mark original rtskb as
127 shared, and enqueue it).
128 
129 Outgoing rtskbs have to be captured by adding a hook function to the chain of
130 hard_start_xmit functions of a device. To measure the delay caused by RTmac
131 between the request and the actual transmission, a time stamp can be taken using
132 rtcap_mark_rtmac_enqueue(). This function is typically called by RTmac
133 disciplines when they add a rtskb to their internal transmission queue. In such
134 a case, the RTSKB_CAP_RTMAC_STAMP bit is set in cap_flags to indicate that the
135 cap_rtmac_stamp field now contains valid data.
136 
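As a rough sketch (the exact placement inside the stack is illustrative only),
the incoming path uses the two hooks around the point where the stack starts to
modify the buffer, and an RTmac discipline stamps the rtskb when queuing it
internally:

    rtcap_mark_incoming(skb);       // save data pointer and packet length
    rtcap_report_incoming(skb);     // hand the rtskb to RTcap, if loaded
    // ... protocol processing may now change skb->data and skb->len ...

    rtcap_mark_rtmac_enqueue(skb);  // RTmac discipline: stamp enqueuing time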
137  ***/
138 
139 
140 #ifndef CHECKSUM_PARTIAL
141 #define CHECKSUM_PARTIAL CHECKSUM_HW
142 #endif
143 
144 #define RTSKB_CAP_SHARED 1 /* rtskb shared between stack and RTcap */
145 #define RTSKB_CAP_RTMAC_STAMP 2 /* cap_rtmac_stamp is valid */
146 
147 #define RTSKB_UNMAPPED 0
148 
149 struct rtskb_queue;
150 struct rtsocket;
151 struct rtnet_device;
152 
153 /***
154  * rtskb - realtime socket buffer
155  */
156 struct rtskb {
157  struct rtskb *next; /* used for queuing rtskbs */
158  struct rtskb *chain_end; /* marks the end of a rtskb chain starting
159  with this very rtskb */
160 
161  struct rtskb_pool *pool; /* owning pool */
162 
163  unsigned int priority; /* bit 0..15: prio, 16..31: user-defined */
164 
165  struct rtsocket *sk; /* assigned socket */
166  struct rtnet_device *rtdev; /* source or destination device */
167 
168  nanosecs_abs_t time_stamp; /* arrival or transmission (RTcap) time */
169 
170  /* patch address of the transmission time stamp, can be NULL
171  * calculation: *xmit_stamp = cpu_to_be64(time_in_ns + *xmit_stamp)
172  */
173  nanosecs_abs_t *xmit_stamp;
174 
175  /* transport layer */
176  union
177  {
178  struct tcphdr *th;
179  struct udphdr *uh;
180  struct icmphdr *icmph;
181  struct iphdr *ipihdr;
182  unsigned char *raw;
183  } h;
184 
185  /* network layer */
186  union
187  {
188  struct iphdr *iph;
189  struct arphdr *arph;
190  unsigned char *raw;
191  } nh;
192 
193  /* link layer */
194  union
195  {
196  struct ethhdr *ethernet;
197  unsigned char *raw;
198  } mac;
199 
200  unsigned short protocol;
201  unsigned char pkt_type;
202 
203  unsigned char ip_summed;
204  unsigned int csum;
205 
206  unsigned char *data;
207  unsigned char *tail;
208  unsigned char *end;
209  unsigned int len;
210 
211  dma_addr_t buf_dma_addr;
212 
213  unsigned char *buf_start;
214 
215 #ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
216  unsigned char *buf_end;
217 #endif
218 
219 #if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
220  int cap_flags; /* see RTSKB_CAP_xxx */
221  struct rtskb *cap_comp_skb; /* compensation rtskb */
222  struct rtskb *cap_next; /* used for capture queue */
223  unsigned char *cap_start; /* start offset for capturing */
224  unsigned int cap_len; /* capture length of this rtskb */
225  nanosecs_abs_t cap_rtmac_stamp; /* RTmac enqueuing time */
226 #endif
227 
228  struct list_head entry; /* for global rtskb list */
229 };
230 
231 struct rtskb_queue {
232  struct rtskb *first;
233  struct rtskb *last;
234  rtdm_lock_t lock;
235 };
236 
237 struct rtskb_pool_lock_ops {
238  int (*trylock)(void *cookie);
239  void (*unlock)(void *cookie);
240 };
241 
242 struct rtskb_pool {
243  struct rtskb_queue queue;
244  const struct rtskb_pool_lock_ops *lock_ops;
245  void *lock_cookie;
246 };
247 
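/*
 * Illustrative sketch (not part of this header): the lock_ops of a pool can
 * pin an owner object for as long as rtskbs are handed out of it. Tying a
 * pool to a module reference might look like this (needs <linux/module.h>):
 *
 *  static int my_pool_trylock(void *cookie)
 *  {
 *      return try_module_get((struct module *)cookie);
 *  }
 *
 *  static void my_pool_unlock(void *cookie)
 *  {
 *      module_put((struct module *)cookie);
 *  }
 *
 *  static const struct rtskb_pool_lock_ops my_pool_lock_ops = {
 *      .trylock = my_pool_trylock,
 *      .unlock  = my_pool_unlock,
 *  };
 *
 *  rtskb_pool_init(&pool, 16, &my_pool_lock_ops, THIS_MODULE);
 */
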
248 #define QUEUE_MAX_PRIO 0
249 #define QUEUE_MIN_PRIO 31
250 
251 struct rtskb_prio_queue {
252  rtdm_lock_t lock;
253  unsigned long usage; /* bit array encoding non-empty sub-queues */
254  struct rtskb_queue queue[QUEUE_MIN_PRIO+1];
255 };
256 
257 #define RTSKB_PRIO_MASK 0x0000FFFF /* bits 0..15: xmit prio */
258 #define RTSKB_CHANNEL_MASK 0xFFFF0000 /* bits 16..31: xmit channel */
259 #define RTSKB_CHANNEL_SHIFT 16
260 
261 #define RTSKB_DEF_RT_CHANNEL SOCK_DEF_RT_CHANNEL
262 #define RTSKB_DEF_NRT_CHANNEL SOCK_DEF_NRT_CHANNEL
263 #define RTSKB_USER_CHANNEL SOCK_USER_CHANNEL
264 
265 /* Note: always keep SOCK_XMIT_PARAMS consistent with definitions above! */
266 #define RTSKB_PRIO_VALUE SOCK_XMIT_PARAMS
267 
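/*
 * Illustration only: with the masks above, the priority field can be composed
 * by hand (prio and channel are hypothetical variables):
 *
 *  skb->priority = (prio & RTSKB_PRIO_MASK) |
 *                  ((channel << RTSKB_CHANNEL_SHIFT) & RTSKB_CHANNEL_MASK);
 */
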
268 
269 /* default values for the module parameter */
270 #define DEFAULT_GLOBAL_RTSKBS 0 /* default number of rtskb's in global pool */
271 #define DEFAULT_DEVICE_RTSKBS 16 /* default additional rtskbs per network adapter */
272 #define DEFAULT_SOCKET_RTSKBS 16 /* default number of rtskb's in socket pools */
273 
274 #define ALIGN_RTSKB_STRUCT_LEN SKB_DATA_ALIGN(sizeof(struct rtskb))
275 #define RTSKB_SIZE 1544 /* maximum needed by pcnet32-rt */
276 
277 extern unsigned int rtskb_pools; /* current number of rtskb pools */
278 extern unsigned int rtskb_pools_max; /* maximum number of rtskb pools */
279 extern unsigned int rtskb_amount; /* current number of allocated rtskbs */
280 extern unsigned int rtskb_amount_max; /* maximum number of allocated rtskbs */
281 
282 #ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
283 extern void rtskb_over_panic(struct rtskb *skb, int len, void *here);
284 extern void rtskb_under_panic(struct rtskb *skb, int len, void *here);
285 #endif
286 
287 extern struct rtskb *rtskb_pool_dequeue(struct rtskb_pool *pool);
288 
289 extern void rtskb_pool_queue_tail(struct rtskb_pool *pool, struct rtskb *skb);
290 
291 extern struct rtskb *alloc_rtskb(unsigned int size, struct rtskb_pool *pool);
292 
293 extern void kfree_rtskb(struct rtskb *skb);
294 #define dev_kfree_rtskb(a) kfree_rtskb(a)
295 
296 
297 #define rtskb_checksum_none_assert(skb) (skb->ip_summed = CHECKSUM_NONE)
298 
299 static inline void rtskb_tx_timestamp(struct rtskb *skb)
300 {
301  nanosecs_abs_t *ts = skb->xmit_stamp;
302 
303  if (!ts)
304  return;
305 
306  *ts = cpu_to_be64(rtdm_clock_read() + *ts);
307 }
308 
309 /***
310  * rtskb_queue_init - initialize the queue
311  * @queue
312  */
313 static inline void rtskb_queue_init(struct rtskb_queue *queue)
314 {
315  rtdm_lock_init(&queue->lock);
316  queue->first = NULL;
317  queue->last = NULL;
318 }
319 
320 /***
321  * rtskb_prio_queue_init - initialize the prioritized queue
322  * @prioqueue
323  */
324 static inline void rtskb_prio_queue_init(struct rtskb_prio_queue *prioqueue)
325 {
326  memset(prioqueue, 0, sizeof(struct rtskb_prio_queue));
327  rtdm_lock_init(&prioqueue->lock);
328 }
329 
330 /***
331  * rtskb_queue_empty
332  * @queue
333  */
334 static inline int rtskb_queue_empty(struct rtskb_queue *queue)
335 {
336  return (queue->first == NULL);
337 }
338 
339 /***
340  * rtskb_prio_queue_empty
341  * @prioqueue
342  */
343 static inline int rtskb_prio_queue_empty(struct rtskb_prio_queue *prioqueue)
344 {
345  return (prioqueue->usage == 0);
346 }
347 
348 /***
349  * __rtskb_queue_head - insert a buffer at the queue head (w/o locks)
350  * @queue: queue to use
351  * @skb: buffer to queue
352  */
353 static inline void __rtskb_queue_head(struct rtskb_queue *queue,
354  struct rtskb *skb)
355 {
356  struct rtskb *chain_end = skb->chain_end;
357 
358  chain_end->next = queue->first;
359 
360  if (queue->first == NULL)
361  queue->last = chain_end;
362  queue->first = skb;
363 }
364 
365 /***
366  * rtskb_queue_head - insert a buffer at the queue head (lock protected)
367  * @queue: queue to use
368  * @skb: buffer to queue
369  */
370 static inline void rtskb_queue_head(struct rtskb_queue *queue, struct rtskb *skb)
371 {
372  rtdm_lockctx_t context;
373 
374  rtdm_lock_get_irqsave(&queue->lock, context);
375  __rtskb_queue_head(queue, skb);
376  rtdm_lock_put_irqrestore(&queue->lock, context);
377 }
378 
379 /***
380  * __rtskb_prio_queue_head - insert a buffer at the prioritized queue head
381  * (w/o locks)
382  * @prioqueue: queue to use
383  * @skb: buffer to queue
384  */
385 static inline void __rtskb_prio_queue_head(struct rtskb_prio_queue *prioqueue,
386  struct rtskb *skb)
387 {
388  unsigned int prio = skb->priority & RTSKB_PRIO_MASK;
389 
390  RTNET_ASSERT(prio <= 31, prio = 31;);
391 
392  __rtskb_queue_head(&prioqueue->queue[prio], skb);
393  __set_bit(prio, &prioqueue->usage);
394 }
395 
396 /***
397  * rtskb_prio_queue_head - insert a buffer at the prioritized queue head
398  * (lock protected)
399  * @prioqueue: queue to use
400  * @skb: buffer to queue
401  */
402 static inline void rtskb_prio_queue_head(struct rtskb_prio_queue *prioqueue,
403  struct rtskb *skb)
404 {
405  rtdm_lockctx_t context;
406 
407  rtdm_lock_get_irqsave(&prioqueue->lock, context);
408  __rtskb_prio_queue_head(prioqueue, skb);
409  rtdm_lock_put_irqrestore(&prioqueue->lock, context);
410 }
411 
412 /***
413  * __rtskb_queue_tail - insert a buffer at the queue tail (w/o locks)
414  * @queue: queue to use
415  * @skb: buffer to queue
416  */
417 static inline void __rtskb_queue_tail(struct rtskb_queue *queue,
418  struct rtskb *skb)
419 {
420  struct rtskb *chain_end = skb->chain_end;
421 
422  chain_end->next = NULL;
423 
424  if (queue->first == NULL)
425  queue->first = skb;
426  else
427  queue->last->next = skb;
428  queue->last = chain_end;
429 }
430 
431 /***
432  * rtskb_queue_tail - insert a buffer at the queue tail (lock protected)
433  * @queue: queue to use
434  * @skb: buffer to queue
435  */
436 static inline void rtskb_queue_tail(struct rtskb_queue *queue,
437  struct rtskb *skb)
438 {
439  rtdm_lockctx_t context;
440 
441  rtdm_lock_get_irqsave(&queue->lock, context);
442  __rtskb_queue_tail(queue, skb);
443  rtdm_lock_put_irqrestore(&queue->lock, context);
444 }
445 
446 /***
447  * __rtskb_prio_queue_tail - insert a buffer at the prioritized queue tail
448  * (w/o locks)
449  * @prioqueue: queue to use
450  * @skb: buffer to queue
451  */
452 static inline void __rtskb_prio_queue_tail(struct rtskb_prio_queue *prioqueue,
453  struct rtskb *skb)
454 {
455  unsigned int prio = skb->priority & RTSKB_PRIO_MASK;
456 
457  RTNET_ASSERT(prio <= 31, prio = 31;);
458 
459  __rtskb_queue_tail(&prioqueue->queue[prio], skb);
460  __set_bit(prio, &prioqueue->usage);
461 }
462 
463 /***
464  * rtskb_prio_queue_tail - insert a buffer at the prioritized queue tail
465  * (lock protected)
466  * @prioqueue: queue to use
467  * @skb: buffer to queue
468  */
469 static inline void rtskb_prio_queue_tail(struct rtskb_prio_queue *prioqueue,
470  struct rtskb *skb)
471 {
472  rtdm_lockctx_t context;
473 
474  rtdm_lock_get_irqsave(&prioqueue->lock, context);
475  __rtskb_prio_queue_tail(prioqueue, skb);
476  rtdm_lock_put_irqrestore(&prioqueue->lock, context);
477 }
478 
479 /***
480  * __rtskb_dequeue - remove from the head of the queue (w/o locks)
481  * @queue: queue to remove from
482  */
483 static inline struct rtskb *__rtskb_dequeue(struct rtskb_queue *queue)
484 {
485  struct rtskb *result;
486 
487  if ((result = queue->first) != NULL) {
488  queue->first = result->next;
489  result->next = NULL;
490  }
491 
492  return result;
493 }
494 
495 /***
496  * rtskb_dequeue - remove from the head of the queue (lock protected)
497  * @queue: queue to remove from
498  */
499 static inline struct rtskb *rtskb_dequeue(struct rtskb_queue *queue)
500 {
501  rtdm_lockctx_t context;
502  struct rtskb *result;
503 
504  rtdm_lock_get_irqsave(&queue->lock, context);
505  result = __rtskb_dequeue(queue);
506  rtdm_lock_put_irqrestore(&queue->lock, context);
507 
508  return result;
509 }
510 
511 /***
512  * __rtskb_prio_dequeue - remove from the head of the prioritized queue
513  * (w/o locks)
514  * @prioqueue: queue to remove from
515  */
516 static inline struct rtskb *
517  __rtskb_prio_dequeue(struct rtskb_prio_queue *prioqueue)
518 {
519  int prio;
520  struct rtskb *result = NULL;
521  struct rtskb_queue *sub_queue;
522 
523  if (prioqueue->usage) {
524  prio = ffz(~prioqueue->usage);
525  sub_queue = &prioqueue->queue[prio];
526  result = __rtskb_dequeue(sub_queue);
527  if (rtskb_queue_empty(sub_queue))
528  __change_bit(prio, &prioqueue->usage);
529  }
530 
531  return result;
532 }
533 
534 /***
535  * rtskb_prio_dequeue - remove from the head of the prioritized queue
536  * (lock protected)
537  * @prioqueue: queue to remove from
538  */
539 static inline struct rtskb *
540  rtskb_prio_dequeue(struct rtskb_prio_queue *prioqueue)
541 {
542  rtdm_lockctx_t context;
543  struct rtskb *result;
544 
545  rtdm_lock_get_irqsave(&prioqueue->lock, context);
546  result = __rtskb_prio_dequeue(prioqueue);
547  rtdm_lock_put_irqrestore(&prioqueue->lock, context);
548 
549  return result;
550 }
551 
552 /***
553  * __rtskb_dequeue_chain - remove a chain from the head of the queue
554  * (w/o locks)
555  * @queue: queue to remove from
556  */
557 static inline struct rtskb *__rtskb_dequeue_chain(struct rtskb_queue *queue)
558 {
559  struct rtskb *result;
560  struct rtskb *chain_end;
561 
562  if ((result = queue->first) != NULL) {
563  chain_end = result->chain_end;
564  queue->first = chain_end->next;
565  chain_end->next = NULL;
566  }
567 
568  return result;
569 }
570 
571 /***
572  * rtskb_dequeue_chain - remove a chain from the head of the queue
573  * (lock protected)
574  * @queue: queue to remove from
575  */
576 static inline struct rtskb *rtskb_dequeue_chain(struct rtskb_queue *queue)
577 {
578  rtdm_lockctx_t context;
579  struct rtskb *result;
580 
581  rtdm_lock_get_irqsave(&queue->lock, context);
582  result = __rtskb_dequeue_chain(queue);
583  rtdm_lock_put_irqrestore(&queue->lock, context);
584 
585  return result;
586 }
587 
588 /***
589  * rtskb_prio_dequeue_chain - remove a chain from the head of the
590  * prioritized queue
591  * @prioqueue: queue to remove from
592  */
593 static inline
594  struct rtskb *rtskb_prio_dequeue_chain(struct rtskb_prio_queue *prioqueue)
595 {
596  rtdm_lockctx_t context;
597  int prio;
598  struct rtskb *result = NULL;
599  struct rtskb_queue *sub_queue;
600 
601  rtdm_lock_get_irqsave(&prioqueue->lock, context);
602  if (prioqueue->usage) {
603  prio = ffz(~prioqueue->usage);
604  sub_queue = &prioqueue->queue[prio];
605  result = __rtskb_dequeue_chain(sub_queue);
606  if (rtskb_queue_empty(sub_queue))
607  __change_bit(prio, &prioqueue->usage);
608  }
609  rtdm_lock_put_irqrestore(&prioqueue->lock, context);
610 
611  return result;
612 }
613 
614 /***
615  * rtskb_queue_purge - clean the queue
616  * @queue
617  */
618 static inline void rtskb_queue_purge(struct rtskb_queue *queue)
619 {
620  struct rtskb *skb;
621  while ( (skb=rtskb_dequeue(queue))!=NULL )
622  kfree_rtskb(skb);
623 }
624 
625 static inline int rtskb_headlen(const struct rtskb *skb)
626 {
627  return skb->len;
628 }
629 
630 static inline void rtskb_reserve(struct rtskb *skb, unsigned int len)
631 {
632  skb->data+=len;
633  skb->tail+=len;
634 }
635 
636 static inline unsigned char *__rtskb_put(struct rtskb *skb, unsigned int len)
637 {
638  unsigned char *tmp=skb->tail;
639 
640  skb->tail+=len;
641  skb->len+=len;
642  return tmp;
643 }
644 
645 #define rtskb_put(skb, length) \
646 ({ \
647  struct rtskb *__rtskb = (skb); \
648  unsigned int __len = (length); \
649  unsigned char *tmp=__rtskb->tail; \
650 \
651  __rtskb->tail += __len; \
652  __rtskb->len += __len; \
653 \
654  RTNET_ASSERT(__rtskb->tail <= __rtskb->buf_end, \
655  rtskb_over_panic(__rtskb, __len, current_text_addr());); \
656 \
657  tmp; \
658 })
659 
660 static inline unsigned char *__rtskb_push(struct rtskb *skb, unsigned int len)
661 {
662  skb->data-=len;
663  skb->len+=len;
664  return skb->data;
665 }
666 
667 #define rtskb_push(skb, length) \
668 ({ \
669  struct rtskb *__rtskb = (skb); \
670  unsigned int __len = (length); \
671 \
672  __rtskb->data -= __len; \
673  __rtskb->len += __len; \
674 \
675  RTNET_ASSERT(__rtskb->data >= __rtskb->buf_start, \
676  rtskb_under_panic(__rtskb, __len, current_text_addr());); \
677 \
678  __rtskb->data; \
679 })
680 
681 static inline unsigned char *__rtskb_pull(struct rtskb *skb, unsigned int len)
682 {
683  RTNET_ASSERT(len <= skb->len, return NULL;);
684 
685  skb->len -= len;
686 
687  return skb->data += len;
688 }
689 
690 static inline unsigned char *rtskb_pull(struct rtskb *skb, unsigned int len)
691 {
692  if (len > skb->len)
693  return NULL;
694 
695  skb->len -= len;
696 
697  return skb->data += len;
698 }
699 
700 static inline void rtskb_trim(struct rtskb *skb, unsigned int len)
701 {
702  if (skb->len>len) {
703  skb->len = len;
704  skb->tail = skb->data+len;
705  }
706 }
707 
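/*
 * Illustrative sketch (not part of this header): typical use of the data
 * pointer helpers above, analogous to the Linux skb_* calls. ETH_HLEN comes
 * from <linux/if_ether.h>; data, data_len and eth are placeholders:
 *
 *  rtskb_reserve(skb, ETH_HLEN);                      // keep headroom
 *  memcpy(rtskb_put(skb, data_len), data, data_len);  // append payload
 *  eth = (struct ethhdr *)rtskb_push(skb, ETH_HLEN);  // prepend header
 *
 *  rtskb_pull(skb, ETH_HLEN);                // receive side: strip it again
 */
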
708 static inline struct rtskb *rtskb_padto(struct rtskb *rtskb, unsigned int len)
709 {
710  RTNET_ASSERT(len <= (unsigned int)(rtskb->buf_end + 1 - rtskb->data),
711  return NULL;);
712 
713  memset(rtskb->data + rtskb->len, 0, len - rtskb->len);
714 
715  return rtskb;
716 }
717 
718 static inline dma_addr_t rtskb_data_dma_addr(struct rtskb *rtskb,
719  unsigned int offset)
720 {
721  return rtskb->buf_dma_addr + rtskb->data - rtskb->buf_start + offset;
722 }
723 
724 extern struct rtskb_pool global_pool;
725 
726 extern unsigned int rtskb_pool_init(struct rtskb_pool *pool,
727  unsigned int initial_size,
728  const struct rtskb_pool_lock_ops *lock_ops,
729  void *lock_cookie);
730 
731 extern unsigned int __rtskb_module_pool_init(struct rtskb_pool *pool,
732  unsigned int initial_size,
733  struct module *module);
734 
735 #define rtskb_module_pool_init(pool, size) \
736  __rtskb_module_pool_init(pool, size, THIS_MODULE)
737 
738 extern void rtskb_pool_release(struct rtskb_pool *pool);
739 
740 extern unsigned int rtskb_pool_extend(struct rtskb_pool *pool,
741  unsigned int add_rtskbs);
742 extern unsigned int rtskb_pool_shrink(struct rtskb_pool *pool,
743  unsigned int rem_rtskbs);
744 extern int rtskb_acquire(struct rtskb *rtskb, struct rtskb_pool *comp_pool);
745 extern struct rtskb* rtskb_clone(struct rtskb *rtskb,
746  struct rtskb_pool *pool);
747 
748 extern int rtskb_pools_init(void);
749 extern void rtskb_pools_release(void);
750 
751 extern unsigned int rtskb_copy_and_csum_bits(const struct rtskb *skb,
752  int offset, u8 *to, int len,
753  unsigned int csum);
754 extern void rtskb_copy_and_csum_dev(const struct rtskb *skb, u8 *to);
755 
756 
757 #if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
758 
759 extern rtdm_lock_t rtcap_lock;
760 extern void (*rtcap_handler)(struct rtskb *skb);
761 
762 static inline void rtcap_mark_incoming(struct rtskb *skb)
763 {
764  skb->cap_start = skb->data;
765  skb->cap_len = skb->len;
766 }
767 
768 static inline void rtcap_report_incoming(struct rtskb *skb)
769 {
770  rtdm_lockctx_t context;
771 
772 
773  rtdm_lock_get_irqsave(&rtcap_lock, context);
774  if (rtcap_handler != NULL)
775  rtcap_handler(skb);
776 
777  rtdm_lock_put_irqrestore(&rtcap_lock, context);
778 }
779 
780 static inline void rtcap_mark_rtmac_enqueue(struct rtskb *skb)
781 {
782  /* rtskb start and length are probably not valid yet */
783  skb->cap_flags |= RTSKB_CAP_RTMAC_STAMP;
784  skb->cap_rtmac_stamp = rtdm_clock_read();
785 }
786 
787 #else /* ifndef CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP */
788 
789 #define rtcap_mark_incoming(skb)
790 #define rtcap_report_incoming(skb)
791 #define rtcap_mark_rtmac_enqueue(skb)
792 
793 #endif /* CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP */
794 
795 
796 #endif /* __KERNEL__ */
797 
798 #endif /* __RTSKB_H_ */