#ifndef _XENO_NUCLEUS_HEAP_H
#define _XENO_NUCLEUS_HEAP_H

#include <nucleus/queue.h>

#if defined(__KERNEL__) || defined(__XENO_SIM__)

#define XNHEAP_MINLOG2 3
#define XNHEAP_MAXLOG2 22
#define XNHEAP_MINALLOCSZ (1 << XNHEAP_MINLOG2)
#define XNHEAP_MINALIGNSZ (1 << 4)
#define XNHEAP_NBUCKETS (XNHEAP_MAXLOG2 - XNHEAP_MINLOG2 + 2)
#define XNHEAP_MAXEXTSZ (1U << 31) /* 2 GB */

#define XNHEAP_PFREE 0
#define XNHEAP_PCONT 1
#define XNHEAP_PLIST 2
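
/*
 * Sizing notes (an interpretation of the constants above, not
 * verbatim allocator documentation): blocks are managed in
 * power-of-two size classes from XNHEAP_MINALLOCSZ (1 << 3 == 8
 * bytes) up to 1 << XNHEAP_MAXLOG2 (4 MB), one bucket per class plus
 * one extra bucket, hence XNHEAP_NBUCKETS == 22 - 3 + 2 == 21.  The
 * XNHEAP_PFREE/PCONT/PLIST values appear to be page states recorded
 * in the per-page map defined below: free page, continuation of a
 * multi-page block, and page linked on a free list; their exact use
 * is defined by the allocator implementation, not by this header.
 */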

typedef struct xnextent {

        xnholder_t link;

#define link2extent(ln) container_of(ln, xnextent_t, link)

        caddr_t membase,
                memlim,
                freelist;

        struct xnpagemap {
                unsigned int type : 8;
                unsigned int bcount : 24;
        } pagemap[1];

} xnextent_t;
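
/*
 * Layout sketch (an interpretation of the fields above, not verbatim
 * allocator documentation): each extent is one contiguous chunk
 * handed to the heap, organized as
 *
 *      [ struct xnextent | pagemap[npages] | pages ... ]
 *
 * membase points at the first page, memlim just past the last one,
 * and freelist heads the list of free pages within this extent.
 * pagemap[] holds one entry per page: 'type' records the page state
 * (XNHEAP_PFREE, XNHEAP_PCONT, XNHEAP_PLIST or, presumably, the log2
 * size of the blocks the page has been split into) and 'bcount'
 * counts the blocks currently allocated from that page.
 */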

typedef struct xnheap {

        xnholder_t link;

#define link2heap(ln) container_of(ln, xnheap_t, link)

        u_long extentsize,
               pagesize,
               pageshift,
               hdrsize,
               npages,
               ubytes,
               maxcont;

        xnqueue_t extents;

        DECLARE_XNLOCK(lock);

        struct xnbucket {
                caddr_t freelist;
                int fcount;
        } buckets[XNHEAP_NBUCKETS];

        xnholder_t *idleq;

        xnarch_heapcb_t archdep;

        XNARCH_DECL_DISPLAY_CONTEXT();

} xnheap_t;

extern xnheap_t kheap;

#if CONFIG_XENO_OPT_SYS_STACKPOOLSZ > 0
extern xnheap_t kstacks;
#endif

#define xnheap_extentsize(heap) ((heap)->extentsize)
#define xnheap_page_size(heap) ((heap)->pagesize)
#define xnheap_page_count(heap) ((heap)->npages)
#define xnheap_usable_mem(heap) ((heap)->maxcont * countq(&(heap)->extents))
#define xnheap_used_mem(heap) ((heap)->ubytes)
#define xnheap_max_contiguous(heap) ((heap)->maxcont)
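
/*
 * Minimal usage sketch for the accessors above (illustrative only;
 * the function name and printk format are made up):
 *
 *      void dump_heap_stats(xnheap_t *heap)
 *      {
 *              printk("heap: %lu of %lu bytes used, page size %lu\n",
 *                     xnheap_used_mem(heap),
 *                     xnheap_usable_mem(heap),
 *                     xnheap_page_size(heap));
 *      }
 */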

static inline size_t xnheap_align(size_t size, size_t al)
{
        return ((size+al-1)&(~(al-1)));
}
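
/*
 * xnheap_align() rounds 'size' up to the next multiple of 'al',
 * which is assumed to be a power of two.  For instance:
 *
 *      xnheap_align(1, 8)   == 8
 *      xnheap_align(13, 16) == 16
 *      xnheap_align(16, 16) == 16
 *      xnheap_align(17, 16) == 32
 */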

static inline size_t xnheap_overhead(size_t hsize, size_t psize)
{
        size_t m = psize / sizeof(struct xnpagemap);
        size_t q = (size_t)xnarch_llimd(hsize - sizeof(xnextent_t), m, m + 1);
        return xnheap_align(hsize - q, XNHEAP_MINALIGNSZ);
}
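
/*
 * How the estimate above works (a derivation from the code, not
 * authoritative documentation): every page of 'psize' bytes needs
 * one struct xnpagemap entry, so once the extent header is taken
 * out, a fraction m / (m + 1) of the remaining space is usable as
 * pages, with m == psize / sizeof(struct xnpagemap), and 1 / (m + 1)
 * goes to the page map.  The overhead is therefore the header plus
 * the page-map share, rounded up to XNHEAP_MINALIGNSZ.
 */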

#define xnmalloc(size) xnheap_alloc(&kheap,size)
#define xnfree(ptr) xnheap_free(&kheap,ptr)
#define xnfreesync() xnheap_finalize_free(&kheap)
#define xnfreesafe(thread,ptr,ln) \
        do { \
                if (xnpod_current_p(thread)) \
                        xnheap_schedule_free(&kheap,ptr,ln); \
                else \
                        xnheap_free(&kheap,ptr); \
        } while(0)
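
/*
 * Typical use of the shortcuts above (a sketch; 'mystruct' and the
 * calling context are made up).  xnmalloc()/xnfree() operate on the
 * global kernel heap 'kheap'.  xnfreesafe() is meant for blocks the
 * current thread may still be running on, such as its own control
 * block: if 'thread' is the caller itself, the release is deferred
 * through xnheap_schedule_free(), otherwise the block is freed at
 * once.
 *
 *      struct mystruct *p = xnmalloc(sizeof(*p));
 *      if (p == NULL)
 *              return -ENOMEM;
 *      ...
 *      xnfree(p);
 */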

static inline size_t xnheap_rounded_size(size_t hsize, size_t psize)
{
        /*
         * Make sure the heap stores at least two pages, then account
         * for the housekeeping overhead (extent header plus page
         * map) and round the result up to a whole number of pages.
         */
        if (hsize < 2 * psize)
                hsize = 2 * psize;
        hsize += xnheap_overhead(hsize, psize);
        return xnheap_align(hsize, psize);
}
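
/*
 * Sizing helper summary (an interpretation of the code above): the
 * returned value is a whole number of pages, covers at least two
 * pages, and includes the metadata overhead, so that roughly 'hsize'
 * bytes remain allocatable.  Presumably this is the figure to
 * reserve for the heap and then pass to xnheap_init() as its
 * heapsize argument, e.g. xnheap_rounded_size(64 * 1024, 512) for a
 * 64 KB heap managed in 512-byte pages.
 */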

#ifdef __cplusplus
extern "C" {
#endif

#ifdef __KERNEL__

#define XNHEAP_DEV_MINOR 254

int xnheap_mount(void);

void xnheap_umount(void);

int xnheap_init_mapped(xnheap_t *heap,
                       u_long heapsize,
                       int memflags);

int xnheap_destroy_mapped(xnheap_t *heap);

#define xnheap_mapped_offset(heap,ptr) \
        (((caddr_t)(ptr)) - ((caddr_t)(heap)->archdep.heapbase))

#define xnheap_mapped_address(heap,off) \
        (((caddr_t)(heap)->archdep.heapbase) + (off))

#define xnheap_mapped_p(heap) \
        ((heap)->archdep.heapbase != NULL)

#endif /* __KERNEL__ */
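
/*
 * Mapped-heap sketch (a sketch only; the exact init flags and error
 * handling are left out): a heap set up with xnheap_init_mapped() is
 * backed by memory that user space can mmap() through the
 * /dev/rtheap device, so both sides exchange block positions as
 * offsets into the heap rather than raw pointers:
 *
 *      void *blk = xnheap_alloc(heap, 256);
 *      u_long off = xnheap_mapped_offset(heap, blk);
 *      ... pass 'off' across the kernel/user boundary ...
 *      blk = xnheap_mapped_address(heap, off);
 */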

int xnheap_init(xnheap_t *heap,
                void *heapaddr,
                u_long heapsize,
                u_long pagesize);

int xnheap_destroy(xnheap_t *heap,
                   void (*flushfn)(xnheap_t *heap,
                                   void *extaddr,
                                   u_long extsize,
                                   void *cookie),
                   void *cookie);

int xnheap_extend(xnheap_t *heap,
                  void *extaddr,
                  u_long extsize);

void *xnheap_alloc(xnheap_t *heap,
                   u_long size);

int xnheap_test_and_free(xnheap_t *heap,
                         void *block,
                         int (*ckfn)(void *block));

int xnheap_free(xnheap_t *heap,
                void *block);
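
/*
 * End-to-end usage sketch for the routines above (illustrative only:
 * 'pool', 'myheap', the 512-byte page size and the empty flush
 * handler are made up, and error checking is elided).  The flush
 * handler passed to xnheap_destroy() is presumably invoked for each
 * extent so its backing memory can be returned; here the pool is
 * static, so it has nothing to do:
 *
 *      static char pool[32 * 1024];
 *      static xnheap_t myheap;
 *
 *      static void noflush(xnheap_t *heap, void *extaddr,
 *                          u_long extsize, void *cookie)
 *      {
 *      }
 *
 *      xnheap_init(&myheap, pool, sizeof(pool), 512);
 *      blk = xnheap_alloc(&myheap, 128);
 *      xnheap_free(&myheap, blk);
 *      xnheap_destroy(&myheap, noflush, NULL);
 */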

void xnheap_schedule_free(xnheap_t *heap,
                          void *block,
                          xnholder_t *link);

void xnheap_finalize_free_inner(xnheap_t *heap);

static inline void xnheap_finalize_free(xnheap_t *heap)
{
        if (heap->idleq)
                xnheap_finalize_free_inner(heap);
}
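
/*
 * Note on the deferred release path (an interpretation of the code
 * above): xnheap_schedule_free() does not return memory immediately;
 * it links the block, through the caller-supplied xnholder_t, onto
 * the heap's idleq.  xnheap_finalize_free(), also reachable through
 * the xnfreesync() shortcut, drains that queue; it is cheap to call
 * when nothing is pending since it only tests heap->idleq before
 * invoking the real worker.  This is the mechanism xnfreesafe()
 * relies on when a thread releases memory it is still running on.
 */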

int xnheap_check_block(xnheap_t *heap,
                       void *block);

#ifdef __cplusplus
}
#endif

#endif /* __KERNEL__ || __XENO_SIM__ */

#define XNHEAP_DEV_NAME "/dev/rtheap"

#endif /* !_XENO_NUCLEUS_HEAP_H */