/* $Id$
 *
 * History:
 * merged from Andrei's qmalloc; many fragments from the Regents of the
 * University of California NetBSD malloc were used; see
 * http://www.ajk.tele.fi/libc/stdlib/malloc.c.html#malloc for more
 * details including the redistribution policy; this policy asks for
 * displaying the copyright:
 * Copyright (c) 1983 Regents of the University of California.
 * All rights reserved.
 *
 *
 * About:
 * an aggressive, wasteful and very quick malloc library built for
 * servers that continuously allocate and release chunks of only a
 * few sizes:
 * - free lists are organized by size (which eliminates long list
 *   traversal through short free chunks when a long one is requested)
 * - quite a few sizes are supported --> this results in more waste
 *   (unused space in a chunk); however, memory can be reused well
 *   after freeing
 * - the last bucket holds unlikely, huge, variable-length chunks;
 *   they are maintained as a stack growing from the opposite direction
 *   of our heap; coalescing is enabled; a stack-like first-fit is used
 *   to find free fragments
 *
 * TODO: possibly, some delayed coalescing wouldn't hurt; further
 * optimization would certainly speed up the entire process a lot; using
 * knowledge of the application, as well as trying to keep the pipeline
 * happy (from which I am sadly far away now), provides optimization
 * space; trying other memory allocators for comparison would be a great
 * thing to do too;
 *
 * also, comparing to other memory allocators (like Hoard) would not
 * be a bad idea: those folks have been doing this for ages, and Hoard
 * in particular has been heavily optimized for multi-processor machines
 *
 * References:
 *   - list of malloc implementations: http://www.cs.colorado.edu/~zorn/Malloc.html
 *   - a white paper: http://g.oswego.edu/dl/html/malloc.html
 *   - Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles:
 *     ``Dynamic Storage Allocation: A Survey and Critical Review'', in
 *     International Workshop on Memory Management, September 1995,
 *     ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps
 *   - ptmalloc: http://www.malloc.de/en/
 *   - GNU C-lib malloc: http://www.gnu.org/manual/glibc-2.0.6/html_chapter/libc_3.html
 *   - delorie mallocs: http://www.delorie.com/djgpp/malloc/
 */
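/* A minimal usage sketch, assuming a plain (non-DBG_QM_MALLOC) build
 * (with DBG_QM_MALLOC defined, vqm_malloc/vqm_free take additional
 * file/function/line arguments) and a caller-provided memory region,
 * here a hypothetical static buffer:
 *
 *	static char pool[256*1024];
 *	struct vqm_block *qm;
 *	char *p;
 *
 *	qm = vqm_malloc_init(pool, sizeof(pool));
 *	if (qm && (p = vqm_malloc(qm, 128)) != 0) {
 *		... use up to 128 bytes at p ...
 *		vqm_free(qm, p);
 *	}
 */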
#ifdef VQ_MALLOC

#include <stdlib.h>
#include <string.h>

#include "../config.h"
#include "../globals.h"
#include "vq_malloc.h"
#include "../dprint.h"

#define BIG_BUCKET(_qm) ((_qm)->max_small_bucket+1)
#define IS_BIGBUCKET(_qm, _bucket) ((_bucket)==BIG_BUCKET(_qm))

#ifdef DBG_QM_MALLOC
#define ASSERT(a)	\
	my_assert(a, __LINE__, __FILE__, __FUNCTION__ )
#else
#define ASSERT(a)
#endif

#ifdef DBG_QM_MALLOC
#	define MORE_CORE(_q,_b,_s) (more_core( (_q), (_b), (_s), file, func, line ))
#else
#	define MORE_CORE(_q,_b,_s) (more_core( (_q), (_b), (_s) ))
#endif

/* dimensioning buckets: define the step function constants for size2bucket */
int s2b_step[] = {8, 16, 32, 64, 128, 256, 512, 1024, 1536, 2048, 2560, MAX_FIXED_BLOCK, EO_STEP };
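/* Reading the step table (a sketch of the semantics, based on how
 * vqm_malloc_init fills qm->s2b/qm->s2s below): bucket b serves chunks
 * of exactly s2b_step[b] bytes, so e.g. any rounded request of 17..32
 * bytes lands in the 32-byte bucket; everything of MAX_FIXED_BLOCK
 * bytes and above goes to the variable-length "big" bucket. */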

void my_assert( int assertion, int line, char *file, char *function )
{
	if (assertion) return;

	LOG(L_CRIT,"CRIT: assertion failed in %s (%s:%d)\n",
		function, file, line);
	abort();
}
#ifdef DBG_QM_MALLOC
void vqm_debug_frag(struct vqm_block* qm, struct vqm_frag* f)
{
	if (f->check!=ST_CHECK_PATTERN){
		LOG(L_CRIT, "BUG: vqm_*: fragm. %p beginning overwritten(%x)!\n",
				f, f->check);
		vqm_status(qm);
		abort();
	}
	if (memcmp(f->end_check, END_CHECK_PATTERN, END_CHECK_PATTERN_LEN)!=0) {
		LOG(L_CRIT, "BUG: vqm_*: fragm. %p end overwritten(%.*s)!\n",
				f, END_CHECK_PATTERN_LEN, f->end_check );
		vqm_status(qm);
		abort();
	}
}
#endif

/* takes the demanded size without overhead as input; returns the bucket
   number and changes the demanded size to the size really used,
   including all possible overhead
 */
unsigned char size2bucket( struct vqm_block* qm, int *size  )
{
	unsigned char b;
	unsigned int real_size;
	unsigned int exceeds;

	real_size = *size + VQM_OVERHEAD;

#ifdef DBG_QM_MALLOC
	real_size+=END_CHECK_PATTERN_LEN;
#endif
	/* round real_size up to the next multiple of 8 */
	real_size+=((exceeds = (real_size % 8 )) ? 8 - exceeds : 0);
	ASSERT( !(real_size%8) );
	/* "small" chunk sizes (<MAX_FIXED_BLOCK) are translated using a table */
	if ( real_size < MAX_FIXED_BLOCK ) {
		b = qm->s2b[ real_size ];
		*size = qm->s2s[ real_size ];
	/* there might be various allocations slightly above the fixed-block
	   limit; I still don't want to be too aggressive and increase useless
	   allocations in small steps
	*/
	} else {
		b = BIG_BUCKET(qm);
		*size = MAX_FIXED_BLOCK +
			(real_size-MAX_FIXED_BLOCK+BLOCK_STEP)
			/ BLOCK_STEP * BLOCK_STEP;
	}
	/* size must be a multiple of 8 */
	ASSERT( !(*size%8) );
	return b;
}
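/* Worked example (a sketch; the actual values of VQM_OVERHEAD,
 * MAX_FIXED_BLOCK and BLOCK_STEP are build-dependent): if real_size
 * comes out as 24, the request is served from bucket qm->s2b[24] (the
 * 32-byte bucket with the step table above) and *size becomes
 * qm->s2s[24]==32; if real_size equals MAX_FIXED_BLOCK+10, the big
 * bucket is chosen and *size is rounded up to a BLOCK_STEP boundary
 * above MAX_FIXED_BLOCK. */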

/* init malloc and return a qm_block */
struct vqm_block* vqm_malloc_init(char* address, unsigned int size)
{
	char* start;
	struct vqm_block* qm;
	unsigned int init_overhead;
	unsigned char b;	/* bucket iterator */
	unsigned int s;		/* size iterator */
	char *end;

	/* make address and size multiples of 8 */
	start=(char*)( ((unsigned long)address%8)?((unsigned long)address+8)/8*8:
			(unsigned long)address);
	if (size<start-address) return 0;
	size-=(start-address);
	if (size <8) return 0;
	/* round size down to a multiple of 8 */
	size=(size%8)?size/8*8:size;

	init_overhead=sizeof(struct vqm_block);
	if (size < init_overhead)
	{
		/* not enough memory to create our control structures! */
		return 0;
	}
	end = start + size;
	qm=(struct vqm_block*)start;
	memset(qm, 0, sizeof(struct vqm_block));
	size-=init_overhead;

	/* definition of the size2bucket table */
	for (s=0, b=0; s<MAX_FIXED_BLOCK ; s++) {
		while (s>s2b_step[b]) b++;
		if (b>MAX_BUCKET) {
			LOG(L_CRIT, "CRIT: vqm_malloc_init: attempt to install too "
				"many buckets, s2b_step > MAX_BUCKET\n");
			return 0;
		}
		qm->s2b[s] = b;
		qm->s2s[s] = s2b_step[b];
	}
	qm->max_small_bucket = b;

	/* from where we will draw memory */
	qm->core = (char *) ( start + sizeof( struct vqm_block ) );
	qm->free_core = size;
	/* remember for bound checking */
	qm->init_core = qm->core;
	qm->core_end = end;
	/* allocate big chunks from the end */
	qm->big_chunks = end;

	return qm;
}
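/* Resulting memory layout (a sketch; addresses grow to the right):
 *
 *   start                                                      end
 *   +-----------+---------------- ... ----------------+---------+
 *   | vqm_block | fixed chunks -->    free    <-- big chunks    |
 *   +-----------+---------------- ... ----------------+---------+
 *               ^qm->init_core  ^qm->core  ^qm->big_chunks      ^qm->core_end
 *
 * Fixed-size chunks are carved from qm->core upwards, big chunks from
 * qm->big_chunks downwards; qm->free_core tracks how much of the gap
 * between the two is still unclaimed. */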


struct vqm_frag *more_core(	struct vqm_block* qm,
				unsigned char bucket, unsigned int size
#ifdef DBG_QM_MALLOC
				, char *file, char *func, unsigned int line
#endif
			 )
{
	struct vqm_frag *new_chunk;
	struct vqm_frag_end *end;

	if (qm->free_core<size) return 0;

	/* update the core */
	if (IS_BIGBUCKET(qm, bucket)) {
		qm->big_chunks-=size;
		new_chunk = (struct vqm_frag *) qm->big_chunks;
	} else {
		new_chunk = (struct vqm_frag *) qm->core;
		qm->core+=size;
	}
	qm->free_core-=size;

	/* initialize the new fragment */
	new_chunk->u.inuse.bucket = bucket;
	new_chunk->size = size;

	end=FRAG_END( new_chunk );
	end->size=size;

	return new_chunk;
}

static inline void vqm_detach_free( struct vqm_block* qm, struct vqm_frag* frag)
{
	struct vqm_frag *prev, *next;

	prev=FRAG_END(frag)->prv_free;
	next=frag->u.nxt_free;

	if (prev) prev->u.nxt_free=next;
	else qm->next_free[BIG_BUCKET(qm)]=next;

	if (next) FRAG_END(next)->prv_free=prev;
}
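/* Note on the list structure (as used above): the forward pointer of a
 * free fragment lives in its header (u.nxt_free), the back pointer in
 * its trailer (FRAG_END(f)->prv_free); a NULL prev marks the list head.
 * The back links are maintained only for the big bucket, which is the
 * only free list that gets detached from the middle (first-fit); the
 * fixed-size buckets are used as LIFO stacks via the head only. */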

#ifdef DBG_QM_MALLOC
void* vqm_malloc(struct vqm_block* qm, unsigned int size,
	char* file, char* func, unsigned int line)
#else
void* vqm_malloc(struct vqm_block* qm, unsigned int size)
#endif
{
	struct vqm_frag *new_chunk, *f;
	unsigned char bucket;

#ifdef DBG_QM_MALLOC
	unsigned int demanded_size;
	DBG("vqm_malloc(%p, %d) called from %s: %s(%d)\n", qm, size, file,
		func, line);
	demanded_size = size;
#endif
	new_chunk=0;
	/* what's the bucket? what's the total size incl. overhead? */
	bucket = size2bucket( qm, &size );

	if (IS_BIGBUCKET(qm, bucket)) { /* the kilo-bucket uses first-fit */
#ifdef DBG_QM_MALLOC
		DBG("vqm_malloc: processing a big fragment\n");
#endif
		for (f=qm->next_free[bucket] ; f; f=f->u.nxt_free )
			if (f->size>=size) { /* first-fit */
				new_chunk=f;
				VQM_DEBUG_FRAG(qm, f);
				vqm_detach_free(qm,f);
				break;
			}
	} else if (  (new_chunk=qm->next_free[ bucket ]) ) { /* fixed-size bucket */
			VQM_DEBUG_FRAG(qm, new_chunk);
			/* detach it from the head of the bucket's free list */
			qm->next_free[ bucket ] = new_chunk->u.nxt_free;
	}

	if (!new_chunk) { /* no chunk can be reused; slice one from the core */
		new_chunk=MORE_CORE( qm, bucket, size );
		if (!new_chunk) {
#ifdef DBG_QM_MALLOC
			LOG(L_DBG, "vqm_malloc(%p, %d): out of memory; called from "
				"%s: %s(%d)\n", qm, size, file, func, line);
#else
			LOG(L_DBG, "vqm_malloc(%p, %d): out of memory\n",
				qm, size);
#endif
			return 0;
		}
	}
	new_chunk->u.inuse.magic = FR_USED;
	new_chunk->u.inuse.bucket=bucket;
#ifdef DBG_QM_MALLOC
	new_chunk->file=file;
	new_chunk->func=func;
	new_chunk->line=line;
	new_chunk->demanded_size=demanded_size;
	qm->usage[ bucket ]++;
	DBG("vqm_malloc( %p, %d ) returns address %p in bucket %d, real-size %d\n",
		qm, demanded_size, (char*)new_chunk+sizeof(struct vqm_frag),
		bucket, size );

	new_chunk->end_check=(char*)new_chunk+sizeof(struct vqm_frag)+demanded_size;
	memcpy( new_chunk->end_check, END_CHECK_PATTERN, END_CHECK_PATTERN_LEN );
	new_chunk->check=ST_CHECK_PATTERN;
#endif
	return (char*)new_chunk+sizeof(struct vqm_frag);
}

#ifdef DBG_QM_MALLOC
void vqm_free(struct vqm_block* qm, void* p, char* file, char* func,
				unsigned int line)
#else
void vqm_free(struct vqm_block* qm, void* p)
#endif
{
	struct vqm_frag *f, *next, *prev, *first_big;
	unsigned char b;

#ifdef DBG_QM_MALLOC
	DBG("vqm_free(%p, %p), called from %s: %s(%d)\n",
		qm, p, file, func, line);
#endif
	if (p==0) {
		DBG("WARNING:vqm_free: free(0) called\n");
		return;
	}
#ifdef DBG_QM_MALLOC
	/* NULL is handled above, before this bound check would abort on it */
	if (p>(void *)qm->core_end || p<(void*)qm->init_core){
		LOG(L_CRIT, "BUG: vqm_free: bad pointer %p (out of memory block!) - "
				"aborting\n", p);
		abort();
	}
#endif
	f=(struct vqm_frag*) ((char*)p-sizeof(struct vqm_frag));
	b=f->u.inuse.bucket;
#ifdef DBG_QM_MALLOC
	VQM_DEBUG_FRAG(qm, f);
	if ( ! FRAG_ISUSED(f) ) {
		LOG(L_CRIT, "BUG: vqm_free: freeing already freed pointer,"
				" first freed: %s: %s(%d) - aborting\n",
				f->file, f->func, f->line);
		abort();
	}
	if ( b>MAX_BUCKET ) {
		LOG(L_CRIT, "BUG: vqm_free: fragment with too high bucket nr: "
				"%d, allocated: %s: %s(%d) - aborting\n",
				b, f->file, f->func, f->line);
		abort();
	}
	DBG("vqm_free: freeing %d bucket block alloc'ed from %s: %s(%d)\n",
		f->u.inuse.bucket, f->file, f->func, f->line);
	f->file=file; f->func=func; f->line=line;
	qm->usage[ f->u.inuse.bucket ]--;
#endif
	if (IS_BIGBUCKET(qm,b)) {
		next=FRAG_NEXT(f);
		if ((char *)next+sizeof( struct vqm_frag) < qm->core_end) {
			VQM_DEBUG_FRAG(qm, next);
			if (! FRAG_ISUSED(next)) { /* coalesce with the next fragment */
				DBG("vqm_free: coalesced with next\n");
				vqm_detach_free(qm, next);
				f->size+=next->size;
				FRAG_END(f)->size=f->size;
			}
		}
		first_big = qm->next_free[b];
		if (first_big && f>first_big) {
			prev=FRAG_PREV(f);
			VQM_DEBUG_FRAG(qm, prev);
			if (!FRAG_ISUSED(prev)) { /* coalesce with the previous fragment */
				DBG("vqm_free: coalesced with prev\n");
				vqm_detach_free(qm, prev );
				prev->size+=f->size;
				f=prev;
				FRAG_END(f)->size=f->size;
			}
		}
		if ((char *)f==qm->big_chunks) { /* release unused core */
			DBG("vqm_free: big chunk released\n");
			qm->free_core+=f->size;
			qm->big_chunks+=f->size;
			return;
		}
		first_big = qm->next_free[b];
		/* fix the reverse link (used only for the big bucket) */
		if (first_big) FRAG_END(first_big)->prv_free=f;
		FRAG_END(f)->prv_free=0;
	} else first_big = qm->next_free[b];
	f->u.nxt_free = first_big; /* also clobbers magic */
	qm->next_free[b] = f;
}
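/* Big-bucket free path in short (a summary of the code above): try to
 * merge with the physically following free fragment, then with the
 * preceding one; if the merged fragment ends up at the top of the
 * big-chunk stack (qm->big_chunks), return its memory to the free core
 * instead of listing it; otherwise push it onto the head of the big
 * bucket's free list. Fixed-size fragments are always just pushed onto
 * their bucket's list. */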

void dump_frag( struct vqm_frag* f, int i )
{
	LOG(memlog, "    %3d. address=%p  real size=%d bucket=%d\n", i,
		(char*)f+sizeof(struct vqm_frag), f->size, f->u.inuse.bucket);
#ifdef DBG_QM_MALLOC
	LOG(memlog, "            demanded size=%d\n", f->demanded_size );
	LOG(memlog, "            alloc'd from %s: %s(%d)\n",
		f->file, f->func, f->line);
	LOG(memlog, "        start check=%x, end check=%.*s\n",
			f->check, END_CHECK_PATTERN_LEN, f->end_check );
#endif
}

void vqm_status(struct vqm_block* qm)
{
	struct vqm_frag* f;
	unsigned int i,on_list;

	LOG(memlog, "vqm_status (%p):\n", qm);
	if (!qm) return;
	LOG(memlog, " heap size= %d, available: %d\n",
		qm->core_end-qm->init_core, qm->free_core );

	LOG(memlog, "dumping unfreed fragments:\n");
	for (f=(struct vqm_frag*)qm->init_core, i=0; (char*)f<(char*)qm->core;
		f=FRAG_NEXT(f), i++) if ( FRAG_ISUSED(f) ) dump_frag(f, i);

	LOG(memlog, "dumping unfreed big fragments:\n");
	for (f=(struct vqm_frag*)qm->big_chunks, i=0; (char*)f<(char*)qm->core_end;
		f=FRAG_NEXT(f), i++) if ( FRAG_ISUSED(f) ) dump_frag(f, i);

#ifdef DBG_QM_MALLOC
	DBG("dumping bucket statistics:\n");
	for (i=0; i<=BIG_BUCKET(qm); i++) {
		for (on_list=0, f=qm->next_free[i]; f; f=f->u.nxt_free ) on_list++;
		LOG(L_DBG, "    %3d. bucket: in use: %ld, on free list: %d\n",
			i, qm->usage[i], on_list );
	}
#endif
	LOG(memlog, "-----------------------------\n");
}



#endif