modules/ims_qos: added patch for flow-description bug when request originates from...
[sip-router] / mem / sf_malloc.c
1 /*
2  * shared memory, multi-process safe, pool based version of f_malloc
3  *
4  * This file is part of Kamailio, a free SIP server.
5  *
6  * Copyright (C) 2007 iptelorg GmbH
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20
21 #ifdef SF_MALLOC
22
23 #include <string.h>
24 #include <stdlib.h>
25
26 #include "sf_malloc.h"
27 #include "../dprint.h"
28 #include "../globals.h"
29 #include "memdbg.h"
30 #include "../cfg/cfg.h" /* memlog */
31
32 #define MAX_POOL_FRAGS 10000 /* max fragments per pool hash bucket */
33 #define MIN_POOL_FRAGS 10    /* min fragments per pool hash bucket */
34
35 /*useful macros*/
36
37 #define FRAG_NEXT(f) \
38         ((struct sfm_frag*)((char*)(f)+sizeof(struct sfm_frag)+(f)->size ))
39
40
41 /* SF_ROUNDTO= 2^k so the following works */
42 #define ROUNDTO_MASK    (~((unsigned long)SF_ROUNDTO-1))
43 #define ROUNDUP(s)              (((s)+(SF_ROUNDTO-1))&ROUNDTO_MASK)
44 #define ROUNDDOWN(s)    ((s)&ROUNDTO_MASK)
45
46 #define FRAG_OVERHEAD   (sizeof(struct sfm_frag))
47 #define INIT_OVERHEAD   \
48         (ROUNDUP(sizeof(struct sfm_block))+sizeof(struct sfm_frag))
49
50
51
52 /* finds hash if s <=SF_MALLOC_OPTIMIZE */
53 #define GET_SMALL_HASH(s) (unsigned long)(s)/SF_ROUNDTO
54 /* finds hash if s > SF_MALLOC_OPTIMIZE */
55 #define GET_BIG_HASH(s) \
56         (SF_MALLOC_OPTIMIZE/SF_ROUNDTO+big_hash_idx((s))-SF_MALLOC_OPTIMIZE_FACTOR+1)
57
58 /* finds the hash value for s, s=SF_ROUNDTO multiple*/
59 #define GET_HASH(s)   ( ((unsigned long)(s)<=SF_MALLOC_OPTIMIZE)?\
60                                                         GET_SMALL_HASH(s): GET_BIG_HASH(s) )
61
62
63 #define UN_HASH_SMALL(h) ((unsigned long)(h)*SF_ROUNDTO)
64 #define UN_HASH_BIG(h) (1UL<<((unsigned long)(h)-SF_MALLOC_OPTIMIZE/SF_ROUNDTO+\
65                                                         SF_MALLOC_OPTIMIZE_FACTOR-1))
66
67 #define UN_HASH(h)      ( ((unsigned long)(h)<=(SF_MALLOC_OPTIMIZE/SF_ROUNDTO))?\
68                                                 UN_HASH_SMALL(h): UN_HASH_BIG(h) )
69
70 #define BITMAP_BITS (sizeof(((struct sfm_block*)0)->bitmap)*8)
71 #define BITMAP_BLOCK_SIZE ((SF_MALLOC_OPTIMIZE/SF_ROUNDTO)/ BITMAP_BITS)
72 /* only for "small" hashes (up to HASH(SF_MALLOC_OPTIMIZE) */
73 #define HASH_BIT_POS(h) (((unsigned long)(h))/BITMAP_BLOCK_SIZE)
74 #define HASH_TO_BITMAP(h) (1UL<<HASH_BIT_POS(h))
75 #define BIT_TO_HASH(b) ((b)*BITMAP_BLOCK_SIZE)
76
77
78
79 /* mark/test used/unused frags */
80 #define FRAG_MARK_USED(f)
81 #define FRAG_CLEAR_USED(f)
82 #define FRAG_WAS_USED(f)   (1)
83
84 /* other frag related defines:
85  * MEM_COALESCE_FRAGS 
86  * MEM_FRAG_AVOIDANCE
87  */
88 #define MEM_FRAG_AVOIDANCE
89
90
91 #define SFM_REALLOC_REMALLOC
92
/* computes hash number for big buckets */
inline static unsigned long big_hash_idx(unsigned long s)
{
	/* s is rounded => s = k*2^n (SF_ROUNDTO=2^n);
	 * the bucket index is the position of the highest set bit in s
	 * (index i such that 2^i > s >= 2^(i-1)); s must be non-zero */
	unsigned long bit;
	unsigned long top;

	bit = sizeof(long) * 8 - 1;
	top = 1UL << bit;
	while ((s & top) == 0) {
		s <<= 1;
		bit--;
	}
	return bit;
}
105
106
107 #ifdef DBG_F_MALLOC
108 #define ST_CHECK_PATTERN   0xf0f0f0f0
109 #define END_CHECK_PATTERN1 0xc0c0c0c0
110 #define END_CHECK_PATTERN2 0xabcdefed
111 #endif
112
113
114 #ifdef SFM_ONE_LOCK
115
116 #define SFM_MAIN_HASH_LOCK(qm, hash) lock_get(&(qm)->lock)
117 #define SFM_MAIN_HASH_UNLOCK(qm, hash) lock_release(&(qm)->lock)
118 #define SFM_POOL_LOCK(p, hash) lock_get(&(p)->lock)
119 #define SFM_POOL_UNLOCK(p, hash) lock_release(&(p)->lock)
120
121 #warn "degraded performance, only one lock"
122
123 #elif defined SFM_LOCK_PER_BUCKET
124
125 #define SFM_MAIN_HASH_LOCK(qm, hash) \
126         lock_get(&(qm)->free_hash[(hash)].lock)
127 #define SFM_MAIN_HASH_UNLOCK(qm, hash)  \
128         lock_release(&(qm)->free_hash[(hash)].lock)
129 #define SFM_POOL_LOCK(p, hash) lock_get(&(p)->pool_hash[(hash)].lock)
130 #define SFM_POOL_UNLOCK(p, hash) lock_release(&(p)->pool_hash[(hash)].lock)
131 #else
132 #error no locks defined
133 #endif /* SFM_ONE_LOCK/SFM_LOCK_PER_BUCKET */
134
135 #define SFM_BIG_GET_AND_SPLIT_LOCK(qm)   lock_get(&(qm)->get_and_split)
136 #define SFM_BIG_GET_AND_SPLIT_UNLOCK(qm) lock_release(&(qm)->get_and_split)
137
138 static unsigned long sfm_max_hash=0; /* maximum hash value (no point in
139                                                                                  searching further) */
140 static unsigned long pool_id=(unsigned long)-1;
141
142
/* call for each child: resets the cached per-process pool id so that the
 * child picks its own pool on its first allocation (see sfm_fix_pool_id);
 * always returns 0 */
int sfm_pool_reset()
{
	pool_id=(unsigned long)-1;
	return 0;
}
149
150
151 #define sfm_fix_pool_id(qm) \
152         do{ \
153                 if (unlikely(pool_id>=SFM_POOLS_NO)) \
154                         pool_id=((unsigned)atomic_add(&(qm)->crt_id, 1))%SFM_POOLS_NO; \
155         }while(0)
156
157
158
159 static inline void frag_push(struct sfm_frag** head, struct sfm_frag* frag)
160 {
161         frag->u.nxt_free=*head;
162         *head=frag;
163 }
164
165
166 static inline struct sfm_frag* frag_pop(struct sfm_frag** head)
167 {
168         struct sfm_frag* frag;
169         frag=*head;
170         *head=frag->u.nxt_free;
171         return frag;
172 }
173
/* add frag to bucket <hash> of <pool> and mark the bucket as non-empty
 * in the pool bitmap (under the pool/bucket lock) */
static inline void sfm_pool_insert (struct sfm_pool* pool, int hash,
								struct sfm_frag* frag)
{
	unsigned long hash_bit;

	SFM_POOL_LOCK(pool, hash);
	frag_push(&pool->pool_hash[hash].first, frag);
	pool->pool_hash[hash].no++;
	/* set it only if not already set (avoids an expensive
	 * cache trashing atomic write op) */
	hash_bit=HASH_TO_BITMAP(hash);
	if  (!(atomic_get_long((long*)&pool->bitmap) & hash_bit))
		atomic_or_long((long*)&pool->bitmap, hash_bit);
	SFM_POOL_UNLOCK(pool, hash);
}
189
190
191
192 /* returns 1 if it's ok to add a fragm. to pool p_id @ hash, 0 otherwise */
193 static inline int sfm_check_pool(struct sfm_block* qm, unsigned long p_id,
194                                                                         int hash, int split)
195 {
196         /* TODO: come up with something better
197          * if fragment is some  split/rest from an allocation, that is
198          *  >= requested size, accept it, else
199          *  look at misses and current fragments and decide based on them */
200         return (p_id<SFM_POOLS_NO) && (split ||
201                         ( (qm->pool[p_id].pool_hash[hash].no < MIN_POOL_FRAGS) ||
202                           ((qm->pool[p_id].pool_hash[hash].misses > 
203                                  qm->pool[p_id].pool_hash[hash].no) &&
204                                 (qm->pool[p_id].pool_hash[hash].no<MAX_POOL_FRAGS) ) ) );
205 }
206
207
208 /* choose on which pool to add a free'd packet
209  * return - pool idx or -1 if it should be added to main*/
210 static inline unsigned long  sfm_choose_pool(struct sfm_block* qm,
211                                                                                                 struct sfm_frag* frag,
212                                                                                                 int hash, int split)
213 {
214         /* check original pool first */
215         if (sfm_check_pool(qm, frag->id, hash, split))
216                 return frag->id;
217         else{
218                 /* check if our pool is properly set */
219                 sfm_fix_pool_id(qm);
220                 /* check if my pool needs some frags */
221                 if ((pool_id!=frag->id) && (sfm_check_pool(qm,  pool_id, hash, 0))){
222                         frag->id=pool_id;
223                         return pool_id;
224                 }
225         }
226         /* else add it back to main */
227         frag->id=(unsigned long)(-1);
228         return frag->id;
229 }
230
231
/* return a free fragment to the allocator:
 * - small fragments (<= SF_POOL_MAX_SIZE) go to the pool picked by
 *   sfm_choose_pool(), or to the "main" free hash if no pool wants them
 * - big fragments are kept in the main hash big buckets, sorted by
 *   ascending size
 * split!=0 marks a leftover from sfm_split_frag (biases the pool choice) */
static inline void sfm_insert_free(struct sfm_block* qm, struct sfm_frag* frag,
									int split)
{
	struct sfm_frag** f;
	unsigned long p_id;
	int hash;
	unsigned long hash_bit;
	
	if (likely(frag->size<=SF_POOL_MAX_SIZE)){
		hash=GET_SMALL_HASH(frag->size);
		if (unlikely((p_id=sfm_choose_pool(qm, frag, hash, split))==
					(unsigned long)-1)){
			/* add it back to the "main" hash */
			SFM_MAIN_HASH_LOCK(qm, hash);
				frag->id=(unsigned long)(-1); /* main hash marker */
				/*insert it here*/
				frag_push(&(qm->free_hash[hash].first), frag);
				qm->free_hash[hash].no++;
				/* set it only if not already set (avoids an expensive
				* cache trashing atomic write op) */
				hash_bit=HASH_TO_BITMAP(hash);
				if  (!(atomic_get_long((long*)&qm->bitmap) & hash_bit))
					atomic_or_long((long*)&qm->bitmap, hash_bit);
			SFM_MAIN_HASH_UNLOCK(qm, hash);
		}else{
			/* add it to one of the pools pool */
			sfm_pool_insert(&qm->pool[p_id], hash, frag);
		}
	}else{
		hash=GET_BIG_HASH(frag->size);
		SFM_MAIN_HASH_LOCK(qm, hash);
			/* walk the size-ordered list, stop before the first
			 * fragment at least as big as this one */
			f=&(qm->free_hash[hash].first);
			for(; *f; f=&((*f)->u.nxt_free))
				if (frag->size <= (*f)->size) break;
			frag->id=(unsigned long)(-1); /* main hash marker */
			/*insert it here*/
			frag->u.nxt_free=*f;
			*f=frag;
			qm->free_hash[hash].no++;
			/* inc. big hash free size ? */
		SFM_MAIN_HASH_UNLOCK(qm, hash);
	}
	
}
276
277
278
/* shrink frag to exactly <size> bytes and, if the leftover is big enough,
 * turn it into a new free fragment and reinsert it via sfm_insert_free();
 * size should be already rounded-up to a SF_ROUNDTO multiple.
 * The caller must own frag (it is in no free list). */
static inline
#ifdef DBG_F_MALLOC 
void sfm_split_frag(struct sfm_block* qm, struct sfm_frag* frag,
					unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void sfm_split_frag(struct sfm_block* qm, struct sfm_frag* frag,
					unsigned long size)
#endif
{
	unsigned long rest;
	struct sfm_frag* n;
	int bigger_rest;
	
	rest=frag->size-size;
#ifdef MEM_FRAG_AVOIDANCE
	if ((rest> (FRAG_OVERHEAD+SF_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+size))){ /* the residue fragm. is big enough*/
		bigger_rest=1;
#else
	if (rest>(FRAG_OVERHEAD+SF_MIN_FRAG_SIZE)){
		bigger_rest=rest>=(size+FRAG_OVERHEAD);
#endif
		frag->size=size;
		/*split the fragment*/
		n=FRAG_NEXT(frag);
		n->size=rest-FRAG_OVERHEAD;
		/* credit the leftover to the current process's pool */
		n->id=pool_id;
		FRAG_CLEAR_USED(n); /* never used */
#ifdef DBG_F_MALLOC
		/* frag created by malloc, mark it*/
		n->file=file;
		n->func="frag. from sfm_malloc";
		n->line=line;
		n->check=ST_CHECK_PATTERN;
#endif
		/* reinsert n in free list*/
		sfm_insert_free(qm, n, bigger_rest);
	}else{
		/* we cannot split this fragment any more => alloc all of it*/
	}
}
322
323
324
325 /* init malloc and return a sfm_block*/
326 struct sfm_block* sfm_malloc_init(char* address, unsigned long size, int type)
327 {
328         char* start;
329         char* end;
330         struct sfm_block* qm;
331         unsigned long init_overhead;
332         int r;
333 #ifdef SFM_LOCK_PER_BUCKET
334         int i;
335 #endif
336         
337         /* make address and size multiple of 8*/
338         start=(char*)ROUNDUP((unsigned long) address);
339         DBG("sfm_malloc_init: SF_OPTIMIZE=%lu, /SF_ROUNDTO=%lu\n",
340                         SF_MALLOC_OPTIMIZE, SF_MALLOC_OPTIMIZE/SF_ROUNDTO);
341         DBG("sfm_malloc_init: SF_HASH_SIZE=%lu, sfm_block size=%lu\n",
342                         SF_HASH_SIZE, (long)sizeof(struct sfm_block));
343         DBG("sfm_malloc_init(%p, %lu), start=%p\n", address, size, start);
344
345         if (size<start-address) return 0;
346         size-=(start-address);
347         if (size <(SF_MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
348         size=ROUNDDOWN(size);
349
350         init_overhead=INIT_OVERHEAD;
351         
352         
353         if (size < init_overhead)
354         {
355                 /* not enough mem to create our control structures !!!*/
356                 return 0;
357         }
358         end=start+size;
359         qm=(struct sfm_block*)start;
360         memset(qm, 0, sizeof(struct sfm_block));
361         qm->size=size;
362         qm->type = type;
363         size-=init_overhead;
364         
365         qm->first_frag=(struct sfm_frag*)(start+ROUNDUP(sizeof(struct sfm_block)));
366         qm->last_frag=(struct sfm_frag*)(end-sizeof(struct sfm_frag));
367         /* init initial fragment*/
368         qm->first_frag->size=size;
369         qm->first_frag->id=(unsigned long)-1; /* not in a pool */
370         qm->last_frag->size=0;
371         
372 #ifdef DBG_F_MALLOC
373         qm->first_frag->check=ST_CHECK_PATTERN;
374         qm->last_frag->check=END_CHECK_PATTERN1;
375 #endif
376         
377         /* link initial fragment into the free list*/
378         
379         sfm_insert_free(qm, qm->first_frag, 0);
380         sfm_max_hash=GET_HASH(size);
381         
382         /* init locks */
383         if (lock_init(&qm->get_and_split)==0)
384                 goto error;
385 #ifdef SFM_ONE_LOCK
386         if (lock_init(&qm->lock)==0){
387                 lock_destroy(&qm->get_and_split);
388                 goto error;
389         }
390         for (r=0; r<SFM_POOLS_NO; r++){
391                 if (lock_init(&qm->pool[r].lock)==0){
392                         for (;r>0; r--) lock_destroy(&qm->pool[r-1].lock);
393                         lock_destroy(&qm->lock);
394                         lock_destroy(&qm->get_and_split);
395                         goto error;
396                 }
397         }
398 #elif defined(SFM_LOCK_PER_BUCKET)
399         for (r=0; r<SF_HASH_SIZE; r++)
400                 if (lock_init(&qm->free_hash[r].lock)==0){
401                         for(;r>0; r--) lock_destroy(&qm->free_hash[r-1].lock);
402                         lock_destroy(&qm->get_and_split);
403                         goto error;
404                 }
405         for (i=0; i<SFM_POOLS_NO; i++){
406                 for (r=0; r<SF_HASH_POOL_SIZE; r++)
407                         if (lock_init(&qm->pool[i].pool_hash[r].lock)==0){
408                                 for(;r>0; r--) lock_destroy(&qm->pool[i].poo_hash[r].lock);
409                                 for(; i>0; i--){
410                                         for (r=0; r<SF_HASH_POOL_SIZE; r++)
411                                                 lock_destroy(&qm->pool[i].pool_hash[r].lock);
412                                 }
413                                 for (r=0; r<SF_HASH_SIZE; r++)
414                                         lock_destroy(&qm->free_hash[r].lock);
415                                 lock_destroy(&qm->get_and_split);
416                                 goto error;
417                         }
418         }
419 #endif
420         qm->is_init=1;
421         return qm;
422 error:
423         return 0;
424 }
425
426
427
428 /* cleanup */
429 void sfm_malloc_destroy(struct sfm_block* qm)
430 {
431         int r, i;
432         /* destroy all the locks */
433         if (!qm || !qm->is_init)
434                 return; /* nothing to do */
435         lock_destroy(&qm->get_and_split);
436 #ifdef SFM_ONE_LOCK
437         lock_destroy(&qm->lock);
438         for (r=0; r<SFM_POOLS_NO; r++){
439                 lock_destroy(&qm->pool[r].lock);
440         }
441 #elif defined(SFM_LOCK_PER_BUCKET)
442         for (r=0; r<SF_HASH_SIZE; r++)
443                 lock_destroy(&qm->free_hash[r].lock);
444         for (i=0; i<SFM_POOLS_NO; i++){
445                 for (r=0; r<SF_HASH_POOL_SIZE; r++)
446                         lock_destroy(&qm->pool[i].pool_hash[r].lock);
447         }
448 #endif
449         qm->is_init=0;
450
451 }
452
453
454 /* returns next set bit in bitmap, starts at b
455  * if b is set, returns b
456  * if not found returns BITMAP_BITS */
457 static inline unsigned long _next_set_bit(unsigned long b,
458                                                                                         unsigned long* bitmap)
459 {
460         for (; !((1UL<<b)& *bitmap) && b<BITMAP_BITS; b++);
461         return b;
462 }
463
464 /* returns start of block b and sets *end
465  * (handles also the "rest" block at the end ) */
466 static inline unsigned long _hash_range(unsigned long b, unsigned long* end)
467 {
468         unsigned long s;
469         
470         if ((unlikely(b>=BITMAP_BITS))){
471                 s=BIT_TO_HASH(BITMAP_BITS);
472                 *end=SF_HASH_POOL_SIZE; /* last, possible rest block */
473         }else{
474                 s=BIT_TO_HASH(b);
475                 *end=s+BITMAP_BLOCK_SIZE;
476         }
477         return s;
478 }
479
480
481 #ifdef DBG_F_MALLOC
482 static inline struct sfm_frag* pool_get_frag(struct sfm_block* qm,
483                                                 struct sfm_pool*  pool, int hash, unisgned long size,
484                                                 const char* file, const char* func, unsigned int line)
485 #else
486 static inline struct sfm_frag* pool_get_frag(struct sfm_block* qm,
487                                                                                         struct sfm_pool*  pool,
488                                                                                         int hash, unsigned long size)
489 #endif
490 {
491         int r;
492         int next_block;
493         struct sfm_frag* volatile* f;
494         struct sfm_frag* frag;
495         unsigned long b;
496         unsigned long eob;
497
498         /* special case for r=hash */
499         r=hash;
500         f=&pool->pool_hash[r].first;
501         if (*f==0) 
502                 goto not_found;
503         SFM_POOL_LOCK(pool, r);
504         if (unlikely(*f==0)){
505                 SFM_POOL_UNLOCK(pool, r);
506                 goto not_found;
507         }
508 found:
509         /* detach it from the free list*/
510         frag=frag_pop((struct sfm_frag**)f);
511         frag->u.nxt_free=0; /* mark it as 'taken' */
512         frag->id=pool_id;
513         pool->pool_hash[r].no--;
514         SFM_POOL_UNLOCK(pool, r);
515 #ifdef DBG_F_MALLOC
516         sfm_split_frag(qm, frag, size, file, func, line);
517 #else
518         sfm_split_frag(qm, frag, size);
519 #endif
520         if (&qm->pool[pool_id]==pool)
521                 atomic_inc_long((long*)&pool->hits);
522         return frag;
523         
524 not_found:
525         atomic_inc_long((long*)&pool->pool_hash[r].misses);
526         r++;
527         b=HASH_BIT_POS(r);
528         
529         while(r<SF_HASH_POOL_SIZE){
530                 b=_next_set_bit(b, &pool->bitmap);
531                 next_block=_hash_range(b, &eob);
532                 r=(r<next_block)?next_block:r;
533                 for (; r<eob; r++){
534                         f=&pool->pool_hash[r].first;
535                         if (*f){
536                                 SFM_POOL_LOCK(pool, r);
537                                 if (unlikely(*f==0)){
538                                         /* not found */
539                                         SFM_POOL_UNLOCK(pool, r);
540                                 }else
541                                         goto found;
542                         }
543                         atomic_inc_long((long*)&pool->pool_hash[r].misses);
544                 }
545                 b++;
546         }
547 #if 0 /* EXPENSIVE BUG CHECK */
548         for (r=hash; r<SF_HASH_POOL_SIZE; r++){
549                 f=&pool->pool_hash[r].first;
550                 if (*f){
551                                 SFM_POOL_LOCK(pool, r);
552                                 if (unlikely(*f==0)){
553                                         /* not found */
554                                         SFM_POOL_UNLOCK(pool, r);
555                                 }else{
556                                         b=_next_set_bit(HASH_BIT_POS(r), &pool->bitmap);
557                                         next_block=_hash_range(b, &eob);
558                                         BUG("pool_get_frag: found fragm. %d at %d (bit %ld range %ld-%ld), next set bit=%ld"
559                                                         " bitmap %ld (%p)\n", hash, r, HASH_BIT_POS(r),
560                                                         next_block, eob, b, pool->bitmap, &pool->bitmap);
561                                         goto found;
562                                 }
563                 }
564         }
565 #endif
566         atomic_inc_long((long*)&pool->missed);
567         return 0;
568 }
569
570
571
/* get a fragment of at least <size> bytes (rounded up) from the "main"
 * free hash, starting at bucket <hash>; used for small requests after the
 * per-process pool failed, and for all big requests.
 * Returns the detached fragment (already split down to <size>) or 0. */
#ifdef DBG_F_MALLOC
static inline struct sfm_frag* main_get_frag(struct sfm_block* qm, int hash,
						unsigned long size,
						const char* file, const char* func, unsigned int line)
#else
static inline struct sfm_frag* main_get_frag(struct sfm_block* qm, int hash,
												unsigned long size)
#endif
{
	int r;
	int next_block;
	struct sfm_frag* volatile* f;
	struct sfm_frag* frag;
	unsigned long b;
	unsigned long eob;

	r=hash;
	b=HASH_BIT_POS(r);
	/* small buckets: any fragment in bucket r fits (all same size);
	 * use the bitmap to skip empty bucket ranges */
	while(r<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO){
			b=_next_set_bit(b, &qm->bitmap);
			next_block=_hash_range(b, &eob);
			r=(r<next_block)?next_block:r;
			for (; r<eob; r++){
				f=&qm->free_hash[r].first;
				if (*f){
					SFM_MAIN_HASH_LOCK(qm, r);
					/* re-check under the lock */
					if (unlikely(*f==0)){
						/* not found, somebody stole it */
						SFM_MAIN_HASH_UNLOCK(qm, r);
						continue; 
					}
					/* detach it from the free list*/
					frag=frag_pop((struct sfm_frag**)f);
					frag->u.nxt_free=0; /* mark it as 'taken' */
					qm->free_hash[r].no--;
					SFM_MAIN_HASH_UNLOCK(qm, r);
					frag->id=pool_id;
#ifdef DBG_F_MALLOC
					sfm_split_frag(qm, frag, size, file, func, line);
#else
					sfm_split_frag(qm, frag, size);
#endif
					return frag;
				}
			}
			b++;
	}
	/* big fragments: buckets hold mixed sizes (ascending order), so the
	 * whole lookup+split is serialized via the get_and_split lock */
	SFM_BIG_GET_AND_SPLIT_LOCK(qm);
	for (; r<= sfm_max_hash ; r++){
		f=&qm->free_hash[r].first;
		if (*f){
			SFM_MAIN_HASH_LOCK(qm, r);
			if (unlikely((*f)==0)){
				/* not found */
				SFM_MAIN_HASH_UNLOCK(qm, r);
				continue; 
			}
			/* list is ascending by size => first match is the best fit */
			for(;(*f); f=&((*f)->u.nxt_free))
				if ((*f)->size>=size){
					/* found, detach it from the free list*/
					frag=*f;
					*f=frag->u.nxt_free;
					frag->u.nxt_free=0; /* mark it as 'taken' */
					qm->free_hash[r].no--;
					SFM_MAIN_HASH_UNLOCK(qm, r);
					frag->id=pool_id;
#ifdef DBG_F_MALLOC
					sfm_split_frag(qm, frag, size, file, func, line);
#else
					sfm_split_frag(qm, frag, size);
#endif
					SFM_BIG_GET_AND_SPLIT_UNLOCK(qm);
					return frag;
				};
			SFM_MAIN_HASH_UNLOCK(qm, r);
			/* try in a bigger bucket */
		}
	}
	SFM_BIG_GET_AND_SPLIT_UNLOCK(qm);
	return 0;
}
654
655
656
/* allocate <size> bytes (rounded up to a SF_ROUNDTO multiple) from qm;
 * returns a pointer to the user area or 0 on out of memory.
 * Small requests are looked up in: own pool -> main hash -> other pools;
 * big requests go straight to the main hash. */
#ifdef DBG_F_MALLOC
void* sfm_malloc(struct sfm_block* qm, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* sfm_malloc(struct sfm_block* qm, unsigned long size)
#endif
{
	struct sfm_frag* frag;
	int hash;
	unsigned int i;
	
#ifdef DBG_F_MALLOC
	MDBG("sfm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
			line);
#endif
	/*size must be a multiple of 8*/
	size=ROUNDUP(size);
/*	if (size>(qm->size-qm->real_used)) return 0; */

	/* check if our pool id is set */
	sfm_fix_pool_id(qm);
	
	/*search for a suitable free frag*/
	if (likely(size<=SF_POOL_MAX_SIZE)){
		hash=GET_SMALL_HASH(size);
		/* try first in our pool */
#ifdef DBG_F_MALLOC
		if (likely((frag=pool_get_frag(qm, &qm->pool[pool_id], hash, size,
										file, func, line))!=0))
			goto found;
		/* try in the "main" free hash, go through all the hash */
		if (likely((frag=main_get_frag(qm, hash, size, file, func, line))!=0))
			goto found;
		/* really low mem , try in other pools */
		for (i=(pool_id+1); i< (pool_id+SFM_POOLS_NO); i++){
			if ((frag=pool_get_frag(qm, &qm->pool[i%SFM_POOLS_NO], hash, size,
										file, func, line))!=0)
				goto found;
		}
#else
		if (likely((frag=pool_get_frag(qm, &qm->pool[pool_id], hash, size))
					!=0 ))
			goto found;
		/* try in the "main" free hash, go through all the hash */
		if (likely((frag=main_get_frag(qm, hash, size))!=0))
			goto found;
		/* really low mem , try in other pools */
		for (i=(pool_id+1); i< (pool_id+SFM_POOLS_NO); i++){
			if ((frag=pool_get_frag(qm, &qm->pool[i%SFM_POOLS_NO], hash, size))
					!=0 )
				goto found;
		}
#endif
		/* not found, bad! */
		return 0;
	}else{
		/* big request => served only from the main free hash */
		hash=GET_BIG_HASH(size);
#ifdef DBG_F_MALLOC
		if ((frag=main_get_frag(qm, hash, size, file, func, line))==0)
			return 0; /* not found, bad! */
#else
		if ((frag=main_get_frag(qm, hash, size))==0)
			return 0; /* not found, bad! */
#endif
	}

found:
	/* we found it!*/
#ifdef DBG_F_MALLOC
	frag->file=file;
	frag->func=func;
	frag->line=line;
	frag->check=ST_CHECK_PATTERN;
	MDBG("sfm_malloc(%p, %lu) returns address %p \n", qm, size,
		(char*)frag+sizeof(struct sfm_frag));
#endif
	FRAG_MARK_USED(frag); /* mark it as used */
	/* user area starts right after the fragment header */
	return (char*)frag+sizeof(struct sfm_frag);
}
736
737
738
/* free a chunk previously returned by sfm_malloc/sfm_realloc;
 * p==0 only logs a warning and returns */
#ifdef DBG_F_MALLOC
void sfm_free(struct sfm_block* qm, void* p, const char* file,
				const char* func, unsigned int line)
#else
void sfm_free(struct sfm_block* qm, void* p)
#endif
{
	struct sfm_frag* f;

#ifdef DBG_F_MALLOC
	MDBG("sfm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func,
				line);
	/* NOTE(review): a NULL p fails this range check and aborts before
	 * ever reaching the free(0) warning below (debug builds only) */
	if (p>(void*)qm->last_frag || p<(void*)qm->first_frag){
		LOG(L_CRIT, "BUG: sfm_free: bad pointer %p (out of memory block!) - "
				"aborting\n", p);
		abort();
	}
#endif
	if (unlikely(p==0)) {
		LOG(L_WARN, "WARNING: sfm_free: free(0) called\n");
		return;
	}
	/* the fragment header sits right before the user pointer */
	f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag));
#ifdef DBG_F_MALLOC
	MDBG("sfm_free: freeing block alloc'ed from %s: %s(%ld)\n",
			f->file, f->func, f->line);
#endif
#ifdef DBG_F_MALLOC
	f->file=file;
	f->func=func;
	f->line=line;
#endif
	/* return the fragment to a pool or to the main free hash */
	sfm_insert_free(qm, f, 0);
}
773
774
/* Resize a block allocated from qm, preserving its contents.
 * Semantics follow realloc(): size==0 frees p and returns 0; p==0 is a
 * plain malloc.  Unless SFM_REALLOC_REMALLOC is defined, a grow first
 * tries to join the physically-next fragment in place (if it is free and
 * large enough) before falling back to malloc+copy+free. */
#ifdef DBG_F_MALLOC
void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size)
#endif
{
	struct sfm_frag *f;
	unsigned long orig_size;
	void *ptr;
#ifndef SFM_REALLOC_REMALLOC
	struct sfm_frag *n;    /* physically-next fragment, join candidate */
	struct sfm_frag **pf;  /* walker used to unlink n from its free list */
	unsigned long diff;    /* extra bytes needed to reach the new size */
	unsigned long p_id;    /* snapshot of n->id (pool no. / main-hash marker) */
	int hash;
	unsigned long n_size;  /* snapshot of n->size, revalidated under lock */
	struct sfm_pool * pool;
#endif
	
#ifdef DBG_F_MALLOC
	MDBG("sfm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
			file, func, line);
	/* NOTE(review): the message below says "sfm_free" but this is
	 * sfm_realloc (copy/paste in the original log text) */
	if ((p)&&(p>(void*)qm->last_frag || p<(void*)qm->first_frag)){
		LOG(L_CRIT, "BUG: sfm_free: bad pointer %p (out of memory block!) - "
				"aborting\n", p);
		abort();
	}
#endif
	if (size==0) {
		/* realloc(p, 0) == free(p) */
		if (p)
#ifdef DBG_F_MALLOC
			sfm_free(qm, p, file, func, line);
#else
			sfm_free(qm, p);
#endif
		return 0;
	}
	if (p==0)
		/* realloc(0, size) == malloc(size) */
#ifdef DBG_F_MALLOC
		return sfm_malloc(qm, size, file, func, line);
#else
		return sfm_malloc(qm, size);
#endif
	/* step back from the user area to the fragment header */
	f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag));
#ifdef DBG_F_MALLOC
	MDBG("sfm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
#endif
	size=ROUNDUP(size);
	orig_size=f->size;
	if (f->size > size){
		/* shrink: split off the tail as a new free fragment */
#ifdef DBG_F_MALLOC
		MDBG("sfm_realloc: shrinking from %lu to %lu\n", f->size, size);
		sfm_split_frag(qm, f, size, file, "frag. from sfm_realloc", line);
#else
		sfm_split_frag(qm, f, size);
#endif
	}else if (f->size<size){
		/* grow */
#ifdef DBG_F_MALLOC
		MDBG("sfm_realloc: growing from %lu to %lu\n", f->size, size);
#endif
#ifndef SFM_REALLOC_REMALLOC
		diff=size-f->size;
		n=FRAG_NEXT(f);
		/* lock-free peek: n looks free (nxt_free!=0) and big enough.
		 * This is only a hint; it is revalidated under the proper
		 * bucket lock below before n is actually detached. */
		if (((char*)n < (char*)qm->last_frag) && 
				(n->u.nxt_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
			/* join  */
			/* detach n from the free list */
try_again:
			/* snapshot id+size; if either changed once we hold the
			 * lock, another process moved/resized n => retry */
			p_id=n->id;
			n_size=n->size;
			if ((unlikely(p_id >=SFM_POOLS_NO))){
				/* n lives in the main (global) hash, not a per-CPU pool */
				hash=GET_HASH(n_size);
				SFM_MAIN_HASH_LOCK(qm, hash);
				if (unlikely((n->u.nxt_free==0) ||
							((n->size+FRAG_OVERHEAD)<diff))){ 
					/* no longer free or no longer big enough */
					SFM_MAIN_HASH_UNLOCK(qm, hash);
					goto not_found;
				}
				if (unlikely((n->id!=p_id) || (n->size!=n_size))){
					/* fragment still free, but changed, either 
					 * moved to another pool or has a diff. size */
					SFM_MAIN_HASH_UNLOCK(qm, hash);
					goto try_again;
				}
				pf=&(qm->free_hash[hash].first);
				/* find it */
				for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));/*FIXME slow */
				if (*pf==0){
					SFM_MAIN_HASH_UNLOCK(qm, hash);
					/* not found, bad! */
					LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in "
							    "free " "list (hash=%d)\n", n, hash);
					/* somebody is in the process of changing it ? */
					goto not_found;
				}
				/* detach */
				*pf=n->u.nxt_free;
				n->u.nxt_free=0; /* mark it immediately as detached */
				qm->free_hash[hash].no--;
				SFM_MAIN_HASH_UNLOCK(qm, hash);
				/* join: absorb n (header included) into f */
				f->size+=n->size+FRAG_OVERHEAD;
				/* split it if necessary */
				if (f->size > size){
			#ifdef DBG_F_MALLOC
					sfm_split_frag(qm, f, size, file, "fragm. from "
									"sfm_realloc", line);
			#else
					sfm_split_frag(qm, f, size);
			#endif
				}
			}else{ /* p_id < SFM_POOLS_NO (=> in a pool )*/
				/* same detach/join protocol, but using the pool's
				 * small-hash bucket lock instead of the main hash */
				hash=GET_SMALL_HASH(n_size);
				pool=&qm->pool[p_id];
				SFM_POOL_LOCK(pool, hash);
				if (unlikely((n->u.nxt_free==0) ||
							((n->size+FRAG_OVERHEAD)<diff))){
					SFM_POOL_UNLOCK(pool, hash);
					goto not_found;
				}
				if (unlikely((n->id!=p_id) || (n->size!=n_size))){
					/* fragment still free, but changed, either 
					 * moved to another pool or has a diff. size */
					SFM_POOL_UNLOCK(pool, hash);
					goto try_again;
				}
				pf=&(pool->pool_hash[hash].first);
				/* find it */
				for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));/*FIXME slow */
				if (*pf==0){
					SFM_POOL_UNLOCK(pool, hash);
					/* not found, bad! */
					LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in "
							    "free " "list (hash=%d)\n", n, hash);
					/* somebody is in the process of changing it ? */
					goto not_found;
				}
				/* detach */
				*pf=n->u.nxt_free;
				n->u.nxt_free=0; /* mark it immediately as detached */
				pool->pool_hash[hash].no--;
				SFM_POOL_UNLOCK(pool, hash);
				/* join: absorb n (header included) into f */
				f->size+=n->size+FRAG_OVERHEAD;
				/* split it if necessary */
				if (f->size > size){
			#ifdef DBG_F_MALLOC
					sfm_split_frag(qm, f, size, file, "fragm. from "
									"sfm_realloc", line);
			#else
					sfm_split_frag(qm, f, size);
			#endif
				}
			}
		}else{
not_found:
			/* could not join => realloc */
#else/* SFM_REALLOC_REMALLOC */ 
		{
#endif /* SFM_REALLOC_REMALLOC */
			/* fallback: allocate a new block, copy, free the old one */
	#ifdef DBG_F_MALLOC
			ptr=sfm_malloc(qm, size, file, func, line);
	#else
			ptr=sfm_malloc(qm, size);
	#endif
			if (ptr){
				/* copy the old contents (needed e.g. by libssl) */
				memcpy(ptr, p, orig_size);
	#ifdef DBG_F_MALLOC
				sfm_free(qm, p, file, func, line);
	#else
				sfm_free(qm, p);
	#endif
			}
			/* on malloc failure ptr==0 is returned; the original
			 * block p is left allocated and untouched */
			p=ptr;
		}
	}else{
		/* do nothing */
#ifdef DBG_F_MALLOC
		MDBG("sfm_realloc: doing nothing, same size: %lu - %lu\n", 
				f->size, size);
#endif
	}
#ifdef DBG_F_MALLOC
	MDBG("sfm_realloc: returning %p\n", p);
#endif
	return p;
}
967
968
969
970 void sfm_status(struct sfm_block* qm)
971 {
972         struct sfm_frag* f;
973         int i,j;
974         int h;
975         int unused;
976         unsigned long size;
977         int k;
978         int memlog;
979
980         memlog=cfg_get(core, core_cfg, memlog);
981         LOG(memlog, "sfm_status (%p):\n", qm);
982         if (!qm) return;
983
984         LOG(memlog, " heap size= %ld\n", qm->size);
985         LOG(memlog, "dumping free list:\n");
986         for(h=0,i=0,size=0;h<=sfm_max_hash;h++){
987                 SFM_MAIN_HASH_LOCK(qm, h);
988                 unused=0;
989                 for (f=qm->free_hash[h].first,j=0; f;
990                                 size+=f->size,f=f->u.nxt_free,i++,j++){
991                         if (!FRAG_WAS_USED(f)){
992                                 unused++;
993 #ifdef DBG_F_MALLOC
994                                 LOG(memlog, "unused fragm.: hash = %3d, fragment %p,"
995                                                         " address %p size %lu, created from %s: %s(%ld)\n",
996                                                     h, f, (char*)f+sizeof(struct sfm_frag), f->size,
997                                                         f->file, f->func, f->line);
998 #endif
999                         };
1000                 }
1001                 if (j) LOG(memlog, "hash = %3d fragments no.: %5d, unused: %5d\n\t\t"
1002                                                         " bucket size: %9lu - %9lu (first %9lu)\n",
1003                                                         h, j, unused, UN_HASH(h),
1004                                                 ((h<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO)?1:2)* UN_HASH(h),
1005                                                         qm->free_hash[h].first->size
1006                                 );
1007                 if (j!=qm->free_hash[h].no){
1008                         LOG(L_CRIT, "BUG: sfm_status: different free frag. count: %d!=%ld"
1009                                         " for hash %3d\n", j, qm->free_hash[h].no, h);
1010                 }
1011                 SFM_MAIN_HASH_UNLOCK(qm, h);
1012         }
1013         for (k=0; k<SFM_POOLS_NO; k++){
1014                 for(h=0;h<SF_HASH_POOL_SIZE;h++){
1015                         SFM_POOL_LOCK(&qm->pool[k], h);
1016                         unused=0;
1017                         for (f=qm->pool[k].pool_hash[h].first,j=0; f;
1018                                         size+=f->size,f=f->u.nxt_free,i++,j++){
1019                                 if (!FRAG_WAS_USED(f)){
1020                                         unused++;
1021 #ifdef DBG_F_MALLOC
1022                                         LOG(memlog, "[%2d] unused fragm.: hash = %3d, fragment %p,"
1023                                                                 " address %p size %lu, created from %s: "
1024                                                                 "%s(%ld)\n", k
1025                                                                 h, f, (char*)f+sizeof(struct sfm_frag),
1026                                                                 f->size, f->file, f->func, f->line);
1027 #endif
1028                                 };
1029                         }
1030                         if (j) LOG(memlog, "[%2d] hash = %3d fragments no.: %5d, unused: "
1031                                                                 "%5d\n\t\t bucket size: %9lu - %9lu "
1032                                                                 "(first %9lu)\n",
1033                                                                 k, h, j, unused, UN_HASH(h),
1034                                                         ((h<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO)?1:2) *
1035                                                                 UN_HASH(h),
1036                                                                 qm->pool[k].pool_hash[h].first->size
1037                                         );
1038                         if (j!=qm->pool[k].pool_hash[h].no){
1039                                 LOG(L_CRIT, "BUG: sfm_status: [%d] different free frag."
1040                                                         " count: %d!=%ld for hash %3d\n",
1041                                                         k, j, qm->pool[k].pool_hash[h].no, h);
1042                         }
1043                         SFM_POOL_UNLOCK(&qm->pool[k], h);
1044                 }
1045         }
1046         LOG(memlog, "TOTAL: %6d free fragments = %6lu free bytes\n", i, size);
1047         LOG(memlog, "-----------------------------\n");
1048 }
1049
1050
1051
1052 /* fills a malloc info structure with info about the block
1053  * if a parameter is not supported, it will be filled with 0 */
1054 void sfm_info(struct sfm_block* qm, struct mem_info* info)
1055 {
1056         int r, k;
1057         unsigned long total_frags;
1058         struct sfm_frag* f;
1059         
1060         memset(info,0, sizeof(*info));
1061         total_frags=0;
1062         info->total_size=qm->size;
1063         info->min_frag=SF_MIN_FRAG_SIZE;
1064         /* we'll have to compute it all */
1065         for (r=0; r<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO; r++){
1066                 info->free+=qm->free_hash[r].no*UN_HASH(r);
1067                 total_frags+=qm->free_hash[r].no;
1068         }
1069         for(;r<=sfm_max_hash; r++){
1070                 total_frags+=qm->free_hash[r].no;
1071                 SFM_MAIN_HASH_LOCK(qm, r);
1072                 for(f=qm->free_hash[r].first;f;f=f->u.nxt_free){
1073                         info->free+=f->size;
1074                 }
1075                 SFM_MAIN_HASH_UNLOCK(qm, r);
1076         }
1077         for (k=0; k<SFM_POOLS_NO; k++){
1078                 for (r=0; r<SF_HASH_POOL_SIZE; r++){
1079                         info->free+=qm->pool[k].pool_hash[r].no*UN_HASH(r);
1080                         total_frags+=qm->pool[k].pool_hash[r].no;
1081                 }
1082         }
1083         info->real_used=info->total_size-info->free;
1084         info->used=info->real_used-total_frags*FRAG_OVERHEAD-INIT_OVERHEAD
1085                                 -FRAG_OVERHEAD;
1086         info->max_used=0; /* we don't really know */
1087         info->total_frags=total_frags;
1088 }
1089
1090
1091
/* Report the amount of free memory in qm.
 * This allocator does not keep a cheap running total and computing one
 * on demand would be too expensive, so the "unknown" sentinel
 * (unsigned long)(-1) is always returned. */
unsigned long sfm_available(struct sfm_block* qm)
{
	/* free-memory bookkeeping is not maintained; signal "unknown" */
	return (unsigned long)-1;
}
1100
1101 #endif