/*
 * shared memory, multi-process safe, pool based version of f_malloc
 *
 * This file is part of Kamailio, a free SIP server.
 *
 * Copyright (C) 2007 iptelorg GmbH
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "sf_malloc.h"
#include "../dprint.h"
#include "../globals.h"
#include "../cfg/cfg.h" /* memlog */

#define MAX_POOL_FRAGS 10000 /* max fragments per pool hash bucket */
#define MIN_POOL_FRAGS 10    /* min fragments per pool hash bucket */
#define FRAG_NEXT(f) \
	((struct sfm_frag*)((char*)(f)+sizeof(struct sfm_frag)+(f)->size))

/* SF_ROUNDTO= 2^k so the following works */
#define ROUNDTO_MASK (~((unsigned long)SF_ROUNDTO-1))
#define ROUNDUP(s)   (((s)+(SF_ROUNDTO-1))&ROUNDTO_MASK)
#define ROUNDDOWN(s) ((s)&ROUNDTO_MASK)

#define FRAG_OVERHEAD (sizeof(struct sfm_frag))
#define INIT_OVERHEAD \
	(ROUNDUP(sizeof(struct sfm_block))+sizeof(struct sfm_frag))
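/*
 * Worked example (illustrative only; it assumes the usual SF_ROUNDTO of 8 --
 * the real value comes from sf_malloc.h):
 *   ROUNDUP(13)   -> (13+7) & ~7 = 16
 *   ROUNDDOWN(13) -> 13 & ~7     = 8
 * so every allocation costs ROUNDUP(size)+FRAG_OVERHEAD bytes, and the block
 * itself starts with INIT_OVERHEAD bytes of control structures.
 */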
/* finds the hash if s <= SF_MALLOC_OPTIMIZE */
#define GET_SMALL_HASH(s) (unsigned long)(s)/SF_ROUNDTO
/* finds the hash if s > SF_MALLOC_OPTIMIZE */
#define GET_BIG_HASH(s) \
	(SF_MALLOC_OPTIMIZE/SF_ROUNDTO+big_hash_idx((s))-SF_MALLOC_OPTIMIZE_FACTOR+1)

/* finds the hash value for s; s must be a multiple of SF_ROUNDTO */
#define GET_HASH(s) ( ((unsigned long)(s)<=SF_MALLOC_OPTIMIZE)?\
						GET_SMALL_HASH(s): GET_BIG_HASH(s) )

#define UN_HASH_SMALL(h) ((unsigned long)(h)*SF_ROUNDTO)
#define UN_HASH_BIG(h) (1UL<<((unsigned long)(h)-SF_MALLOC_OPTIMIZE/SF_ROUNDTO+\
							SF_MALLOC_OPTIMIZE_FACTOR-1))

#define UN_HASH(h) ( ((unsigned long)(h)<=(SF_MALLOC_OPTIMIZE/SF_ROUNDTO))?\
						UN_HASH_SMALL(h): UN_HASH_BIG(h) )
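/*
 * Worked example of the hashing scheme (illustrative; it assumes SF_ROUNDTO=8
 * and SF_MALLOC_OPTIMIZE=2048, i.e. 2^SF_MALLOC_OPTIMIZE_FACTOR -- the real
 * values come from sf_malloc.h):
 *   - "small" sizes (<= SF_MALLOC_OPTIMIZE) map linearly, one bucket per
 *     SF_ROUNDTO step: GET_HASH(8)=1, GET_HASH(16)=2, ..., GET_HASH(2048)=256,
 *     and UN_HASH(h) recovers the exact fragment size stored in bucket h;
 *   - bigger sizes map logarithmically, one bucket per power of two:
 *     e.g. GET_HASH(3072)=257, GET_HASH(4096)=258, GET_HASH(6144)=258, and
 *     UN_HASH(h) only returns the lower bound of the bucket's size range.
 */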
#define BITMAP_BITS (sizeof(((struct sfm_block*)0)->bitmap)*8)
#define BITMAP_BLOCK_SIZE ((SF_MALLOC_OPTIMIZE/SF_ROUNDTO)/BITMAP_BITS)
/* only for "small" hashes (up to GET_HASH(SF_MALLOC_OPTIMIZE)) */
#define HASH_BIT_POS(h) (((unsigned long)(h))/BITMAP_BLOCK_SIZE)
#define HASH_TO_BITMAP(h) (1UL<<HASH_BIT_POS(h))
#define BIT_TO_HASH(b) ((b)*BITMAP_BLOCK_SIZE)
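/*
 * Illustrative example of the bitmap shortcut (assuming a 64-bit bitmap
 * field, SF_ROUNDTO=8 and SF_MALLOC_OPTIMIZE=2048): there are 256 "small"
 * hash buckets but only 64 bitmap bits, so BITMAP_BLOCK_SIZE=4 and each bit
 * summarizes a run of 4 consecutive buckets.  HASH_TO_BITMAP(10)=1UL<<2
 * marks buckets 8..11 as possibly non-empty, and BIT_TO_HASH(2)=8 is where
 * a search for that bit should start.
 */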
/* mark/test used/unused frags */
#define FRAG_MARK_USED(f)
#define FRAG_CLEAR_USED(f)
#define FRAG_WAS_USED(f) (1)

/* other frag related defines: ... */
#define MEM_FRAG_AVOIDANCE

#define SFM_REALLOC_REMALLOC
/* computes the hash number for big buckets */
inline static unsigned long big_hash_idx(unsigned long s)
{
	unsigned long idx;
	/* s is rounded => s = k*2^n (SF_ROUNDTO=2^n)
	 * index = i such that 2^i > s >= 2^(i-1)
	 *
	 * => index = number of the first non-null bit in s */
	idx=sizeof(long)*8-1;
	for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--);
	return idx;
}
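/*
 * Worked example (assuming 64-bit longs): for s=3072 the loop shifts s left
 * until its highest set bit reaches bit 63, decrementing idx from 63 once per
 * shift, so idx ends up at the position of that highest bit:
 *   big_hash_idx(3072)=11, big_hash_idx(4096)=12, big_hash_idx(8192)=13.
 */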
#define ST_CHECK_PATTERN   0xf0f0f0f0
#define END_CHECK_PATTERN1 0xc0c0c0c0
#define END_CHECK_PATTERN2 0xabcdefed

#ifdef SFM_ONE_LOCK

#define SFM_MAIN_HASH_LOCK(qm, hash)   lock_get(&(qm)->lock)
#define SFM_MAIN_HASH_UNLOCK(qm, hash) lock_release(&(qm)->lock)
#define SFM_POOL_LOCK(p, hash)         lock_get(&(p)->lock)
#define SFM_POOL_UNLOCK(p, hash)       lock_release(&(p)->lock)
#warning "degraded performance, only one lock"

#elif defined SFM_LOCK_PER_BUCKET

#define SFM_MAIN_HASH_LOCK(qm, hash) \
	lock_get(&(qm)->free_hash[(hash)].lock)
#define SFM_MAIN_HASH_UNLOCK(qm, hash) \
	lock_release(&(qm)->free_hash[(hash)].lock)
#define SFM_POOL_LOCK(p, hash)   lock_get(&(p)->pool_hash[(hash)].lock)
#define SFM_POOL_UNLOCK(p, hash) lock_release(&(p)->pool_hash[(hash)].lock)

#else
#error no locks defined
#endif /* SFM_ONE_LOCK/SFM_LOCK_PER_BUCKET */

#define SFM_BIG_GET_AND_SPLIT_LOCK(qm)   lock_get(&(qm)->get_and_split)
#define SFM_BIG_GET_AND_SPLIT_UNLOCK(qm) lock_release(&(qm)->get_and_split)
static unsigned long sfm_max_hash=0; /* maximum hash value (no point in
										searching further) */
static unsigned long pool_id=(unsigned long)-1;


/* call for each child */
	pool_id=(unsigned long)-1;


#define sfm_fix_pool_id(qm) \
	if (unlikely(pool_id>=SFM_POOLS_NO)) \
		pool_id=((unsigned)atomic_add(&(qm)->crt_id, 1))%SFM_POOLS_NO; \
static inline void frag_push(struct sfm_frag** head, struct sfm_frag* frag)
{
	frag->u.nxt_free=*head;
	*head=frag;
}

static inline struct sfm_frag* frag_pop(struct sfm_frag** head)
{
	struct sfm_frag* frag;
	frag=*head;
	*head=frag->u.nxt_free;
	return frag;
}
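/*
 * Usage note (sketch): the per-bucket free lists are plain LIFO stacks linked
 * through u.nxt_free and are always manipulated under the owning bucket's
 * lock, e.g.:
 *     SFM_POOL_LOCK(pool, h);
 *     frag_push(&pool->pool_hash[h].first, frag);
 *     SFM_POOL_UNLOCK(pool, h);
 * (the "big" main-hash buckets above SF_MALLOC_OPTIMIZE are the exception:
 * they are kept ordered by size, see sfm_insert_free() below).
 */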
static inline void sfm_pool_insert(struct sfm_pool* pool, int hash,
									struct sfm_frag* frag)
{
	unsigned long hash_bit;

	SFM_POOL_LOCK(pool, hash);
	frag_push(&pool->pool_hash[hash].first, frag);
	pool->pool_hash[hash].no++;
	/* set it only if not already set (avoids an expensive
	 * cache-thrashing atomic write op) */
	hash_bit=HASH_TO_BITMAP(hash);
	if (!(atomic_get_long((long*)&pool->bitmap) & hash_bit))
		atomic_or_long((long*)&pool->bitmap, hash_bit);
	SFM_POOL_UNLOCK(pool, hash);
}
/* returns 1 if it's ok to add a fragm. to pool p_id @ hash, 0 otherwise */
static inline int sfm_check_pool(struct sfm_block* qm, unsigned long p_id,
									int hash, int split)
{
	/* TODO: come up with something better
	 * if the fragment is some split/rest from an allocation that is
	 * >= the requested size, accept it; else look at the misses and the
	 * current fragment count and decide based on them */
	return (p_id<SFM_POOLS_NO) && (split ||
			( (qm->pool[p_id].pool_hash[hash].no < MIN_POOL_FRAGS) ||
			  ((qm->pool[p_id].pool_hash[hash].misses >
				 qm->pool[p_id].pool_hash[hash].no) &&
				(qm->pool[p_id].pool_hash[hash].no<MAX_POOL_FRAGS) ) ) );
}
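/*
 * Illustrative reading of the heuristic above: a split remainder is always
 * accepted; otherwise a bucket with e.g. no=4 fragments (< MIN_POOL_FRAGS=10)
 * takes the fragment, a bucket with no=50 and misses=120 takes it too (more
 * misses than fragments and still below MAX_POOL_FRAGS), while a bucket with
 * no=50 and misses=10 sends it back towards the main hash.
 */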
/* choose on which pool to add a freed fragment
 * returns the pool index or (unsigned long)-1 if it should go back to the main hash */
static inline unsigned long sfm_choose_pool(struct sfm_block* qm,
											struct sfm_frag* frag,
											int hash, int split)
{
	/* check the original pool first */
	if (sfm_check_pool(qm, frag->id, hash, split))
		return frag->id;
	/* check if our pool is properly set */
	sfm_fix_pool_id(qm);
	/* check if my pool needs some frags */
	if ((pool_id!=frag->id) && (sfm_check_pool(qm, pool_id, hash, 0))){
		frag->id=pool_id;
		return pool_id;
	}
	/* else add it back to main */
	frag->id=(unsigned long)(-1);
	return frag->id;
}
static inline void sfm_insert_free(struct sfm_block* qm, struct sfm_frag* frag,
									int split)
{
	struct sfm_frag** f;
	unsigned long p_id;
	int hash;
	unsigned long hash_bit;

	if (likely(frag->size<=SF_POOL_MAX_SIZE)){
		hash=GET_SMALL_HASH(frag->size);
		if (unlikely((p_id=sfm_choose_pool(qm, frag, hash, split))==
						(unsigned long)-1)){
			/* add it back to the "main" hash */
			SFM_MAIN_HASH_LOCK(qm, hash);
			frag->id=(unsigned long)(-1); /* main hash marker */
			frag_push(&(qm->free_hash[hash].first), frag);
			qm->free_hash[hash].no++;
			/* set it only if not already set (avoids an expensive
			 * cache-thrashing atomic write op) */
			hash_bit=HASH_TO_BITMAP(hash);
			if (!(atomic_get_long((long*)&qm->bitmap) & hash_bit))
				atomic_or_long((long*)&qm->bitmap, hash_bit);
			SFM_MAIN_HASH_UNLOCK(qm, hash);
		}else{
			/* add it to one of the pools */
			sfm_pool_insert(&qm->pool[p_id], hash, frag);
		}
	}else{
		hash=GET_BIG_HASH(frag->size);
		SFM_MAIN_HASH_LOCK(qm, hash);
		f=&(qm->free_hash[hash].first);
		for(; *f; f=&((*f)->u.nxt_free))
			if (frag->size <= (*f)->size) break;
		frag->id=(unsigned long)(-1); /* main hash marker */
		/* insert it before *f (the list is kept ordered by size) */
		frag->u.nxt_free=*f;
		*f=frag;
		qm->free_hash[hash].no++;
		/* inc. big hash free size ? */
		SFM_MAIN_HASH_UNLOCK(qm, hash);
	}
}
/* size should be already rounded-up */
void sfm_split_frag(struct sfm_block* qm, struct sfm_frag* frag,
					const char* file, const char* func, unsigned int line)
void sfm_split_frag(struct sfm_block* qm, struct sfm_frag* frag,
	rest=frag->size-size;
#ifdef MEM_FRAG_AVOIDANCE
	if ((rest> (FRAG_OVERHEAD+SF_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+size))){ /* the residue fragm. is big enough */
	if (rest>(FRAG_OVERHEAD+SF_MIN_FRAG_SIZE)){
		bigger_rest=rest>=(size+FRAG_OVERHEAD);
		/* split the fragment */
		n->size=rest-FRAG_OVERHEAD;
		FRAG_CLEAR_USED(n); /* never used */
		/* frag created by malloc, mark it */
		n->func="frag. from sfm_malloc";
		n->check=ST_CHECK_PATTERN;
		/* reinsert n in the free list */
		sfm_insert_free(qm, n, bigger_rest);
	/* we cannot split this fragment any more => alloc all of it */
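/*
 * Worked example of the split decision (illustrative; it assumes SF_ROUNDTO=8
 * and a 32-byte struct sfm_frag header): a 1024-byte free fragment serving a
 * 200-byte (rounded) request leaves rest=824 bytes; that is big enough to be
 * worth keeping, so the tail is carved into a new fragment n of 824-32=792
 * bytes and put back on the free lists.  If the rest were smaller than
 * FRAG_OVERHEAD+SF_MIN_FRAG_SIZE, the whole 1024 bytes would be handed out
 * instead, to avoid creating a uselessly small fragment.
 */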
/* init malloc and return a sfm_block */
struct sfm_block* sfm_malloc_init(char* address, unsigned long size, int type)
	struct sfm_block* qm;
	unsigned long init_overhead;
#ifdef SFM_LOCK_PER_BUCKET
	/* make address and size a multiple of 8 */
	start=(char*)ROUNDUP((unsigned long) address);
	DBG("sfm_malloc_init: SF_OPTIMIZE=%lu, /SF_ROUNDTO=%lu\n",
			SF_MALLOC_OPTIMIZE, SF_MALLOC_OPTIMIZE/SF_ROUNDTO);
	DBG("sfm_malloc_init: SF_HASH_SIZE=%lu, sfm_block size=%lu\n",
			SF_HASH_SIZE, (long)sizeof(struct sfm_block));
	DBG("sfm_malloc_init(%p, %lu), start=%p\n", address, size, start);
	if (size<start-address) return 0;
	size-=(start-address);
	if (size <(SF_MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
	size=ROUNDDOWN(size);
	init_overhead=INIT_OVERHEAD;
	if (size < init_overhead)
		/* not enough mem to create our control structures !!! */
	qm=(struct sfm_block*)start;
	memset(qm, 0, sizeof(struct sfm_block));
	qm->first_frag=(struct sfm_frag*)(start+ROUNDUP(sizeof(struct sfm_block)));
	qm->last_frag=(struct sfm_frag*)(end-sizeof(struct sfm_frag));
	/* init the initial fragment */
	qm->first_frag->size=size;
	qm->first_frag->id=(unsigned long)-1; /* not in a pool */
	qm->last_frag->size=0;
	qm->first_frag->check=ST_CHECK_PATTERN;
	qm->last_frag->check=END_CHECK_PATTERN1;
	/* link the initial fragment into the free list */
	sfm_insert_free(qm, qm->first_frag, 0);
	sfm_max_hash=GET_HASH(size);
	if (lock_init(&qm->get_and_split)==0)
	if (lock_init(&qm->lock)==0){
		lock_destroy(&qm->get_and_split);
	for (r=0; r<SFM_POOLS_NO; r++){
		if (lock_init(&qm->pool[r].lock)==0){
			for (;r>0; r--) lock_destroy(&qm->pool[r-1].lock);
			lock_destroy(&qm->lock);
			lock_destroy(&qm->get_and_split);
#elif defined(SFM_LOCK_PER_BUCKET)
	for (r=0; r<SF_HASH_SIZE; r++)
		if (lock_init(&qm->free_hash[r].lock)==0){
			for(;r>0; r--) lock_destroy(&qm->free_hash[r-1].lock);
			lock_destroy(&qm->get_and_split);
	for (i=0; i<SFM_POOLS_NO; i++){
		for (r=0; r<SF_HASH_POOL_SIZE; r++)
			if (lock_init(&qm->pool[i].pool_hash[r].lock)==0){
				for(;r>0; r--) lock_destroy(&qm->pool[i].pool_hash[r-1].lock);
				for (r=0; r<SF_HASH_POOL_SIZE; r++)
					lock_destroy(&qm->pool[i].pool_hash[r].lock);
				for (r=0; r<SF_HASH_SIZE; r++)
					lock_destroy(&qm->free_hash[r].lock);
				lock_destroy(&qm->get_and_split);
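/*
 * Usage sketch (illustrative only): "shm_region" and "SHM_SIZE" below are
 * hypothetical placeholders, the third sfm_malloc_init() argument is its
 * "type" parameter (0 is just a placeholder here), and with the debug build
 * every call additionally takes file/func/line arguments.
 *
 *   struct sfm_block* shm;
 *   void* p;
 *
 *   shm=sfm_malloc_init(shm_region, SHM_SIZE, 0);
 *   if (shm==0)
 *       return -1;
 *   p=sfm_malloc(shm, 128);
 *   p=sfm_realloc(shm, p, 256);
 *   sfm_free(shm, p);
 *   sfm_status(shm);
 *   sfm_malloc_destroy(shm);
 */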
void sfm_malloc_destroy(struct sfm_block* qm)
{
	/* destroy all the locks */
	if (!qm || !qm->is_init)
		return; /* nothing to do */
	lock_destroy(&qm->get_and_split);
#ifdef SFM_ONE_LOCK
	lock_destroy(&qm->lock);
	for (r=0; r<SFM_POOLS_NO; r++){
		lock_destroy(&qm->pool[r].lock);
	}
#elif defined(SFM_LOCK_PER_BUCKET)
	for (r=0; r<SF_HASH_SIZE; r++)
		lock_destroy(&qm->free_hash[r].lock);
	for (i=0; i<SFM_POOLS_NO; i++){
		for (r=0; r<SF_HASH_POOL_SIZE; r++)
			lock_destroy(&qm->pool[i].pool_hash[r].lock);
	}
/* returns the next set bit in the bitmap, starting at b
 * if b is set, returns b
 * if no set bit is found, returns BITMAP_BITS */
static inline unsigned long _next_set_bit(unsigned long b,
											unsigned long* bitmap)
{
	for (; b<BITMAP_BITS && !((1UL<<b)& *bitmap); b++);
	return b;
}

/* returns the start of block b and sets *end
 * (it also handles the "rest" block at the end) */
static inline unsigned long _hash_range(unsigned long b, unsigned long* end)
{
	unsigned long s;
	if ((unlikely(b>=BITMAP_BITS))){
		s=BIT_TO_HASH(BITMAP_BITS);
		*end=SF_HASH_POOL_SIZE; /* last, possible rest block */
	}else{
		s=BIT_TO_HASH(b);
		*end=s+BITMAP_BLOCK_SIZE;
	}
	return s;
}
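/*
 * Worked example (with the same illustrative constants as above, i.e. 64
 * bitmap bits and BITMAP_BLOCK_SIZE=4): a request hashing to bucket 10
 * starts at bit HASH_BIT_POS(10)=2; if the bitmap is 0x1000 (only bit 12
 * set), _next_set_bit(2, &bitmap) returns 12 and _hash_range(12, &eob)
 * returns 48 with eob=52, so the search jumps straight to buckets 48..51
 * instead of scanning every empty bucket in between.
 */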
static inline struct sfm_frag* pool_get_frag(struct sfm_block* qm,
					struct sfm_pool* pool, int hash, unsigned long size,
					const char* file, const char* func, unsigned int line)
static inline struct sfm_frag* pool_get_frag(struct sfm_block* qm,
												struct sfm_pool* pool,
												int hash, unsigned long size)
	struct sfm_frag* volatile* f;
	struct sfm_frag* frag;

	/* special case for r=hash */
	f=&pool->pool_hash[r].first;
	SFM_POOL_LOCK(pool, r);
	if (unlikely(*f==0)){
		SFM_POOL_UNLOCK(pool, r);
	/* detach it from the free list */
	frag=frag_pop((struct sfm_frag**)f);
	frag->u.nxt_free=0; /* mark it as 'taken' */
	pool->pool_hash[r].no--;
	SFM_POOL_UNLOCK(pool, r);
	sfm_split_frag(qm, frag, size, file, func, line);
	sfm_split_frag(qm, frag, size);
	if (&qm->pool[pool_id]==pool)
		atomic_inc_long((long*)&pool->hits);
	atomic_inc_long((long*)&pool->pool_hash[r].misses);
	while(r<SF_HASH_POOL_SIZE){
		b=_next_set_bit(b, &pool->bitmap);
		next_block=_hash_range(b, &eob);
		r=(r<next_block)?next_block:r;
		f=&pool->pool_hash[r].first;
		SFM_POOL_LOCK(pool, r);
		if (unlikely(*f==0)){
			SFM_POOL_UNLOCK(pool, r);
		atomic_inc_long((long*)&pool->pool_hash[r].misses);
#if 0 /* EXPENSIVE BUG CHECK */
	for (r=hash; r<SF_HASH_POOL_SIZE; r++){
		f=&pool->pool_hash[r].first;
		SFM_POOL_LOCK(pool, r);
		if (unlikely(*f==0)){
			SFM_POOL_UNLOCK(pool, r);
		b=_next_set_bit(HASH_BIT_POS(r), &pool->bitmap);
		next_block=_hash_range(b, &eob);
		BUG("pool_get_frag: found fragm. %d at %d (bit %ld range %ld-%ld), next set bit=%ld"
			" bitmap %ld (%p)\n", hash, r, HASH_BIT_POS(r),
			next_block, eob, b, pool->bitmap, &pool->bitmap);
	atomic_inc_long((long*)&pool->missed);
static inline struct sfm_frag* main_get_frag(struct sfm_block* qm, int hash,
					const char* file, const char* func, unsigned int line)
static inline struct sfm_frag* main_get_frag(struct sfm_block* qm, int hash,
	struct sfm_frag* volatile* f;
	struct sfm_frag* frag;

	while(r<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO){
		b=_next_set_bit(b, &qm->bitmap);
		next_block=_hash_range(b, &eob);
		r=(r<next_block)?next_block:r;
		f=&qm->free_hash[r].first;
		SFM_MAIN_HASH_LOCK(qm, r);
		if (unlikely(*f==0)){
			/* not found, somebody stole it */
			SFM_MAIN_HASH_UNLOCK(qm, r);
		/* detach it from the free list */
		frag=frag_pop((struct sfm_frag**)f);
		frag->u.nxt_free=0; /* mark it as 'taken' */
		qm->free_hash[r].no--;
		SFM_MAIN_HASH_UNLOCK(qm, r);
		sfm_split_frag(qm, frag, size, file, func, line);
		sfm_split_frag(qm, frag, size);
	SFM_BIG_GET_AND_SPLIT_LOCK(qm);
	for (; r<= sfm_max_hash ; r++){
		f=&qm->free_hash[r].first;
		SFM_MAIN_HASH_LOCK(qm, r);
		if (unlikely((*f)==0)){
			SFM_MAIN_HASH_UNLOCK(qm, r);
		for(;(*f); f=&((*f)->u.nxt_free))
			if ((*f)->size>=size){
				/* found, detach it from the free list */
				frag->u.nxt_free=0; /* mark it as 'taken' */
				qm->free_hash[r].no--;
				SFM_MAIN_HASH_UNLOCK(qm, r);
				sfm_split_frag(qm, frag, size, file, func, line);
				sfm_split_frag(qm, frag, size);
				SFM_BIG_GET_AND_SPLIT_UNLOCK(qm);
		SFM_MAIN_HASH_UNLOCK(qm, r);
		/* try in a bigger bucket */
	SFM_BIG_GET_AND_SPLIT_UNLOCK(qm);
void* sfm_malloc(struct sfm_block* qm, unsigned long size,
					const char* file, const char* func, unsigned int line)
void* sfm_malloc(struct sfm_block* qm, unsigned long size)
	struct sfm_frag* frag;

	MDBG("sfm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
	/* size must be a multiple of 8 */
	/* if (size>(qm->size-qm->real_used)) return 0; */
	/* check if our pool id is set */
	/* search for a suitable free frag */
	if (likely(size<=SF_POOL_MAX_SIZE)){
		hash=GET_SMALL_HASH(size);
		/* try first in our pool */
		if (likely((frag=pool_get_frag(qm, &qm->pool[pool_id], hash, size,
										file, func, line))!=0))
		/* try in the "main" free hash, going through all the hashes */
		if (likely((frag=main_get_frag(qm, hash, size, file, func, line))!=0))
		/* really low mem, try in the other pools */
		for (i=(pool_id+1); i< (pool_id+SFM_POOLS_NO); i++){
			if ((frag=pool_get_frag(qm, &qm->pool[i%SFM_POOLS_NO], hash, size,
									file, func, line))!=0)
		if (likely((frag=pool_get_frag(qm, &qm->pool[pool_id], hash, size))
		/* try in the "main" free hash, going through all the hashes */
		if (likely((frag=main_get_frag(qm, hash, size))!=0))
		/* really low mem, try in the other pools */
		for (i=(pool_id+1); i< (pool_id+SFM_POOLS_NO); i++){
			if ((frag=pool_get_frag(qm, &qm->pool[i%SFM_POOLS_NO], hash, size))
		/* not found, bad! */
		hash=GET_BIG_HASH(size);
		if ((frag=main_get_frag(qm, hash, size, file, func, line))==0)
			return 0; /* not found, bad! */
		if ((frag=main_get_frag(qm, hash, size))==0)
			return 0; /* not found, bad! */
	frag->check=ST_CHECK_PATTERN;
	MDBG("sfm_malloc(%p, %lu) returns address %p\n", qm, size,
			(char*)frag+sizeof(struct sfm_frag));
	FRAG_MARK_USED(frag); /* mark it as used */
	return (char*)frag+sizeof(struct sfm_frag);
void sfm_free(struct sfm_block* qm, void* p, const char* file,
				const char* func, unsigned int line)
void sfm_free(struct sfm_block* qm, void* p)
	MDBG("sfm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func,
	if (p>(void*)qm->last_frag || p<(void*)qm->first_frag){
		LOG(L_CRIT, "BUG: sfm_free: bad pointer %p (out of memory block!) - "
	if (unlikely(p==0)) {
		LOG(L_WARN, "WARNING: sfm_free: free(0) called\n");
	f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag));
	MDBG("sfm_free: freeing block alloc'ed from %s: %s(%ld)\n",
			f->file, f->func, f->line);
	sfm_insert_free(qm, f, 0);
void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size,
					const char* file, const char* func, unsigned int line)
void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size)
	unsigned long orig_size;
#ifndef SFM_REALLOC_REMALLOC
	struct sfm_frag **pf;
	unsigned long n_size;
	struct sfm_pool * pool;

	MDBG("sfm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
	if ((p)&&(p>(void*)qm->last_frag || p<(void*)qm->first_frag)){
		LOG(L_CRIT, "BUG: sfm_realloc: bad pointer %p (out of memory block!) - "
		sfm_free(qm, p, file, func, line);
		return sfm_malloc(qm, size, file, func, line);
		return sfm_malloc(qm, size);
	f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag));
	MDBG("sfm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
		MDBG("sfm_realloc: shrinking from %lu to %lu\n", f->size, size);
		sfm_split_frag(qm, f, size, file, "frag. from sfm_realloc", line);
		sfm_split_frag(qm, f, size);
	}else if (f->size<size){
		MDBG("sfm_realloc: growing from %lu to %lu\n", f->size, size);
#ifndef SFM_REALLOC_REMALLOC
		if (((char*)n < (char*)qm->last_frag) &&
				(n->u.nxt_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
			/* detach n from the free list */
			if ((unlikely(p_id >=SFM_POOLS_NO))){
				hash=GET_HASH(n_size);
				SFM_MAIN_HASH_LOCK(qm, hash);
				if (unlikely((n->u.nxt_free==0) ||
							((n->size+FRAG_OVERHEAD)<diff))){
					SFM_MAIN_HASH_UNLOCK(qm, hash);
				if (unlikely((n->id!=p_id) || (n->size!=n_size))){
					/* fragment still free, but changed: it either moved
					 * to another pool or has a different size */
					SFM_MAIN_HASH_UNLOCK(qm, hash);
				pf=&(qm->free_hash[hash].first);
				for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));/*FIXME slow */
					SFM_MAIN_HASH_UNLOCK(qm, hash);
					/* not found, bad! */
					LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in "
								"free list (hash=%d)\n", n, hash);
					/* somebody is in the process of changing it ? */
				n->u.nxt_free=0; /* mark it immediately as detached */
				qm->free_hash[hash].no--;
				SFM_MAIN_HASH_UNLOCK(qm, hash);
				f->size+=n->size+FRAG_OVERHEAD;
				/* split it if necessary */
				sfm_split_frag(qm, f, size, file, "fragm. from "
								"sfm_realloc", line);
				sfm_split_frag(qm, f, size);
			}else{ /* p_id < SFM_POOLS_NO (=> in a pool ) */
				hash=GET_SMALL_HASH(n_size);
				pool=&qm->pool[p_id];
				SFM_POOL_LOCK(pool, hash);
				if (unlikely((n->u.nxt_free==0) ||
							((n->size+FRAG_OVERHEAD)<diff))){
					SFM_POOL_UNLOCK(pool, hash);
				if (unlikely((n->id!=p_id) || (n->size!=n_size))){
					/* fragment still free, but changed: it either moved
					 * to another pool or has a different size */
					SFM_POOL_UNLOCK(pool, hash);
				pf=&(pool->pool_hash[hash].first);
				for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));/*FIXME slow */
					SFM_POOL_UNLOCK(pool, hash);
					/* not found, bad! */
					LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in "
								"free list (hash=%d)\n", n, hash);
					/* somebody is in the process of changing it ? */
				n->u.nxt_free=0; /* mark it immediately as detached */
				pool->pool_hash[hash].no--;
				SFM_POOL_UNLOCK(pool, hash);
				f->size+=n->size+FRAG_OVERHEAD;
				/* split it if necessary */
				sfm_split_frag(qm, f, size, file, "fragm. from "
								"sfm_realloc", line);
				sfm_split_frag(qm, f, size);
		/* could not join => realloc */
#else /* SFM_REALLOC_REMALLOC */
#endif /* SFM_REALLOC_REMALLOC */
		ptr=sfm_malloc(qm, size, file, func, line);
		ptr=sfm_malloc(qm, size);
		/* copy, needed by libssl */
		memcpy(ptr, p, orig_size);
		sfm_free(qm, p, file, func, line);
		MDBG("sfm_realloc: doing nothing, same size: %lu - %lu\n",
	MDBG("sfm_realloc: returning %p\n", p);
void sfm_status(struct sfm_block* qm)
	memlog=cfg_get(core, core_cfg, memlog);
	LOG(memlog, "sfm_status (%p):\n", qm);
	LOG(memlog, " heap size= %ld\n", qm->size);
	LOG(memlog, "dumping free list:\n");
	for(h=0,i=0,size=0;h<=sfm_max_hash;h++){
		SFM_MAIN_HASH_LOCK(qm, h);
		for (f=qm->free_hash[h].first,j=0; f;
				size+=f->size,f=f->u.nxt_free,i++,j++){
			if (!FRAG_WAS_USED(f)){
				LOG(memlog, "unused fragm.: hash = %3d, fragment %p,"
						" address %p size %lu, created from %s: %s(%ld)\n",
						h, f, (char*)f+sizeof(struct sfm_frag), f->size,
						f->file, f->func, f->line);
		if (j) LOG(memlog, "hash = %3d fragments no.: %5d, unused: %5d\n\t\t"
						" bucket size: %9lu - %9lu (first %9lu)\n",
						h, j, unused, UN_HASH(h),
						((h<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO)?1:2)*UN_HASH(h),
						qm->free_hash[h].first->size
		if (j!=qm->free_hash[h].no){
			LOG(L_CRIT, "BUG: sfm_status: different free frag. count: %d!=%ld"
					" for hash %3d\n", j, qm->free_hash[h].no, h);
		SFM_MAIN_HASH_UNLOCK(qm, h);
	for (k=0; k<SFM_POOLS_NO; k++){
		for(h=0;h<SF_HASH_POOL_SIZE;h++){
			SFM_POOL_LOCK(&qm->pool[k], h);
			for (f=qm->pool[k].pool_hash[h].first,j=0; f;
					size+=f->size,f=f->u.nxt_free,i++,j++){
				if (!FRAG_WAS_USED(f)){
					LOG(memlog, "[%2d] unused fragm.: hash = %3d, fragment %p,"
							" address %p size %lu, created from %s: "
							h, f, (char*)f+sizeof(struct sfm_frag),
							f->size, f->file, f->func, f->line);
			if (j) LOG(memlog, "[%2d] hash = %3d fragments no.: %5d, unused: "
							"%5d\n\t\t bucket size: %9lu - %9lu "
							k, h, j, unused, UN_HASH(h),
							((h<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO)?1:2) *
							qm->pool[k].pool_hash[h].first->size
			if (j!=qm->pool[k].pool_hash[h].no){
				LOG(L_CRIT, "BUG: sfm_status: [%d] different free frag."
						" count: %d!=%ld for hash %3d\n",
						k, j, qm->pool[k].pool_hash[h].no, h);
			SFM_POOL_UNLOCK(&qm->pool[k], h);
	LOG(memlog, "TOTAL: %6d free fragments = %6lu free bytes\n", i, size);
	LOG(memlog, "-----------------------------\n");
/* fills a malloc info structure with info about the block;
 * if a parameter is not supported, it will be filled with 0 */
void sfm_info(struct sfm_block* qm, struct mem_info* info)
	unsigned long total_frags;

	memset(info, 0, sizeof(*info));
	info->total_size=qm->size;
	info->min_frag=SF_MIN_FRAG_SIZE;
	/* we'll have to compute it all */
	for (r=0; r<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO; r++){
		info->free+=qm->free_hash[r].no*UN_HASH(r);
		total_frags+=qm->free_hash[r].no;
	for(;r<=sfm_max_hash; r++){
		total_frags+=qm->free_hash[r].no;
		SFM_MAIN_HASH_LOCK(qm, r);
		for(f=qm->free_hash[r].first;f;f=f->u.nxt_free){
			info->free+=f->size;
		SFM_MAIN_HASH_UNLOCK(qm, r);
	for (k=0; k<SFM_POOLS_NO; k++){
		for (r=0; r<SF_HASH_POOL_SIZE; r++){
			info->free+=qm->pool[k].pool_hash[r].no*UN_HASH(r);
			total_frags+=qm->pool[k].pool_hash[r].no;
	info->real_used=info->total_size-info->free;
	info->used=info->real_used-total_frags*FRAG_OVERHEAD-INIT_OVERHEAD
	info->max_used=0; /* we don't really know */
	info->total_frags=total_frags;
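/*
 * Illustrative accounting example (made-up numbers): for a 16MB block with
 * 2MB left on the free lists spread over 1000 free fragments, sfm_info()
 * reports total_size=16MB, free=2MB and real_used=14MB; "used" is then
 * derived from real_used by subtracting the visible bookkeeping overhead
 * (total_frags*FRAG_OVERHEAD plus INIT_OVERHEAD).
 */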
/* returns how much free memory is available;
 * on error (not compiled with bookkeeping code) returns (unsigned long)(-1) */
unsigned long sfm_available(struct sfm_block* qm)
{
	/* we don't know how much free memory we have and it's too expensive to
	 * compute it */
	return ((unsigned long)-1);
}