4 * Copyright (C) 2001-2003 FhG Fokus
6 * This file is part of sip-router, a free SIP server.
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 * 2003-07-06 added fm_realloc (andrei)
25 * 2004-07-19 fragment bookkeeping code and support for 64-bit
26 * memory blocks (64-bit machines & size >= 2^32)
27 * GET_HASH s/</<=/ (avoids waste of 1 hash cell) (andrei)
28 * 2004-11-10 support for > 4Gb mem., switched to long (andrei)
29 * 2005-03-02 added fm_info() (andrei)
30 * 2005-12-12 fixed realloc shrink real_used accounting (andrei)
31 * fixed initial size (andrei)
32 * 2006-02-03 fixed realloc out of mem. free bug (andrei)
33 * 2006-04-07 s/DBG/MDBG (andrei)
34 * 2007-02-23 added fm_available() (andrei)
35 * 2007-06-23 added hash bitmap (andrei)
36 * 2009-09-28 added fm_sums() (patch from Dragos Vingarzan)
40 #if !defined(q_malloc) && (defined F_MALLOC)
46 #include "../dprint.h"
47 #include "../globals.h"
48 #include "../compiler_opt.h"
50 #include "../bit_scan.h"
51 #include "../cfg/cfg.h" /* memlog */
56 #define FRAG_NEXT(f) \
57 ((struct fm_frag*)((char*)(f)+sizeof(struct fm_frag)+(f)->size ))
59 #define FRAG_OVERHEAD (sizeof(struct fm_frag))
60 #define INIT_OVERHEAD \
61 (ROUNDUP(sizeof(struct fm_block))+sizeof(struct fm_frag))
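/* Layout sketch (illustration only): every fragment is a struct fm_frag
 * header immediately followed by its payload, so FRAG_NEXT() simply skips
 * header + payload. For a fragment f with f->size == 32:
 *
 *     struct fm_frag* n = FRAG_NEXT(f);
 *     => n == (struct fm_frag*)((char*)f + sizeof(struct fm_frag) + 32)
 *
 * FRAG_OVERHEAD is therefore the per-allocation cost, and INIT_OVERHEAD the
 * fixed cost of the pool header plus the trailing sentinel fragment header.
 */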
65 /* ROUNDTO= 2^k so the following works */
66 #define ROUNDTO_MASK (~((unsigned long)ROUNDTO-1))
67 #define ROUNDUP(s) (((s)+(ROUNDTO-1))&ROUNDTO_MASK)
68 #define ROUNDDOWN(s) ((s)&ROUNDTO_MASK)
/* generic alternatives for a ROUNDTO that is not a power of 2; kept for
   reference only, commented out so they do not redefine the macros above:
71 #define ROUNDUP(s) (((s)%ROUNDTO)?((s)+ROUNDTO)/ROUNDTO*ROUNDTO:(s))
72 #define ROUNDDOWN(s) ((s)-(s)%ROUNDTO)
*/
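/* Worked example (assuming ROUNDTO == 8, i.e. ROUNDTO_MASK == ~7UL):
 *     ROUNDUP(13)   == (13 + 7) & ~7UL == 16
 *     ROUNDUP(16)   == (16 + 7) & ~7UL == 16
 *     ROUNDDOWN(13) ==  13      & ~7UL ==  8
 * The mask forms above work only because ROUNDTO is a power of two; the
 * commented-out variants are the generic equivalents.
 */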
77 /* computes the free-list hash index for size s; s must be a multiple of ROUNDTO */
78 #define GET_HASH(s) ( ((unsigned long)(s)<=F_MALLOC_OPTIMIZE)?\
79 (unsigned long)(s)/ROUNDTO: \
80 F_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
81 F_MALLOC_OPTIMIZE_FACTOR+1 )
83 #define UN_HASH(h) ( ((unsigned long)(h)<=(F_MALLOC_OPTIMIZE/ROUNDTO))?\
84 (unsigned long)(h)*ROUNDTO: \
85 1UL<<((unsigned long)(h)-F_MALLOC_OPTIMIZE/ROUNDTO+\
86 F_MALLOC_OPTIMIZE_FACTOR-1)\
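/* Worked example (illustrative values; ROUNDTO, F_MALLOC_OPTIMIZE and
 * F_MALLOC_OPTIMIZE_FACTOR come from the f_malloc header -- assume
 * ROUNDTO == 8 and F_MALLOC_OPTIMIZE == 2^14 == 16384, so
 * F_MALLOC_OPTIMIZE_FACTOR == 14):
 *     GET_HASH(64)    == 64/8 == 8                   (exact-size bucket)
 *     GET_HASH(16384) == 2048                        (last exact bucket)
 *     GET_HASH(20480) == 2048 + 14 - 14 + 1 == 2049  ("big" bucket for
 *                                                     sizes in (2^14, 2^15))
 *     UN_HASH(2049)   == 1UL << 14 == 16384          (bucket lower bound)
 */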
89 #ifdef F_MALLOC_HASH_BITMAP
91 #define fm_bmp_set(qm, b) \
93 (qm)->free_bitmap[(b)/FM_HASH_BMP_BITS] |= \
94 1UL<<((b)%FM_HASH_BMP_BITS); \
97 #define fm_bmp_reset(qm, b) \
99 (qm)->free_bitmap[(b)/FM_HASH_BMP_BITS] &= \
100 ~(1UL<<((b)%FM_HASH_BMP_BITS)); \
103 /* returns 0 if not set, !=0 if set */
104 #define fm_bmp_is_set(qm, b) \
105 ((qm)->free_bitmap[(b)/FM_HASH_BMP_BITS] & (1UL<<((b)%FM_HASH_BMP_BITS)))
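/* Example (assuming FM_HASH_BMP_BITS == 64): fm_bmp_set(qm, 70) sets bit
 * 70 % 64 == 6 of qm->free_bitmap[70 / 64], i.e. of free_bitmap[1];
 * fm_bmp_is_set(qm, 70) is then non-zero until fm_bmp_reset(qm, 70) clears
 * that bit again.
 */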
107 inline static int fm_bmp_first_set(struct fm_block* qm, int start)
112 fm_hash_bitmap_t test_val;
115 bmp_idx=start/FM_HASH_BMP_BITS;
116 bit=start%FM_HASH_BMP_BITS;
117 test_val=1UL <<((unsigned long)bit);
118 if (qm->free_bitmap[bmp_idx] & test_val)
120 else if (qm->free_bitmap[bmp_idx] & ~(test_val-1)){
123 for (r=bit+1; r<FM_HASH_BMP_BITS; r++, test_val<<=1){
124 if (qm->free_bitmap[bmp_idx] & test_val)
125 return (start-bit+r);
128 v=qm->free_bitmap[bmp_idx]>>(bit+1);
129 return start+1+bit_scan_forward((unsigned long)v);
131 for (r=bmp_idx+1;r<FM_HASH_BMP_SIZE; r++){
132 if (qm->free_bitmap[r]){
133 /* find first set bit */
134 return r*FM_HASH_BMP_BITS+
135 bit_scan_forward((unsigned long)qm->free_bitmap[r]);
138 /* not found, nothing free */
141 #endif /* F_MALLOC_HASH_BITMAP */
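/* Intended use of fm_bmp_first_set(), mirroring the call in fm_malloc()
 * below (sketch only):
 *
 *     int h = fm_bmp_first_set(qm, GET_HASH(size));
 *     => h is the first hash bucket at or above GET_HASH(size) that still
 *        holds free fragments, or a negative value if none is left.
 */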
145 /* mark/test used/unused frags */
146 #define FRAG_MARK_USED(f)
147 #define FRAG_CLEAR_USED(f)
148 #define FRAG_WAS_USED(f) (1)
150 /* other frag related defines:
154 #define MEM_FRAG_AVOIDANCE
157 /* computes hash number for big buckets*/
159 inline static unsigned long big_hash_idx(unsigned long s)
162 /* s is rounded => s = k*2^n (ROUNDTO=2^n)
163 * index = i such that 2^(i+1) > s >= 2^i
165 * => index = position of the highest set bit in s */
166 idx=sizeof(long)*8-1;
167 for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--);
171 #define big_hash_idx(s) ((unsigned long)bit_scan_reverse((unsigned long)(s)))
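/* Worked example: big_hash_idx(20480) == 14, because 2^14 <= 20480 < 2^15;
 * i.e. the result is the position of the highest set bit (floor(log2(s))),
 * whether computed by the shift loop above or by bit_scan_reverse().
 */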
176 #define ST_CHECK_PATTERN 0xf0f0f0f0
177 #define END_CHECK_PATTERN1 0xc0c0c0c0
178 #define END_CHECK_PATTERN2 0xabcdefed
183 static inline void fm_insert_free(struct fm_block* qm, struct fm_frag* frag)
188 hash=GET_HASH(frag->size);
189 f=&(qm->free_hash[hash].first);
190 if (frag->size > F_MALLOC_OPTIMIZE){ /* because of '<=' in GET_HASH,
191 (different from 0.8.1[24] on
192 purpose --andrei ) */
193 for(; *f; f=&((*f)->u.nxt_free)){
194 if (frag->size <= (*f)->size) break;
201 qm->free_hash[hash].no++;
202 #ifdef F_MALLOC_HASH_BITMAP
203 fm_bmp_set(qm, hash);
204 #endif /* F_MALLOC_HASH_BITMAP */
209 /* size should be already rounded-up */
212 void fm_split_frag(struct fm_block* qm, struct fm_frag* frag,
214 const char* file, const char* func, unsigned int line)
216 void fm_split_frag(struct fm_block* qm, struct fm_frag* frag,
223 rest=frag->size-size;
224 #ifdef MEM_FRAG_AVOIDANCE
225 if ((rest> (FRAG_OVERHEAD+F_MALLOC_OPTIMIZE))||
226 (rest>=(FRAG_OVERHEAD+size))){ /* the residue fragm. is big enough*/
228 if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
231 /*split the fragment*/
233 n->size=rest-FRAG_OVERHEAD;
234 FRAG_CLEAR_USED(n); /* never used */
235 #if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
236 qm->real_used+=FRAG_OVERHEAD;
239 /* frag created by malloc, mark it*/
241 n->func="frag. from fm_malloc";
243 n->check=ST_CHECK_PATTERN;
245 /* reinsert n in free list*/
246 fm_insert_free(qm, n);
248 /* we cannot split this fragment any more => alloc all of it*/
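/* Worked example (sizes illustrative): splitting a 256-byte free fragment
 * for a 64-byte request gives rest == 192; frag->size is trimmed to 64, the
 * new fragment n starts right after frag's 64-byte payload and gets
 * n->size == 192 - FRAG_OVERHEAD before being re-inserted into a free list.
 * If rest is too small to carry its own header plus a minimal payload, the
 * whole fragment is handed out unsplit.
 */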
254 /* init malloc and return a fm_block*/
255 struct fm_block* fm_malloc_init(char* address, unsigned long size)
260 unsigned long init_overhead;
262 /* align address and size to a multiple of ROUNDTO */
263 start=(char*)ROUNDUP((unsigned long) address);
264 DBG("fm_malloc_init: F_OPTIMIZE=%lu, F_OPTIMIZE/ROUNDTO=%lu\n",
265 F_MALLOC_OPTIMIZE, F_MALLOC_OPTIMIZE/ROUNDTO);
266 DBG("fm_malloc_init: F_HASH_SIZE=%lu, fm_block size=%lu\n",
267 F_HASH_SIZE, (long)sizeof(struct fm_block));
268 DBG("fm_malloc_init(%p, %lu), start=%p\n", address, size, start);
270 if (size<start-address) return 0;
271 size-=(start-address);
272 if (size <(MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
273 size=ROUNDDOWN(size);
275 init_overhead=INIT_OVERHEAD;
278 if (size < init_overhead)
280 /* not enough mem to create our control structures !!!*/
284 qm=(struct fm_block*)start;
285 memset(qm, 0, sizeof(struct fm_block));
287 #if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
288 qm->real_used=init_overhead;
289 qm->max_real_used=qm->real_used;
293 qm->first_frag=(struct fm_frag*)(start+ROUNDUP(sizeof(struct fm_block)));
294 qm->last_frag=(struct fm_frag*)(end-sizeof(struct fm_frag));
295 /* init initial fragment*/
296 qm->first_frag->size=size;
297 qm->last_frag->size=0;
300 qm->first_frag->check=ST_CHECK_PATTERN;
301 qm->last_frag->check=END_CHECK_PATTERN1;
304 /* link initial fragment into the free list*/
306 fm_insert_free(qm, qm->first_frag);
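/* Usage sketch (illustration only -- in ser/sip-router the region is
 * normally a shared- or private-memory segment set up by the memory
 * manager, not a static buffer):
 *
 *     static char region[1024*1024];
 *     struct fm_block* pool = fm_malloc_init(region, sizeof(region));
 *
 * fm_malloc_init() returns 0 when the region is too small or unusable; on
 * success the fm_block header lives at the start of the region and the rest
 * is handed out as fragments.
 */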
315 void* fm_malloc(struct fm_block* qm, unsigned long size,
316 const char* file, const char* func, unsigned int line)
318 void* fm_malloc(struct fm_block* qm, unsigned long size)
322 struct fm_frag* frag;
326 MDBG("fm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
329 /* size must be a multiple of ROUNDTO */
331 /* if (size>(qm->size-qm->real_used)) return 0; */
334 /*search for a suitable free frag*/
336 #ifdef F_MALLOC_HASH_BITMAP
337 hash=fm_bmp_first_set(qm, GET_HASH(size));
338 if (likely(hash>=0)){
339 f=&(qm->free_hash[hash].first);
340 if (likely(hash<=F_MALLOC_OPTIMIZE/ROUNDTO)) /* return first match */
342 for(;(*f); f=&((*f)->u.nxt_free))
343 if ((*f)->size>=size) goto found;
345 #else /* F_MALLOC_HASH_BITMAP */
346 for(hash=GET_HASH(size);hash<F_HASH_SIZE;hash++){
347 f=&(qm->free_hash[hash].first);
349 if (likely(hash<=F_MALLOC_OPTIMIZE/ROUNDTO)) /* return first match */
352 for(;(*f); f=&((*f)->u.nxt_free))
353 if ((*f)->size>=size) goto found;
354 /* try in a bigger bucket */
356 #endif /* F_MALLOC_HASH_BITMAP */
357 /* not found, bad! */
362 /* detach it from the free list*/
365 frag->u.nxt_free=0; /* mark it as 'taken' */
366 qm->free_hash[hash].no--;
367 #ifdef F_MALLOC_HASH_BITMAP
368 if (qm->free_hash[hash].no==0)
369 fm_bmp_reset(qm, hash);
370 #endif /* F_MALLOC_HASH_BITMAP */
372 /* see whether we'll use the full frag or split it in two */
375 fm_split_frag(qm, frag, size, file, func, line);
380 frag->check=ST_CHECK_PATTERN;
381 MDBG("fm_malloc(%p, %lu) returns address %p \n", qm, size,
382 (char*)frag+sizeof(struct fm_frag));
384 fm_split_frag(qm, frag, size);
386 #if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
387 qm->real_used+=frag->size;
388 qm->used+=frag->size;
389 if (qm->max_real_used<qm->real_used)
390 qm->max_real_used=qm->real_used;
392 FRAG_MARK_USED(frag); /* mark it as used */
393 return (char*)frag+sizeof(struct fm_frag);
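/* Lookup sketch (assuming ROUNDTO == 8 and a request well below
 * F_MALLOC_OPTIMIZE): a request for 100 bytes is rounded up to 104,
 * GET_HASH(104) == 13 selects the exact-size bucket and its first fragment
 * is taken as-is. If that bucket is empty, the hash bitmap (or the linear
 * scan in the non-bitmap build) advances to the next non-empty bucket; only
 * the variable-size "big" buckets are scanned for a fragment with
 * size >= the request.
 */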
399 void fm_free(struct fm_block* qm, void* p, const char* file, const char* func,
402 void fm_free(struct fm_block* qm, void* p)
409 MDBG("fm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func, line);
410 if (p>(void*)qm->last_frag || p<(void*)qm->first_frag){
411 LOG(L_CRIT, "BUG: fm_free: bad pointer %p (out of memory block!) - "
417 LOG(L_WARN, "WARNING: fm_free: free(0) called\n");
420 f=(struct fm_frag*) ((char*)p-sizeof(struct fm_frag));
422 MDBG("fm_free: freeing block alloc'ed from %s: %s(%ld)\n",
423 f->file, f->func, f->line);
426 #if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
435 fm_insert_free(qm, f);
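/* Pairing sketch (non-debug signatures; with DBG_F_MALLOC the calls also
 * carry file/function/line arguments, normally filled in by wrapper
 * macros):
 *
 *     void* p = fm_malloc(pool, 100);
 *     if (p)
 *         fm_free(pool, p);
 *
 * fm_malloc() returns the first byte after the struct fm_frag header, which
 * is why fm_free() steps back sizeof(struct fm_frag) to recover the
 * fragment.
 */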
440 void* fm_realloc(struct fm_block* qm, void* p, unsigned long size,
441 const char* file, const char* func, unsigned int line)
443 void* fm_realloc(struct fm_block* qm, void* p, unsigned long size)
449 unsigned long orig_size;
455 MDBG("fm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
457 if ((p)&&(p>(void*)qm->last_frag || p<(void*)qm->first_frag)){
458 LOG(L_CRIT, "BUG: fm_realloc: bad pointer %p (out of memory block!) - "
466 fm_free(qm, p, file, func, line);
474 return fm_malloc(qm, size, file, func, line);
476 return fm_malloc(qm, size);
478 f=(struct fm_frag*) ((char*)p-sizeof(struct fm_frag));
480 MDBG("fm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
481 f, f->file, f->func, f->line);
488 MDBG("fm_realloc: shrinking from %lu to %lu\n", f->size, size);
489 fm_split_frag(qm, f, size, file, "frag. from fm_realloc", line);
491 fm_split_frag(qm, f, size);
493 #if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
494 /* fm_split frag already adds FRAG_OVERHEAD for the newly created
495 free frag, so here we only need orig_size-f->size for real used */
496 qm->real_used-=(orig_size-f->size);
497 qm->used-=(orig_size-f->size);
499 }else if (f->size<size){
502 MDBG("fm_realloc: growing from %lu to %lu\n", f->size, size);
506 if (((char*)n < (char*)qm->last_frag) &&
507 (n->u.nxt_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
509 /* detach n from the free list */
510 hash=GET_HASH(n->size);
511 pf=&(qm->free_hash[hash].first);
513 for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free)); /*FIXME slow */
515 /* not found, bad! */
516 LOG(L_CRIT, "BUG: fm_realloc: could not find %p in free "
517 "list (hash=%ld)\n", n, GET_HASH(n->size));
522 qm->free_hash[hash].no--;
523 #ifdef F_MALLOC_HASH_BITMAP
524 if (qm->free_hash[hash].no==0)
525 fm_bmp_reset(qm, hash);
526 #endif /* F_MALLOC_HASH_BITMAP */
528 f->size+=n->size+FRAG_OVERHEAD;
529 #if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
530 qm->real_used-=FRAG_OVERHEAD;
532 /* split it if necessary */
535 fm_split_frag(qm, f, size, file, "fragm. from fm_realloc",
538 fm_split_frag(qm, f, size);
541 #if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
542 qm->real_used+=(f->size-orig_size);
543 qm->used+=(f->size-orig_size);
546 /* could not join => realloc */
548 ptr=fm_malloc(qm, size, file, func, line);
550 ptr=fm_malloc(qm, size);
553 /* copy the old contents, needed by libssl */
554 memcpy(ptr, p, orig_size);
556 fm_free(qm, p, file, func, line);
566 MDBG("fm_realloc: doing nothing, same size: %lu - %lu\n",
571 MDBG("fm_realloc: returning %p\n", p);
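/* Behaviour sketch (illustrative calls):
 *     q = fm_realloc(pool, 0, 100);  => same as fm_malloc(pool, 100)
 *     q = fm_realloc(pool, q, 0);    => frees q, returns 0
 *     q = fm_realloc(pool, p, n);    => shrink: split the fragment;
 *                                       grow: join the next fragment if it
 *                                       is free and big enough, otherwise
 *                                       fm_malloc() + memcpy() + fm_free()
 */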
578 void fm_status(struct fm_block* qm)
587 memlog=cfg_get(core, core_cfg, memlog);
588 LOG_(DEFAULT_FACILITY, memlog, "fm_status: ", "fm_status (%p):\n", qm);
591 LOG_(DEFAULT_FACILITY, memlog, "fm_status: ", " heap size= %ld\n",
593 #if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
594 LOG_(DEFAULT_FACILITY, memlog, "fm_status: ",
595 " used= %lu, used+overhead=%lu, free=%lu\n",
596 qm->used, qm->real_used, qm->size-qm->real_used);
597 LOG_(DEFAULT_FACILITY, memlog, "fm_status: ",
598 " max used (+overhead)= %lu\n", qm->max_real_used);
601 LOG_(DEFAULT_FACILITY, memlog, "fm_status: ", "dumping all fragments:\n");
602 for (f=qm->first_frag, i=0;((char*)f<(char*)qm->last_frag) && (i<10);
603 f=FRAG_NEXT(f), i++){
604 LOG_(DEFAULT_FACILITY, memlog, "fm_status: ",
605 " %3d. %c address=%x size=%d\n", i,
606 (f->u.reserved)?'a':'N',
607 (char*)f+sizeof(struct fm_frag), f->size);
609 LOG_(DEFAULT_FACILITY, memlog, "fm_status: ",
610 " %s from %s: %s(%d)\n",
611 (f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
615 LOG_(DEFAULT_FACILITY, memlog, "fm_status: ", "dumping free list:\n");
616 for(h=0,i=0,size=0;h<F_HASH_SIZE;h++){
618 for (f=qm->free_hash[h].first,j=0; f;
619 size+=f->size,f=f->u.nxt_free,i++,j++){
620 if (!FRAG_WAS_USED(f)){
623 LOG_(DEFAULT_FACILITY, memlog, "fm_status: ",
624 "unused fragm.: hash = %3d, fragment %p,"
625 " address %p size %lu, created from %s: %s(%ld)\n",
626 h, f, (char*)f+sizeof(struct fm_frag), f->size,
627 f->file, f->func, f->line);
631 if (j) LOG_(DEFAULT_FACILITY, memlog, "fm_status: ",
632 "hash = %3d fragments no.: %5d, unused: %5d\n\t\t"
633 " bucket size: %9lu - %9lu (first %9lu)\n",
634 h, j, unused, UN_HASH(h),
635 ((h<=F_MALLOC_OPTIMIZE/ROUNDTO)?1:2)* UN_HASH(h),
636 qm->free_hash[h].first->size
638 if (j!=qm->free_hash[h].no){
639 LOG(L_CRIT, "BUG: fm_status: different free frag. count: %d!=%ld"
640 " for hash %3d\n", j, qm->free_hash[h].no, h);
644 LOG_(DEFAULT_FACILITY, memlog, "fm_status: ",
645 " %5d.[%3d:%3d] %c address=%x size=%d(%x)\n",
647 (f->u.reserved)?'a':'N',
648 (char*)f+sizeof(struct fm_frag), f->size, f->size);
650 DBG(" %s from %s: %s(%d)\n",
651 (f->u.reserved)?"freed":"alloc'd", f->file, f->func, f->line);
656 LOG_(DEFAULT_FACILITY, memlog, "fm_status: ",
657 "TOTAL: %6d free fragments = %6lu free bytes\n", i, size);
658 LOG_(DEFAULT_FACILITY, memlog, "fm_status: ",
659 "-----------------------------\n");
664 /* fills a malloc info structure with info about the block
665 * if a parameter is not supported, it will be filled with 0 */
666 void fm_info(struct fm_block* qm, struct mem_info* info)
670 #if !defined(DBG_F_MALLOC) && !defined(MALLOC_STATS)
674 memset(info,0, sizeof(*info));
676 info->total_size=qm->size;
677 info->min_frag=MIN_FRAG_SIZE;
678 #if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
679 info->free=qm->size-qm->real_used;
681 info->real_used=qm->real_used;
682 info->max_used=qm->max_real_used;
683 for(r=0;r<F_HASH_SIZE; r++){
684 total_frags+=qm->free_hash[r].no;
687 /* we'll have to compute it all */
688 for (r=0; r<=F_MALLOC_OPTIMIZE/ROUNDTO; r++){
689 info->free+=qm->free_hash[r].no*UN_HASH(r);
690 total_frags+=qm->free_hash[r].no;
692 for(;r<F_HASH_SIZE; r++){
693 total_frags+=qm->free_hash[r].no;
694 for(f=qm->free_hash[r].first;f;f=f->u.nxt_free){
698 info->real_used=info->total_size-info->free;
699 info->used=0; /* we don't really know */
700 info->used=info->real_used-total_frags*FRAG_OVERHEAD-INIT_OVERHEAD-
702 info->max_used=0; /* we don't really know */
704 info->total_frags=total_frags;
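/* Usage sketch (illustrative variable names):
 *     struct mem_info mi;
 *     fm_info(pool, &mi);
 *     => mi.total_size, mi.free, mi.used, mi.real_used, mi.max_used,
 *        mi.min_frag and mi.total_frags are filled in; fields that cannot
 *        be computed in the current build stay 0, as noted above.
 */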
709 /* returns how much free memory is available
710 * on error (not compiled with bookkeeping code) returns (unsigned long)(-1) */
711 unsigned long fm_available(struct fm_block* qm)
714 #if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
715 return qm->size-qm->real_used;
717 /* we don't know how much free memory we have and it's too expensive
719 return ((unsigned long)-1);
726 typedef struct _mem_counter{
734 struct _mem_counter *next;
737 static mem_counter* get_mem_counter(mem_counter **root,struct fm_frag* f)
741 if (!*root) goto make_new;
742 for(x=*root;x;x=x->next)
743 if (x->file == f->file && x->func == f->func && x->line == f->line)
746 x = malloc(sizeof(mem_counter));
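/* get_mem_counter() keys the summary on the (file, func, line) triple that
 * the DBG_F_MALLOC bookkeeping stores in each fragment: e.g. all fragments
 * allocated from a hypothetical foo.c: bar(42) share one counter whose
 * count and size totals fm_sums() prints below.
 */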
759 void fm_sums(struct fm_block* qm)
762 struct fm_frag* free_frag;
765 mem_counter *root,*x;
770 memlog=cfg_get(core, core_cfg, memlog);
771 LOG_(DEFAULT_FACILITY, memlog, "fm_sums: ",
772 "summarizing all alloc'ed fragments:\n");
774 for (f=qm->first_frag, i=0; (char*)f<(char*)qm->last_frag;
775 f=FRAG_NEXT(f), i++){
776 if (f->u.nxt_free==0){
777 /* it might be in-use or the last free fragm. in a free list
778 => search the free frags of the same size for a possible
780 hash=GET_HASH(f->size);
781 for(free_frag=qm->free_hash[hash].first;
782 free_frag && (free_frag!=f);
783 free_frag=free_frag->u.nxt_free);
784 if (free_frag==0){ /* not found among the free frag */
785 x = get_mem_counter(&root,f);
793 LOG_(DEFAULT_FACILITY, memlog, "fm_sums: ",
794 " count=%6d size=%10lu bytes from %s: %s(%ld)\n",
796 x->file, x->func, x->line
802 LOG_(DEFAULT_FACILITY, memlog, "fm_sums: ",
803 "-----------------------------\n");
805 #endif /* DBG_F_MALLOC */