 * Copyright (C) 2001-2003 FhG Fokus
 * This file is part of sip-router, a free SIP server.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * ????-??-??  created by andrei
 * 2003-04-14  more debugging added in DBG_QM_MALLOC mode (andrei)
 * 2003-06-29  added qm_realloc (andrei)
 * 2004-07-19  fragments book keeping code and support for 64 bits
 *             memory blocks (64 bits machine & size>=2^32) (andrei)
 *             GET_HASH s/</<=/ (avoids waste of 1 hash cell) (andrei)
 * 2004-11-10  support for > 4Gb mem., switched to long (andrei)
 * 2005-03-02  added qm_info() (andrei)
 * 2005-12-12  fixed realloc shrink real_used & used accounting;
 *             fixed initial size (andrei)
 * 2006-02-03  fixed realloc out of mem. free bug (andrei)
 * 2006-04-07  s/DBG/MDBG (andrei)
 * 2007-02-23  added qm_available() (andrei)
 * 2009-09-28  added qm_sums() (patch from Dragos Vingarzan)

#if !defined(q_malloc) && !(defined F_MALLOC)

#include "../dprint.h"
#include "../globals.h"
#include "../cfg/cfg.h" /* memlog */

	((struct qm_frag_end*)((char*)(f)+sizeof(struct qm_frag)+ \

#define FRAG_NEXT(f) \
	((struct qm_frag*)((char*)(f)+sizeof(struct qm_frag)+(f)->size+ \
		sizeof(struct qm_frag_end)))

#define FRAG_PREV(f) \
	( (struct qm_frag*) ( ((char*)(f)-sizeof(struct qm_frag_end))- \
		((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))->size- \
		sizeof(struct qm_frag) ) )

#define PREV_FRAG_END(f) \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))

#define FRAG_OVERHEAD (sizeof(struct qm_frag)+sizeof(struct qm_frag_end))
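
/* Illustrative sketch of the per-fragment memory layout implied by the macros
 * above (the exact field contents of struct qm_frag/qm_frag_end live in the
 * allocator's header):
 *
 *   +----------------+-----------------------------+---------------------+
 *   | struct qm_frag | user data: (f)->size bytes  | struct qm_frag_end  |
 *   | (header)       | (a ROUNDTO multiple)        | (trailer)           |
 *   +----------------+-----------------------------+---------------------+
 *   ^                ^                             ^
 *   f                pointer handed to the caller  FRAG_END(f)
 *
 * FRAG_NEXT(f) skips header + data + trailer to reach the following fragment;
 * FRAG_OVERHEAD is the per-fragment bookkeeping cost (header plus trailer).
 */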

#define ROUNDTO_MASK (~((unsigned long)ROUNDTO-1))
#define ROUNDUP(s)   (((s)+(ROUNDTO-1))&ROUNDTO_MASK)
#define ROUNDDOWN(s) ((s)&ROUNDTO_MASK)

/* alternative generic definitions (for a ROUNDTO that is not a power of two):
#define ROUNDUP(s)   (((s)%ROUNDTO)?((s)+ROUNDTO)/ROUNDTO*ROUNDTO:(s))
#define ROUNDDOWN(s) (((s)%ROUNDTO)?((s)/ROUNDTO)*ROUNDTO:(s))
*/
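
/* Illustrative only: with ROUNDTO equal to, say, 16 (a power of two),
 * ROUNDTO_MASK is ~0xfUL, so
 *   ROUNDUP(1)==16, ROUNDUP(16)==16, ROUNDUP(17)==32,
 *   ROUNDDOWN(17)==16, ROUNDDOWN(32)==32,
 * i.e. requested sizes are aligned up, and the usable block size is aligned
 * down, to a ROUNDTO boundary. */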

/* finds the hash value for s, s=ROUNDTO multiple */
#define GET_HASH(s) ( ((unsigned long)(s)<=QM_MALLOC_OPTIMIZE)?\
			(unsigned long)(s)/ROUNDTO: \
			QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
				QM_MALLOC_OPTIMIZE_FACTOR+1 )
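
/* Illustrative bucket mapping; the concrete numbers assume ROUNDTO==16,
 * QM_MALLOC_OPTIMIZE_FACTOR==14 and QM_MALLOC_OPTIMIZE==16384 (the real
 * values come from the allocator's header and may differ):
 *   GET_HASH(16)==1, GET_HASH(32)==2, ..., GET_HASH(16384)==1024
 *       - sizes up to QM_MALLOC_OPTIMIZE get one bucket per ROUNDTO step
 *   GET_HASH(16400)==1025, GET_HASH(40000)==1026, ...
 *       - larger sizes share one bucket per power of two, via big_hash_idx()
 * UN_HASH() below maps a bucket index back to the lower end of that bucket's
 * size range. */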

#define UN_HASH(h) ( ((unsigned long)(h)<=(QM_MALLOC_OPTIMIZE/ROUNDTO))?\
			(unsigned long)(h)*ROUNDTO: \
			1UL<<((h)-QM_MALLOC_OPTIMIZE/ROUNDTO+\
				QM_MALLOC_OPTIMIZE_FACTOR-1)\

/* mark/test used/unused frags */
#define FRAG_MARK_USED(f)
#define FRAG_CLEAR_USED(f)
#define FRAG_WAS_USED(f)   (1)

/* other frag related defines:
#define MEM_FRAG_AVOIDANCE

/* computes hash number for big buckets */
inline static unsigned long big_hash_idx(unsigned long s)
	/* s is rounded => s = k*2^n (ROUNDTO=2^n)
	 * index = i such that 2^(i+1) > s >= 2^i
	 * => index = position of the most significant non-null bit in s */
	idx=sizeof(long)*8-1;
	for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--);
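	/* illustrative trace (assuming a 64-bit long): for s==0x5000 (20480) the
	 * most significant set bit is bit 14, so the loop shifts s left 49 times
	 * while idx drops from 63 to 14, and the function returns 14, i.e.
	 * floor(log2(s)) */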

#define ST_CHECK_PATTERN   0xf0f0f0f0
#define END_CHECK_PATTERN1 0xc0c0c0c0
#define END_CHECK_PATTERN2 0xabcdefed

static void qm_debug_frag(struct qm_block* qm, struct qm_frag* f)
	if (f->check!=ST_CHECK_PATTERN){
		LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
				"beginning overwritten(%lx)!\n",
				f, (char*)f+sizeof(struct qm_frag),
	if ((FRAG_END(f)->check1!=END_CHECK_PATTERN1)||
		(FRAG_END(f)->check2!=END_CHECK_PATTERN2)){
		LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p)"
				" end overwritten(%lx, %lx)!\n",
				f, (char*)f+sizeof(struct qm_frag),
				FRAG_END(f)->check1, FRAG_END(f)->check2);
	if ((f>qm->first_frag)&&
		((PREV_FRAG_END(f)->check1!=END_CHECK_PATTERN1) ||
		 (PREV_FRAG_END(f)->check2!=END_CHECK_PATTERN2) ) ){
		LOG(L_CRIT, "BUG: qm_*: prev. fragm. tail overwritten(%lx, %lx)[%p:%p]!"
				PREV_FRAG_END(f)->check1, PREV_FRAG_END(f)->check2, f,
				(char*)f+sizeof(struct qm_frag));

static inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
	struct qm_frag* prev;

	hash=GET_HASH(frag->size);
	for(f=qm->free_hash[hash].head.u.nxt_free; f!=&(qm->free_hash[hash].head);
		if (frag->size <= f->size) break;
	prev=FRAG_END(f)->prev_free;
	prev->u.nxt_free=frag;
	FRAG_END(frag)->prev_free=prev;
	FRAG_END(f)->prev_free=frag;
	qm->free_hash[hash].no++;
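	/* note: each free_hash bucket list is kept ordered by non-decreasing
	 * fragment size (the loop above stops at the first fragment at least as
	 * large as the one being inserted); the small per-ROUNDTO buckets hold
	 * fragments of a single size anyway, but for the shared power-of-two
	 * buckets this lets a lookup walking the list stop at the smallest
	 * fragment that fits */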

/* init malloc and return a qm_block */
struct qm_block* qm_malloc_init(char* address, unsigned long size)
	unsigned long init_overhead;
	/* make address and size a multiple of ROUNDTO */
	start=(char*)ROUNDUP((unsigned long) address);
	DBG("qm_malloc_init: QM_OPTIMIZE=%lu, /ROUNDTO=%lu\n",
			QM_MALLOC_OPTIMIZE, QM_MALLOC_OPTIMIZE/ROUNDTO);
	DBG("qm_malloc_init: QM_HASH_SIZE=%lu, qm_block size=%lu\n",
			QM_HASH_SIZE, (long)sizeof(struct qm_block));
	DBG("qm_malloc_init(%p, %lu), start=%p\n", address, size, start);
	if (size<start-address) return 0;
	size-=(start-address);
	if (size <(MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
	size=ROUNDDOWN(size);

	init_overhead=ROUNDUP(sizeof(struct qm_block))+sizeof(struct qm_frag)+
		sizeof(struct qm_frag_end);
	DBG("qm_malloc_init: size= %lu, init_overhead=%lu\n", size, init_overhead);

	if (size < init_overhead)
		/* not enough mem to create our control structures !!!*/

	qm=(struct qm_block*)start;
	memset(qm, 0, sizeof(struct qm_block));
	qm->real_used=init_overhead;
	qm->max_real_used=qm->real_used;

	qm->first_frag=(struct qm_frag*)(start+ROUNDUP(sizeof(struct qm_block)));
	qm->last_frag_end=(struct qm_frag_end*)(end-sizeof(struct qm_frag_end));
	/* init initial fragment */
	qm->first_frag->size=size;
	qm->last_frag_end->size=size;

	qm->first_frag->check=ST_CHECK_PATTERN;
	qm->last_frag_end->check1=END_CHECK_PATTERN1;
	qm->last_frag_end->check2=END_CHECK_PATTERN2;

	/* init free_hash* */
	for (h=0; h<QM_HASH_SIZE;h++){
		qm->free_hash[h].head.u.nxt_free=&(qm->free_hash[h].head);
		qm->free_hash[h].tail.prev_free=&(qm->free_hash[h].head);
		qm->free_hash[h].head.size=0;
		qm->free_hash[h].tail.size=0;

	/* link initial fragment into the free list */
	qm_insert_free(qm, qm->first_frag);
	/*qm->first_frag->u.nxt_free=&(qm->free_lst);
	qm->last_frag_end->prev_free=&(qm->free_lst); */
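
	/* Illustrative usage sketch (not part of the original code; it assumes a
	 * build without DBG_QM_MALLOC, where qm_malloc()/qm_free() take no
	 * file/function/line arguments, and a caller that already owns a large
	 * enough memory region):
	 *
	 *   static char region[1024*1024];
	 *   struct qm_block* qm;
	 *   void* p;
	 *
	 *   qm = qm_malloc_init(region, sizeof(region));
	 *   if (qm) {
	 *       p = qm_malloc(qm, 128);
	 *       if (p)
	 *           qm_free(qm, p);
	 *   }
	 */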

static inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
	struct qm_frag *prev;
	struct qm_frag *next;

	prev=FRAG_END(frag)->prev_free;
	next=frag->u.nxt_free;
	prev->u.nxt_free=next;
	FRAG_END(next)->prev_free=prev;

static inline struct qm_frag* qm_find_free(struct qm_block* qm,

static inline struct qm_frag* qm_find_free(struct qm_block* qm,

	for (hash=GET_HASH(size); hash<QM_HASH_SIZE; hash++){
		for (f=qm->free_hash[hash].head.u.nxt_free;
				f!=&(qm->free_hash[hash].head); f=f->u.nxt_free){
			*count+=1; /* *count++ generates a warning with gcc 2.9* -Wall */
			if (f->size>=size){ *h=hash; return f; }
	/*try in a bigger bucket*/

/* returns 0 on success, -1 on error;
 * new_size < size & rounded-up already! */
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size,
		const char* file, const char* func, unsigned int line)
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size)
	struct qm_frag_end* end;

	rest=f->size-new_size;
#ifdef MEM_FRAG_AVOIDANCE
	if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+new_size))){/* the residue fragm. is big enough */
	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
		/*split the fragment*/
		n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
		n->size=rest-FRAG_OVERHEAD;
		FRAG_END(n)->size=n->size;
		FRAG_CLEAR_USED(n); /* never used */
		qm->real_used+=FRAG_OVERHEAD;

		end->check1=END_CHECK_PATTERN1;
		end->check2=END_CHECK_PATTERN2;
		/* frag created by malloc, mark it*/
		n->check=ST_CHECK_PATTERN;

		/* reinsert n in free list*/
		qm_insert_free(qm, n);
	/* we cannot split this fragment any more */

void* qm_malloc(struct qm_block* qm, unsigned long size,
		const char* file, const char* func, unsigned int line)
void* qm_malloc(struct qm_block* qm, unsigned long size)
	unsigned int list_cntr;

	MDBG("qm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
	/* size must be a multiple of ROUNDTO */
	if (size>(qm->size-qm->real_used)) return 0;

	/*search for a suitable free frag*/
	if ((f=qm_find_free(qm, size, &hash, &list_cntr))!=0){
	if ((f=qm_find_free(qm, size, &hash))!=0){
		/*detach it from the free list*/
		qm_debug_frag(qm, f);
		qm_detach_free(qm, f);
		/*mark it as "busy"*/
		qm->free_hash[hash].no--;
		/* we ignore split return */
		split_frag(qm, f, size, file, "fragm. from qm_malloc", line);
		split_frag(qm, f, size);
		qm->real_used+=f->size;

		if (qm->max_real_used<qm->real_used)
			qm->max_real_used=qm->real_used;

		f->check=ST_CHECK_PATTERN;
		/* FRAG_END(f)->check1=END_CHECK_PATTERN1;
		   FRAG_END(f)->check2=END_CHECK_PATTERN2;*/
		MDBG("qm_malloc(%p, %lu) returns address %p frag. %p (size=%lu) on %d"
				qm, size, (char*)f+sizeof(struct qm_frag), f, f->size, list_cntr );
		return (char*)f+sizeof(struct qm_frag);

void qm_free(struct qm_block* qm, void* p, const char* file, const char* func,
void qm_free(struct qm_block* qm, void* p)
	struct qm_frag* prev;
	struct qm_frag* next;

	MDBG("qm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func, line);
	if (p>(void*)qm->last_frag_end || p<(void*)qm->first_frag){
		LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!) - "
		LOG(L_WARN, "WARNING:qm_free: free(0) called\n");
	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
	qm_debug_frag(qm, f);
		LOG(L_CRIT, "BUG: qm_free: freeing already freed pointer,"
				" first free: %s: %s(%ld) - aborting\n",
				f->file, f->func, f->line);
	MDBG("qm_free: freeing frag. %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);

	/* mark this fragment as free (it might end up in the middle of a joined
	   fragment), to give us an extra chance of detecting a double free call
	   (if the joined fragment has not yet been reused) */
	f->u.nxt_free=(void*)0x1L; /* bogus value, just to mark it as free */
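	/* e.g.: if this fragment is merged into the previous free fragment below,
	 * its header stays behind inside the merged fragment's data area with
	 * u.is_free still non-zero, so a second qm_free() on the same pointer is
	 * reported as "freeing already freed pointer" in DBG_QM_MALLOC builds,
	 * as long as the merged fragment has not been reused */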
	/* join adjacent free fragments if possible */
	if (((char*)next < (char*)qm->last_frag_end) && (next->u.is_free)){
		qm_debug_frag(qm, next);
		qm_detach_free(qm, next);
		size+=next->size+FRAG_OVERHEAD;
		qm->real_used-=FRAG_OVERHEAD;
		qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */

	if (f > qm->first_frag){
		/* (struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f-
		   sizeof(struct qm_frag_end))->size);*/
		qm_debug_frag(qm, prev);
		if (prev->u.is_free){
			qm_detach_free(qm, prev);
			size+=prev->size+FRAG_OVERHEAD;
			qm->real_used-=FRAG_OVERHEAD;
			qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */

	FRAG_END(f)->size=f->size;
#endif /* QM_JOIN_FREE*/

	qm_insert_free(qm, f);

void* qm_realloc(struct qm_block* qm, void* p, unsigned long size,
		const char* file, const char* func, unsigned int line)
void* qm_realloc(struct qm_block* qm, void* p, unsigned long size)
	unsigned long orig_size;

	MDBG("qm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
	if ((p)&&(p>(void*)qm->last_frag_end || p<(void*)qm->first_frag)){
		LOG(L_CRIT, "BUG: qm_realloc: bad pointer %p (out of memory block!) - "
		qm_free(qm, p, file, func, line);

		return qm_malloc(qm, size, file, func, line);
		return qm_malloc(qm, size);

	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
	qm_debug_frag(qm, f);
	MDBG("qm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
		LOG(L_CRIT, "BUG:qm_realloc: trying to realloc an already freed "
				"pointer %p , fragment %p -- aborting\n", p, f);

	/* find first acceptable size */
		MDBG("qm_realloc: shrinking from %lu to %lu\n", f->size, size);
		if(split_frag(qm, f, size, file, "fragm. from qm_realloc", line)!=0){
			MDBG("qm_realloc: shrunk successfully\n");
		if(split_frag(qm, f, size)!=0){
			/* update used sizes: freed the split frag */
			/* split frag already adds FRAG_OVERHEAD for the newly created
			   free frag, so here we only need orig_size-f->size for real used
			qm->real_used-=(orig_size-f->size);
			qm->used-=(orig_size-f->size);
	}else if (f->size < size){
		MDBG("qm_realloc: growing from %lu to %lu\n", f->size, size);

		if (((char*)n < (char*)qm->last_frag_end) &&
				(n->u.is_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
			qm_detach_free(qm, n);
			qm->free_hash[GET_HASH(n->size)].no--; /*FIXME: slow*/
			f->size+=n->size+FRAG_OVERHEAD;
			qm->real_used-=FRAG_OVERHEAD;
			FRAG_END(f)->size=f->size;
			/* end checks should be ok */
			/* split it if necessary */
			if (f->size > size ){
				split_frag(qm, f, size, file, "fragm. from qm_realloc",
				split_frag(qm, f, size);

			qm->real_used+=(f->size-orig_size);
			qm->used+=(f->size-orig_size);
		/* could not join => realloc */
			ptr=qm_malloc(qm, size, file, func, line);
			ptr=qm_malloc(qm, size);
		/* copy, needed by libssl */
		memcpy(ptr, p, orig_size);
		qm_free(qm, p, file, func, line);

		MDBG("qm_realloc: doing nothing, same size: %lu - %lu\n",

	MDBG("qm_realloc: returning %p\n", p);

void qm_check(struct qm_block* qm)
	memlog=cfg_get(core, core_cfg, memlog);
	LOG(memlog, "DEBUG: qm_check()\n");

	while ((char*)f < (char*)qm->last_frag_end) {
		/* check struct qm_frag */
		if (f->check!=ST_CHECK_PATTERN){
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
					"beginning overwritten(%lx)!\n",
					f, (char*)f + sizeof(struct qm_frag),
		if ((char*)f + sizeof(struct qm_frag) + f->size + sizeof(struct qm_frag_end)
				> (char*)qm->first_frag + qm->size) {
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
					"bad size: %lu (frag end: %p > end of block: %p)\n",
					f, (char*)f + sizeof(struct qm_frag), f->size,
					(char*)f + sizeof(struct qm_frag) + f->size + sizeof(struct qm_frag_end),
					(char*)qm->first_frag + qm->size);

		/* check struct qm_frag_end */
		if (FRAG_END(f)->size != f->size) {
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
					"size in qm_frag and qm_frag_end does not match: frag->size=%lu, frag_end->size=%lu)\n",
					f, (char*)f + sizeof(struct qm_frag),
					f->size, FRAG_END(f)->size);

		if ((FRAG_END(f)->check1 != END_CHECK_PATTERN1) ||
				(FRAG_END(f)->check2 != END_CHECK_PATTERN2)) {
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p)"
					" end overwritten(%lx, %lx)!\n",
					f, (char*)f + sizeof(struct qm_frag),
					FRAG_END(f)->check1, FRAG_END(f)->check2);

	LOG(memlog, "DEBUG: qm_check: %lu fragments OK\n", fcount);

void qm_status(struct qm_block* qm)
	memlog=cfg_get(core, core_cfg, memlog);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "(%p):\n", qm);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "heap size= %lu\n",
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"used= %lu, used+overhead=%lu, free=%lu\n",
			qm->used, qm->real_used, qm->size-qm->real_used);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"max used (+overhead)= %lu\n", qm->max_real_used);

	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"dumping all alloc'ed fragments:\n");
	for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f)
		LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
				" %3d. %c address=%p frag=%p size=%lu used=%d\n",
				(f->u.is_free)?'a':'N',
				(char*)f+sizeof(struct qm_frag), f, f->size, FRAG_WAS_USED(f));
		LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
				" %s from %s: %s(%ld)\n",
				(f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
		LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
				" start check=%lx, end check= %lx, %lx\n",
				f->check, FRAG_END(f)->check1, FRAG_END(f)->check2);

	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"dumping free list stats :\n");
	for(h=0,i=0;h<QM_HASH_SIZE;h++){

		for (f=qm->free_hash[h].head.u.nxt_free,j=0;
				f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){
			if (!FRAG_WAS_USED(f)){
				LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
						"unused fragm.: hash = %3d, fragment %p,"
						" address %p size %lu, created from %s: %s(%lu)\n",
						h, f, (char*)f+sizeof(struct qm_frag), f->size,
						f->file, f->func, f->line);

		if (j) LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
				"hash= %3d. fragments no.: %5d, unused: %5d\n"
				"\t\t bucket size: %9lu - %9ld (first %9lu)\n",
				h, j, unused, UN_HASH(h),
				((h<=QM_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h),
				qm->free_hash[h].head.u.nxt_free->size
		if (j!=qm->free_hash[h].no){
			LOG(L_CRIT, "BUG: qm_status: different free frag. count: %d!=%lu"
					" for hash %3d\n", j, qm->free_hash[h].no, h);

	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"-----------------------------\n");

/* fills a malloc info structure with info about the block
 * if a parameter is not supported, it will be filled with 0 */
void qm_info(struct qm_block* qm, struct mem_info* info)
	memset(info,0, sizeof(*info));
	info->total_size=qm->size;
	info->min_frag=MIN_FRAG_SIZE;
	info->free=qm->size-qm->real_used;
	info->real_used=qm->real_used;
	info->max_used=qm->max_real_used;
	for(r=0;r<QM_HASH_SIZE; r++){
		total_frags+=qm->free_hash[r].no;
	info->total_frags=total_frags;

/* returns how much free memory is available
 * it never returns an error (unlike fm_available) */
unsigned long qm_available(struct qm_block* qm)
	return qm->size-qm->real_used;

typedef struct _mem_counter{
	struct _mem_counter *next;

static mem_counter* get_mem_counter(mem_counter **root, struct qm_frag* f)
	if (!*root) goto make_new;
	for(x=*root;x;x=x->next)
		if (x->file == f->file && x->func == f->func && x->line == f->line)
	x = malloc(sizeof(mem_counter));

void qm_sums(struct qm_block* qm)
	mem_counter *root, *x;

	memlog=cfg_get(core, core_cfg, memlog);
	LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
			"summarizing all alloc'ed fragments:\n");
	for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;
		x = get_mem_counter(&root,f);

		LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
				" count=%6d size=%10lu bytes from %s: %s(%ld)\n",
				x->file, x->func, x->line

	LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
			"-----------------------------\n");

#endif /* DBG_QM_MALLOC */