/*
 * Copyright (C) 2001-2003 FhG Fokus
 *
 * This file is part of sip-router, a free SIP server.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * History:
 * --------
 * ????-??-?? created by andrei
 * 2003-04-14 more debugging added in DBG_QM_MALLOC mode (andrei)
 * 2003-06-29 added qm_realloc (andrei)
 * 2004-07-19 fragments book keeping code and support for 64 bits
 *            memory blocks (64 bits machine & size>=2^32) (andrei)
 *            GET_HASH s/</<=/ (avoids waste of 1 hash cell) (andrei)
 * 2004-11-10 support for > 4Gb mem., switched to long (andrei)
 * 2005-03-02 added qm_info() (andrei)
 * 2005-12-12 fixed realloc shrink real_used & used accounting;
 *            fixed initial size (andrei)
 * 2006-02-03 fixed realloc out of mem. free bug (andrei)
 * 2006-04-07 s/DBG/MDBG (andrei)
 * 2007-02-23 added fm_available() (andrei)
 * 2009-09-28 added fm_sums() (patch from Dragos Vingarzan)
 */
40 #if !defined(q_malloc) && !(defined F_MALLOC)
47 #include "../dprint.h"
48 #include "../globals.h"
50 #include "../cfg/cfg.h" /* memlog */
52 #include "../events.h"
/* pointer to the trailer (qm_frag_end) of fragment f:
 * header | payload (f->size bytes) | trailer */
#define FRAG_END(f) \
	((struct qm_frag_end*)((char*)(f)+sizeof(struct qm_frag)+ \
	   (f)->size))

/* next fragment in memory order: skip header, payload and trailer */
#define FRAG_NEXT(f) \
	((struct qm_frag*)((char*)(f)+sizeof(struct qm_frag)+(f)->size+ \
	   sizeof(struct qm_frag_end)))

/* previous fragment in memory order; relies on the size copy stored
 * in the previous fragment's trailer, which sits right before f */
#define FRAG_PREV(f) \
	( (struct qm_frag*) ( ((char*)(f)-sizeof(struct qm_frag_end))- \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))->size- \
		sizeof(struct qm_frag) ) )

/* trailer of the fragment that immediately precedes f in memory */
#define PREV_FRAG_END(f) \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))

/* per-fragment bookkeeping overhead: header + trailer */
#define FRAG_OVERHEAD	(sizeof(struct qm_frag)+sizeof(struct qm_frag_end))

/* rounding to a ROUNDTO multiple; the mask forms assume ROUNDTO is a
 * power of two (it is, in this allocator) */
#define ROUNDTO_MASK	(~((unsigned long)ROUNDTO-1))
#define ROUNDUP(s)		(((s)+(ROUNDTO-1))&ROUNDTO_MASK)
#define ROUNDDOWN(s)	((s)&ROUNDTO_MASK)
/* generic variants, kept for a non-power-of-two ROUNDTO:
#define ROUNDUP(s)		(((s)%ROUNDTO)?((s)+ROUNDTO)/ROUNDTO*ROUNDTO:(s))
#define ROUNDDOWN(s)	(((s)%ROUNDTO)?((s)-ROUNDTO)/ROUNDTO*ROUNDTO:(s))
*/
/* finds the hash value for s, s=ROUNDTO multiple.
 * sizes <= QM_MALLOC_OPTIMIZE get one bucket per ROUNDTO step;
 * bigger sizes share power-of-two buckets indexed via big_hash_idx() */
#define GET_HASH(s)   ( ((unsigned long)(s)<=QM_MALLOC_OPTIMIZE)?\
							(unsigned long)(s)/ROUNDTO: \
							QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
								QM_MALLOC_OPTIMIZE_FACTOR+1 )

/* inverse of GET_HASH: smallest size that maps to hash cell h */
#define UN_HASH(h)	( ((unsigned long)(h)<=(QM_MALLOC_OPTIMIZE/ROUNDTO))?\
						(unsigned long)(h)*ROUNDTO: \
						1UL<<((h)-QM_MALLOC_OPTIMIZE/ROUNDTO+\
							QM_MALLOC_OPTIMIZE_FACTOR-1)\
					)

/* mark/test used/unused frags: compiled out here (no per-frag usage
 * tracking); FRAG_WAS_USED(f) then always reports "used" */
#define FRAG_MARK_USED(f)
#define FRAG_CLEAR_USED(f)
#define FRAG_WAS_USED(f)   (1)

/* other frag related defines:
 * MEM_FRAG_AVOIDANCE: tries not to create very small splinter fragments
 */
#define MEM_FRAG_AVOIDANCE
/* computes hash number for big buckets (size > QM_MALLOC_OPTIMIZE).
 * Returns i such that 2^(i+1) > s >= 2^i, i.e. the position of the
 * highest set bit; s must be non-zero (it is a ROUNDTO multiple). */
inline static unsigned long big_hash_idx(unsigned long s)
{
	unsigned long idx;
	/* s is rounded => s = k*2^n (ROUNDTO=2^n)
	 * => index = position of the first non null bit in s */
	idx=sizeof(long)*8-1;
	for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--);
	return idx;
}
/* debug patterns written into fragment header/trailer on allocation and
 * verified by qm_debug_frag() to detect over/underflow writes */
#define ST_CHECK_PATTERN   0xf0f0f0f0
#define END_CHECK_PATTERN1 0xc0c0c0c0
#define END_CHECK_PATTERN2 0xabcdefed
134 static void qm_debug_frag(struct qm_block* qm, struct qm_frag* f)
136 if (f->check!=ST_CHECK_PATTERN){
137 LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
138 "beginning overwritten(%lx)!\n",
139 f, (char*)f+sizeof(struct qm_frag),
144 if ((FRAG_END(f)->check1!=END_CHECK_PATTERN1)||
145 (FRAG_END(f)->check2!=END_CHECK_PATTERN2)){
146 LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p)"
147 " end overwritten(%lx, %lx)!\n",
148 f, (char*)f+sizeof(struct qm_frag),
149 FRAG_END(f)->check1, FRAG_END(f)->check2);
153 if ((f>qm->first_frag)&&
154 ((PREV_FRAG_END(f)->check1!=END_CHECK_PATTERN1) ||
155 (PREV_FRAG_END(f)->check2!=END_CHECK_PATTERN2) ) ){
156 LOG(L_CRIT, "BUG: qm_*: prev. fragm. tail overwritten(%lx, %lx)[%p:%p]!"
158 PREV_FRAG_END(f)->check1, PREV_FRAG_END(f)->check2, f,
159 (char*)f+sizeof(struct qm_frag));
168 static inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
171 struct qm_frag* prev;
174 hash=GET_HASH(frag->size);
175 for(f=qm->free_hash[hash].head.u.nxt_free; f!=&(qm->free_hash[hash].head);
177 if (frag->size <= f->size) break;
180 prev=FRAG_END(f)->prev_free;
181 prev->u.nxt_free=frag;
182 FRAG_END(frag)->prev_free=prev;
184 FRAG_END(f)->prev_free=frag;
185 qm->free_hash[hash].no++;
190 /* init malloc and return a qm_block*/
191 struct qm_block* qm_malloc_init(char* address, unsigned long size)
196 unsigned long init_overhead;
199 /* make address and size multiple of 8*/
200 start=(char*)ROUNDUP((unsigned long) address);
201 DBG("qm_malloc_init: QM_OPTIMIZE=%lu, /ROUNDTO=%lu\n",
202 QM_MALLOC_OPTIMIZE, QM_MALLOC_OPTIMIZE/ROUNDTO);
203 DBG("qm_malloc_init: QM_HASH_SIZE=%lu, qm_block size=%lu\n",
204 QM_HASH_SIZE, (long)sizeof(struct qm_block));
205 DBG("qm_malloc_init(%p, %lu), start=%p\n", address, size, start);
206 if (size<start-address) return 0;
207 size-=(start-address);
208 if (size <(MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
209 size=ROUNDDOWN(size);
211 init_overhead=ROUNDUP(sizeof(struct qm_block))+sizeof(struct qm_frag)+
212 sizeof(struct qm_frag_end);
213 DBG("qm_malloc_init: size= %lu, init_overhead=%lu\n", size, init_overhead);
215 if (size < init_overhead)
217 /* not enough mem to create our control structures !!!*/
221 qm=(struct qm_block*)start;
222 memset(qm, 0, sizeof(struct qm_block));
224 qm->real_used=init_overhead;
225 qm->max_real_used=qm->real_used;
228 qm->first_frag=(struct qm_frag*)(start+ROUNDUP(sizeof(struct qm_block)));
229 qm->last_frag_end=(struct qm_frag_end*)(end-sizeof(struct qm_frag_end));
230 /* init initial fragment*/
231 qm->first_frag->size=size;
232 qm->last_frag_end->size=size;
235 qm->first_frag->check=ST_CHECK_PATTERN;
236 qm->last_frag_end->check1=END_CHECK_PATTERN1;
237 qm->last_frag_end->check2=END_CHECK_PATTERN2;
239 /* init free_hash* */
240 for (h=0; h<QM_HASH_SIZE;h++){
241 qm->free_hash[h].head.u.nxt_free=&(qm->free_hash[h].head);
242 qm->free_hash[h].tail.prev_free=&(qm->free_hash[h].head);
243 qm->free_hash[h].head.size=0;
244 qm->free_hash[h].tail.size=0;
247 /* link initial fragment into the free list*/
249 qm_insert_free(qm, qm->first_frag);
251 /*qm->first_frag->u.nxt_free=&(qm->free_lst);
252 qm->last_frag_end->prev_free=&(qm->free_lst);
261 static inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
263 struct qm_frag *prev;
264 struct qm_frag *next;
266 prev=FRAG_END(frag)->prev_free;
267 next=frag->u.nxt_free;
268 prev->u.nxt_free=next;
269 FRAG_END(next)->prev_free=prev;
276 static inline struct qm_frag* qm_find_free(struct qm_block* qm,
281 static inline struct qm_frag* qm_find_free(struct qm_block* qm,
289 for (hash=GET_HASH(size); hash<QM_HASH_SIZE; hash++){
290 for (f=qm->free_hash[hash].head.u.nxt_free;
291 f!=&(qm->free_hash[hash].head); f=f->u.nxt_free){
293 *count+=1; /* *count++ generates a warning with gcc 2.9* -Wall */
295 if (f->size>=size){ *h=hash; return f; }
297 /*try in a bigger bucket*/
304 /* returns 0 on success, -1 on error;
305 * new_size < size & rounded-up already!*/
308 int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size,
309 const char* file, const char* func, unsigned int line)
311 int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size)
316 struct qm_frag_end* end;
318 rest=f->size-new_size;
319 #ifdef MEM_FRAG_AVOIDANCE
320 if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
321 (rest>=(FRAG_OVERHEAD+new_size))){/* the residue fragm. is big enough*/
323 if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
326 /*split the fragment*/
329 n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
330 n->size=rest-FRAG_OVERHEAD;
331 FRAG_END(n)->size=n->size;
332 FRAG_CLEAR_USED(n); /* never used */
333 qm->real_used+=FRAG_OVERHEAD;
335 end->check1=END_CHECK_PATTERN1;
336 end->check2=END_CHECK_PATTERN2;
337 /* frag created by malloc, mark it*/
341 n->check=ST_CHECK_PATTERN;
343 /* reinsert n in free list*/
344 qm_insert_free(qm, n);
347 /* we cannot split this fragment any more */
355 void* qm_malloc(struct qm_block* qm, unsigned long size,
356 const char* file, const char* func, unsigned int line)
358 void* qm_malloc(struct qm_block* qm, unsigned long size)
365 unsigned int list_cntr;
368 MDBG("qm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
371 /*size must be a multiple of 8*/
373 if (size>(qm->size-qm->real_used)) return 0;
375 /*search for a suitable free frag*/
377 if ((f=qm_find_free(qm, size, &hash, &list_cntr))!=0){
379 if ((f=qm_find_free(qm, size, &hash))!=0){
382 /*detach it from the free list*/
384 qm_debug_frag(qm, f);
386 qm_detach_free(qm, f);
387 /*mark it as "busy"*/
389 qm->free_hash[hash].no--;
390 /* we ignore split return */
392 split_frag(qm, f, size, file, "fragm. from qm_malloc", line);
394 split_frag(qm, f, size);
396 qm->real_used+=f->size;
398 if (qm->max_real_used<qm->real_used)
399 qm->max_real_used=qm->real_used;
401 sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
402 sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
408 f->check=ST_CHECK_PATTERN;
409 /* FRAG_END(f)->check1=END_CHECK_PATTERN1;
410 FRAG_END(f)->check2=END_CHECK_PATTERN2;*/
411 MDBG("qm_malloc(%p, %lu) returns address %p frag. %p (size=%lu) on %d"
413 qm, size, (char*)f+sizeof(struct qm_frag), f, f->size, list_cntr );
415 return (char*)f+sizeof(struct qm_frag);
423 void qm_free(struct qm_block* qm, void* p, const char* file, const char* func,
426 void qm_free(struct qm_block* qm, void* p)
432 struct qm_frag* next;
433 struct qm_frag* prev;
434 #endif /* QM_JOIN_FREE*/
437 MDBG("qm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func, line);
438 if (p>(void*)qm->last_frag_end || p<(void*)qm->first_frag){
439 LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!) - "
445 LOG(L_WARN, "WARNING:qm_free: free(0) called\n");
450 #endif /* QM_JOIN_FREE*/
451 f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
453 qm_debug_frag(qm, f);
455 LOG(L_CRIT, "BUG: qm_free: freeing already freed pointer,"
456 " first free: %s: %s(%ld) - aborting\n",
457 f->file, f->func, f->line);
460 MDBG("qm_free: freeing frag. %p alloc'ed from %s: %s(%ld)\n",
461 f, f->file, f->func, f->line);
467 sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
468 sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
472 /* mark this fragment as used (might fall into the middle of joined frags)
473 to give us an extra change of detecting a double free call (if the joined
474 fragment has not yet been reused) */
475 f->u.nxt_free=(void*)0x1L; /* bogus value, just to mark it as free */
476 /* join packets if possible*/
478 if (((char*)next < (char*)qm->last_frag_end) &&( next->u.is_free)){
481 qm_debug_frag(qm, next);
483 qm_detach_free(qm, next);
484 size+=next->size+FRAG_OVERHEAD;
485 qm->real_used-=FRAG_OVERHEAD;
486 qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */
489 if (f > qm->first_frag){
491 /* (struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f-
492 sizeof(struct qm_frag_end))->size);*/
494 qm_debug_frag(qm, prev);
496 if (prev->u.is_free){
498 qm_detach_free(qm, prev);
499 size+=prev->size+FRAG_OVERHEAD;
500 qm->real_used-=FRAG_OVERHEAD;
501 qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */
506 FRAG_END(f)->size=f->size;
507 #endif /* QM_JOIN_FREE*/
513 qm_insert_free(qm, f);
519 void* qm_realloc(struct qm_block* qm, void* p, unsigned long size,
520 const char* file, const char* func, unsigned int line)
522 void* qm_realloc(struct qm_block* qm, void* p, unsigned long size)
527 unsigned long orig_size;
533 MDBG("qm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
535 if ((p)&&(p>(void*)qm->last_frag_end || p<(void*)qm->first_frag)){
536 LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!) - "
545 qm_free(qm, p, file, func, line);
553 return qm_malloc(qm, size, file, func, line);
555 return qm_malloc(qm, size);
557 f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
559 qm_debug_frag(qm, f);
560 MDBG("qm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
561 f, f->file, f->func, f->line);
563 LOG(L_CRIT, "BUG:qm_realloc: trying to realloc an already freed "
564 "pointer %p , fragment %p -- aborting\n", p, f);
568 /* find first acceptable size */
574 MDBG("qm_realloc: shrinking from %lu to %lu\n", f->size, size);
575 if(split_frag(qm, f, size, file, "fragm. from qm_realloc", line)!=0){
576 MDBG("qm_realloc : shrinked successful\n");
578 if(split_frag(qm, f, size)!=0){
580 /* update used sizes: freed the splited frag */
581 /* split frag already adds FRAG_OVERHEAD for the newly created
582 free frag, so here we only need orig_size-f->size for real used
584 qm->real_used-=(orig_size-f->size);
585 qm->used-=(orig_size-f->size);
587 sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
588 sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
592 }else if (f->size < size){
595 MDBG("qm_realloc: growing from %lu to %lu\n", f->size, size);
600 if (((char*)n < (char*)qm->last_frag_end) &&
601 (n->u.is_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
603 qm_detach_free(qm, n);
604 qm->free_hash[GET_HASH(n->size)].no--; /*FIXME: slow*/
605 f->size+=n->size+FRAG_OVERHEAD;
606 qm->real_used-=FRAG_OVERHEAD;
607 FRAG_END(f)->size=f->size;
608 /* end checks should be ok */
609 /* split it if necessary */
610 if (f->size > size ){
612 split_frag(qm, f, size, file, "fragm. from qm_realloc",
615 split_frag(qm, f, size);
618 qm->real_used+=(f->size-orig_size);
619 qm->used+=(f->size-orig_size);
621 sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
622 sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
625 /* could not join => realloc */
627 ptr=qm_malloc(qm, size, file, func, line);
629 ptr=qm_malloc(qm, size);
632 /* copy, need by libssl */
633 memcpy(ptr, p, orig_size);
635 qm_free(qm, p, file, func, line);
645 MDBG("qm_realloc: doing nothing, same size: %lu - %lu\n",
650 MDBG("qm_realloc: returning %p\n", p);
656 void qm_check(struct qm_block* qm)
662 memlog=cfg_get(core, core_cfg, memlog);
663 LOG(memlog, "DEBUG: qm_check()\n");
665 while ((char*)f < (char*)qm->last_frag_end) {
667 /* check struct qm_frag */
669 if (f->check!=ST_CHECK_PATTERN){
670 LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
671 "beginning overwritten(%lx)!\n",
672 f, (char*)f + sizeof(struct qm_frag),
678 if (f + sizeof(struct qm_frag) + f->size + sizeof(struct qm_frag_end) > qm->first_frag + qm->size) {
679 LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
680 "bad size: %lu (frag end: %p > end of block: %p)\n",
681 f, (char*)f + sizeof(struct qm_frag) + sizeof(struct qm_frag_end), f->size,
682 f + sizeof(struct qm_frag) + f->size, qm->first_frag + qm->size);
686 /* check struct qm_frag_end */
687 if (FRAG_END(f)->size != f->size) {
688 LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
689 "size in qm_frag and qm_frag_end does not match: frag->size=%lu, frag_end->size=%lu)\n",
690 f, (char*)f + sizeof(struct qm_frag),
691 f->size, FRAG_END(f)->size);
696 if ((FRAG_END(f)->check1 != END_CHECK_PATTERN1) ||
697 (FRAG_END(f)->check2 != END_CHECK_PATTERN2)) {
698 LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p)"
699 " end overwritten(%lx, %lx)!\n",
700 f, (char*)f + sizeof(struct qm_frag),
701 FRAG_END(f)->check1, FRAG_END(f)->check2);
709 LOG(memlog, "DEBUG: qm_check: %lu fragments OK\n", fcount);
712 void qm_status(struct qm_block* qm)
721 memlog=cfg_get(core, core_cfg, memlog);
722 LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "(%p):\n", qm);
725 LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "heap size= %lu\n",
727 LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
728 "used= %lu, used+overhead=%lu, free=%lu\n",
729 qm->used, qm->real_used, qm->size-qm->real_used);
730 LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
731 "max used (+overhead)= %lu\n", qm->max_real_used);
733 LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
734 "dumping all alloc'ed. fragments:\n");
735 for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f)
738 LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
739 " %3d. %c address=%p frag=%p size=%lu used=%d\n",
741 (f->u.is_free)?'a':'N',
742 (char*)f+sizeof(struct qm_frag), f, f->size, FRAG_WAS_USED(f));
744 LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
745 " %s from %s: %s(%ld)\n",
746 (f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
747 LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
748 " start check=%lx, end check= %lx, %lx\n",
749 f->check, FRAG_END(f)->check1, FRAG_END(f)->check2);
753 LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
754 "dumping free list stats :\n");
755 for(h=0,i=0;h<QM_HASH_SIZE;h++){
757 for (f=qm->free_hash[h].head.u.nxt_free,j=0;
758 f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){
759 if (!FRAG_WAS_USED(f)){
762 LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
763 "unused fragm.: hash = %3d, fragment %p,"
764 " address %p size %lu, created from %s: %s(%lu)\n",
765 h, f, (char*)f+sizeof(struct qm_frag), f->size,
766 f->file, f->func, f->line);
771 if (j) LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
772 "hash= %3d. fragments no.: %5d, unused: %5d\n"
773 "\t\t bucket size: %9lu - %9ld (first %9lu)\n",
774 h, j, unused, UN_HASH(h),
775 ((h<=QM_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h),
776 qm->free_hash[h].head.u.nxt_free->size
778 if (j!=qm->free_hash[h].no){
779 LOG(L_CRIT, "BUG: qm_status: different free frag. count: %d!=%lu"
780 " for hash %3d\n", j, qm->free_hash[h].no, h);
784 LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
785 "-----------------------------\n");
789 /* fills a malloc info structure with info about the block
790 * if a parameter is not supported, it will be filled with 0 */
791 void qm_info(struct qm_block* qm, struct mem_info* info)
797 memset(info,0, sizeof(*info));
798 info->total_size=qm->size;
799 info->min_frag=MIN_FRAG_SIZE;
800 info->free=qm->size-qm->real_used;
802 info->real_used=qm->real_used;
803 info->max_used=qm->max_real_used;
804 for(r=0;r<QM_HASH_SIZE; r++){
805 total_frags+=qm->free_hash[r].no;
807 info->total_frags=total_frags;
811 /* returns how much free memory is available
812 * it never returns an error (unlike fm_available) */
813 unsigned long qm_available(struct qm_block* qm)
815 return qm->size-qm->real_used;
822 typedef struct _mem_counter{
830 struct _mem_counter *next;
833 static mem_counter* get_mem_counter(mem_counter **root, struct qm_frag* f)
836 if (!*root) goto make_new;
837 for(x=*root;x;x=x->next)
838 if (x->file == f->file && x->func == f->func && x->line == f->line)
841 x = malloc(sizeof(mem_counter));
854 void qm_sums(struct qm_block* qm)
858 mem_counter *root, *x;
864 memlog=cfg_get(core, core_cfg, memlog);
865 LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
866 "summarizing all alloc'ed. fragments:\n");
868 for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;
871 x = get_mem_counter(&root,f);
878 LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
879 " count=%6d size=%10lu bytes from %s: %s(%ld)\n",
881 x->file, x->func, x->line
887 LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
888 "-----------------------------\n");
890 #endif /* DBG_QM_MALLOC */