 * Copyright (C) 2001-2003 FhG Fokus
 * This file is part of sip-router, a free SIP server.
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 * ????-??-??  created by andrei
 * 2003-04-14  more debugging added in DBG_QM_MALLOC mode (andrei)
 * 2003-06-29  added qm_realloc (andrei)
 * 2004-07-19  fragments bookkeeping code and support for 64 bits
 *             memory blocks (64 bits machine & size>=2^32) (andrei)
 *             GET_HASH s/</<=/ (avoids waste of 1 hash cell) (andrei)
 * 2004-11-10  support for > 4Gb mem., switched to long (andrei)
 * 2005-03-02  added qm_info() (andrei)
 * 2005-12-12  fixed realloc shrink real_used & used accounting;
 *             fixed initial size (andrei)
 * 2006-02-03  fixed realloc out of mem. free bug (andrei)
 * 2006-04-07  s/DBG/MDBG (andrei)
 * 2007-02-23  added qm_available() (andrei)
 * 2009-09-28  added qm_sums() (patch from Dragos Vingarzan)
#if !defined(q_malloc) && !(defined F_MALLOC)

#include "../dprint.h"
#include "../globals.h"
#include "../cfg/cfg.h" /* memlog */
#include "../events.h"
#define FRAG_END(f) \
	((struct qm_frag_end*)((char*)(f)+sizeof(struct qm_frag)+ \
	(f)->size))
#define FRAG_NEXT(f) \
	((struct qm_frag*)((char*)(f)+sizeof(struct qm_frag)+(f)->size+ \
	sizeof(struct qm_frag_end)))

#define FRAG_PREV(f) \
	( (struct qm_frag*) ( ((char*)(f)-sizeof(struct qm_frag_end))- \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))->size- \
	sizeof(struct qm_frag) ) )

#define PREV_FRAG_END(f) \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))

#define FRAG_OVERHEAD (sizeof(struct qm_frag)+sizeof(struct qm_frag_end))
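
/* Editor's sketch (illustrative, derived from the macros above, not part of
 * the original source): every fragment carries a qm_frag header in front of
 * the user data and a qm_frag_end trailer behind it:
 *
 *   | struct qm_frag | user data: (f)->size bytes | struct qm_frag_end |
 *   ^ f              ^ (char*)f+sizeof(struct qm_frag)   ^ FRAG_END(f)   ^ FRAG_NEXT(f)
 *
 * FRAG_OVERHEAD is the header+trailer cost paid per allocation on top of the
 * user-visible size; FRAG_PREV() walks backwards using the size stored in the
 * previous fragment's trailer.
 */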
#define ROUNDTO_MASK (~((unsigned long)ROUNDTO-1))
#define ROUNDUP(s)   (((s)+(ROUNDTO-1))&ROUNDTO_MASK)
#define ROUNDDOWN(s) ((s)&ROUNDTO_MASK)

/* alternative, modulo-based versions (not compiled in):
#define ROUNDUP(s)   (((s)%ROUNDTO)?((s)+ROUNDTO)/ROUNDTO*ROUNDTO:(s))
#define ROUNDDOWN(s) (((s)%ROUNDTO)?((s)-ROUNDTO)/ROUNDTO*ROUNDTO:(s))
*/
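
/* Editor's sketch (illustrative, not from the original source): assuming
 * ROUNDTO is a power of two, e.g. ROUNDTO==8, ROUNDTO_MASK is ~7UL and the
 * active macros round with plain bit operations:
 *
 *   ROUNDUP(13)   == (13+7) & ~7UL == 20 & ~7UL == 16
 *   ROUNDUP(16)   == (16+7) & ~7UL == 16
 *   ROUNDDOWN(13) ==  13    & ~7UL == 8
 *
 * The modulo-based pair above is an older alternative; only the bit-mask
 * pair is compiled in.
 */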
/* finds the hash value for s, s=ROUNDTO multiple*/
#define GET_HASH(s) ( ((unsigned long)(s)<=QM_MALLOC_OPTIMIZE)?\
		(unsigned long)(s)/ROUNDTO: \
		QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
		QM_MALLOC_OPTIMIZE_FACTOR+1 )
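
/* Editor's sketch (illustrative; ROUNDTO==8, QM_MALLOC_OPTIMIZE==2^14 and
 * QM_MALLOC_OPTIMIZE_FACTOR==14 are assumed values, not taken from this
 * file): sizes up to QM_MALLOC_OPTIMIZE get one exact-size bucket per ROUNDTO
 * step, bigger sizes share one bucket per power of two:
 *
 *   GET_HASH(64)    == 64/8                    == 8     (exact-size bucket)
 *   GET_HASH(16384) == 16384/8                 == 2048  (last exact bucket)
 *   GET_HASH(24576) == 2048 + 14 - 14 + 1      == 2049  (2^14 < s < 2^15)
 *   UN_HASH(2049)   == 1UL << (2049-2048+14-1) == 16384 (start of that range)
 */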
#define UN_HASH(h) ( ((unsigned long)(h)<=(QM_MALLOC_OPTIMIZE/ROUNDTO))?\
		(unsigned long)(h)*ROUNDTO: \
		1UL<<((h)-QM_MALLOC_OPTIMIZE/ROUNDTO+\
			QM_MALLOC_OPTIMIZE_FACTOR-1)\
	)
/* mark/test used/unused frags */
#define FRAG_MARK_USED(f)
#define FRAG_CLEAR_USED(f)
#define FRAG_WAS_USED(f)   (1)
/* other frag related defines */
#define MEM_FRAG_AVOIDANCE
/* computes hash number for big buckets*/
inline static unsigned long big_hash_idx(unsigned long s)
	/* s is rounded => s = k*2^n (ROUNDTO=2^n)
	 * index= i such that 2^i > s >= 2^(i-1)
	 * => index = number of the first non null bit in s*/
	idx=sizeof(long)*8-1;
	for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--);
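
/* Editor's note (illustrative): the loop above locates the most significant
 * set bit of s. With 64-bit longs, big_hash_idx(24576) (s == 0x6000) shifts s
 * left 49 times until bit 63 becomes set, decrementing idx from 63 down to
 * 14, i.e. 2^14 <= 24576 < 2^15.
 */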
#define ST_CHECK_PATTERN   0xf0f0f0f0
#define END_CHECK_PATTERN1 0xc0c0c0c0
#define END_CHECK_PATTERN2 0xabcdefed

static void qm_debug_frag(struct qm_block* qm, struct qm_frag* f)
	if (f->check!=ST_CHECK_PATTERN){
		LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
				"beginning overwritten(%lx)!\n",
				f, (char*)f+sizeof(struct qm_frag),
	if ((FRAG_END(f)->check1!=END_CHECK_PATTERN1)||
		(FRAG_END(f)->check2!=END_CHECK_PATTERN2)){
		LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p)"
				" end overwritten(%lx, %lx)!\n",
				f, (char*)f+sizeof(struct qm_frag),
				FRAG_END(f)->check1, FRAG_END(f)->check2);
	if ((f>qm->first_frag)&&
		((PREV_FRAG_END(f)->check1!=END_CHECK_PATTERN1) ||
		(PREV_FRAG_END(f)->check2!=END_CHECK_PATTERN2) ) ){
		LOG(L_CRIT, "BUG: qm_*: prev. fragm. tail overwritten(%lx, %lx)[%p:%p]!"
				PREV_FRAG_END(f)->check1, PREV_FRAG_END(f)->check2, f,
				(char*)f+sizeof(struct qm_frag));
static inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
	struct qm_frag* prev;

	hash=GET_HASH(frag->size);
	for(f=qm->free_hash[hash].head.u.nxt_free; f!=&(qm->free_hash[hash].head);
		if (frag->size <= f->size) break;

	prev=FRAG_END(f)->prev_free;
	prev->u.nxt_free=frag;
	FRAG_END(frag)->prev_free=prev;
	FRAG_END(f)->prev_free=frag;
	qm->free_hash[hash].no++;
/* init malloc and return a qm_block*/
struct qm_block* qm_malloc_init(char* address, unsigned long size)
	unsigned long init_overhead;

	/* make address and size multiple of 8*/
	start=(char*)ROUNDUP((unsigned long) address);
	DBG("qm_malloc_init: QM_OPTIMIZE=%lu, /ROUNDTO=%lu\n",
			QM_MALLOC_OPTIMIZE, QM_MALLOC_OPTIMIZE/ROUNDTO);
	DBG("qm_malloc_init: QM_HASH_SIZE=%lu, qm_block size=%lu\n",
			QM_HASH_SIZE, (long)sizeof(struct qm_block));
	DBG("qm_malloc_init(%p, %lu), start=%p\n", address, size, start);
	if (size<start-address) return 0;
	size-=(start-address);
	if (size <(MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
	size=ROUNDDOWN(size);

	init_overhead=ROUNDUP(sizeof(struct qm_block))+sizeof(struct qm_frag)+
		sizeof(struct qm_frag_end);
	DBG("qm_malloc_init: size= %lu, init_overhead=%lu\n", size, init_overhead);

	if (size < init_overhead)
		/* not enough mem to create our control structures !!!*/

	qm=(struct qm_block*)start;
	memset(qm, 0, sizeof(struct qm_block));
	qm->real_used=init_overhead;
	qm->max_real_used=qm->real_used;

	qm->first_frag=(struct qm_frag*)(start+ROUNDUP(sizeof(struct qm_block)));
	qm->last_frag_end=(struct qm_frag_end*)(end-sizeof(struct qm_frag_end));
	/* init initial fragment*/
	qm->first_frag->size=size;
	qm->last_frag_end->size=size;

	qm->first_frag->check=ST_CHECK_PATTERN;
	qm->last_frag_end->check1=END_CHECK_PATTERN1;
	qm->last_frag_end->check2=END_CHECK_PATTERN2;

	/* init free_hash* */
	for (h=0; h<QM_HASH_SIZE;h++){
		qm->free_hash[h].head.u.nxt_free=&(qm->free_hash[h].head);
		qm->free_hash[h].tail.prev_free=&(qm->free_hash[h].head);
		qm->free_hash[h].head.size=0;
		qm->free_hash[h].tail.size=0;

	/* link initial fragment into the free list*/
	qm_insert_free(qm, qm->first_frag);
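
/* Editor's sketch of the block layout built above (illustrative):
 *
 *   start                                 first_frag                last_frag_end
 *   | qm_block (ROUNDUP'ed)               | qm_frag | free payload  | qm_frag_end |
 *
 * init_overhead == ROUNDUP(sizeof(struct qm_block)) + FRAG_OVERHEAD, i.e. the
 * control block plus the header and trailer of the single initial fragment;
 * that initial fragment is then handed to qm_insert_free() so the first
 * qm_malloc() can carve allocations out of it.
 */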
	/*qm->first_frag->u.nxt_free=&(qm->free_lst);
	qm->last_frag_end->prev_free=&(qm->free_lst);
	*/
static inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
	struct qm_frag *prev;
	struct qm_frag *next;

	prev=FRAG_END(frag)->prev_free;
	next=frag->u.nxt_free;
	prev->u.nxt_free=next;
	FRAG_END(next)->prev_free=prev;
static inline struct qm_frag* qm_find_free(struct qm_block* qm,
static inline struct qm_frag* qm_find_free(struct qm_block* qm,

	for (hash=GET_HASH(size); hash<QM_HASH_SIZE; hash++){
		for (f=qm->free_hash[hash].head.u.nxt_free;
				f!=&(qm->free_hash[hash].head); f=f->u.nxt_free){
			*count+=1; /* *count++ generates a warning with gcc 2.9* -Wall */
			if (f->size>=size){ *h=hash; return f; }
		/*try in a bigger bucket*/
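
/* Editor's note (illustrative): for sizes up to QM_MALLOC_OPTIMIZE every
 * bucket holds fragments of one exact (rounded) size, so any entry of a
 * non-empty bucket fits; for the larger, power-of-two buckets the list is
 * scanned until f->size >= size. When a bucket has nothing big enough, the
 * outer loop moves on to the next (larger) bucket, and failure is reported
 * only after the last bucket has been tried.
 */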
/* returns 0 on success, -1 on error;
 * new_size < size & rounded-up already!*/
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size,
		const char* file, const char* func, unsigned int line)
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size)
	struct qm_frag_end* end;

	rest=f->size-new_size;
#ifdef MEM_FRAG_AVOIDANCE
	if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+new_size))){/* the residue fragm. is big enough*/
	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
		/*split the fragment*/
		n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
		n->size=rest-FRAG_OVERHEAD;
		FRAG_END(n)->size=n->size;
		FRAG_CLEAR_USED(n); /* never used */
		qm->real_used+=FRAG_OVERHEAD;
		end->check1=END_CHECK_PATTERN1;
		end->check2=END_CHECK_PATTERN2;
		/* frag created by malloc, mark it*/
		n->check=ST_CHECK_PATTERN;
		/* reinsert n in free list*/
		qm_insert_free(qm, n);
	/* we cannot split this fragment any more */
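
/* Editor's sketch (illustrative, sizes already ROUNDTO-aligned): splitting a
 * 256-byte free fragment for a 64-byte request leaves rest == 192. Assuming
 * FRAG_OVERHEAD is well below 128 bytes, rest >= FRAG_OVERHEAD + new_size, so
 * a new fragment n of size rest - FRAG_OVERHEAD is carved out right behind
 * the caller's 64 bytes, the extra header/trailer is charged to real_used,
 * and n goes back on the free list via qm_insert_free(). If rest were too
 * small, the caller would simply keep the whole 256 bytes (the "cannot split
 * any more" branch above).
 */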
void* qm_malloc(struct qm_block* qm, unsigned long size,
		const char* file, const char* func, unsigned int line)
void* qm_malloc(struct qm_block* qm, unsigned long size)
	unsigned int list_cntr;

	MDBG("qm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
	/*size must be a multiple of 8*/
	if (size>(qm->size-qm->real_used)) return 0;

	/*search for a suitable free frag*/
	if ((f=qm_find_free(qm, size, &hash, &list_cntr))!=0){
	if ((f=qm_find_free(qm, size, &hash))!=0){
		/*detach it from the free list*/
		qm_debug_frag(qm, f);
		qm_detach_free(qm, f);
		/*mark it as "busy"*/
		qm->free_hash[hash].no--;
		/* we ignore split return */
		split_frag(qm, f, size, file, "fragm. from qm_malloc", line);
		split_frag(qm, f, size);

		qm->real_used+=f->size;
		if (qm->max_real_used<qm->real_used)
			qm->max_real_used=qm->real_used;
		sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
		sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);

		f->check=ST_CHECK_PATTERN;
		/* FRAG_END(f)->check1=END_CHECK_PATTERN1;
		FRAG_END(f)->check2=END_CHECK_PATTERN2;*/
		MDBG("qm_malloc(%p, %lu) returns address %p frag. %p (size=%lu) on %d"
				qm, size, (char*)f+sizeof(struct qm_frag), f, f->size, list_cntr );
		return (char*)f+sizeof(struct qm_frag);
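
#if 0
/* Editor's usage sketch (never compiled, not part of the allocator): creating
 * a private pool on top of a plain buffer and allocating from it. It assumes
 * the non-DBG_QM_MALLOC prototypes from q_malloc.h; with DBG_QM_MALLOC the
 * calls take additional file/function/line arguments, as seen above. */
static void qm_usage_example(void)
{
	static char pool[256*1024];
	struct qm_block* qm;
	void* p;

	qm=qm_malloc_init(pool, sizeof(pool)); /* returns 0 if the buffer is too small */
	if (qm==0) return;
	p=qm_malloc(qm, 123);                  /* request is rounded up to ROUNDTO */
	if (p) qm_free(qm, p);                 /* fragment is coalesced and re-hashed */
}
#endif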
void qm_free(struct qm_block* qm, void* p, const char* file, const char* func,
void qm_free(struct qm_block* qm, void* p)
	struct qm_frag* prev;
	struct qm_frag* next;

	MDBG("qm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func, line);
	if (p>(void*)qm->last_frag_end || p<(void*)qm->first_frag){
		LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!) - "
		LOG(L_WARN, "WARNING:qm_free: free(0) called\n");

	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
	qm_debug_frag(qm, f);
		LOG(L_CRIT, "BUG: qm_free: freeing already freed pointer,"
				" first free: %s: %s(%ld) - aborting\n",
				f->file, f->func, f->line);
	MDBG("qm_free: freeing frag. %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);

	sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
	sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);

	/* mark this fragment as used (might fall into the middle of joined frags)
	   to give us an extra chance of detecting a double free call (if the joined
	   fragment has not yet been reused) */
	f->u.nxt_free=(void*)0x1L; /* bogus value, just to mark it as free */
	/* join packets if possible*/
	if (((char*)next < (char*)qm->last_frag_end) && (next->u.is_free)){
		qm_debug_frag(qm, next);
		qm_detach_free(qm, next);
		size+=next->size+FRAG_OVERHEAD;
		qm->real_used-=FRAG_OVERHEAD;
		qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */

	if (f > qm->first_frag){
		/* (struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f-
			sizeof(struct qm_frag_end))->size);*/
		qm_debug_frag(qm, prev);
		if (prev->u.is_free){
			qm_detach_free(qm, prev);
			size+=prev->size+FRAG_OVERHEAD;
			qm->real_used-=FRAG_OVERHEAD;
			qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */

	FRAG_END(f)->size=f->size;
#endif /* QM_JOIN_FREE*/
	qm_insert_free(qm, f);
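
/* Editor's note (illustrative): with QM_JOIN_FREE, freeing a fragment whose
 * physical neighbours are also free merges them: each absorbed neighbour is
 * detached from its bucket, its payload plus FRAG_OVERHEAD is added to the
 * size being freed, and real_used drops by FRAG_OVERHEAD because one
 * header/trailer pair disappears. For example, freeing a 64-byte fragment
 * that sits next to a free 64-byte fragment re-inserts a single fragment of
 * 128 + FRAG_OVERHEAD bytes into the (larger) matching bucket.
 */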
void* qm_realloc(struct qm_block* qm, void* p, unsigned long size,
		const char* file, const char* func, unsigned int line)
void* qm_realloc(struct qm_block* qm, void* p, unsigned long size)
	unsigned long orig_size;

	MDBG("qm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
	if ((p)&&(p>(void*)qm->last_frag_end || p<(void*)qm->first_frag)){
		LOG(L_CRIT, "BUG: qm_realloc: bad pointer %p (out of memory block!) - "
		qm_free(qm, p, file, func, line);
		return qm_malloc(qm, size, file, func, line);
		return qm_malloc(qm, size);

	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
	qm_debug_frag(qm, f);
	MDBG("qm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
		LOG(L_CRIT, "BUG:qm_realloc: trying to realloc an already freed "
				"pointer %p, fragment %p -- aborting\n", p, f);

	/* find first acceptable size */
		MDBG("qm_realloc: shrinking from %lu to %lu\n", f->size, size);
		if(split_frag(qm, f, size, file, "fragm. from qm_realloc", line)!=0){
			MDBG("qm_realloc: shrink successful\n");
		if(split_frag(qm, f, size)!=0){
			/* update used sizes: freed the split frag */
			/* split frag already adds FRAG_OVERHEAD for the newly created
			   free frag, so here we only need orig_size-f->size for real used */
			qm->real_used-=(orig_size-f->size);
			qm->used-=(orig_size-f->size);
			sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
			sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
	}else if (f->size < size){
		MDBG("qm_realloc: growing from %lu to %lu\n", f->size, size);
		if (((char*)n < (char*)qm->last_frag_end) &&
				(n->u.is_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
			qm_detach_free(qm, n);
			qm->free_hash[GET_HASH(n->size)].no--; /*FIXME: slow*/
			f->size+=n->size+FRAG_OVERHEAD;
			qm->real_used-=FRAG_OVERHEAD;
			FRAG_END(f)->size=f->size;
			/* end checks should be ok */
			/* split it if necessary */
			if (f->size > size){
				split_frag(qm, f, size, file, "fragm. from qm_realloc",
				split_frag(qm, f, size);
			qm->real_used+=(f->size-orig_size);
			qm->used+=(f->size-orig_size);
			sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
			sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
		/* could not join => realloc */
			ptr=qm_malloc(qm, size, file, func, line);
			ptr=qm_malloc(qm, size);
				/* copy, needed by libssl */
				memcpy(ptr, p, orig_size);
				qm_free(qm, p, file, func, line);
		MDBG("qm_realloc: doing nothing, same size: %lu - %lu\n",
	MDBG("qm_realloc: returning %p\n", p);
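
/* Editor's summary (illustrative): qm_realloc() takes one of three paths.
 * Shrinking splits the existing fragment and returns the tail to the free
 * list; growing first tries to absorb a free neighbouring fragment in place,
 * splitting off any excess; otherwise it falls back to qm_malloc() of the new
 * size, memcpy() of the old payload and qm_free() of the old pointer. A zero
 * size is handled as a free and a null pointer as a plain allocation (see the
 * qm_free()/qm_malloc() calls near the top).
 */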
void qm_check(struct qm_block* qm)
	memlog=cfg_get(core, core_cfg, memlog);
	LOG(memlog, "DEBUG: qm_check()\n");
	while ((char*)f < (char*)qm->last_frag_end) {
		/* check struct qm_frag */
		if (f->check!=ST_CHECK_PATTERN){
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
					"beginning overwritten(%lx)!\n",
					f, (char*)f + sizeof(struct qm_frag),
		if ((char*)f + sizeof(struct qm_frag) + f->size + sizeof(struct qm_frag_end)
				> (char*)qm->first_frag + qm->size) {
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
					"bad size: %lu (frag end: %p > end of block: %p)\n",
					f, (char*)f + sizeof(struct qm_frag), f->size,
					(char*)f + sizeof(struct qm_frag) + f->size + sizeof(struct qm_frag_end),
					(char*)qm->first_frag + qm->size);
		/* check struct qm_frag_end */
		if (FRAG_END(f)->size != f->size) {
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
					"size in qm_frag and qm_frag_end does not match: frag->size=%lu, frag_end->size=%lu\n",
					f, (char*)f + sizeof(struct qm_frag),
					f->size, FRAG_END(f)->size);
		if ((FRAG_END(f)->check1 != END_CHECK_PATTERN1) ||
			(FRAG_END(f)->check2 != END_CHECK_PATTERN2)) {
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p)"
					" end overwritten(%lx, %lx)!\n",
					f, (char*)f + sizeof(struct qm_frag),
					FRAG_END(f)->check1, FRAG_END(f)->check2);
	LOG(memlog, "DEBUG: qm_check: %lu fragments OK\n", fcount);
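
/* Editor's note (illustrative): qm_check() walks every fragment from
 * first_frag to last_frag_end and verifies the start/end canary patterns,
 * that the header and trailer agree on the size, and that the fragment does
 * not extend past the end of the block; any mismatch is reported as a BUG at
 * L_CRIT. It is meant as an on-demand consistency sweep for debug builds.
 */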
void qm_status(struct qm_block* qm)
	memlog=cfg_get(core, core_cfg, memlog);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "(%p):\n", qm);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "heap size= %lu\n",
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"used= %lu, used+overhead=%lu, free=%lu\n",
			qm->used, qm->real_used, qm->size-qm->real_used);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"max used (+overhead)= %lu\n", qm->max_real_used);

	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"dumping all alloc'ed. fragments:\n");
	for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f)
		LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
				" %3d. %c address=%p frag=%p size=%lu used=%d\n",
				(f->u.is_free)?'a':'N',
				(char*)f+sizeof(struct qm_frag), f, f->size, FRAG_WAS_USED(f));
		LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
				" %s from %s: %s(%ld)\n",
				(f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
		LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
				" start check=%lx, end check= %lx, %lx\n",
				f->check, FRAG_END(f)->check1, FRAG_END(f)->check2);

	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"dumping free list stats :\n");
	for(h=0,i=0;h<QM_HASH_SIZE;h++){
		for (f=qm->free_hash[h].head.u.nxt_free,j=0;
				f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){
			if (!FRAG_WAS_USED(f)){
				LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
						"unused fragm.: hash = %3d, fragment %p,"
						" address %p size %lu, created from %s: %s(%lu)\n",
						h, f, (char*)f+sizeof(struct qm_frag), f->size,
						f->file, f->func, f->line);

		if (j) LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
				"hash= %3d. fragments no.: %5d, unused: %5d\n"
				"\t\t bucket size: %9lu - %9ld (first %9lu)\n",
				h, j, unused, UN_HASH(h),
				((h<=QM_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h),
				qm->free_hash[h].head.u.nxt_free->size
		if (j!=qm->free_hash[h].no){
			LOG(L_CRIT, "BUG: qm_status: different free frag. count: %d!=%lu"
					" for hash %3d\n", j, qm->free_hash[h].no, h);

	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"-----------------------------\n");
/* fills a malloc info structure with info about the block
 * if a parameter is not supported, it will be filled with 0 */
void qm_info(struct qm_block* qm, struct mem_info* info)
	memset(info,0, sizeof(*info));
	info->total_size=qm->size;
	info->min_frag=MIN_FRAG_SIZE;
	info->free=qm->size-qm->real_used;
	info->real_used=qm->real_used;
	info->max_used=qm->max_real_used;
	for(r=0;r<QM_HASH_SIZE; r++){
		total_frags+=qm->free_hash[r].no;
	info->total_frags=total_frags;

/* returns how much free memory is available
 * it never returns an error (unlike fm_available) */
unsigned long qm_available(struct qm_block* qm)
	return qm->size-qm->real_used;
typedef struct _mem_counter{
	struct _mem_counter *next;

static mem_counter* get_mem_counter(mem_counter **root, struct qm_frag* f)
	if (!*root) goto make_new;
	for(x=*root;x;x=x->next)
		if (x->file == f->file && x->func == f->func && x->line == f->line)
	x = malloc(sizeof(mem_counter));

void qm_sums(struct qm_block* qm)
	mem_counter *root, *x;

	memlog=cfg_get(core, core_cfg, memlog);
	LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
			"summarizing all alloc'ed. fragments:\n");
	for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;
		x = get_mem_counter(&root,f);
		LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
				" count=%6d size=%10lu bytes from %s: %s(%ld)\n",
				x->file, x->func, x->line
	LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
			"-----------------------------\n");
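
/* Editor's note (illustrative): qm_sums() groups all live fragments by their
 * recorded allocation site (file, function and line, available under
 * DBG_QM_MALLOC) using the temporary mem_counter list above, and logs one
 * count/size line per site. Comparing two such dumps taken at different
 * times is a cheap way to spot the call sites responsible for a leak.
 */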
#endif /* DBG_QM_MALLOC */