 * Copyright (C) 2010 iptelorg GmbH
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 * 2010-08-06  initial version (andrei)
 * 2010-08-24  counters can be used (inc, add) before prefork_init (andrei)
#include "compiler_opt.h"
#include "mem/shm_mem.h"
/* counter hash table size */
#define CNT_HASH_SIZE 64
/* group hash size (rpc use) */
#define GRP_HASH_SIZE 16
/* initial sorted groups array size (rpc use) */
#define GRP_SORTED_SIZE 16
/* initial counter id to record array size */
#define CNT_ID2RECORD_SIZE 64

#define CACHELINE_PAD 128

/* leave space for one flag */
#define MAX_COUNTER_ID 32767
/* size (number of entries) of the temporary array used for keeping stats
   before the prefork init. Note: if more counters are registered than this
   size, the array will be dynamically increased (doubled each time). The
   value here is meant only to optimize startup/memory fragmentation. */
#define PREINIT_CNTS_VALS_SIZE 128
struct counter_record {
	str group;
	str name;
	counter_handle_t h;
	int flags;
	void* cbk_param;
	counter_cbk_f cbk;
	struct counter_record* grp_next; /* next counter in the same group */
	str doc;
};


struct grp_record {
	str group;
	struct counter_record* first; /* head of the group's counter list */
};
/** hash table mapping a counter name to an id */
static struct str_hash_table cnts_hash_table;
/** array mapping id to record */
struct counter_record** cnt_id2record;
static int cnt_id2record_size;
/** hash table for groups (maps a group name to a counter list) */
static struct str_hash_table grp_hash_table;
/** array of groups, sorted */
static struct grp_record** grp_sorted;
static int grp_sorted_max_size;
static int grp_sorted_crt_size;
static int grp_no; /* number of groups */

/** counters array: a[proc_no][counter_id] =>
 * _cnts_vals[proc_no * cnts_no + counter_id] */
counter_array_t* _cnts_vals;
int _cnts_row_len; /* number of elements per row */
static int cnts_no; /* number of registered counters */
static int cnts_max_rows; /* set to 0 if not yet fully initialized */
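
/* Layout sketch (illustrative, assuming the counter_pprocess_val() accessor
 * from counters.h that is used below): each process owns one padded row of
 * _cnts_vals, so per-process updates never share cache lines with another
 * process. Accessing counter h for process pno amounts, roughly, to:
 *
 *   counter_pprocess_val(pno, h)  ~  _cnts_vals[pno * _cnts_row_len + h.id]
 *
 * where _cnts_row_len is cnts_no rounded up so that every row covers a whole
 * number of CACHELINE_PAD sized blocks (see counters_prefork_init() below).
 */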
/** init the counter hash table(s).
 * @return 0 on success, -1 on error.
 */
	if (str_hash_alloc(&cnts_hash_table, CNT_HASH_SIZE) < 0)
		goto error;
	str_hash_init(&cnts_hash_table);
	if (str_hash_alloc(&grp_hash_table, GRP_HASH_SIZE) < 0)
		goto error;
	str_hash_init(&grp_hash_table);
	cnts_no = 1; /* start at 1 (0 is used only for invalid counters) */
	cnts_max_rows = 0; /* 0 initially, != 0 after full init
						  (counters_prefork_init()) */
	cnt_id2record_size = CNT_ID2RECORD_SIZE;
	cnt_id2record = pkg_malloc(sizeof(*cnt_id2record) * cnt_id2record_size);
	if (cnt_id2record == 0)
		goto error;
	memset(cnt_id2record, 0, sizeof(*cnt_id2record) * cnt_id2record_size);
	grp_sorted_max_size = GRP_SORTED_SIZE;
	grp_sorted_crt_size = 0;
	grp_sorted = pkg_malloc(sizeof(*grp_sorted) * grp_sorted_max_size);
	memset(grp_sorted, 0, sizeof(*grp_sorted) * grp_sorted_max_size);
void destroy_counters()
{
	struct str_hash_entry* e;
	struct str_hash_entry* bak;

	if (cnts_max_rows)
		/* fully init => it is in shm */
		shm_free(_cnts_vals);
	else
		/* partially init (before prefork) => pkg */
		pkg_free(_cnts_vals);
	if (cnts_hash_table.table) {
		for (r = 0; r < cnts_hash_table.size; r++) {
			clist_foreach_safe(&cnts_hash_table.table[r], e, bak, next) {
		pkg_free(cnts_hash_table.table);
	if (grp_hash_table.table) {
		for (r = 0; r < grp_hash_table.size; r++) {
			clist_foreach_safe(&grp_hash_table.table[r], e, bak, next) {
		pkg_free(grp_hash_table.table);
	pkg_free(cnt_id2record);
	pkg_free(grp_sorted);
	cnts_hash_table.table = 0;
	cnts_hash_table.size = 0;
	grp_hash_table.table = 0;
	grp_hash_table.size = 0;
	grp_sorted_crt_size = 0;
	grp_sorted_max_size = 0;
/** complete counter initialization, when the number of processes is known.
 * shm must be available.
 * @return 0 on success, < 0 on error.
 */
int counters_prefork_init(int max_process_no)
{
	counter_array_t* old;

	/* round cnts_no so that cnts_no * sizeof(counter) is a CACHELINE_PAD
	   multiple */
	/* round-up row_size to a CACHELINE_PAD multiple if needed */
	row_size = ((sizeof(*_cnts_vals) * cnts_no - 1) / CACHELINE_PAD + 1) *
				CACHELINE_PAD;
	/* round-up the resulting row_size to a sizeof(*_cnts_vals) multiple */
	row_size = ((row_size - 1) / sizeof(*_cnts_vals) + 1) *
				sizeof(*_cnts_vals);
	/* get the updated cnts_no (row length) */
	_cnts_row_len = row_size / sizeof(*_cnts_vals);
	size = max_process_no * row_size;
	/* replace the temporary pre-fork pkg array (with only 1 row) with
	   the final shm version (with max_process_no rows) */
	_cnts_vals = shm_malloc(max_process_no * row_size);
	memset(_cnts_vals, 0, max_process_no * row_size);
	cnts_max_rows = max_process_no;
	/* copy the prefork values into the new shm array */
	for (h.id = 0; h.id < cnts_no; h.id++)
		counter_pprocess_val(process_no, h) = old[h.id].v;
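
/* Worked example of the rounding above (illustrative, assuming an 8 byte
 * counter slot): with cnts_no == 10 the raw row is 80 bytes; rounding up to
 * CACHELINE_PAD (128) gives row_size == 128, which is already a multiple of
 * 8, so _cnts_row_len becomes 16 and every process row occupies exactly one
 * 128 byte padded block. */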
/** adds a new group to the group hash table (no checks, internal version).
 * @return pointer to the new group record on success, 0 on error.
 */
static struct grp_record* grp_hash_add(str* group)
{
	struct str_hash_entry* g;
	struct grp_record* grp_rec;
	struct grp_record** r;

	/* grp_rec is copied at &g->u.data */
	g = pkg_malloc(sizeof(struct str_hash_entry) - sizeof(g->u.data) +
					sizeof(*grp_rec) + group->len + 1);
	grp_rec = (struct grp_record*)&g->u.data[0];
	grp_rec->group.s = (char*)(grp_rec + 1);
	grp_rec->group.len = group->len;
	memcpy(grp_rec->group.s, group->s, group->len + 1);
	g->key = grp_rec->group;
	/* insert the group into the sorted group array */
	if (grp_sorted_max_size <= grp_sorted_crt_size) {
		/* must increase the array */
		r = pkg_realloc(grp_sorted, 2 * grp_sorted_max_size *
						sizeof(*grp_sorted));
		grp_sorted_max_size *= 2;
		memset(&grp_sorted[grp_sorted_crt_size], 0,
				(grp_sorted_max_size - grp_sorted_crt_size) *
				sizeof(*grp_sorted));
	}
	for (r = grp_sorted; r < (grp_sorted + grp_sorted_crt_size); r++)
		if (strcmp(grp_rec->group.s, (*r)->group.s) < 0)
			break;
	if (r != (grp_sorted + grp_sorted_crt_size))
		memmove(r + 1, r,
				(int)(long)((char*)(grp_sorted + grp_sorted_crt_size) -
							(char*)r));
	grp_sorted_crt_size++;
	/* insert into the hash only on success */
	str_hash_add(&grp_hash_table, g);
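
/* Ordered-insert example (illustrative group names): if grp_sorted currently
 * holds {"core", "tcp"} and "sctp" is added, the strcmp() scan stops at
 * "tcp", the memmove() shifts "tcp" one slot up and the array becomes
 * {"core", "sctp", "tcp"}, so RPC listings stay sorted without a separate
 * sort pass. */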
/** look up a group in the group hash (internal version).
 * @return pointer to the grp_record on success, 0 on failure (not found).
 */
static struct grp_record* grp_hash_lookup(str* group)
{
	struct str_hash_entry* e;

	e = str_hash_get(&grp_hash_table, group->s, group->len);
	return e ? (struct grp_record*)&e->u.data[0] : 0;
/** look up a group and, if not found, create a new group record.
 * @return pointer to the grp_record on success, 0 on failure (not found and
 * failed to create a new group record).
 */
static struct grp_record* grp_hash_get_create(str* group)
{
	struct grp_record* ret;

	ret = grp_hash_lookup(group);
	if (ret)
		return ret;
	return grp_hash_add(group);
/** adds a new counter to the hash table (no checks, internal version).
 * @return pointer to the new record on success, 0 on error.
 */
static struct counter_record* cnt_hash_add(
					str* group, str* name,
					int flags, counter_cbk_f cbk,
					void* param, const char* doc)
{
	struct str_hash_entry* e;
	struct counter_record* cnt_rec;
	struct grp_record* grp_rec;
	struct counter_record** p;
	counter_array_t* v;
	int doc_len;
	int n;

	if (cnts_no >= MAX_COUNTER_ID)
		/* too many counters */
		goto error;
	grp_rec = grp_hash_get_create(group);
	if (grp_rec == 0)
		/* non-existing group and no new one could be created */
		goto error;
	doc_len = doc ? strlen(doc) : 0;
	/* cnt_rec is copied at &e->u.data[0] */
	e = pkg_malloc(sizeof(struct str_hash_entry) - sizeof(e->u.data) +
					sizeof(*cnt_rec) + name->len + 1 + group->len + 1 +
					doc_len + 1);
	cnt_rec = (struct counter_record*)&e->u.data[0];
	cnt_rec->group.s = (char*)(cnt_rec + 1);
	cnt_rec->group.len = group->len;
	cnt_rec->name.s = cnt_rec->group.s + group->len + 1;
	cnt_rec->name.len = name->len;
	cnt_rec->doc.s = cnt_rec->name.s + name->len + 1;
	cnt_rec->doc.len = doc_len;
	cnt_rec->h.id = cnts_no++;
	cnt_rec->flags = flags;
	cnt_rec->cbk_param = param;
	cnt_rec->grp_next = 0;
	memcpy(cnt_rec->group.s, group->s, group->len + 1);
	memcpy(cnt_rec->name.s, name->s, name->len + 1);
	if (doc)
		memcpy(cnt_rec->doc.s, doc, doc_len + 1);
	else
		cnt_rec->doc.s[0] = 0;
	e->key = cnt_rec->name;

	/* check to see if it fits in the prefork tmp. vals array.
	   This array contains only one "row", is allocated in pkg and
	   is used only until counters_prefork_init() (after that the
	   array is replaced with a shm version with all the needed rows). */
	if (cnt_rec->h.id >= _cnts_row_len || _cnts_vals == 0) {
		/* array too small or not yet allocated => reallocate/allocate it
		   (min size PREINIT_CNTS_VALS_SIZE, max MAX_COUNTER_ID) */
		n = (cnt_rec->h.id < PREINIT_CNTS_VALS_SIZE) ?
				PREINIT_CNTS_VALS_SIZE :
				((2 * (cnt_rec->h.id + (cnt_rec->h.id == 0)) < MAX_COUNTER_ID) ?
					(2 * (cnt_rec->h.id + (cnt_rec->h.id == 0))) :
					MAX_COUNTER_ID);
		v = pkg_realloc(_cnts_vals, n * sizeof(*_cnts_vals));
		if (v == 0)
			/* realloc/malloc error */
			goto error;
		_cnts_vals = v;
		/* zero the newly allocated memory */
		memset(&_cnts_vals[_cnts_row_len], 0,
				(n - _cnts_row_len) * sizeof(*_cnts_vals));
		_cnts_row_len = n; /* record the new length */
	}
	/* add a pointer to it in the records array */
	if (cnt_id2record_size <= cnt_rec->h.id) {
		/* must increase the array */
		p = pkg_realloc(cnt_id2record,
						2 * cnt_id2record_size * sizeof(*cnt_id2record));
		cnt_id2record_size *= 2;
		memset(&cnt_id2record[cnt_rec->h.id], 0,
				(cnt_id2record_size - cnt_rec->h.id) * sizeof(*cnt_id2record));
	}
	cnt_id2record[cnt_rec->h.id] = cnt_rec;
	/* add into the hash */
	str_hash_add(&cnts_hash_table, e);
	/* insert it sorted in the per-group list */
	for (p = &grp_rec->first; *p; p = &((*p)->grp_next))
		if (strcmp(cnt_rec->name.s, (*p)->name.s) < 0)
			break;
	cnt_rec->grp_next = *p;
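
/* Allocation layout of a counter entry (illustrative strings): the single
 * pkg block allocated above holds the hash entry, the counter record and the
 * three strings back to back, e.g. for group "tcp", name "established" and
 * doc "TCP connects":
 *
 *   [str_hash_entry][counter_record]["tcp\0"]["established\0"]["TCP connects\0"]
 *
 * so freeing the hash entry releases everything at once. */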
/** look up a (group, name) pair in the counters hash (internal version).
 * @param group - counter group name. If "" the first matching counter with
 *                the given name will be returned (k compat).
 * @param name - counter name.
 * @return pointer to the counter_record on success, 0 on failure (not found).
 */
static struct counter_record* cnt_hash_lookup(str* group, str* name)
{
	struct str_hash_entry* e;
	struct str_hash_entry* first;
	struct counter_record* cnt_rec;

	e = str_hash_get(&cnts_hash_table, name->s, name->len);
	cnt_rec = (struct counter_record*)&e->u.data[0];
	if (likely(group->len == 0 ||
				(cnt_rec->group.len == group->len &&
				 memcmp(cnt_rec->group.s, group->s, group->len) == 0)))
		return cnt_rec;
	/* search between records with the same name, but different groups */
		cnt_rec = (struct counter_record*)&e->u.data[0];
		if (cnt_rec->group.len == group->len &&
			cnt_rec->name.len == name->len &&
			memcmp(cnt_rec->group.s, group->s, group->len) == 0 &&
			memcmp(cnt_rec->name.s, name->s, name->len) == 0)
			return cnt_rec;
/** look up a counter and, if not found, create a new counter record.
 * @return pointer to the counter_record on success, 0 on failure (not found
 * and failed to create a new counter record).
 */
static struct counter_record* cnt_hash_get_create(
					str* group, str* name,
					int flags, counter_cbk_f cbk,
					void* param, const char* doc)
{
	struct counter_record* ret;

	ret = cnt_hash_lookup(group, name);
	if (ret)
		return ret;
	return cnt_hash_add(group, name, flags, cbk, param, doc);
/** register a new counter.
 * Can be called only before forking (e.g. from mod_init() or
 * init_child(PROC_INIT)).
 * @param handle - result parameter, it will be filled with the counter
 *                 handle on success (can be null if not needed).
 * @param group - group name.
 * @param name - counter name (group.name must be unique).
 * @param flags - counter flags: one of CNT_F_*.
 * @param cbk - read callback function (if set it will be called each time
 *              someone calls counter_get()).
 * @param cbk_param - callback param.
 * @param doc - description/documentation string.
 * @param reg_flags - register flags: 1 - don't fail if the counter is
 *                    already registered (act like
 *                    counter_lookup(handle, group, name)).
 * @return 0 on success, < 0 on error (-1 not init or malloc error, -2 already
 *         registered (and reg_flags & 1 == 0)).
 */
int counter_register(counter_handle_t* handle, const char* group,
						const char* name, int flags,
						counter_cbk_f cbk, void* cbk_param,
						const char* doc, int reg_flags)
{
	struct counter_record* cnt_rec;

	if (unlikely(cnts_max_rows)) {
		BUG("late attempt to register counter: %s.%s\n", group, name);
	n.len = strlen(name);
	if (unlikely(group == 0 || *group == 0)) {
		BUG("attempt to register counter %s without a group\n", name);
	grp.s = (char*)group;
	grp.len = strlen(group);
	cnt_rec = cnt_hash_lookup(&grp, &n);
	if (handle) handle->id = 0;
	cnt_rec = cnt_hash_get_create(&grp, &n, flags, cbk, cbk_param, doc);
	if (unlikely(cnt_rec == 0))
		goto error;
	if (handle) *handle = cnt_rec->h;
	return 0;
error:
	if (handle) handle->id = 0;
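
/* Usage sketch (illustrative names, assuming the counter_inc()/counter_add()
 * update macros from counters.h): a module would typically register its
 * counters from mod_init(), before forking, and update them at runtime:
 *
 *   counter_handle_t foo_h;
 *
 *   if (counter_register(&foo_h, "mymod", "foo_events", 0, 0, 0,
 *                        "number of foo events seen", 0) < 0)
 *       return -1;
 *   ...
 *   counter_inc(foo_h);       (per-process, lock-less update)
 *   counter_add(foo_h, 10);
 */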
/** fill in the handle of an existing counter (str parameters).
 * @param handle - filled with the corresponding handle on success.
 * @param group - counter group name. If "" the first matching
 *                counter with the given name will be returned.
 * @param name - counter name.
 * @return 0 on success, < 0 on error.
 */
int counter_lookup_str(counter_handle_t* handle, str* group, str* name)
{
	struct counter_record* cnt_rec;

	cnt_rec = cnt_hash_lookup(group, name);
	if (likely(cnt_rec)) {
		*handle = cnt_rec->h;
/** fill in the handle of an existing counter (asciiz parameters).
 * @param handle - filled with the corresponding handle on success.
 * @param group - counter group name. If 0 or "" the first matching
 *                counter with the given name will be returned.
 * @param name - counter name.
 * @return 0 on success, < 0 on error.
 */
int counter_lookup(counter_handle_t* handle,
					const char* group, const char* name)
{
	n.len = strlen(name);
	grp.s = (char*)group;
	grp.len = group ? strlen(group) : 0;
	return counter_lookup_str(handle, &grp, &n);
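
/* Lookup sketch (illustrative names): an already registered counter can be
 * resolved later by group and name:
 *
 *   counter_handle_t h;
 *
 *   if (counter_lookup(&h, "mymod", "foo_events") < 0)
 *       return -1;    (no such counter)
 */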
/** register all the counters declared in a null-terminated array.
 * @param group - counters group.
 * @param defs - null-terminated array containing the counter definitions.
 * @return 0 on success, < 0 on error (-(index of the failed counter + 1)).
 */
int counter_register_array(const char* group, counter_def_t* defs)
{
	for (r = 0; defs[r].name; r++)
		if (counter_register(defs[r].handle,
					group, defs[r].name, defs[r].flags,
					defs[r].get_cbk, defs[r].get_cbk_param,
					defs[r].descr, 0) < 0)
			return -(r + 1); /* return - (idx of the bad counter + 1) */
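
/* Array registration sketch (illustrative definitions; the field names are
 * the ones used in the loop above): the array must end with an entry whose
 * name is 0:
 *
 *   static counter_handle_t foo_h, bar_h;
 *   static counter_def_t mymod_cnt_defs[] = {
 *       {.handle = &foo_h, .name = "foo_events", .descr = "foo events"},
 *       {.handle = &bar_h, .name = "bar_events", .descr = "bar events"},
 *       {0}
 *   };
 *
 *   if (counter_register_array("mymod", mymod_cnt_defs) < 0)
 *       return -1;
 */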
/** get the value of the counter, bypassing callbacks.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return counter value.
 */
counter_val_t counter_get_raw_val(counter_handle_t handle)
{
	if (unlikely(_cnts_vals == 0)) {
		BUG("counters not fully initialized yet\n");
	if (unlikely(handle.id >= cnts_no || (short)handle.id < 0)) {
		BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
	for (r = 0; r < cnts_max_rows; r++)
		ret += counter_pprocess_val(r, handle);
/** get the value of the counter, using the callbacks (if defined).
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return counter value. */
counter_val_t counter_get_val(counter_handle_t handle)
{
	struct counter_record* cnt_rec;

	if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
		BUG("counters not fully initialized yet\n");
	cnt_rec = cnt_id2record[handle.id];
	if (unlikely(cnt_rec->cbk))
		return cnt_rec->cbk(handle, cnt_rec->cbk_param);
	return counter_get_raw_val(handle);
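
/* Read-out sketch (illustrative names): counter_get_val() aggregates the
 * per-process rows (or asks the registered callback), so a monitoring hook
 * could do:
 *
 *   counter_handle_t h;
 *   counter_val_t v;
 *
 *   if (counter_lookup(&h, "mymod", "foo_events") == 0)
 *       v = counter_get_val(h);    (summed over all process rows)
 */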
/** reset the counter.
 * Reset a counter, unless it has the CNT_F_NO_RESET flag set.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 */
void counter_reset(counter_handle_t handle)
{
	if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
		BUG("counters not fully initialized yet\n");
	if (unlikely(handle.id >= cnts_no)) {
		BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
	if (unlikely(cnt_id2record[handle.id]->flags & CNT_F_NO_RESET))
		return;
	for (r = 0; r < cnts_max_rows; r++)
		counter_pprocess_val(r, handle) = 0;
/** return the name for a counter handle.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return asciiz pointer on success, 0 on error.
 */
char* counter_get_name(counter_handle_t handle)
{
	if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
		BUG("counters not fully initialized yet\n");
	if (unlikely(handle.id >= cnts_no)) {
		BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
	return cnt_id2record[handle.id]->name.s;


/** return the group name for a counter handle.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return asciiz pointer on success, 0 on error.
 */
char* counter_get_group(counter_handle_t handle)
{
	if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
		BUG("counters not fully initialized yet\n");
	if (unlikely(handle.id >= cnts_no)) {
		BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
	return cnt_id2record[handle.id]->group.s;


/** return the description (doc) string for a given counter.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return asciiz pointer on success, 0 on error.
 */
char* counter_get_doc(counter_handle_t handle)
{
	if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
		BUG("counters not fully initialized yet\n");
	if (unlikely(handle.id >= cnts_no)) {
		BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
	return cnt_id2record[handle.id]->doc.s;
/** iterate over all the counter group names.
 * @param cbk - pointer to a callback function that will be called for each
 *              group name.
 * @param p - parameter that will be passed to the callback function
 *            (along with the group name).
 */
void counter_iterate_grp_names(void (*cbk)(void* p, str* grp_name), void* p)
{
	for (r = 0; r < grp_sorted_crt_size; r++)
		cbk(p, &grp_sorted[r]->group);
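
/* Iteration sketch (illustrative callback): dumping all group names, e.g.
 * from an RPC handler:
 *
 *   static void dump_grp(void* p, str* grp_name)
 *   {
 *       ... use grp_name->len bytes at grp_name->s ...
 *   }
 *
 *   counter_iterate_grp_names(dump_grp, 0);
 */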
/** iterate over all the variable names in a specified group.
 * @param group - group name.
 * @param cbk - pointer to a callback function that will be called for each
 *              variable name.
 * @param p - parameter that will be passed to the callback function
 *            (along with the variable name).
 */
void counter_iterate_grp_var_names(const char* group,
									void (*cbk)(void* p, str* var_name),
									void* p)
{
	struct counter_record* r;
	struct grp_record* g;

	grp.s = (char*)group;
	grp.len = strlen(group);
	g = grp_hash_lookup(&grp);
	for (r = g->first; r; r = r->grp_next)
		cbk(p, &r->name);
/** iterate over all the variable names and handles in a specified group.
 * @param group - group name.
 * @param cbk - pointer to a callback function that will be called for each
 *              [variable name, variable handle] pair.
 * @param p - parameter that will be passed to the callback function
 *            (along with the group name, variable name and variable handle).
 */
void counter_iterate_grp_vars(const char* group,
								void (*cbk)(void* p, str* g, str* n,
											counter_handle_t h),
								void* p)
{
	struct counter_record* r;
	struct grp_record* g;

	grp.s = (char*)group;
	grp.len = strlen(group);
	g = grp_hash_lookup(&grp);
	for (r = g->first; r; r = r->grp_next)
		cbk(p, &r->group, &r->name, r->h);
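
/* Iteration sketch (illustrative callback): walking one group and reading
 * each counter through its handle:
 *
 *   static void dump_var(void* p, str* g, str* n, counter_handle_t h)
 *   {
 *       counter_val_t v = counter_get_val(h);
 *       ... report g, n and v ...
 *   }
 *
 *   counter_iterate_grp_vars("mymod", dump_var, 0);
 */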
/* vi: set ts=4 sw=4 tw=79:ai:cindent: */