FAT bug_fix: An unfortunately big fix of a bug which has not been
author Jiri Kuthan <jiri@iptel.org>
Wed, 28 Aug 2002 21:24:28 +0000
committer Jiri Kuthan <jiri@iptel.org>
Wed, 28 Aug 2002 21:24:28 +0000
understood, unfortunately :-( . Suddenly, TM started segfaulting on
start-up. In gdb, there was a lot of confusion between automatic and
static variables -- funny thing, p1==p2 but *p1!=*p2. I restructured
TM again, made the non-automatic variables in question (hash_table)
static, and split the table into a timer table and a transaction
table, each in a separate file.
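
Below is a minimal standalone sketch (not SER code) of the layout the module
moves to here: the transaction table becomes a file-private static, and the
rest of the code reaches it only through an accessor and per-hash-slot lock
wrappers instead of receiving a struct s_table* argument. The struct and field
names mirror the diff (s_table, entrys); the pthread mutex and the main()
driver are illustrative assumptions only.

    /* sketch of the "static table + accessor + slot locks" pattern;
       pthread mutexes stand in for SER's ser_lock_t */
    #include <pthread.h>
    #include <stdio.h>

    #define TABLE_ENTRIES 16

    struct entry {
            pthread_mutex_t mutex;
            unsigned int    entries;   /* cells hanging off this slot */
    };

    struct s_table {
            struct entry entrys[TABLE_ENTRIES];
    };

    /* the single, file-private instance -- the analogue of tm_table */
    static struct s_table table;

    struct s_table *get_table(void) { return &table; }

    void lock_hash_slot(int i)   { pthread_mutex_lock(&table.entrys[i].mutex); }
    void unlock_hash_slot(int i) { pthread_mutex_unlock(&table.entrys[i].mutex); }

    int main(void)
    {
            int i;

            for (i = 0; i < TABLE_ENTRIES; i++)
                    pthread_mutex_init(&table.entrys[i].mutex, NULL);

            /* callers no longer pass the table around: lock a slot, work, unlock */
            lock_hash_slot(3);
            get_table()->entrys[3].entries++;
            unlock_hash_slot(3);

            printf("slot 3 holds %u cell(s)\n", get_table()->entrys[3].entries);
            return 0;
    }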

15 files changed:
modules/tm/h_table.c
modules/tm/h_table.h
modules/tm/lock.c
modules/tm/lock.h
modules/tm/t_funcs.c
modules/tm/t_funcs.h
modules/tm/t_fwd.c
modules/tm/t_lookup.c
modules/tm/t_lookup.h
modules/tm/t_reply.c
modules/tm/t_thandlers.c [deleted file]
modules/tm/timer.c
modules/tm/timer.h
modules/tm/tm.c
modules/tm/uac.c

diff --git a/modules/tm/h_table.c b/modules/tm/h_table.c
index aaf49c5..2b7cc22 100644
 #include "t_cancel.h"
 #include "t_stats.h"
 
+/* pointer to the big table where all the transaction data
+   lives
+*/
+
+static struct s_table*  tm_table;
+
+void lock_hash(int i) 
+{
+       lock(&tm_table->entrys[i].mutex);
+}
+
+void unlock_hash(int i) 
+{
+       unlock(&tm_table->entrys[i].mutex);
+}
+
+
+struct s_table* get_tm_table()
+{
+       return tm_table;
+}
+
+
 unsigned int transaction_count( void )
 {
        unsigned int i;
@@ -22,7 +45,7 @@ unsigned int transaction_count( void )
 
        count=0;        
        for (i=0; i<TABLE_ENTRIES; i++) 
-               count+=hash_table->entrys[i].entries;
+               count+=tm_table->entrys[i].entries;
        return count;
 }
 
@@ -186,20 +209,20 @@ error:
 
 /* Release all the data contained by the hash table. All the aux. structures
  *  as sems, lists, etc, are also released */
-void free_hash_table( struct s_table *hash_table )
+void free_hash_table(  )
 {
        struct cell* p_cell;
        struct cell* tmp_cell;
        int    i;
 
-       if (hash_table)
+       if (tm_table)
        {
                /* remove the data contained by each entry */
                for( i = 0 ; i<TABLE_ENTRIES; i++)
                {
-                       release_entry_lock( (hash_table->entrys)+i );
+                       release_entry_lock( (tm_table->entrys)+i );
                        /* delete all synonyms at hash-collision-slot i */
-                       p_cell=hash_table->entrys[i].first_cell;
+                       p_cell=tm_table->entrys[i].first_cell;
                        for( ; p_cell; p_cell = tmp_cell )
                        {
                                tmp_cell = p_cell->next_cell;
@@ -207,11 +230,6 @@ void free_hash_table( struct s_table *hash_table )
                        }
                }
 
-               /* the mutexs for sync the lists are released*/
-               for ( i=0 ; i<NR_OF_TIMER_LISTS ; i++ )
-                       release_timerlist_lock( &(hash_table->timers[i]) );
-
-               shm_free( hash_table );
        }
 }
 
@@ -222,36 +240,37 @@ void free_hash_table( struct s_table *hash_table )
  */
 struct s_table* init_hash_table()
 {
-       struct s_table*  hash_table;
        int              i;
 
        /*allocs the table*/
-       hash_table = (struct s_table*)shm_malloc( sizeof( struct s_table ) );
-       if ( !hash_table )
-               goto error;
+       tm_table= (struct s_table*)shm_malloc( sizeof( struct s_table ) );
+       if ( !tm_table) {
+               LOG(L_ERR, "ERROR: init_hash_table: no shmem for TM table\n");
+               goto error0;
+       }
 
-       memset( hash_table, 0, sizeof (struct s_table ) );
+       memset( tm_table, 0, sizeof (struct s_table ) );
 
        /* try first allocating all the structures needed for syncing */
        if (lock_initialize()==-1)
-               goto error;
+               goto error1;
 
        /* inits the entrys */
        for(  i=0 ; i<TABLE_ENTRIES; i++ )
        {
-               init_entry_lock( hash_table , (hash_table->entrys)+i );
-               hash_table->entrys[i].next_label = rand();
+               init_entry_lock( tm_table, (tm_table->entrys)+i );
+               tm_table->entrys[i].next_label = rand();
        }
 
-       /* inits the timers*/
-       for(  i=0 ; i<NR_OF_TIMER_LISTS ; i++ )
-               init_timer_list( hash_table, i );
-
-       return  hash_table;
+       return  tm_table;
 
-error:
-       free_hash_table( hash_table );
+#ifdef _OBSO
+error2:
        lock_cleanup();
+#endif
+error1:
+       free_hash_table( );
+error0:
        return 0;
 }
 
@@ -260,13 +279,12 @@ error:
 
 /*  Takes an already created cell and links it into hash table on the
  *  appropiate entry. */
-void insert_into_hash_table_unsafe( struct s_table *hash_table,
-                                                                                       struct cell * p_cell )
+void insert_into_hash_table_unsafe( struct cell * p_cell )
 {
        struct entry* p_entry;
 
        /* locates the apropiate entry */
-       p_entry = &hash_table->entrys[ p_cell->hash_index ];
+       p_entry = &tm_table->entrys[ p_cell->hash_index ];
 
        p_cell->label = p_entry->next_label++;
        if ( p_entry->last_cell )
@@ -290,10 +308,10 @@ void insert_into_hash_table_unsafe( struct s_table *hash_table,
 
 
 
-void insert_into_hash_table(struct s_table *hash_table,  struct cell * p_cell)
+void insert_into_hash_table( struct cell * p_cell)
 {
        LOCK_HASH(p_cell->hash_index);
-       insert_into_hash_table_unsafe( hash_table,  p_cell );
+       insert_into_hash_table_unsafe(  p_cell );
        UNLOCK_HASH(p_cell->hash_index);
 }
 
@@ -301,10 +319,9 @@ void insert_into_hash_table(struct s_table *hash_table,  struct cell * p_cell)
 
 
 /*  Un-link a  cell from hash_table, but the cell itself is not released */
-void remove_from_hash_table_unsafe(struct s_table *hash_table,  
- struct cell * p_cell)
+void remove_from_hash_table_unsafe( struct cell * p_cell)
 {
-       struct entry*  p_entry  = &(hash_table->entrys[p_cell->hash_index]);
+       struct entry*  p_entry  = &(tm_table->entrys[p_cell->hash_index]);
 
        /* unlink the cell from entry list */
        /* lock( &(p_entry->mutex) ); */
diff --git a/modules/tm/h_table.h b/modules/tm/h_table.h
index 57e6ef5..2f7541c 100644
@@ -23,11 +23,17 @@ struct timer;
 struct retr_buf;
 
 #include "../../mem/shm_mem.h"
-#include "timer.h" 
 #include "lock.h"
 #include "sip_msg.h"
 #include "t_reply.h"
 #include "t_hooks.h"
+#include "timer.h"
+
+#define LOCK_HASH(_h) lock_hash((_h))
+#define UNLOCK_HASH(_h) unlock_hash((_h))
+
+void lock_hash(int i);
+void unlock_hash(int i);
 
 
 #define NO_CANCEL       ( (char*) 0 )
@@ -218,22 +224,21 @@ struct s_table
 {
        /* table of hash entries; each of them is a list of synonyms  */
        struct entry   entrys[ TABLE_ENTRIES ];
+#ifdef _OBSOLETED
        /* table of timer lists */
        struct timer   timers[ NR_OF_TIMER_LISTS ];
+#endif
 };
 
 
-
+struct s_table* get_tm_table();
 struct s_table* init_hash_table();
-void   free_hash_table( struct s_table* hash_table );
+void   free_hash_table( );
 void   free_cell( struct cell* dead_cell );
 struct cell*  build_cell( struct sip_msg* p_msg );
-void   remove_from_hash_table_unsafe(struct s_table *hash_table,
-       struct cell * p_cell);
-void   insert_into_hash_table(struct s_table *hash_table,
-       struct cell * p_cell);
-void   insert_into_hash_table_unsafe( struct s_table *hash_table,
-               struct cell * p_cell );
+void   remove_from_hash_table_unsafe( struct cell * p_cell);
+void   insert_into_hash_table( struct cell * p_cell);
+void   insert_into_hash_table_unsafe( struct cell * p_cell );
 
 unsigned int transaction_count( void );
 
diff --git a/modules/tm/lock.c b/modules/tm/lock.c
index 747389e..502f1e1 100644
@@ -364,7 +364,7 @@ int init_cell_lock( struct cell *cell )
        return 0;
 }
 
-int init_entry_lock( struct s_table* hash_table, struct entry *entry )
+int init_entry_lock( struct s_table* ht, struct entry *entry )
 {
 #ifdef FAST_LOCK
        init_lock(entry->mutex);
@@ -374,24 +374,12 @@ int init_entry_lock( struct s_table* hash_table, struct entry *entry )
           many partitions as number of available semaphors allows
         */
        entry->mutex.semaphore_set=entry_semaphore;
-       entry->mutex.semaphore_index = ( ((void *)entry - (void *)(hash_table->entrys ) )
+       entry->mutex.semaphore_index = ( ((void *)entry - (void *)(ht->entrys ) )
                / sizeof(struct entry) ) % sem_nr;
 #endif
        return 0;
 }
 
-int init_timerlist_lock( struct s_table* hash_table, enum lists timerlist_id)
-{
-       /* each timer list has its own semaphore */
-       /*
-       hash_table->timers[timerlist_id].mutex.semaphore_set=timer_semaphore;
-       hash_table->timers[timerlist_id].mutex.semaphore_index=timer_group[timerlist_id];
-       */
-
-       hash_table->timers[timerlist_id].mutex=&(timer_group_lock[ timer_group[timerlist_id] ]);
-       return 0;
-}
-
 
 
 int release_cell_lock( struct cell *cell )
@@ -420,3 +408,10 @@ int release_timerlist_lock( struct timer *timerlist )
        /* the same as above */
        return 0;
 }
+
+int init_timerlist_lock( enum lists timerlist_id)
+{
+       get_timertable()->timers[timerlist_id].mutex=
+               &(timer_group_lock[ timer_group[timerlist_id] ]);
+       return 0;
+}
diff --git a/modules/tm/lock.h b/modules/tm/lock.h
index 07c091f..ea54c36 100644
@@ -66,8 +66,7 @@ static int init_semaphore_set( int size );
 
 
 int init_cell_lock( struct cell *cell );
-int init_entry_lock( struct s_table* hash_table, struct entry *entry );
-int init_timerlist_lock( struct s_table* hash_table, enum lists timerlist_id);
+int init_entry_lock( struct s_table* ht, struct entry *entry );
 
 
 int release_cell_lock( struct cell *cell );
@@ -119,6 +118,8 @@ static inline int _unlock( ser_lock_t* s )
 #endif
 }
 
+int init_timerlist_lock(  enum lists timerlist_id);
+
 
 #endif
 
diff --git a/modules/tm/t_funcs.c b/modules/tm/t_funcs.c
index 93d5c41..dc54225 100644
 #include "t_lookup.h"
 #include "config.h"
 
-/* pointer to the big table where all the transaction data
-   lives
-*/
-struct s_table*  hash_table;
 
 /* ----------------------------------------------------- */
 
@@ -39,65 +35,25 @@ int send_pr_buffer( struct retr_buf *rb,
 void start_retr( struct retr_buf *rb )
 {
        rb->retr_list=RT_T1_TO_1;
-       set_timer( hash_table, &rb->retr_timer, RT_T1_TO_1 );
-       set_timer( hash_table, &rb->fr_timer, FR_TIMER_LIST );
+       set_timer( &rb->retr_timer, RT_T1_TO_1 );
+       set_timer( &rb->fr_timer, FR_TIMER_LIST );
 }
 
-int tm_startup()
-{
-       /* building the hash table*/
-       hash_table = init_hash_table();
-       if (!hash_table)
-               return -1;
-
-       /* init. timer lists */
-       hash_table->timers[RT_T1_TO_1].id = RT_T1_TO_1;
-       hash_table->timers[RT_T1_TO_2].id = RT_T1_TO_2;
-       hash_table->timers[RT_T1_TO_3].id = RT_T1_TO_3;
-       hash_table->timers[RT_T2].id      = RT_T2;
-       hash_table->timers[FR_TIMER_LIST].id     = FR_TIMER_LIST;
-       hash_table->timers[FR_INV_TIMER_LIST].id = FR_INV_TIMER_LIST;
-       hash_table->timers[WT_TIMER_LIST].id     = WT_TIMER_LIST;
-       hash_table->timers[DELETE_LIST].id       = DELETE_LIST;
-
-
-       /* fork table */
-       /* nr_forks = 0; */     
-
-       /* init static hidden values */
-       init_t();
-
-       return 0;
-}
 
 
 
 
 void tm_shutdown()
 {
-       struct timer_link  *tl, *end, *tmp;
-       int i;
 
        DBG("DEBUG: tm_shutdown : start\n");
-       /* remember the DELETE LIST */
-       tl = hash_table->timers[DELETE_LIST].first_tl.next_tl;
-       end = & hash_table->timers[DELETE_LIST].last_tl;
-       /* unlink the timer lists */
-       for( i=0; i<NR_OF_TIMER_LISTS ; i++ )
-               reset_timer_list( hash_table, i );
-
-       DBG("DEBUG: tm_shutdown : empting DELETE list\n");
-       /* deletes all cells from DELETE_LIST list
-       (they are no more accessible from enrys) */
-       while (tl!=end) {
-               tmp=tl->next_tl;
-               free_cell((struct cell*)tl->payload);
-               tl=tmp;
-       }
+       unlink_timer_lists();
 
        /* destroy the hash table */
        DBG("DEBUG: tm_shutdown : empting hash table\n");
-       free_hash_table( hash_table );
+       free_hash_table( );
+       DBG("DEBUG: tm_shutdown: releasing timers\n");
+       free_timer_table();
        DBG("DEBUG: tm_shutdown : removing semaphores\n");
        lock_cleanup();
        DBG("DEBUG: tm_shutdown : done\n");
@@ -110,8 +66,8 @@ int t_release_transaction( struct cell *trans )
 {
        trans->kr|=REQ_RLSD;
 
-       reset_timer( hash_table, & trans->uas.response.fr_timer );
-       reset_timer( hash_table, & trans->uas.response.retr_timer );
+       reset_timer( & trans->uas.response.fr_timer );
+       reset_timer( & trans->uas.response.retr_timer );
 
        cleanup_uac_timers( trans );
        
@@ -161,7 +117,7 @@ void put_on_wait(  struct cell  *Trans  )
                4.                                                                      WAIT timer executed,
                                                                                        transaction deleted
        */
-       set_1timer( hash_table, &(Trans->wait_tl), WT_TIMER_LIST );
+       set_1timer( &Trans->wait_tl, WT_TIMER_LIST );
 }
 
 
diff --git a/modules/tm/t_funcs.h b/modules/tm/t_funcs.h
index 41a2e60..9ac93cd 100644
@@ -35,26 +35,9 @@ struct timer;
 struct entry;
 struct cell;
 
-extern struct s_table*  hash_table;
 extern int noisy_ctimer;
 
 
-#define LOCK_HASH(_h) lock(&(hash_table->entrys[(_h)].mutex))
-#define UNLOCK_HASH(_h) unlock(&(hash_table->entrys[(_h)].mutex))
-
-#ifdef _OBSOLETED
-#define LOCK_ACK(_t) lock(&(_t)->ack_mutex )
-#define UNLOCK_ACK(_t) unlock(&(_t)->ack_mutex )
-#endif
-
-#ifdef _XWAIT
-       #define LOCK_WAIT(_t) lock(&(_t)->wait_mutex )
-       #define UNLOCK_WAIT(_t) unlock(&(_t)->wait_mutex )
-#else
-       #define LOCK_WAIT(_t)
-       #define UNLOCK_WAIT(_t)
-#endif
-
 /* send a private buffer: utilize a retransmission structure
    but take a separate buffer not refered by it; healthy
    for reducing time spend in REPLIES locks
@@ -109,8 +92,6 @@ int get_ip_and_port_from_uri( str* uri , unsigned int *param_ip,
        unsigned int *param_port);
 
 
-void timer_routine(unsigned int, void*);
-
 int t_newtran( struct sip_msg* p_msg );
 
 void put_on_wait(  struct cell  *Trans  );
diff --git a/modules/tm/t_fwd.c b/modules/tm/t_fwd.c
index 95ef10e..9ec91a9 100644
@@ -317,7 +317,7 @@ int t_forward_nonack( struct cell *t, struct sip_msg* p_msg ,
        t->kr|=REQ_FWDED;
 
        if (p_msg->REQ_METHOD==METHOD_CANCEL) {
-               t_invite=t_lookupOriginalT( hash_table, p_msg );
+               t_invite=t_lookupOriginalT(  p_msg );
                if (t_invite!=T_NULL) {
                        e2e_cancel( p_msg, t, t_invite );
                        UNREF(t_invite);
diff --git a/modules/tm/t_lookup.c b/modules/tm/t_lookup.c
index f8ad2e2..7898926 100644
@@ -129,7 +129,7 @@ int t_lookup_request( struct sip_msg* p_msg , int leave_new_locked )
        LOCK_HASH(p_msg->hash_index);
 
        /* all the transactions from the entry are compared */
-       for ( p_cell = hash_table->entrys[p_msg->hash_index].first_cell;
+       for ( p_cell = get_tm_table()->entrys[p_msg->hash_index].first_cell;
                  p_cell; p_cell = p_cell->next_cell ) 
        {
                t_msg = p_cell->uas.request;
@@ -239,8 +239,7 @@ found:
  *       0 - transaction wasn't found
  *       T - transaction found
  */
-struct cell* t_lookupOriginalT(  struct s_table* hash_table ,
-       struct sip_msg* p_msg )
+struct cell* t_lookupOriginalT(  struct sip_msg* p_msg )
 {
        struct cell     *p_cell;
        unsigned int     hash_index;
@@ -253,7 +252,7 @@ struct cell* t_lookupOriginalT(  struct s_table* hash_table ,
        DBG("DEBUG: t_lookupOriginalT: searching on hash entry %d\n",hash_index );
 
        /* all the transactions from the entry are compared */
-       for (p_cell=hash_table->entrys[hash_index].first_cell;
+       for (p_cell=get_tm_table()->entrys[hash_index].first_cell;
                p_cell; p_cell = p_cell->next_cell )
        {
                t_msg = p_cell->uas.request;
@@ -407,7 +406,7 @@ int t_reply_matching( struct sip_msg *p_msg , int *p_branch )
        is_cancel=cseq_method.len==CANCEL_LEN 
                && memcmp(cseq_method.s, CANCEL, CANCEL_LEN)==0;
        LOCK_HASH(hash_index);
-       for (p_cell = hash_table->entrys[hash_index].first_cell; p_cell; 
+       for (p_cell = get_tm_table()->entrys[hash_index].first_cell; p_cell; 
                p_cell=p_cell->next_cell) {
 
                /* first look if branch matches */
@@ -631,7 +630,7 @@ int t_newtran( struct sip_msg* p_msg )
                                LOG(L_ERR, "ERROR: t_addifnew: out of mem:\n");
                                ret = E_OUT_OF_MEM;
                        } else {
-                               insert_into_hash_table_unsafe( hash_table , new_cell );
+                               insert_into_hash_table_unsafe( new_cell );
                                T=new_cell;
                                INIT_REF_UNSAFE(T);
                                /* init pointers to headers needed to construct local
diff --git a/modules/tm/t_lookup.h b/modules/tm/t_lookup.h
index f56d404..7f86521 100644
@@ -20,8 +20,7 @@ extern unsigned int     global_msg_id;
 
 void init_t();
 int init_rb( struct retr_buf *rb, struct sip_msg *msg );
-struct cell* t_lookupOriginalT(  struct s_table* hash_table,
-       struct sip_msg* p_msg );
+struct cell* t_lookupOriginalT( struct sip_msg* p_msg );
 int t_reply_matching( struct sip_msg* , int* );
 int t_lookup_request( struct sip_msg* p_msg , int leave_new_locked );
 int t_newtran( struct sip_msg* p_msg );
diff --git a/modules/tm/t_reply.c b/modules/tm/t_reply.c
index 3b261b2..9d7452a 100644
@@ -198,7 +198,7 @@ static int _reply( struct cell *trans, struct sip_msg* p_msg,
        if (code>=200) {
                cleanup_uac_timers( trans );
                if (trans->is_invite) cancel_uacs( trans, cancel_bitmap );
-               set_final_timer( /* hash_table, */ trans );
+               set_final_timer(  trans );
        }
 
        /* send it out */
@@ -244,8 +244,8 @@ void cleanup_uac_timers( struct cell *t )
 
        /* reset FR/retransmission timers */
        for (i=0; i<t->nr_of_outgoings; i++ )  {
-               reset_timer( hash_table, &t->uac[i].request.retr_timer );
-               reset_timer( hash_table, &t->uac[i].request.fr_timer );
+               reset_timer( &t->uac[i].request.retr_timer );
+               reset_timer( &t->uac[i].request.fr_timer );
        }
        DBG("DEBUG: cleanup_uacs: RETR/FR timers reset\n");
 }
@@ -538,9 +538,9 @@ int t_on_reply( struct sip_msg  *p_msg )
                /* .. which is not e2e ? ... */
                && t->is_invite ) {
                        /* ... then just stop timers */
-                       reset_timer( hash_table, &uac->local_cancel.retr_timer);
+                       reset_timer( &uac->local_cancel.retr_timer);
                        if ( msg_status >= 200 )
-                               reset_timer( hash_table, &uac->local_cancel.fr_timer);
+                               reset_timer( &uac->local_cancel.fr_timer);
                        DBG("DEBUG: reply to local CANCEL processed\n");
                        goto done;
        }
@@ -548,10 +548,10 @@ int t_on_reply( struct sip_msg  *p_msg )
 
        /* *** stop timers *** */
        /* stop retransmission */
-       reset_timer( hash_table, &uac->request.retr_timer);
+       reset_timer( &uac->request.retr_timer);
        /* stop final response timer only if I got a final response */
        if ( msg_status >= 200 )
-               reset_timer( hash_table, &uac->request.fr_timer);
+               reset_timer( &uac->request.fr_timer);
 
        LOCK_REPLIES( t );
        if (t->local) {
@@ -581,7 +581,7 @@ int t_on_reply( struct sip_msg  *p_msg )
                cleanup_uac_timers( t );        
                if (t->is_invite) cancel_uacs( t, cancel_bitmap );
                /* FR for negative INVITES, WAIT anything else */
-               set_final_timer( /* hash_table,*/ t );
+               set_final_timer(  t );
        } 
 
        /* update FR/RETR timers on provisional replies */
@@ -590,13 +590,12 @@ int t_on_reply( struct sip_msg  *p_msg )
                        /* invite: change FR to longer FR_INV, do not
                           attempt to restart retransmission any more
                        */
-                       set_timer( hash_table, & uac->request.fr_timer,
+                       set_timer( & uac->request.fr_timer,
                                FR_INV_TIMER_LIST );
                } else {
                        /* non-invite: restart retransmisssions (slow now) */
                        uac->request.retr_list=RT_T2;
-                       set_timer( hash_table, 
-                               & uac->request.retr_timer, RT_T2 );
+                       set_timer(  & uac->request.retr_timer, RT_T2 );
                }
        } /* provisional replies */
 
diff --git a/modules/tm/t_thandlers.c b/modules/tm/t_thandlers.c
deleted file mode 100644
index 60e7178..0000000
--- a/modules/tm/t_thandlers.c
+++ /dev/null
@@ -1,442 +0,0 @@
-/*
- * $Id$
- *
- * Timer handlers
- */
-
-#include "../../hash_func.h"
-#include "../../dprint.h"
-#include "../../config.h"
-#include "../../parser/parser_f.h"
-#include "../../ut.h"
-#include "t_funcs.h"
-#include "t_reply.h"
-#include "t_cancel.h"
-
-int noisy_ctimer=0;
-
-static void unlink_timers( struct cell *t )
-{
-       int i;
-       int remove_fr, remove_retr;
-
-       remove_fr=0; remove_retr=0;
-
-       /* first look if we need to remove timers and play with
-          costly locks at all
-
-           note that is_in_timer_list2 is unsafe but it does not
-           hurt -- transaction is already dead (wait state) so that
-           noone else will install a FR/RETR timer and it can only
-           be removed from timer process itself -> it is safe to
-           use it without any protection
-       */
-       if (is_in_timer_list2(&t->uas.response.fr_timer)) remove_fr=1; 
-       else for (i=0; i<t->nr_of_outgoings; i++)
-               if (is_in_timer_list2(&t->uac[i].request.fr_timer)
-                       || is_in_timer_list2(&t->uac[i].local_cancel.fr_timer)) {
-                               remove_fr=1;
-                               break;
-               }
-       if (is_in_timer_list2(&t->uas.response.retr_timer)) remove_retr=1; 
-       else for (i=0; i<t->nr_of_outgoings; i++)
-               if (is_in_timer_list2(&t->uac[i].request.retr_timer)
-                       || is_in_timer_list2(&t->uac[i].local_cancel.retr_timer)) {
-                               remove_retr=1;
-                               break;
-               }
-
-       /* do what we have to do....*/
-       if (remove_retr) {
-               /* RT_T1 lock is shared by all other RT timer
-                  lists -- we can safely lock just one
-               */
-               lock(hash_table->timers[RT_T1_TO_1].mutex);
-               remove_timer_unsafe(&t->uas.response.retr_timer);
-               for (i=0; i<t->nr_of_outgoings; i++) {
-                       remove_timer_unsafe(&t->uac[i].request.retr_timer);
-                       remove_timer_unsafe(&t->uac[i].local_cancel.retr_timer);
-               }
-               unlock(hash_table->timers[RT_T1_TO_1].mutex);
-       }
-       if (remove_fr) {
-               /* FR lock is shared by all other FR timer
-                  lists -- we can safely lock just one
-               */
-               lock(hash_table->timers[FR_TIMER_LIST].mutex);
-               remove_timer_unsafe(&t->uas.response.fr_timer);
-               for (i=0; i<t->nr_of_outgoings; i++) {
-                       remove_timer_unsafe(&t->uac[i].request.fr_timer);
-                       remove_timer_unsafe(&t->uac[i].local_cancel.fr_timer);
-               }
-               unlock(hash_table->timers[FR_TIMER_LIST].mutex);
-       }
-}
-
-/* delete_cell attempt to delete a transaction of not refered
-   by any process; if so, it is put on a delete timer which will
-   try the same later; it assumes it is safe to read ref_count --
-   either the hash entry is locked or the transaction has been
-   removed from hash table (i.e., other processes can only
-   decrease ref_count)
-
-   it is static as it is safe to be called only from WAIT/DELETE
-   timers, the only valid place from which a transaction can be
-   removed
-*/
-
-static void delete_cell( struct cell *p_cell, int unlock )
-{
-
-       int i;
-
-       /* there may still be FR/RETR timers, which have been reset
-          (i.e., time_out==TIMER_DELETED) but are stilled linked to
-          timer lists and must be removed from there before the
-          structures are released
-       */
-       unlink_timers( p_cell );
-
-#ifdef EXTRA_DEBUG
-
-       if (is_in_timer_list2(& p_cell->wait_tl )) {
-               LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-                       " still on WAIT, timeout=%d\n", p_cell, p_cell->wait_tl.time_out);
-               abort();
-       }
-       if (is_in_timer_list2(& p_cell->uas.response.retr_timer )) {
-               LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-                       " still on RETR (rep), timeout=%d\n",
-                       p_cell, p_cell->uas.response.retr_timer.time_out);
-               abort();
-       }
-       if (is_in_timer_list2(& p_cell->uas.response.fr_timer )) {
-               LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-                       " still on FR (rep), timeout=%d\n", p_cell,
-                       p_cell->uas.response.fr_timer.time_out);
-               abort();
-       }
-       for (i=0; i<p_cell->nr_of_outgoings; i++) {
-               if (is_in_timer_list2(& p_cell->uac[i].request.retr_timer)) {
-                       LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-                               " still on RETR (req %d), timeout %d\n", p_cell, i,
-                               p_cell->uac[i].request.retr_timer.time_out);
-                       abort();
-               }
-               if (is_in_timer_list2(& p_cell->uac[i].request.fr_timer)) {
-                       LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-                               " still on FR (req %d), timeout %d\n", p_cell, i,
-                               p_cell->uac[i].request.fr_timer.time_out);
-                       abort();
-               }
-               if (is_in_timer_list2(& p_cell->uac[i].local_cancel.retr_timer)) {
-                       LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-                               " still on RETR/cancel (req %d), timeout %d\n", p_cell, i,
-                               p_cell->uac[i].request.retr_timer.time_out);
-                       abort();
-               }
-               if (is_in_timer_list2(& p_cell->uac[i].local_cancel.fr_timer)) {
-                       LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-                               " still on FR/cancel (req %d), timeout %d\n", p_cell, i,
-                               p_cell->uac[i].request.fr_timer.time_out);
-                       abort();
-               }
-       }
-       /* reset_retr_timers( hash_table, p_cell ); */
-#endif
-       /* still in use ... don't delete */
-       if ( IS_REFFED_UNSAFE(p_cell) ) {
-               if (unlock) UNLOCK_HASH(p_cell->hash_index);
-               DBG("DEBUG: delete_cell %p: can't delete -- still reffed\n",
-                       p_cell);
-               /* it's added to del list for future del */
-               set_timer( hash_table, &(p_cell->dele_tl), DELETE_LIST );
-       } else {
-               if (unlock) UNLOCK_HASH(p_cell->hash_index);
-               DBG("DEBUG: delete transaction %p\n", p_cell );
-               free_cell( p_cell );
-       }
-}
-
-
-
-inline void retransmission_handler( void *attr)
-{
-       struct retr_buf* r_buf ;
-       enum lists id;
-
-       r_buf = (struct retr_buf*)attr;
-#ifdef EXTRA_DEBUG
-       if (r_buf->my_T->damocles) {
-               LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-                       " called from RETR timer\n",r_buf->my_T);
-               abort();
-       }       
-#endif
-
-       /*the transaction is already removed from RETRANSMISSION_LIST by timer*/
-       /* retransmision */
-       if ( r_buf->activ_type==TYPE_LOCAL_CANCEL 
-               || r_buf->activ_type==0 ) {
-                       SEND_BUFFER( r_buf );
-                       DBG("DEBUG: retransmission_handler : "
-                               "request resending (t=%p, %.9s ... )\n", 
-                               r_buf->my_T, r_buf->buffer);
-       } else {
-                       DBG("DEBUG: retransmission_handler : "
-                               "reply resending (t=%p, %.9s ... )\n", 
-                               r_buf->my_T, r_buf->buffer);
-                       t_retransmit_reply(r_buf->my_T);
-       }
-
-       id = r_buf->retr_list;
-       r_buf->retr_list = id < RT_T2 ? id + 1 : RT_T2;
-
-       set_timer(hash_table,&(r_buf->retr_timer),id < RT_T2 ? id + 1 : RT_T2 );
-
-       DBG("DEBUG: retransmission_handler : done\n");
-}
-
-
-
-
-inline void final_response_handler( void *attr)
-{
-       int silent;
-       struct retr_buf* r_buf;
-       enum rps reply_status;
-       struct cell *t;
-       branch_bm_t cancel_bitmap;
-       short do_cancel_branch;
-
-       r_buf = (struct retr_buf*)attr;
-       t=r_buf->my_T;
-
-#      ifdef EXTRA_DEBUG
-       if (t->damocles) 
-       {
-               LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-                       " called from FR timer\n",r_buf->my_T);
-               abort();
-       }
-#      endif
-
-       reset_timer( hash_table , &(r_buf->retr_timer) );
-
-       /* the transaction is already removed from FR_LIST by the timer */
-
-       /* FR for local cancels.... */
-       if (r_buf->activ_type==TYPE_LOCAL_CANCEL)
-       {
-               DBG("DEBUG: FR_handler: stop retr for Local Cancel\n");
-               return;
-       }
-
-       /* FR for replies (negative INVITE replies) */
-       if (r_buf->activ_type>0) {
-#              ifdef EXTRA_DEBUG
-               if (t->uas.request->REQ_METHOD!=METHOD_INVITE
-                       || t->uas.status < 300 ) {
-                       LOG(L_ERR, "ERROR: FR timer: uknown type reply buffer\n");
-                       abort();
-               }
-#              endif
-               put_on_wait( t );
-               return;
-       };
-
-       /* lock reply processing to determine how to proceed reliably */
-       LOCK_REPLIES( t );
-       /* now it can be only a request retransmission buffer;
-          try if you can simply discard the local transaction 
-          state without compellingly removing it from the
-          world */
-       silent=
-               /* not for UACs */
-               !t->local
-               /* invites only */
-               && t->is_invite
-               /* parallel forking does not allow silent state discarding */
-               && t->nr_of_outgoings==1
-               /* on_no_reply handler not installed -- serial forking could occur 
-                  otherwise */
-               && t->on_negative==0
-               /* something received -- we will not be silent on error */
-               && t->uac[r_buf->branch].last_received>0
-               /* don't go silent if disallowed globally ... */
-               && noisy_ctimer==0
-               /* ... or for this particular transaction */
-               && t->noisy_ctimer==0;
-       if (silent) {
-               UNLOCK_REPLIES(t);
-               DBG("DEBUG: FR_handler: transaction silently dropped (%p)\n",t);
-               put_on_wait( t );
-               return;
-       }
-
-       DBG("DEBUG: FR_handler:stop retr. and send CANCEL (%p)\n", t);
-       do_cancel_branch=t->is_invite && 
-               should_cancel_branch(t, r_buf->branch);
-
-#ifdef _OBSOLETED
-       /* set global environment for currently processed transaction */
-       T=t;
-       global_msg_id=T->uas.request->id;
-#endif 
-
-       cancel_bitmap=do_cancel_branch ? 1<<r_buf->branch : 0;
-       if (t->local) {
-               reply_status=local_reply( t, FAKED_REPLY, r_buf->branch, 
-                       408, &cancel_bitmap );
-       } else {
-               reply_status=relay_reply( t, FAKED_REPLY, r_buf->branch, 408, 
-                       &cancel_bitmap );
-       }
-       /* now when out-of-lock do the cancel I/O */
-       if (do_cancel_branch) cancel_branch(t, r_buf->branch );
-       /* it's cleaned up on error; if no error occured and transaction
-          completed regularly, I have to clean-up myself
-       */
-       if (reply_status==RPS_COMPLETED) {
-               /* don't need to cleanup uac_timers -- they were cleaned
-                  branch by branch and this last branch's timers are
-                  reset now too
-               */
-               /* don't need to issue cancels -- local cancels have been
-                  issued branch by branch and this last branch was
-                  cancelled now too
-               */
-               /* then the only thing to do now is to put the transaction
-                  on FR/wait state 
-               */
-               set_final_timer( /* hash_table, */ t );
-       }
-       DBG("DEBUG: final_response_handler : done\n");
-}
-
-void cleanup_localcancel_timers( struct cell *t )
-{
-       int i;
-       for (i=0; i<t->nr_of_outgoings; i++ )  {
-               reset_timer( hash_table, &t->uac[i].local_cancel.retr_timer );
-               reset_timer( hash_table, &t->uac[i].local_cancel.fr_timer );
-       }
-}
-
-
-inline void wait_handler( void *attr)
-{
-       struct cell *p_cell = (struct cell*)attr;
-
-#ifdef EXTRA_DEBUG
-       if (p_cell->damocles) {
-               LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-                       " called from WAIT timer\n",p_cell);
-               abort();
-       }       
-       DBG("DEBUG: ---------- WAIT timer hit ------- \n");
-#endif
-
-       /* stop cancel timers if any running */
-       if (p_cell->is_invite) cleanup_localcancel_timers( p_cell );
-
-       /* the transaction is already removed from WT_LIST by the timer */
-       /* remove the cell from the hash table */
-       DBG("DEBUG: wait_handler : removing %p from table \n", p_cell );
-       LOCK_HASH( p_cell->hash_index );
-       remove_from_hash_table_unsafe( hash_table, p_cell );
-       /* jku: no more here -- we do it when we put a transaction on wait */
-#ifdef EXTRA_DEBUG
-       p_cell->damocles = 1;
-#endif
-       /* delete (returns with UNLOCK-ed_HASH) */
-       delete_cell( p_cell, 1 /* unlock on return */ );
-       DBG("DEBUG: wait_handler : done\n");
-}
-
-
-
-
-inline void delete_handler( void *attr)
-{
-       struct cell *p_cell = (struct cell*)attr;
-
-       DBG("DEBUG: delete_handler : removing %p \n", p_cell );
-#ifdef EXTRA_DEBUG
-       if (p_cell->damocles==0) {
-               LOG( L_ERR, "ERROR: transaction %p not scheduled for deletion"
-                       " and called from DELETE timer\n",p_cell);
-               abort();
-       }       
-#endif
-
-       /* we call delete now without any locking on hash/ref_count;
-          we can do that because delete_handler is only entered after
-          the delete timer was installed from wait_handler, which
-          removed transaction from hash table and did not destroy it
-          because some processes were using it; that means that the
-          processes currently using the transaction can unref and no
-          new processes can ref -- we can wait until ref_count is
-          zero safely without locking
-       */
-       delete_cell( p_cell, 0 /* don't unlock on return */ );
-    DBG("DEBUG: delete_handler : done\n");
-}
-
-
-
-
-#define run_handler_for_each( _tl , _handler ) \
-       while ((_tl))\
-       {\
-               /* reset the timer list linkage */\
-               tmp_tl = (_tl)->next_tl;\
-               (_tl)->next_tl = (_tl)->prev_tl = 0;\
-               DBG("DEBUG: timer routine:%d,tl=%p next=%p\n",\
-                       id,(_tl),tmp_tl);\
-               if ((_tl)->time_out>TIMER_DELETED) \
-                       (_handler)( (_tl)->payload );\
-               (_tl) = tmp_tl;\
-       }
-
-
-
-
-void timer_routine(unsigned int ticks , void * attr)
-{
-       struct s_table    *hash_table = (struct s_table *)attr;
-       struct timer_link *tl, *tmp_tl;
-       int                id;
-
-#ifdef BOGDAN_TRIFLE
-       DBG(" %d \n",ticks);
-#endif
-
-       for( id=0 ; id<NR_OF_TIMER_LISTS ; id++ )
-       {
-               /* to waste as little time in lock as possible, detach list
-                  with expired items and process them after leaving the lock */
-               tl=check_and_split_time_list( &(hash_table->timers[ id ]), ticks);
-               /* process items now */
-               switch (id)
-               {
-                       case FR_TIMER_LIST:
-                       case FR_INV_TIMER_LIST:
-                               run_handler_for_each(tl,final_response_handler);
-                               break;
-                       case RT_T1_TO_1:
-                       case RT_T1_TO_2:
-                       case RT_T1_TO_3:
-                       case RT_T2:
-                               run_handler_for_each(tl,retransmission_handler);
-                               break;
-                       case WT_TIMER_LIST:
-                               run_handler_for_each(tl,wait_handler);
-                               break;
-                       case DELETE_LIST:
-                               run_handler_for_each(tl,delete_handler);
-                               break;
-               }
-       }
-}
-
diff --git a/modules/tm/timer.c b/modules/tm/timer.c
index e7f6afc..ad92688 100644
 #include "timer.h"
 #include "../../dprint.h"
 #include "lock.h"
-
 #include "t_stats.h"
 
+#include "../../hash_func.h"
+#include "../../dprint.h"
+#include "../../config.h"
+#include "../../parser/parser_f.h"
+#include "../../ut.h"
+#include "t_funcs.h"
+#include "t_reply.h"
+#include "t_cancel.h"
+
+
+static struct timer_table *timertable;
+
+int noisy_ctimer=0;
+
+
 int timer_group[NR_OF_TIMER_LISTS] = 
 {
        TG_FR, TG_FR,
@@ -101,33 +115,410 @@ unsigned int timer_id2timeout[NR_OF_TIMER_LISTS] = {
                                                /* NR_OF_TIMER_LISTS */
 };
 
+/******************** handlers ***************************/
+
+
+
+static void delete_cell( struct cell *p_cell, int unlock )
+{
+
+       int i;
+
+       /* there may still be FR/RETR timers, which have been reset
+          (i.e., time_out==TIMER_DELETED) but are stilled linked to
+          timer lists and must be removed from there before the
+          structures are released
+       */
+       unlink_timers( p_cell );
+
+#ifdef EXTRA_DEBUG
+
+       if (is_in_timer_list2(& p_cell->wait_tl )) {
+               LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
+                       " still on WAIT, timeout=%d\n", p_cell, p_cell->wait_tl.time_out);
+               abort();
+       }
+       if (is_in_timer_list2(& p_cell->uas.response.retr_timer )) {
+               LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
+                       " still on RETR (rep), timeout=%d\n",
+                       p_cell, p_cell->uas.response.retr_timer.time_out);
+               abort();
+       }
+       if (is_in_timer_list2(& p_cell->uas.response.fr_timer )) {
+               LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
+                       " still on FR (rep), timeout=%d\n", p_cell,
+                       p_cell->uas.response.fr_timer.time_out);
+               abort();
+       }
+       for (i=0; i<p_cell->nr_of_outgoings; i++) {
+               if (is_in_timer_list2(& p_cell->uac[i].request.retr_timer)) {
+                       LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
+                               " still on RETR (req %d), timeout %d\n", p_cell, i,
+                               p_cell->uac[i].request.retr_timer.time_out);
+                       abort();
+               }
+               if (is_in_timer_list2(& p_cell->uac[i].request.fr_timer)) {
+                       LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
+                               " still on FR (req %d), timeout %d\n", p_cell, i,
+                               p_cell->uac[i].request.fr_timer.time_out);
+                       abort();
+               }
+               if (is_in_timer_list2(& p_cell->uac[i].local_cancel.retr_timer)) {
+                       LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
+                               " still on RETR/cancel (req %d), timeout %d\n", p_cell, i,
+                               p_cell->uac[i].request.retr_timer.time_out);
+                       abort();
+               }
+               if (is_in_timer_list2(& p_cell->uac[i].local_cancel.fr_timer)) {
+                       LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
+                               " still on FR/cancel (req %d), timeout %d\n", p_cell, i,
+                               p_cell->uac[i].request.fr_timer.time_out);
+                       abort();
+               }
+       }
+       /* reset_retr_timers( hash__XX_table, p_cell ); */
+#endif
+       /* still in use ... don't delete */
+       if ( IS_REFFED_UNSAFE(p_cell) ) {
+               if (unlock) UNLOCK_HASH(p_cell->hash_index);
+               DBG("DEBUG: delete_cell %p: can't delete -- still reffed\n",
+                       p_cell);
+               /* it's added to del list for future del */
+               set_timer( &(p_cell->dele_tl), DELETE_LIST );
+       } else {
+               if (unlock) UNLOCK_HASH(p_cell->hash_index);
+               DBG("DEBUG: delete transaction %p\n", p_cell );
+               free_cell( p_cell );
+       }
+}
+
+
+
+inline void retransmission_handler( void *attr)
+{
+       struct retr_buf* r_buf ;
+       enum lists id;
+
+       r_buf = (struct retr_buf*)attr;
+#ifdef EXTRA_DEBUG
+       if (r_buf->my_T->damocles) {
+               LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
+                       " called from RETR timer\n",r_buf->my_T);
+               abort();
+       }       
+#endif
+
+       /*the transaction is already removed from RETRANSMISSION_LIST by timer*/
+       /* retransmision */
+       if ( r_buf->activ_type==TYPE_LOCAL_CANCEL 
+               || r_buf->activ_type==0 ) {
+                       SEND_BUFFER( r_buf );
+                       DBG("DEBUG: retransmission_handler : "
+                               "request resending (t=%p, %.9s ... )\n", 
+                               r_buf->my_T, r_buf->buffer);
+       } else {
+                       DBG("DEBUG: retransmission_handler : "
+                               "reply resending (t=%p, %.9s ... )\n", 
+                               r_buf->my_T, r_buf->buffer);
+                       t_retransmit_reply(r_buf->my_T);
+       }
+
+       id = r_buf->retr_list;
+       r_buf->retr_list = id < RT_T2 ? id + 1 : RT_T2;
+
+       set_timer(&(r_buf->retr_timer),id < RT_T2 ? id + 1 : RT_T2 );
+
+       DBG("DEBUG: retransmission_handler : done\n");
+}
+
+
+
+
+inline void final_response_handler( void *attr)
+{
+       int silent;
+       struct retr_buf* r_buf;
+       enum rps reply_status;
+       struct cell *t;
+       branch_bm_t cancel_bitmap;
+       short do_cancel_branch;
+
+       r_buf = (struct retr_buf*)attr;
+       t=r_buf->my_T;
+
+#      ifdef EXTRA_DEBUG
+       if (t->damocles) 
+       {
+               LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
+                       " called from FR timer\n",r_buf->my_T);
+               abort();
+       }
+#      endif
+
+       reset_timer(  &(r_buf->retr_timer) );
+
+       /* the transaction is already removed from FR_LIST by the timer */
+
+       /* FR for local cancels.... */
+       if (r_buf->activ_type==TYPE_LOCAL_CANCEL)
+       {
+               DBG("DEBUG: FR_handler: stop retr for Local Cancel\n");
+               return;
+       }
+
+       /* FR for replies (negative INVITE replies) */
+       if (r_buf->activ_type>0) {
+#              ifdef EXTRA_DEBUG
+               if (t->uas.request->REQ_METHOD!=METHOD_INVITE
+                       || t->uas.status < 300 ) {
+                       LOG(L_ERR, "ERROR: FR timer: uknown type reply buffer\n");
+                       abort();
+               }
+#              endif
+               put_on_wait( t );
+               return;
+       };
+
+       /* lock reply processing to determine how to proceed reliably */
+       LOCK_REPLIES( t );
+       /* now it can be only a request retransmission buffer;
+          try if you can simply discard the local transaction 
+          state without compellingly removing it from the
+          world */
+       silent=
+               /* not for UACs */
+               !t->local
+               /* invites only */
+               && t->is_invite
+               /* parallel forking does not allow silent state discarding */
+               && t->nr_of_outgoings==1
+               /* on_no_reply handler not installed -- serial forking could occur 
+                  otherwise */
+               && t->on_negative==0
+               /* something received -- we will not be silent on error */
+               && t->uac[r_buf->branch].last_received>0
+               /* don't go silent if disallowed globally ... */
+               && noisy_ctimer==0
+               /* ... or for this particular transaction */
+               && t->noisy_ctimer==0;
+       if (silent) {
+               UNLOCK_REPLIES(t);
+               DBG("DEBUG: FR_handler: transaction silently dropped (%p)\n",t);
+               put_on_wait( t );
+               return;
+       }
+
+       DBG("DEBUG: FR_handler:stop retr. and send CANCEL (%p)\n", t);
+       do_cancel_branch=t->is_invite && 
+               should_cancel_branch(t, r_buf->branch);
+
+#ifdef _OBSOLETED
+       /* set global environment for currently processed transaction */
+       T=t;
+       global_msg_id=T->uas.request->id;
+#endif 
+
+       cancel_bitmap=do_cancel_branch ? 1<<r_buf->branch : 0;
+       if (t->local) {
+               reply_status=local_reply( t, FAKED_REPLY, r_buf->branch, 
+                       408, &cancel_bitmap );
+       } else {
+               reply_status=relay_reply( t, FAKED_REPLY, r_buf->branch, 408, 
+                       &cancel_bitmap );
+       }
+       /* now when out-of-lock do the cancel I/O */
+       if (do_cancel_branch) cancel_branch(t, r_buf->branch );
+       /* it's cleaned up on error; if no error occured and transaction
+          completed regularly, I have to clean-up myself
+       */
+       if (reply_status==RPS_COMPLETED) {
+               /* don't need to cleanup uac_timers -- they were cleaned
+                  branch by branch and this last branch's timers are
+                  reset now too
+               */
+               /* don't need to issue cancels -- local cancels have been
+                  issued branch by branch and this last branch was
+                  cancelled now too
+               */
+               /* then the only thing to do now is to put the transaction
+                  on FR/wait state 
+               */
+               set_final_timer(  t );
+       }
+       DBG("DEBUG: final_response_handler : done\n");
+}
+
+void cleanup_localcancel_timers( struct cell *t )
+{
+       int i;
+       for (i=0; i<t->nr_of_outgoings; i++ )  {
+               reset_timer(  &t->uac[i].local_cancel.retr_timer );
+               reset_timer(  &t->uac[i].local_cancel.fr_timer );
+       }
+}
+
+
+inline void wait_handler( void *attr)
+{
+       struct cell *p_cell = (struct cell*)attr;
+
+#ifdef EXTRA_DEBUG
+       if (p_cell->damocles) {
+               LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
+                       " called from WAIT timer\n",p_cell);
+               abort();
+       }       
+       DBG("DEBUG: ---------- WAIT timer hit ------- \n");
+#endif
+
+       /* stop cancel timers if any running */
+       if (p_cell->is_invite) cleanup_localcancel_timers( p_cell );
+
+       /* the transaction is already removed from WT_LIST by the timer */
+       /* remove the cell from the hash table */
+       DBG("DEBUG: wait_handler : removing %p from table \n", p_cell );
+       LOCK_HASH( p_cell->hash_index );
+       remove_from_hash_table_unsafe(  p_cell );
+       /* jku: no more here -- we do it when we put a transaction on wait */
+#ifdef EXTRA_DEBUG
+       p_cell->damocles = 1;
+#endif
+       /* delete (returns with UNLOCK-ed_HASH) */
+       delete_cell( p_cell, 1 /* unlock on return */ );
+       DBG("DEBUG: wait_handler : done\n");
+}
+
+
+
+
+inline void delete_handler( void *attr)
+{
+       struct cell *p_cell = (struct cell*)attr;
+
+       DBG("DEBUG: delete_handler : removing %p \n", p_cell );
+#ifdef EXTRA_DEBUG
+       if (p_cell->damocles==0) {
+               LOG( L_ERR, "ERROR: transaction %p not scheduled for deletion"
+                       " and called from DELETE timer\n",p_cell);
+               abort();
+       }       
+#endif
+
+       /* we call delete now without any locking on hash/ref_count;
+          we can do that because delete_handler is only entered after
+          the delete timer was installed from wait_handler, which
+          removed transaction from hash table and did not destroy it
+          because some processes were using it; that means that the
+          processes currently using the transaction can unref and no
+          new processes can ref -- we can wait until ref_count is
+          zero safely without locking
+       */
+       delete_cell( p_cell, 0 /* don't unlock on return */ );
+    DBG("DEBUG: delete_handler : done\n");
+}
+
+
+/***********************************************************/
+
+struct timer_table *get_timertable()
+{
+       return timertable;
+}
+
+
+void unlink_timer_lists()
+{
+       struct timer_link  *tl, *end, *tmp;
+       enum lists i;
+
+       /* remember the DELETE LIST */
+       tl = timertable->timers[DELETE_LIST].first_tl.next_tl;
+       end = & timertable->timers[DELETE_LIST].last_tl;
+       /* unlink the timer lists */
+       for( i=0; i<NR_OF_TIMER_LISTS ; i++ )
+               reset_timer_list( i );
+       DBG("DEBUG: tm_shutdown : empting DELETE list\n");
+       /* deletes all cells from DELETE_LIST list 
+          (they are no more accessible from enrys) */
+       while (tl!=end) {
+               tmp=tl->next_tl;
+               free_cell((struct cell*)tl->payload);
+               tl=tmp;
+       }
+       
+}
+
+struct timer_table *tm_init_timers()
+{
+       enum lists i;
+
+       timertable=(struct timer_table *) shm_malloc(sizeof(struct timer_table));
+       if (!timertable) {
+               LOG(L_ERR, "ERROR: tm_init_timers: no shmem for timer_Table\n");
+               goto error0;
+       }
+       memset(timertable, 0, sizeof (struct timer_table));
+               
+
+       /* inits the timers*/
+       for(  i=0 ; i<NR_OF_TIMER_LISTS ; i++ )
+        init_timer_list( i );
+    
+    /* init. timer lists */
+       timertable->timers[RT_T1_TO_1].id = RT_T1_TO_1;
+       timertable->timers[RT_T1_TO_2].id = RT_T1_TO_2;
+       timertable->timers[RT_T1_TO_3].id = RT_T1_TO_3;
+       timertable->timers[RT_T2].id      = RT_T2;
+       timertable->timers[FR_TIMER_LIST].id     = FR_TIMER_LIST; 
+       timertable->timers[FR_INV_TIMER_LIST].id = FR_INV_TIMER_LIST;
+       timertable->timers[WT_TIMER_LIST].id     = WT_TIMER_LIST;
+       timertable->timers[DELETE_LIST].id       = DELETE_LIST;
+
+       return timertable;
+
+error0:
+       return 0;
+}
+
+void free_timer_table()
+{
+       enum lists i;
+
+       if (timertable) {
+               /* the mutexs for sync the lists are released*/
+               for ( i=0 ; i<NR_OF_TIMER_LISTS ; i++ )
+                       release_timerlist_lock( &timertable->timers[i] );
+               shm_free(timertable);
+       }
+               
+}
 
-void reset_timer_list( struct s_table* hash_table, enum lists list_id)
+void reset_timer_list( enum lists list_id)
 {
-       hash_table->timers[list_id].first_tl.next_tl =
-               &(hash_table->timers[list_id].last_tl );
-       hash_table->timers[list_id].last_tl.prev_tl =
-               &(hash_table->timers[list_id].first_tl );
-       hash_table->timers[list_id].first_tl.prev_tl =
-               hash_table->timers[list_id].last_tl.next_tl = NULL;
-       hash_table->timers[list_id].last_tl.time_out = -1;
+       timertable->timers[list_id].first_tl.next_tl =
+               &(timertable->timers[list_id].last_tl );
+       timertable->timers[list_id].last_tl.prev_tl =
+               &(timertable->timers[list_id].first_tl );
+       timertable->timers[list_id].first_tl.prev_tl =
+               timertable->timers[list_id].last_tl.next_tl = NULL;
+       timertable->timers[list_id].last_tl.time_out = -1;
 }
 
 
 
 
-void init_timer_list( struct s_table* hash_table, enum lists list_id)
+void init_timer_list( /* struct s_table* ht, */ enum lists list_id)
 {
-       reset_timer_list( hash_table, list_id );
-       init_timerlist_lock( hash_table, list_id );
+       reset_timer_list( /* ht, */ list_id );
+       init_timerlist_lock( /* ht, */ list_id );
 }
 
 
 
 
-void print_timer_list(struct s_table* hash_table, enum lists list_id)
+void print_timer_list( enum lists list_id)
 {
-       struct timer* timer_list=&(hash_table->timers[ list_id ]);
+       struct timer* timer_list=&(timertable->timers[ list_id ]);
        struct timer_link *tl ;
 
        tl = timer_list->first_tl.next_tl;
@@ -168,8 +559,7 @@ void remove_timer_unsafe(  struct timer_link* tl )
 
 
 
-/* put a new cell into a list nr. list_id within a hash_table;
-   set initial timeout */
+/* put a new cell into a list nr. list_id */
 void add_timer_unsafe( struct timer *timer_list, struct timer_link *tl,
        unsigned int time_out )
 {
@@ -205,10 +595,12 @@ struct timer_link  *check_and_split_time_list( struct timer *timer_list,
 {
        struct timer_link *tl , *end, *ret;
 
+
        /* quick check whether it is worth entering the lock */
-       if (timer_list->first_tl.next_tl==&timer_list->last_tl ||
-               timer_list->first_tl.next_tl->time_out > time )
-                       return NULL;
+       if (timer_list->first_tl.next_tl==&timer_list->last_tl 
+                       || ( /* timer_list->first_tl.next_tl
+                               && */ timer_list->first_tl.next_tl->time_out > time) )
+               return NULL;
 
        /* the entire timer list is locked now -- noone else can manipulate it */
        lock(timer_list->mutex);
@@ -248,8 +640,7 @@ struct timer_link  *check_and_split_time_list( struct timer *timer_list,
 
 
 /* stop timer */
-void reset_timer( struct s_table *hash_table,
-       struct timer_link* tl )
+void reset_timer( struct timer_link* tl )
 {
        /* disqualify this timer from execution by setting its time_out
           to zero; it will stay in timer-list until the timer process
@@ -264,9 +655,9 @@ void reset_timer( struct s_table *hash_table,
 #ifdef _OBSOLETED
        /* lock(timer_group_lock[ tl->tg ]); */
        /* hack to work arround this timer group thing*/
-       lock(hash_table->timers[timer_group[tl->tg]].mutex);
+       lock(hash__XX_table->timers[timer_group[tl->tg]].mutex);
        remove_timer_unsafe( tl );
-       unlock(hash_table->timers[timer_group[tl->tg]].mutex);
+       unlock(hash_XX_table->timers[timer_group[tl->tg]].mutex);
        /*unlock(timer_group_lock[ tl->tg ]);*/
 #endif
 }
@@ -275,8 +666,7 @@ void reset_timer( struct s_table *hash_table,
 
 
 /* determine timer length and put on a correct timer list */
-void set_timer( struct s_table *hash_table,
-       struct timer_link *new_tl, enum lists list_id )
+void set_timer( struct timer_link *new_tl, enum lists list_id )
 {
        unsigned int timeout;
        struct timer* list;
@@ -290,7 +680,7 @@ void set_timer( struct s_table *hash_table,
                return;
        }
        timeout = timer_id2timeout[ list_id ];
-       list= &(hash_table->timers[ list_id ]);
+       list= &(timertable->timers[ list_id ]);
 
        lock(list->mutex);
        /* make sure I'm not already on a list */
@@ -301,8 +691,7 @@ void set_timer( struct s_table *hash_table,
 
 /* similar to set_timer, except it allows only one-time
    timer setting and all later attempts are ignored */
-void set_1timer( struct s_table *hash_table,
-       struct timer_link *new_tl, enum lists list_id )
+void set_1timer( struct timer_link *new_tl, enum lists list_id )
 {
        unsigned int timeout;
        struct timer* list;
@@ -316,7 +705,7 @@ void set_1timer( struct s_table *hash_table,
                return;
        }
        timeout = timer_id2timeout[ list_id ];
-       list= &(hash_table->timers[ list_id ]);
+       list= &(timertable->timers[ list_id ]);
 
        lock(list->mutex);
        if (!(new_tl->time_out>TIMER_DELETED)) {
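
[editor's note] Both reset_timer and set_1timer treat time_out as a small state flag: values at
or below TIMER_DELETED mean "not armed / cancelled", anything above it is a live expiration time,
and that is also what the timer handlers test before firing. A hedged sketch of the convention,
with invented helper names and none of the real functions' list handling or locking:

#define TIMER_DELETED 1         /* values <= this mean "not armed" */

struct tl {
        unsigned int time_out;
};

/* lazy cancel in the spirit of reset_timer: the entry is not unlinked,
   it is only marked; the timer process skips such entries */
static void cancel_tl(struct tl *t)
{
        t->time_out = TIMER_DELETED;
}

/* one-shot arming in the spirit of set_1timer: mirror its guard --
   anything already above TIMER_DELETED counts as "set before" */
static void arm_once(struct tl *t, unsigned int now, unsigned int timeout)
{
        if (t->time_out > TIMER_DELETED)
                return;                 /* ignore later attempts */
        t->time_out = now + timeout;
}

The time_out > TIMER_DELETED guard is exactly what makes later set_1timer attempts no-ops once a
value has been installed.
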
@@ -333,3 +722,119 @@ void set_1timer( struct s_table *hash_table,
        unlock(list->mutex);
 }
 
+
+void unlink_timers( struct cell *t )
+{
+       int i;
+       int remove_fr, remove_retr;
+
+       remove_fr=0; remove_retr=0;
+
+       /* first check whether we need to remove any timers and
+          take the costly locks at all
+
+          note that is_in_timer_list2 is unsafe, but that does not
+          hurt here -- the transaction is already dead (wait state),
+          so no one else will install an FR/RETR timer and timers can
+          only be removed by the timer process itself; it is therefore
+          safe to use it without any protection
+       */
+       if (is_in_timer_list2(&t->uas.response.fr_timer)) remove_fr=1; 
+       else for (i=0; i<t->nr_of_outgoings; i++)
+               if (is_in_timer_list2(&t->uac[i].request.fr_timer)
+                       || is_in_timer_list2(&t->uac[i].local_cancel.fr_timer)) {
+                               remove_fr=1;
+                               break;
+               }
+       if (is_in_timer_list2(&t->uas.response.retr_timer)) remove_retr=1; 
+       else for (i=0; i<t->nr_of_outgoings; i++)
+               if (is_in_timer_list2(&t->uac[i].request.retr_timer)
+                       || is_in_timer_list2(&t->uac[i].local_cancel.retr_timer)) {
+                               remove_retr=1;
+                               break;
+               }
+
+       /* now do the actual removals, under the shared list locks */
+       if (remove_retr) {
+               /* RT_T1 lock is shared by all other RT timer
+                  lists -- we can safely lock just one
+               */
+               lock(timertable->timers[RT_T1_TO_1].mutex);
+               remove_timer_unsafe(&t->uas.response.retr_timer);
+               for (i=0; i<t->nr_of_outgoings; i++) {
+                       remove_timer_unsafe(&t->uac[i].request.retr_timer);
+                       remove_timer_unsafe(&t->uac[i].local_cancel.retr_timer);
+               }
+               unlock(timertable->timers[RT_T1_TO_1].mutex);
+       }
+       if (remove_fr) {
+               /* FR lock is shared by all other FR timer
+                  lists -- we can safely lock just one
+               */
+               lock(timertable->timers[FR_TIMER_LIST].mutex);
+               remove_timer_unsafe(&t->uas.response.fr_timer);
+               for (i=0; i<t->nr_of_outgoings; i++) {
+                       remove_timer_unsafe(&t->uac[i].request.fr_timer);
+                       remove_timer_unsafe(&t->uac[i].local_cancel.fr_timer);
+               }
+               unlock(timertable->timers[FR_TIMER_LIST].mutex);
+       }
+}
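
[editor's note] unlink_timers leans on two properties: the unlocked is_in_timer_list2 checks are
harmless because the transaction is already in the wait state, and one mutex is shared by all
retransmission lists (another by all final-response lists), so a single lock/unlock pair covers
every branch. The sharing can be pictured with this self-contained sketch (simplified types, not
the module's initialisation code):

#include <pthread.h>

#define N_RT_LISTS 4            /* RT_T1_TO_1 .. RT_T2 in the module */

struct rt_list {
        pthread_mutex_t *mutex; /* possibly shared with other lists */
        /* ... entries ... */
};

int main(void)
{
        static pthread_mutex_t rt_mutex = PTHREAD_MUTEX_INITIALIZER;
        struct rt_list rt[N_RT_LISTS];
        int i;

        /* all retransmission lists point at the same mutex ... */
        for (i = 0; i < N_RT_LISTS; i++)
                rt[i].mutex = &rt_mutex;

        /* ... so locking "just one" serializes removals from all of them */
        pthread_mutex_lock(rt[0].mutex);
        /* remove_timer_unsafe() calls for any rt[i] would go here */
        pthread_mutex_unlock(rt[0].mutex);
        return 0;
}
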
+
+
+
+
+#define run_handler_for_each( _tl , _handler ) \
+       while ((_tl))\
+       {\
+               /* reset the timer list linkage */\
+               tmp_tl = (_tl)->next_tl;\
+               (_tl)->next_tl = (_tl)->prev_tl = 0;\
+               DBG("DEBUG: timer routine:%d,tl=%p next=%p\n",\
+                       id,(_tl),tmp_tl);\
+               if ((_tl)->time_out>TIMER_DELETED) \
+                       (_handler)( (_tl)->payload );\
+               (_tl) = tmp_tl;\
+       }
+
+
+
+
+void timer_routine(unsigned int ticks , void * attr)
+{
+       /* struct timer_table *tt= (struct timer_table*)attr; */
+       struct timer_link *tl, *tmp_tl;
+       int                id;
+
+#ifdef BOGDAN_TRIFLE
+       DBG(" %d \n",ticks);
+#endif
+
+       for( id=0 ; id<NR_OF_TIMER_LISTS ; id++ )
+       {
+               /* to spend as little time in the lock as possible, detach the
+                  list of expired items and process it after leaving the lock */
+               tl=check_and_split_time_list( &timertable->timers[ id ], ticks);
+               /* process items now */
+               switch (id)
+               {
+                       case FR_TIMER_LIST:
+                       case FR_INV_TIMER_LIST:
+                               run_handler_for_each(tl,final_response_handler);
+                               break;
+                       case RT_T1_TO_1:
+                       case RT_T1_TO_2:
+                       case RT_T1_TO_3:
+                       case RT_T2:
+                               run_handler_for_each(tl,retransmission_handler);
+                               break;
+                       case WT_TIMER_LIST:
+                               run_handler_for_each(tl,wait_handler);
+                               break;
+                       case DELETE_LIST:
+                               run_handler_for_each(tl,delete_handler);
+                               break;
+               }
+       }
+}
+
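
[editor's note] For readability, this is roughly what run_handler_for_each expands to for one
detached list; the struct and function names are invented and the payload is reduced to a plain
pointer:

#define TIMER_DELETED 1

struct timer_link_s {                      /* simplified stand-in */
        struct timer_link_s *next_tl, *prev_tl;
        unsigned int time_out;
        void *payload;
};

/* what run_handler_for_each(tl, handler) does for one detached list */
static void process_detached(struct timer_link_s *tl, void (*handler)(void *))
{
        struct timer_link_s *tmp_tl;

        while (tl) {
                tmp_tl = tl->next_tl;            /* remember the successor */
                tl->next_tl = tl->prev_tl = 0;   /* reset the list linkage */
                if (tl->time_out > TIMER_DELETED)
                        handler(tl->payload);    /* skip cancelled entries */
                tl = tmp_tl;
        }
}
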
index 66be55c..4025a49 100644 (file)
@@ -5,12 +5,16 @@
 #ifndef _TIMER_H
 #define _TIMER_H
 
+#include "lock.h"
+#include "t_funcs.h"
+
 /* timer timestamp value indicating a timer has been 
    deactivated and shall not be executed
 */
 #define TIMER_DELETED  1
 
 
+#define is_in_timer_list2(_tl) ( (_tl)->timer_list )
 
 /* identifiers of timer lists;*/
 /* fixed-timer retransmission lists (benefit: fixed timer$
@@ -25,19 +29,6 @@ enum lists
        NR_OF_TIMER_LISTS
 };
 
-
-
-#define is_in_timer_list2(_tl) ( (_tl)->timer_list )
-
-extern int timer_group[NR_OF_TIMER_LISTS];
-extern unsigned int timer_id2timeout[NR_OF_TIMER_LISTS];
-
-struct timer;
-
-#include "lock.h"
-#include "t_funcs.h"
-
-
 /* all you need to put a cell on a timer list:
    links to its neighbours and a timer value */
 typedef struct timer_link
@@ -60,22 +51,41 @@ typedef struct  timer
        enum lists         id;
 } timer_type;
 
+/* timer table */
+struct timer_table
+{
+    /* table of timer lists */
+    struct timer   timers[ NR_OF_TIMER_LISTS ];
+};
+
+
+
 
 
-void init_timer_list( struct s_table* hash_table, enum lists list_id);
-void reset_timer_list( struct s_table* hash_table, enum lists list_id);
+extern int timer_group[NR_OF_TIMER_LISTS];
+extern unsigned int timer_id2timeout[NR_OF_TIMER_LISTS];
+
+
+
+struct timer_table * tm_init_timers();
+void unlink_timer_lists();
+void free_timer_table();
+void init_timer_list( enum lists list_id);
+void reset_timer_list( enum lists list_id);
 void remove_timer_unsafe(  struct timer_link* tl ) ;
 void add_timer_unsafe( struct timer*, struct timer_link*, unsigned int);
 struct timer_link  *check_and_split_time_list( struct timer*, int);
 
-void reset_timer( struct s_table *hash_table,
-       struct timer_link* tl );
+void reset_timer( struct timer_link* tl );
 /* determine timer length and put it on the correct timer list */
-void set_timer( struct s_table *hash_table,
-       struct timer_link *new_tl, enum lists list_id );
+void set_timer( struct timer_link *new_tl, enum lists list_id );
 /* similar to set_timer, except it allows only one-time
    timer setting and all later attempts are ignored */
-void set_1timer( struct s_table *hash_table,
-       struct timer_link *new_tl, enum lists list_id );
+void set_1timer( struct timer_link *new_tl, enum lists list_id );
+void unlink_timers( struct cell *t );
+void timer_routine(unsigned int, void*);
+
+
+struct timer_table *get_timertable();
 
 #endif
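
[editor's note] The net effect of the header change is that the former s_table argument
disappears from the whole timer API: the table is module-private state in timer.c and other files
reach it through get_timertable(). Reduced to a self-contained sketch with invented names, the
pattern is:

#include <stdlib.h>

/* module-private table plus accessor, the shape timer.c/timer.h now follow;
   my_table / my_table_t are invented names for the illustration */
typedef struct { int dummy; } my_table_t;

static my_table_t *my_table;            /* file-scope, like timertable */

my_table_t *my_init_table(void)         /* like tm_init_timers() */
{
        my_table = malloc(sizeof(*my_table));
        return my_table;                /* NULL on failure */
}

my_table_t *my_get_table(void)          /* like get_timertable() */
{
        return my_table;
}

Callers therefore no longer have to thread a table pointer through every set_timer() /
reset_timer() call.
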
index e68b9a8..0c97325 100644 (file)
@@ -243,14 +243,28 @@ static int mod_init(void)
                return -1;
        }
 
-       if (tm_startup()==-1) return -1;
+       /* build the hash table */
+       if (!init_hash_table()) {
+               LOG(L_ERR, "ERROR: mod_init: initializing hash_table failed\n");
+               return -1;
+       }
+
+       if (!tm_init_timers()) {
+               LOG(L_ERR, "ERROR: mod_init: timer init failed\n");
+               return -1;
+       }
+
+       /* init static hidden values */
+       init_t();
+
        uac_init();
        register_tmcb( TMCB_ON_NEGATIVE, on_negative_reply, 0 /* empty param */);
     /* register the timer function */
-    register_timer( timer_routine , hash_table , 1 );
+    register_timer( timer_routine , 0 /* empty attr */, 1 );
     /* register post-script clean-up function */
     register_script_cb( w_t_unref, POST_SCRIPT_CB, 0 /* empty param */ );
     register_script_cb( script_init, PRE_SCRIPT_CB , 0 /* empty param */ );
+
        return 0;
 }
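
[editor's note] timer.h above also declares unlink_timer_lists() and free_timer_table(); the
matching shutdown path is not part of this diff, so the sketch below is only an assumption about
how the new initialisation would presumably be undone, in reverse order:

/* hypothetical cleanup counterpart to the mod_init() changes above;
   the function name and the exact ordering are assumptions */
#include "timer.h"

static void tm_shutdown_sketch(void)
{
        unlink_timer_lists();   /* take cells off the timer lists first */
        free_timer_table();     /* then drop the timer table itself     */
        /* the transaction (hash) table is released separately */
}
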
 
index 8a364aa..9ec1284 100644 (file)
@@ -150,7 +150,7 @@ int t_uac( str *msg_type, str *dst,
                && memcmp(msg_type->s, INVITE, INVITE_LEN)==0;
        new_cell->local=1;
        LOCK_HASH(new_cell->hash_index);
-       insert_into_hash_table_unsafe( hash_table , new_cell );
+       insert_into_hash_table_unsafe(  new_cell );
        UNLOCK_HASH(new_cell->hash_index);
 
        request=&new_cell->uac[branch].request;
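
[editor's note] The _unsafe suffix documents a precondition: insert_into_hash_table_unsafe
assumes the caller already holds the lock of the target hash slot, which is why the call is
always bracketed by LOCK_HASH/UNLOCK_HASH. The convention in isolation, as a self-contained
sketch with generic names and a pthread mutex standing in for the module's lock type:

#include <pthread.h>

#define TABLE_ENTRIES 16

struct slot {
        pthread_mutex_t mutex;
        int entries;                    /* stand-in for the cell list */
};

static struct slot table[TABLE_ENTRIES];

static void table_init(void)
{
        int i;
        for (i = 0; i < TABLE_ENTRIES; i++) {
                pthread_mutex_init(&table[i].mutex, NULL);
                table[i].entries = 0;
        }
}

/* the _unsafe suffix: the caller must already hold table[i].mutex */
static void insert_unsafe(int i)
{
        table[i].entries++;
}

/* safe wrapper: the lock/insert/unlock bracket seen in t_uac() above */
static void insert(int i)
{
        pthread_mutex_lock(&table[i].mutex);
        insert_unsafe(i);
        pthread_mutex_unlock(&table[i].mutex);
}
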