0953db6ee350b8c0d09b8d3ee24266363f55f274
[sip-router] / modules / tm / timer.c
1 /*
2  * $Id$
3  *
4  *
5  * Copyright (C) 2001-2003 FhG Fokus
6  *
7  * This file is part of ser, a free SIP server.
8  *
9  * ser is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version
13  *
14  * For a license to use the ser software under conditions
15  * other than those described here, or to purchase support for this
16  * software, please contact iptel.org by e-mail at the following addresses:
17  *    info@iptel.org
18  *
19  * ser is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22  * GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License 
25  * along with this program; if not, write to the Free Software 
26  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
27  */
28
29
30 /* 
31   timer.c is where we implement TM timers. It has been designed
32   for high performance using some techniques of which timer users
33   need to be aware.
34
35         One technique is "fixed-timer-length". We maintain separate 
36         timer lists, all of them include elements of the same time
37         to fire. That allows *appending* new events to the list as
38         opposed to inserting them by time, which is costly due to
39         searching time spent in a mutex. The performance benefit is
40         noticeable. The limitation is you need a new timer list for
41         each new timer length.
42
43         Another technique is the timer process slices off expired elements
44         from the list in a mutex, but executes the timer after the mutex
45         is left. That saves time greatly as whichever process wants to
46         add/remove a timer, it does not have to wait until the current
47         list is processed. However, be aware the timers may hit in a delayed
48         manner; you have no guarantee in your process that after resetting a timer, 
49         it will no more hit. It might have been removed by timer process,
50     and is waiting to be executed.  The following example shows it:
51
52                         PROCESS1                                TIMER PROCESS
53
54         0.                                                              timer hits, it is removed from queue and
55                                                                         about to be executed
56         1.      process1 decides to
57                 reset the timer 
58         2.                                                              timer is executed now
59         3.      if the process1 naively
60                 thinks the timer could not 
61                 have been executed after 
62                 resetting the timer, it is
63                 WRONG -- it was (step 2.)
64
65         So be careful when writing the timer handlers. Currently defined timers 
66         don't hurt if they hit delayed, I hope at least. Retransmission timer 
67         may result in a useless retransmission -- not too bad. FR timer not too
68         bad either as timer processing uses a REPLY mutex making it safe to other
69         processing affecting transaction state. Wait timer not bad either -- processes
70         putting a transaction on wait don't do anything with it anymore.
71
72                 Example when it does not hurt:
73
74                         P1                                              TIMER
75         0.                                                              RETR timer removed from list and
76                                                                         scheduled for execution
77         1. 200/BYE received->
78            reset RETR, put_on_wait
79         2.                                                              RETR timer executed -- too late but it does
80                                                                         not hurt
81         3.                                                              WAIT handler executed
82
83         The rule of thumb is don't touch data you put under a timer. Create data,
84     put them under a timer, and let them live until they are safely destroyed from
85     wait/delete timer.  The only safe place to manipulate the data is 
86     from timer process in which delayed timers cannot hit (all timers are
87     processed sequentially).
88
89         A "bad example" -- rewriting content of retransmission buffer
90         in an unprotected way is bad because a delayed retransmission timer might 
91         hit. That's why our reply retransmission procedure is enclosed in 
92         a REPLY_LOCK.
93
94 */
95 /*
96  * History:
97  * --------
98  *  2003-06-27  timers are not unlinked if timerlist is 0 (andrei)
99  *  2004-02-13  t->is_invite, t->local, t->noisy_ctimer replaced;
100  *              timer_link.payload removed (bogdan)
101  *  2005-10-03  almost completely rewritten to use the new timers (andrei)
102  *  2005-12-12  on final response marked the rb as removed to avoid deleting
103  *              it from the timer handle; timer_allow_del()  (andrei)
104  *  2006-08-11  final_response_handler dns failover support for timeout-ed
105  *              invites (andrei)
106  *  2006-09-28  removed the 480 on fr_inv_timeout reply: on timeout always 
107  *               return a 408
108  *              set the corresponding "faked" failure route sip_msg->msg_flags 
109  *               on timeout or if the branch received a reply (andrei)
110  *  2007-03-15  TMCB_ONSEND callbacks support (andrei)
111  *  2007-05-29  delete on transaction ref_count==0 : removed the delete timer
112  *               (andrei)
113  * 2007-06-01  support for different retransmissions intervals per transaction;
114  *             added maximum inv. and non-inv. transaction life time (andrei)
115  */
116
117 #include "defs.h"
118
119
120
121 #include "config.h"
122 #include "h_table.h"
123 #include "timer.h"
124 #include "../../dprint.h"
125 #include "lock.h"
126 #include "t_stats.h"
127
128 #include "../../hash_func.h"
129 #include "../../dprint.h"
130 #include "../../config.h"
131 #include "../../parser/parser_f.h"
132 #include "../../ut.h"
133 #include "../../timer_ticks.h"
134 #include "../../compiler_opt.h"
135 #include "t_funcs.h"
136 #include "t_reply.h"
137 #include "t_cancel.h"
138 #include "t_hooks.h"
139 #ifdef USE_DNS_FAILOVER
140 #include "t_fwd.h" /* t_send_branch */
141 #endif
142 #ifdef USE_DST_BLACKLIST
143 #include "../../dst_blacklist.h"
144 #endif
145
146
147
/* if 1 (the default), a timed-out transaction always generates a 408
 * towards the UAC; if 0, a transaction may be silently dropped on timeout
 * when all the conditions checked in final_response_handler() hold
 * (invite, no reply received, no failure callbacks a.s.o.) */
int noisy_ctimer=1;

/* per-message ("user") overrides for the global timeout defaults below;
 * zeroed at startup by tm_init_timers() */
struct msgid_var user_fr_timeout;
struct msgid_var user_fr_inv_timeout;
#ifdef TM_DIFF_RT_TIMEOUT
struct msgid_var user_rt_t1_timeout;
struct msgid_var user_rt_t2_timeout;
#endif
struct msgid_var user_inv_max_lifetime;
struct msgid_var user_noninv_max_lifetime;

/* default values of timeouts for all the timer lists; configured in ms
 * and converted in-place to internal ticks by tm_init_timers() */

ticks_t fr_timeout		=	FR_TIME_OUT;     /* final response timeout */
ticks_t fr_inv_timeout	=	INV_FR_TIME_OUT; /* INVITE final response timeout */
ticks_t wait_timeout	=	WT_TIME_OUT;     /* wait-state duration */
ticks_t delete_timeout	=	DEL_TIME_OUT;    /* delayed-delete retry interval */
ticks_t rt_t1_timeout	=	RETR_T1;         /* initial retransmission interval */
ticks_t rt_t2_timeout	=	RETR_T2;         /* maximum retransmission interval */

/* maximum time an invite or noninv transaction will live, from
 * the moment of creation (overrides larger fr/fr_inv timeouts,
 * extensions due to dns failover, fr_inv restart a.s.o)
 * Note: after this time the transaction will not be deleted
 *  immediately, but forced to go in the wait state or in wait for ack state 
 *  and then wait state, so it will still be alive for either wait_timeout in 
 *  the non-inv or "silent" inv. case and for fr_timeout + wait_timeout for an
 *  invite transaction (for which  we must wait for the neg. reply ack)
 */
ticks_t tm_max_inv_lifetime		=	MAX_INV_LIFETIME;
ticks_t tm_max_noninv_lifetime	=	MAX_NONINV_LIFETIME;
180
/* Internal use only: startup sanity check that the (unsigned or positive)
 * timeout `val` fits into the possibly narrower transaction member
 * `cell_member`; on failure it logs the offending value (both in ms and in
 * ticks) under the config name `cfg_name` and jumps to the enclosing
 * function's `error` label.
 *  <= instead of < to get rid of gcc warning when 
 *  sizeof(cell_member)==sizeof(val) (Note that this limits
 *  maximum value to max. type -1) */
#define SIZE_FIT_CHECK(cell_member, val, cfg_name) \
	if (MAX_UVAR_VALUE(((struct cell*)0)->cell_member) <= (val)){ \
		ERR("tm_init_timers: " cfg_name " too big: %lu (%lu ticks) " \
				"- max %lu (%lu ticks) \n", TICKS_TO_MS((unsigned long)(val)),\
				(unsigned long)(val), \
				TICKS_TO_MS(MAX_UVAR_VALUE(((struct cell*)0)->cell_member)), \
				MAX_UVAR_VALUE(((struct cell*)0)->cell_member)); \
		goto error; \
	} 
194
195 /* fix timer values to ticks */
196 int tm_init_timers()
197 {
198         fr_timeout=MS_TO_TICKS(fr_timeout); 
199         fr_inv_timeout=MS_TO_TICKS(fr_inv_timeout);
200         wait_timeout=MS_TO_TICKS(wait_timeout);
201         delete_timeout=MS_TO_TICKS(delete_timeout);
202         rt_t1_timeout=MS_TO_TICKS(rt_t1_timeout);
203         rt_t2_timeout=MS_TO_TICKS(rt_t2_timeout);
204         tm_max_inv_lifetime=MS_TO_TICKS(tm_max_inv_lifetime);
205         tm_max_noninv_lifetime=MS_TO_TICKS(tm_max_noninv_lifetime);
206         /* fix 0 values to 1 tick (minimum possible wait time ) */
207         if (fr_timeout==0) fr_timeout=1;
208         if (fr_inv_timeout==0) fr_inv_timeout=1;
209         if (wait_timeout==0) wait_timeout=1;
210         if (delete_timeout==0) delete_timeout=1;
211         if (rt_t2_timeout==0) rt_t2_timeout=1;
212         if (rt_t1_timeout==0) rt_t1_timeout=1;
213         if (tm_max_inv_lifetime==0) tm_max_inv_lifetime=1;
214         if (tm_max_noninv_lifetime==0) tm_max_noninv_lifetime=1;
215         
216         /* size fit checks */
217         SIZE_FIT_CHECK(fr_timeout, fr_timeout, "fr_timer");
218         SIZE_FIT_CHECK(fr_inv_timeout, fr_inv_timeout, "fr_inv_timer");
219 #ifdef TM_DIFF_RT_TIMEOUT
220         SIZE_FIT_CHECK(rt_t1_timeout, rt_t1_timeout, "retr_timer1");
221         SIZE_FIT_CHECK(rt_t2_timeout, rt_t2_timeout, "retr_timer2");
222 #endif
223         SIZE_FIT_CHECK(end_of_life, tm_max_inv_lifetime, "max_inv_lifetime");
224         SIZE_FIT_CHECK(end_of_life, tm_max_noninv_lifetime, "max_noninv_lifetime");
225         
226         memset(&user_fr_timeout, 0, sizeof(user_fr_timeout));
227         memset(&user_fr_inv_timeout, 0, sizeof(user_fr_inv_timeout));
228 #ifdef TM_DIFF_RT_TIMEOUT
229         memset(&user_rt_t1_timeout, 0, sizeof(user_rt_t1_timeout));
230         memset(&user_rt_t2_timeout, 0, sizeof(user_rt_t2_timeout));
231 #endif
232         memset(&user_inv_max_lifetime, 0, sizeof(user_inv_max_lifetime));
233         memset(&user_noninv_max_lifetime, 0, sizeof(user_noninv_max_lifetime));
234         
235         DBG("tm: tm_init_timers: fr=%d fr_inv=%d wait=%d delete=%d t1=%d t2=%d"
236                         " max_inv_lifetime=%d max_noninv_lifetime=%d\n",
237                         fr_timeout, fr_inv_timeout, wait_timeout, delete_timeout,
238                         rt_t1_timeout, rt_t2_timeout, tm_max_inv_lifetime,
239                         tm_max_noninv_lifetime);
240         return 0;
241 error:
242         return -1;
243 }
244
245 /******************** handlers ***************************/
246
247
#ifndef TM_DEL_UNREF
/* Tries to destroy a transaction cell; only compiled in the old,
 * delete-timer based cleanup mode (TM_DEL_UNREF not defined).
 * If `unlock` is non-zero the caller holds the hash lock for
 * p_cell->hash_index and it is released here on every path.
 * Returns the number of ticks before retrying the del, or 0 if the del.
 * was successful (the cell was freed). */
inline static ticks_t  delete_cell( struct cell *p_cell, int unlock )
{
	/* there may still be FR/RETR timers, which have been reset
	   (i.e., time_out==TIMER_DELETED) but are still linked to
	   timer lists and must be removed from there before the
	   structures are released
	*/
	unlink_timers( p_cell );
	/* still in use ... don't delete */
	if ( IS_REFFED_UNSAFE(p_cell) ) {
		if (unlock) UNLOCK_HASH(p_cell->hash_index);
		DBG("DEBUG: delete_cell %p: can't delete -- still reffed (%d)\n",
				p_cell, p_cell->ref_count);
		/* delay the delete: ask the timer to retry after delete_timeout */
		/* TODO: change refcnts and delete on refcnt==0 */
		return delete_timeout;
	} else {
		if (unlock) UNLOCK_HASH(p_cell->hash_index);
#ifdef EXTRA_DEBUG
		DBG("DEBUG: delete transaction %p\n", p_cell );
#endif
		free_cell( p_cell );
		return 0;
	}
}
#endif /* TM_DEL_UNREF */
277
278
279
280
281 /* generate a fake reply
282  * it assumes the REPLY_LOCK is already held and returns unlocked */
283 static void fake_reply(struct cell *t, int branch, int code )
284 {
285         branch_bm_t cancel_bitmap;
286         short do_cancel_branch;
287         enum rps reply_status;
288
289         do_cancel_branch = is_invite(t) && should_cancel_branch(t, branch, 0);
290         /* mark branch as canceled */
291         t->uac[branch].request.flags|=F_RB_CANCELED;
292         if ( is_local(t) ) {
293                 reply_status=local_reply( t, FAKED_REPLY, branch, 
294                                           code, &cancel_bitmap );
295                 if (reply_status == RPS_COMPLETED) {
296                         put_on_wait(t);
297                 }
298         } else {
299                 reply_status=relay_reply( t, FAKED_REPLY, branch, code,
300                                           &cancel_bitmap );
301
302 #if 0
303                 if (reply_status==RPS_COMPLETED) {
304                              /* don't need to cleanup uac_timers -- they were cleaned
305                                 branch by branch and this last branch's timers are
306                                 reset now too
307                              */
308                              /* don't need to issue cancels -- local cancels have been
309                                 issued branch by branch and this last branch was
310                                 canceled now too
311                              */
312                              /* then the only thing to do now is to put the transaction
313                                 on FR/wait state 
314                              */
315                              /*
316                                set_final_timer(  t );
317                              */
318                 }
319 #endif
320
321         }
322         /* now when out-of-lock do the cancel I/O */
323         if (do_cancel_branch) cancel_branch(t, branch, 0);
324         /* it's cleaned up on error; if no error occurred and transaction
325            completed regularly, I have to clean-up myself
326         */
327 }
328
329
330
331 /* return (ticks_t)-1 on error/disable and 0 on success */
332 inline static ticks_t retransmission_handler( struct retr_buf *r_buf )
333 {
334 #ifdef EXTRA_DEBUG
335         if (r_buf->my_T->flags & T_IN_AGONY) {
336                 LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
337                         " called from RETR timer (flags %x)\n",
338                         r_buf->my_T, r_buf->my_T->flags );
339                 abort();
340         }       
341 #endif
342         if ( r_buf->activ_type==TYPE_LOCAL_CANCEL 
343                 || r_buf->activ_type==TYPE_REQUEST ) {
344 #ifdef EXTRA_DEBUG
345                         DBG("DEBUG: retransmission_handler : "
346                                 "request resending (t=%p, %.9s ... )\n", 
347                                 r_buf->my_T, r_buf->buffer);
348 #endif
349                         if (SEND_BUFFER( r_buf )==-1) {
350                                 /* disable retr. timers => return -1 */
351                                 fake_reply(r_buf->my_T, r_buf->branch, 503 );
352                                 return (ticks_t)-1;
353                         }
354 #ifdef TMCB_ONSEND
355                         if (unlikely(has_tran_tmcbs(r_buf->my_T, TMCB_REQUEST_SENT))) 
356                                 run_onsend_callbacks(TMCB_REQUEST_SENT, r_buf, 
357                                                                                 0, 0, TMCB_RETR_F);
358 #endif
359         } else {
360 #ifdef EXTRA_DEBUG
361                         DBG("DEBUG: retransmission_handler : "
362                                 "reply resending (t=%p, %.9s ... )\n", 
363                                 r_buf->my_T, r_buf->buffer);
364 #endif
365                         t_retransmit_reply(r_buf->my_T);
366         }
367         
368         return 0;
369 }
370
371
372
/* Final response (FR/FR_INV) timer body, called from retr_buf_handler()
 * when a branch's fr_expire passed without a final reply: either silently
 * drops the transaction, or (after optionally blacklisting the destination
 * and trying DNS failover) fakes a 408 on the branch.
 * Takes the REPLIES lock itself; fake_reply() returns with it unlocked. */
inline static void final_response_handler(	struct retr_buf* r_buf,
											struct cell* t)
{
	int silent;
#ifdef USE_DNS_FAILOVER
	/*int i; 
	int added_branches;
	*/
	int branch_ret;
	int prev_branch;
	ticks_t now;
#endif

#	ifdef EXTRA_DEBUG
	if (t->flags & T_IN_AGONY) 
	{
		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
			" called from FR timer (flags %x)\n", t, t->flags);
		abort();
	}
#	endif
	/* FR for local cancels.... */
	if (r_buf->activ_type==TYPE_LOCAL_CANCEL)
	{
#ifdef TIMER_DEBUG
		DBG("DEBUG: final_response_handler: stop retr for Local Cancel\n");
#endif
		return;
	}
	/* FR for replies (negative INVITE replies) */
	if (r_buf->activ_type>0) {
#		ifdef EXTRA_DEBUG
		if (t->uas.request->REQ_METHOD!=METHOD_INVITE
			|| t->uas.status < 200 ) {
			LOG(L_CRIT, "BUG: final_response_handler: unknown type reply"
					" buffer\n");
			abort();
		}
#		endif
		put_on_wait( t );
		return;
	};

	/* lock reply processing to determine how to proceed reliably */
	LOCK_REPLIES( t );
	/* now it can be only a request retransmission buffer;
	   try if you can simply discard the local transaction 
	   state without compellingly removing it from the
	   world */
	silent=
		/* don't go silent if disallowed globally ... */
		noisy_ctimer==0
		/* ... or for this particular transaction */
		&& has_noisy_ctimer(t) == 0
		/* not for UACs */
		&& !is_local(t)
		/* invites only */
		&& is_invite(t)
		/* parallel forking does not allow silent state discarding */
		&& t->nr_of_outgoings==1
		/* on_negativ reply handler not installed -- serial forking 
		 * could occur otherwise */
		&& t->on_negative==0
		/* the same for FAILURE callbacks */
		&& !has_tran_tmcbs( t, TMCB_ON_FAILURE_RO|TMCB_ON_FAILURE) 
		/* something received -- we will not be silent on error */
		&& t->uac[r_buf->branch].last_received==0;
	
	if (silent) {
		/* discard the transaction state without sending anything back */
		UNLOCK_REPLIES(t);
#ifdef EXTRA_DEBUG
		DBG("DEBUG: final_response_handler: transaction silently dropped (%p)"
				", branch %d, last_received %d\n",t, r_buf->branch,
				 t->uac[r_buf->branch].last_received);
#endif
		put_on_wait( t );
		return;
	}
#ifdef EXTRA_DEBUG
	DBG("DEBUG: final_response_handler:stop retr. and send CANCEL (%p)\n", t);
#endif
	if ((r_buf->branch < MAX_BRANCHES) && /* r_buf->branch is always >=0 */
			(t->uac[r_buf->branch].last_received==0)){
		/* no reply received */
#ifdef USE_DST_BLACKLIST
		/* blacklist the unresponsive destination (if enabled and the
		 * request method is in tm_blst_methods_add) */
		if (use_dst_blacklist
			&& r_buf->my_T
			&& r_buf->my_T->uas.request
			&& (r_buf->my_T->uas.request->REQ_METHOD & tm_blst_methods_add)
		)
			dst_blacklist_add( BLST_ERR_TIMEOUT, &r_buf->dst,
						r_buf->my_T->uas.request);
#endif
#ifdef USE_DNS_FAILOVER
		/* if this is an invite, the destination resolves to more ips, and
		 *  it still hasn't passed more than fr_inv_timeout since we
		 *  started, add another branch/uac */
		if (use_dns_failover){
			now=get_ticks_raw();
			/* overflow-safe "transaction still within its lifetime" check */
			if ((s_ticks_t)(t->end_of_life-now)>0){
				branch_ret=add_uac_dns_fallback(t, t->uas.request,
													&t->uac[r_buf->branch], 0);
				prev_branch=-1;
				/* send on every newly added branch; t_send_branch() may
				 * itself add further fallback branches (hence the loop) */
				while((branch_ret>=0) &&(branch_ret!=prev_branch)){
					prev_branch=branch_ret;
					branch_ret=t_send_branch(t, branch_ret, t->uas.request , 
												0, 0);
				}
			}
		}
#endif
	}
	/* fake a 408 timeout reply on this branch (unlocks REPLIES) */
	fake_reply(t, r_buf->branch, 408);
}
487
488
489
/* handles retransmissions and fr timers */
/* the following assumption are made (to avoid deleting/re-adding the timer):
 *  retr_buf->retr_interval < ( 1<<((sizeof(ticks_t)*8-1) )
 *  if retr_buf->retr_interval==0 => timer disabled
 *                            ==(ticks_t) -1 => retr. disabled (fr working)
 *     retr_buf->retr_interval & (1 <<(sizeof(ticks_t)*8-1) => retr. & fr reset
 *     (we never reset only retr, it's either reset both of them or retr 
 *      disabled & reset fr). In this case the fr_origin will contain the 
 *      "time" of the reset and next retr should occur at 
 *      fr->origin+retr_interval (we also assume that we'll never reset retr
 *      to a lower value then the current one)
 *
 * Timer callback: `tl` is the timer link embedded in a struct retr_buf;
 * `p` (== tl->data) carries the *next* retransmission interval in ticks.
 * Returning 0 stops the timer; any other value is the number of ticks
 * until the next firing.
 */
ticks_t retr_buf_handler(ticks_t ticks, struct timer_ln* tl, void *p)
{
	struct retr_buf* rbuf ;
	ticks_t fr_remainder;
	ticks_t retr_remainder;
	ticks_t retr_interval;
	ticks_t new_retr_interval;
	struct cell *t;

	/* recover the enclosing retr_buf from the embedded timer link
	 * (container_of-style pointer arithmetic) */
	rbuf=(struct  retr_buf*)
			((void*)tl-(void*)(&((struct retr_buf*)0)->timer));
	t=rbuf->my_T;
	
#ifdef TIMER_DEBUG
	DBG("tm: timer retr_buf_handler @%d (%p -> %p -> %p)\n",
			ticks, tl, rbuf, t);
#endif
	/* overflow safe check (should work ok for fr_intervals < max ticks_t/2) */
	if ((s_ticks_t)(rbuf->fr_expire-ticks)<=0){
		/* final response */
		rbuf->t_active=0; /* mark the timer as removed 
							 (both timers disabled)
							  a little race risk, but
							  nothing bad would happen */
		rbuf->flags|=F_RB_TIMEOUT;
		timer_allow_del(); /* [optional] allow timer_dels, since we're done
							  and there is no race risk */
		final_response_handler(rbuf, t);
		return 0;
	}else{
		/*  4 possible states running (t1), t2, paused, disabled */
			if ((s_ticks_t)(rbuf->retr_expire-ticks)<=0){
				if (rbuf->flags & F_RB_RETR_DISABLED)
					goto disabled;
				/* retr_interval= min (2*ri, rt_t2) , *p==2*ri*/
				/* no branch version: 
					#idef CC_SIGNED_RIGHT_SHIFT
						ri=  rt_t2+((2*ri-rt_t2) & 
						((signed)(2*ri-rt_t2)>>(sizeof(ticks_t)*8-1));
					#else
						ri=rt_t2+((2*ri-rt_t2)& -(2*ri<rt_t2));
					#endif
				*/
				
				/* get the  current interval from timer param.; once T2 is
				 * reached (F_RB_T2 or interval would exceed T2) the interval
				 * stops doubling and stays capped at T2 */
				if ((rbuf->flags & F_RB_T2) || 
						(((ticks_t)(unsigned long)p)>RT_T2_TIMEOUT(rbuf))){
					retr_interval=RT_T2_TIMEOUT(rbuf);
					new_retr_interval=RT_T2_TIMEOUT(rbuf);
				}else{
					retr_interval=(ticks_t)(unsigned long)p;
					new_retr_interval=retr_interval<<1;
				}
#ifdef TIMER_DEBUG
				DBG("tm: timer: retr: new interval %d (max %d)\n", 
						retr_interval, RT_T2_TIMEOUT(rbuf));
#endif
				/* we could race with the reply_received code, but the 
				 * worst thing that can happen is to delay a reset_to_t2
				 * for crt_interval and send an extra retr.*/
				rbuf->retr_expire=ticks+retr_interval;
				/* set new interval to -1 on error, or retr_int. on success */
				retr_remainder=retransmission_handler(rbuf) | retr_interval;
				/* store the next retr. interval inside the timer struct,
				 * in the data member */
				tl->data=(void*)(unsigned long)(new_retr_interval);
			}else{
				/* retr. not due yet: just sleep until it is */
				retr_remainder= rbuf->retr_expire-ticks;
				DBG("tm: timer: retr: nothing to do, expire in %d\n", 
						retr_remainder);
			}
	}
/* skip: */
	/* return minimum of the next retransmission handler and the 
	 * final response (side benefit: it properly cancels timer if ret==0 and
	 *  sleeps for fr_remainder if retr. is canceled [==(ticks_t)-1]) */
	fr_remainder=rbuf->fr_expire-ticks; /* to be more precise use
											get_ticks_raw() instead of ticks
											(but make sure that 
											crt. ticks < fr_expire */
#ifdef TIMER_DEBUG
	DBG("tm: timer retr_buf_handler @%d (%p ->%p->%p) exiting min (%d, %d)\n",
			ticks, tl, rbuf, t, retr_remainder, fr_remainder);
#endif
#ifdef EXTRA_DEBUG
	if  (retr_remainder==0 || fr_remainder==0){
		BUG("tm: timer retr_buf_handler: 0 remainder => disabling timer!: "
				"retr_remainder=%d, fr_remainder=%d\n", retr_remainder,
				fr_remainder);
	}
#endif
	if (retr_remainder<fr_remainder)
		return retr_remainder;
	else{
		/* hack to switch to the slow timer */
#ifdef TM_FAST_RETR_TIMER
		tl->flags&=~F_TIMER_FAST;
#endif
		return fr_remainder;
	}
disabled:
	/* retransmissions disabled: only the FR timer keeps running */
	return rbuf->fr_expire-ticks;
}
605
606
607
/* WAIT timer handler: fires when the transaction's wait period is over.
 * `data` is the transaction (struct cell*).
 * In TM_DEL_UNREF mode it removes the cell from the hash table and drops
 * the timer's reference (freeing on refcount 0) -- returns 0 (timer done).
 * In the old mode it removes the cell and tries delete_cell(), returning
 * either 0 (freed) or the number of ticks until the delete retry. */
ticks_t wait_handler(ticks_t ti, struct timer_ln *wait_tl, void* data)
{
	struct cell *p_cell;
	ticks_t ret;

	p_cell=(struct cell*)data;
#ifdef TIMER_DEBUG
	DBG("DEBUG: WAIT timer hit @%d for %p (timer_lm %p)\n", 
			ti, p_cell, wait_tl);
#endif

#ifdef TM_DEL_UNREF
	/* stop cancel timers if any running */
	if ( is_invite(p_cell) ) cleanup_localcancel_timers( p_cell );
	/* remove the cell from the hash table */
	LOCK_HASH( p_cell->hash_index );
	remove_from_hash_table_unsafe(  p_cell );
	UNLOCK_HASH( p_cell->hash_index );
	p_cell->flags |= T_IN_AGONY;
	/* drop the timer's reference; the cell is actually freed when the
	 * last reference goes away */
	UNREF_FREE(p_cell);
	ret=0;
#else /* TM_DEL_UNREF */
	if (p_cell->flags & T_IN_AGONY){
		/* delayed delete */
		/* we call delete now without any locking on hash/ref_count;
		   we can do that because delete_handler is only entered after
		   the delete timer was installed from wait_handler, which
		   removed transaction from hash table and did not destroy it
		   because some processes were using it; that means that the
		   processes currently using the transaction can unref and no
		   new processes can ref -- we can wait until ref_count is
		   zero safely without locking
		*/
		ret=delete_cell( p_cell, 0 /* don't unlock on return */ );
	}else {
		/* stop cancel timers if any running */
		if ( is_invite(p_cell) ) cleanup_localcancel_timers( p_cell );
		/* remove the cell from the hash table */
		LOCK_HASH( p_cell->hash_index );
		remove_from_hash_table_unsafe(  p_cell );
		p_cell->flags |= T_IN_AGONY;
		/* delete (returns with UNLOCK-ed_HASH) */
		ret=delete_cell( p_cell, 1 /* unlock on return */ );
	}
#endif /* TM_DEL_UNREF */
	return ret;
}
655