tm: fix possible uninit. use of cancel_reason
modules/tm/timer.c
/*
 * $Id$
 *
 *
 * Copyright (C) 2001-2003 FhG Fokus
 *
 * This file is part of ser, a free SIP server.
 *
 * ser is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version
 *
 * For a license to use the ser software under conditions
 * other than those described here, or to purchase support for this
 * software, please contact iptel.org by e-mail at the following addresses:
 *    info@iptel.org
 *
 * ser is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */


/*
  timer.c is where we implement TM timers. It has been designed
  for high performance using some techniques of which timer users
  need to be aware.

	One technique is "fixed-timer-length". We maintain separate
	timer lists, all of which contain elements with the same time
	to fire. That allows *appending* new events to a list, as
	opposed to inserting them by time, which is costly because of
	the search time spent while holding a mutex. The performance
	benefit is noticeable. The limitation is that you need a new
	timer list for each new timer length.

	Another technique is that the timer process slices off expired
	elements from the list while holding a mutex, but executes the
	timer handlers after the mutex is released. That saves a lot of
	time, because a process that wants to add/remove a timer does not
	have to wait until the current list is processed. However, be
	aware that timers may fire with a delay; you have no guarantee in
	your process that a timer will not fire after you have reset it.
	It might already have been removed by the timer process and be
	waiting to be executed. The following example shows it:

			PROCESS1			TIMER PROCESS

	0.					timer hits, it is removed from the queue
						and is about to be executed
	1.	process1 decides to
		reset the timer
	2.					timer is executed now
	3.	if process1 naively
		thinks the timer could not
		have been executed after
		resetting the timer, it is
		WRONG -- it was (step 2.)

	So be careful when writing the timer handlers. The currently defined
	timers don't hurt if they fire delayed, I hope at least. The
	retransmission timer may result in a useless retransmission -- not
	too bad. The FR timer is not too bad either, as timer processing uses
	the REPLY mutex, which makes it safe with respect to other processing
	that affects transaction state. The wait timer is not bad either --
	processes putting a transaction on wait don't do anything with it
	anymore.

		Example when it does not hurt:

			P1				TIMER
	0.					RETR timer removed from list and
						scheduled for execution
	1. 200/BYE received->
	   reset RETR, put_on_wait
	2.					RETR timer executed -- too late but it
						does not hurt
	3.					WAIT handler executed

	The rule of thumb is: don't touch data you have put under a timer.
	Create the data, put it under a timer, and let it live until it is
	safely destroyed by the wait/delete timer. The only safe place to
	manipulate the data is from the timer process, in which delayed
	timers cannot hit (all timers are processed sequentially).

	A "bad example" -- rewriting the content of a retransmission buffer
	in an unprotected way is bad, because a delayed retransmission timer
	might hit. That's why our reply retransmission procedure is enclosed
	in a REPLY_LOCK.

*/
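/*
 * Illustrative sketch only, kept under #if 0 so it is never compiled: a
 * minimal fixed-interval timer list in the spirit of the two techniques
 * described above. All names below (demo_timer, demo_tlist, demo_tlist_add,
 * demo_tlist_run) are hypothetical and do not exist in this module; the real
 * timer lists live in the core timer code. Because every entry on a list
 * expires after the same interval, adding a timer is an O(1) append under
 * the lock, and the expired prefix is sliced off inside the lock while the
 * handlers run after the lock has been released.
 */
#if 0
struct demo_timer{
	struct demo_timer* next;
	ticks_t expire;               /* absolute expiration time */
	void (*handler)(void* param);
	void* param;
};

struct demo_tlist{
	struct demo_timer* first;
	struct demo_timer* last;
	ticks_t interval;             /* fixed timeout of every entry in this list */
	gen_lock_t lock;
};

/* O(1) append: the list stays ordered by expire because the interval is
 * the same for all entries */
static void demo_tlist_add(struct demo_tlist* l, struct demo_timer* t,
							ticks_t now)
{
	t->expire=now+l->interval;
	t->next=0;
	lock_get(&l->lock);
	if (l->last) l->last->next=t;
	else l->first=t;
	l->last=t;
	lock_release(&l->lock);
}

static void demo_tlist_run(struct demo_tlist* l, ticks_t now)
{
	struct demo_timer* expired;
	struct demo_timer* prev;
	struct demo_timer* t;

	/* slice off the expired prefix while holding the lock */
	lock_get(&l->lock);
	expired=l->first;
	prev=0;
	while(l->first && ((s_ticks_t)(l->first->expire-now)<=0)){
		prev=l->first;
		l->first=l->first->next;
	}
	if (prev==0)
		expired=0;            /* nothing has expired yet */
	else
		prev->next=0;         /* detach the expired sublist */
	if (l->first==0) l->last=0;
	lock_release(&l->lock);
	/* run the handlers only after the lock has been released; this is why
	 * a handler may still fire "late", after its owner has already reset
	 * or removed the timer */
	while(expired){
		t=expired;
		expired=expired->next;
		t->handler(t->param);
	}
}
#endif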
/*
 * History:
 * --------
 *  2003-06-27  timers are not unlinked if timerlist is 0 (andrei)
 *  2004-02-13  t->is_invite, t->local, t->noisy_ctimer replaced;
 *              timer_link.payload removed (bogdan)
 *  2005-10-03  almost completely rewritten to use the new timers (andrei)
 *  2005-12-12  on final response marked the rb as removed to avoid deleting
 *              it from the timer handle; timer_allow_del() (andrei)
 *  2006-08-11  final_response_handler dns failover support for timed-out
 *              invites (andrei)
 *  2006-09-28  removed the 480 on fr_inv_timeout reply: on timeout always
 *               return a 408
 *              set the corresponding "faked" failure route sip_msg->msg_flags
 *               on timeout or if the branch received a reply (andrei)
 *  2007-03-15  TMCB_ONSEND callbacks support (andrei)
 *  2007-05-29  delete on transaction ref_count==0: removed the delete timer
 *               (andrei)
 *  2007-06-01  support for different retransmission intervals per transaction;
 *              added maximum inv. and non-inv. transaction life time (andrei)
 */

#include "defs.h"



#include "config.h"
#include "h_table.h"
#include "timer.h"
#include "../../dprint.h"
#include "lock.h"
#include "t_stats.h"

#include "../../hash_func.h"
#include "../../dprint.h"
#include "../../config.h"
#include "../../parser/parser_f.h"
#include "../../ut.h"
#include "../../timer_ticks.h"
#include "../../compiler_opt.h"
#include "../../sr_compat.h"
#include "t_funcs.h"
#include "t_reply.h"
#include "t_cancel.h"
#include "t_hooks.h"
#ifdef USE_DNS_FAILOVER
#include "t_fwd.h" /* t_send_branch */
#include "../../cfg_core.h" /* cfg_get(core, core_cfg, use_dns_failover) */
#endif
#ifdef USE_DST_BLACKLIST
#include "../../dst_blacklist.h"
#endif



struct msgid_var user_fr_timeout;
struct msgid_var user_fr_inv_timeout;
#ifdef TM_DIFF_RT_TIMEOUT
struct msgid_var user_rt_t1_timeout;
struct msgid_var user_rt_t2_timeout;
#endif
struct msgid_var user_inv_max_lifetime;
struct msgid_var user_noninv_max_lifetime;


/* internal use, val should be unsigned or positive
 *  <= is used instead of < to get rid of the gcc warning when
 *  sizeof(cell_member)==sizeof(val) (note that this limits the
 *  maximum accepted value to the type's maximum - 1) */
#define SIZE_FIT_CHECK(cell_member, val, cfg_name) \
	if (MAX_UVAR_VALUE(((struct cell*)0)->cell_member) <= (val)){ \
		ERR("tm_init_timers: " cfg_name " too big: %lu (%lu ticks) " \
				"- max %lu (%lu ticks) \n", TICKS_TO_MS((unsigned long)(val)),\
				(unsigned long)(val), \
				TICKS_TO_MS(MAX_UVAR_VALUE(((struct cell*)0)->cell_member)), \
				MAX_UVAR_VALUE(((struct cell*)0)->cell_member)); \
		goto error; \
	}

/* convert the timer values from milliseconds to ticks */
int tm_init_timers()
{
	default_tm_cfg.fr_timeout=MS_TO_TICKS(default_tm_cfg.fr_timeout);
	default_tm_cfg.fr_inv_timeout=MS_TO_TICKS(default_tm_cfg.fr_inv_timeout);
	default_tm_cfg.wait_timeout=MS_TO_TICKS(default_tm_cfg.wait_timeout);
	default_tm_cfg.delete_timeout=MS_TO_TICKS(default_tm_cfg.delete_timeout);
	default_tm_cfg.rt_t1_timeout=MS_TO_TICKS(default_tm_cfg.rt_t1_timeout);
	default_tm_cfg.rt_t2_timeout=MS_TO_TICKS(default_tm_cfg.rt_t2_timeout);
	default_tm_cfg.tm_max_inv_lifetime=MS_TO_TICKS(default_tm_cfg.tm_max_inv_lifetime);
	default_tm_cfg.tm_max_noninv_lifetime=MS_TO_TICKS(default_tm_cfg.tm_max_noninv_lifetime);
	/* fix 0 values to 1 tick (minimum possible wait time) */
	if (default_tm_cfg.fr_timeout==0) default_tm_cfg.fr_timeout=1;
	if (default_tm_cfg.fr_inv_timeout==0) default_tm_cfg.fr_inv_timeout=1;
	if (default_tm_cfg.wait_timeout==0) default_tm_cfg.wait_timeout=1;
	if (default_tm_cfg.delete_timeout==0) default_tm_cfg.delete_timeout=1;
	if (default_tm_cfg.rt_t2_timeout==0) default_tm_cfg.rt_t2_timeout=1;
	if (default_tm_cfg.rt_t1_timeout==0) default_tm_cfg.rt_t1_timeout=1;
	if (default_tm_cfg.tm_max_inv_lifetime==0) default_tm_cfg.tm_max_inv_lifetime=1;
	if (default_tm_cfg.tm_max_noninv_lifetime==0) default_tm_cfg.tm_max_noninv_lifetime=1;

	/* size fit checks */
	SIZE_FIT_CHECK(fr_timeout, default_tm_cfg.fr_timeout, "fr_timer");
	SIZE_FIT_CHECK(fr_inv_timeout, default_tm_cfg.fr_inv_timeout, "fr_inv_timer");
#ifdef TM_DIFF_RT_TIMEOUT
	SIZE_FIT_CHECK(rt_t1_timeout, default_tm_cfg.rt_t1_timeout, "retr_timer1");
	SIZE_FIT_CHECK(rt_t2_timeout, default_tm_cfg.rt_t2_timeout, "retr_timer2");
#endif
	SIZE_FIT_CHECK(end_of_life, default_tm_cfg.tm_max_inv_lifetime, "max_inv_lifetime");
	SIZE_FIT_CHECK(end_of_life, default_tm_cfg.tm_max_noninv_lifetime, "max_noninv_lifetime");

	memset(&user_fr_timeout, 0, sizeof(user_fr_timeout));
	memset(&user_fr_inv_timeout, 0, sizeof(user_fr_inv_timeout));
#ifdef TM_DIFF_RT_TIMEOUT
	memset(&user_rt_t1_timeout, 0, sizeof(user_rt_t1_timeout));
	memset(&user_rt_t2_timeout, 0, sizeof(user_rt_t2_timeout));
#endif
	memset(&user_inv_max_lifetime, 0, sizeof(user_inv_max_lifetime));
	memset(&user_noninv_max_lifetime, 0, sizeof(user_noninv_max_lifetime));

	DBG("tm: tm_init_timers: fr=%d fr_inv=%d wait=%d delete=%d t1=%d t2=%d"
			" max_inv_lifetime=%d max_noninv_lifetime=%d\n",
			default_tm_cfg.fr_timeout, default_tm_cfg.fr_inv_timeout,
			default_tm_cfg.wait_timeout, default_tm_cfg.delete_timeout,
			default_tm_cfg.rt_t1_timeout, default_tm_cfg.rt_t2_timeout,
			default_tm_cfg.tm_max_inv_lifetime, default_tm_cfg.tm_max_noninv_lifetime);
	return 0;
error:
	return -1;
}

/* internal macro for timer_fixup()
 * performs size fit check if the timer name matches
 */
#define IF_IS_TIMER_NAME(cell_member, cfg_name) \
	if ((name->len == sizeof(cfg_name)-1) && \
		(memcmp(name->s, cfg_name, sizeof(cfg_name)-1)==0)) { \
			SIZE_FIT_CHECK(cell_member, t, cfg_name); \
	}

/* fixup function for the timer values
 * (called by the configuration framework)
 */
int timer_fixup(void *handle, str *gname, str *name, void **val)
{
	ticks_t t;

	t = MS_TO_TICKS((unsigned int)(long)(*val));
	/* fix 0 values to 1 tick (minimum possible wait time) */
	if (t == 0) t = 1;

	/* size fit checks */
	IF_IS_TIMER_NAME(fr_timeout, "fr_timer")
	else IF_IS_TIMER_NAME(fr_inv_timeout, "fr_inv_timer")
#ifdef TM_DIFF_RT_TIMEOUT
	else IF_IS_TIMER_NAME(rt_t1_timeout, "retr_timer1")
	else IF_IS_TIMER_NAME(rt_t2_timeout, "retr_timer2")
#endif
	else IF_IS_TIMER_NAME(end_of_life, "max_inv_lifetime")
	else IF_IS_TIMER_NAME(end_of_life, "max_noninv_lifetime")

	*val = (void *)(long)t;
	return 0;

error:
	return -1;
}

/******************** handlers ***************************/


#ifndef TM_DEL_UNREF
/* returns the number of ticks before retrying the delete, or 0 if the
 * delete was successful */
inline static ticks_t  delete_cell( struct cell *p_cell, int unlock )
{
	/* there may still be FR/RETR timers, which have been reset
	   (i.e., time_out==TIMER_DELETED) but are still linked to
	   timer lists and must be removed from there before the
	   structures are released
	*/
	unlink_timers( p_cell );
	/* still in use ... don't delete */
	if ( IS_REFFED_UNSAFE(p_cell) ) {
		if (unlock) UNLOCK_HASH(p_cell->hash_index);
		DBG("DEBUG: delete_cell %p: can't delete -- still reffed (%d)\n",
				p_cell, p_cell->ref_count);
		/* delay the delete */
		/* TODO: change refcnts and delete on refcnt==0 */
		return cfg_get(tm, tm_cfg, delete_timeout);
	} else {
		if (unlock) UNLOCK_HASH(p_cell->hash_index);
#ifdef EXTRA_DEBUG
		DBG("DEBUG: delete transaction %p\n", p_cell );
#endif
		free_cell( p_cell );
		return 0;
	}
}
#endif /* TM_DEL_UNREF */




/* generate a fake reply
 * it assumes the REPLY_LOCK is already held and returns unlocked */
static void fake_reply(struct cell *t, int branch, int code )
{
	struct cancel_info cancel_data;
	short do_cancel_branch;
	enum rps reply_status;

	init_cancel_info(&cancel_data);
	do_cancel_branch = is_invite(t) && prepare_cancel_branch(t, branch, 0);
	/* mark the branch as canceled */
	t->uac[branch].request.flags|=F_RB_CANCELED;
	if ( is_local(t) ) {
		reply_status=local_reply( t, FAKED_REPLY, branch,
					  code, &cancel_data );
	} else {
		/* relay the reply, but don't put the transaction on wait:
		 * we still need t to send the cancels */
		reply_status=relay_reply( t, FAKED_REPLY, branch, code,
					  &cancel_data, 0 );
	}
	/* now that we are out of the lock, do the cancel I/O */
#ifdef CANCEL_REASON_SUPPORT
	if (do_cancel_branch) cancel_branch(t, branch, &cancel_data.reason, 0);
#else /* CANCEL_REASON_SUPPORT */
	if (do_cancel_branch) cancel_branch(t, branch, 0);
#endif /* CANCEL_REASON_SUPPORT */
	/* it is cleaned up on error; if no error occurred and the transaction
	   completed regularly, I have to clean up myself
	*/
	if (reply_status == RPS_COMPLETED)
		put_on_wait(t);
}



/* return (ticks_t)-1 on error/disable and 0 on success */
inline static ticks_t retransmission_handler( struct retr_buf *r_buf )
{
#ifdef EXTRA_DEBUG
	if (r_buf->my_T->flags & T_IN_AGONY) {
		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
			" called from RETR timer (flags %x)\n",
			r_buf->my_T, r_buf->my_T->flags );
		abort();
	}
#endif
	if ( r_buf->activ_type==TYPE_LOCAL_CANCEL
		|| r_buf->activ_type==TYPE_REQUEST ) {
#ifdef EXTRA_DEBUG
			DBG("DEBUG: retransmission_handler : "
				"request resending (t=%p, %.9s ... )\n",
				r_buf->my_T, r_buf->buffer);
#endif
			if (SEND_BUFFER( r_buf )==-1) {
				/* disable retr. timers => return -1 */
				fake_reply(r_buf->my_T, r_buf->branch, 503 );
				return (ticks_t)-1;
			}
#ifdef TMCB_ONSEND
			if (unlikely(has_tran_tmcbs(r_buf->my_T, TMCB_REQUEST_SENT)))
				run_onsend_callbacks(TMCB_REQUEST_SENT, r_buf,
										0, 0, TMCB_RETR_F);
#endif
	} else {
#ifdef EXTRA_DEBUG
			DBG("DEBUG: retransmission_handler : "
				"reply resending (t=%p, %.9s ... )\n",
				r_buf->my_T, r_buf->buffer);
#endif
			t_retransmit_reply(r_buf->my_T);
	}

	return 0;
}



inline static void final_response_handler(	struct retr_buf* r_buf,
											struct cell* t)
{
	int silent;
#ifdef USE_DNS_FAILOVER
	/*int i;
	int added_branches;
	*/
	int branch_ret;
	int prev_branch;
	ticks_t now;
#endif

#	ifdef EXTRA_DEBUG
	if (t->flags & T_IN_AGONY)
	{
		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
			" called from FR timer (flags %x)\n", t, t->flags);
		abort();
	}
#	endif
	/* FR for local cancels.... */
	if (r_buf->activ_type==TYPE_LOCAL_CANCEL)
	{
#ifdef TIMER_DEBUG
		DBG("DEBUG: final_response_handler: stop retr for Local Cancel\n");
#endif
		return;
	}
	/* FR for replies (negative INVITE replies) */
	if (r_buf->activ_type>0) {
#		ifdef EXTRA_DEBUG
		if (t->uas.request->REQ_METHOD!=METHOD_INVITE
			|| t->uas.status < 200 ) {
			LOG(L_CRIT, "BUG: final_response_handler: unknown type reply"
					" buffer\n");
			abort();
		}
#		endif
		put_on_wait( t );
		return;
	}

	/* lock reply processing to determine how to proceed reliably */
	LOCK_REPLIES( t );
	/* now it can only be a request retransmission buffer;
	   check whether we can simply discard the local transaction
	   state without forcefully removing it from the world */
	silent=
		/* don't go silent if disallowed globally ... */
		cfg_get(tm, tm_cfg, noisy_ctimer)==0
		/* ... or for this particular transaction */
		&& has_noisy_ctimer(t) == 0
		/* not for UACs */
		&& !is_local(t)
		/* invites only */
		&& is_invite(t)
		/* parallel forking does not allow silent state discarding */
		&& t->nr_of_outgoings==1
		/* no on_negative reply handler installed -- serial forking
		 * could occur otherwise */
		&& t->on_negative==0
		/* the same for FAILURE callbacks */
		&& !has_tran_tmcbs( t, TMCB_ON_FAILURE_RO|TMCB_ON_FAILURE)
		/* if something was received, we will not be silent on error */
		&& t->uac[r_buf->branch].last_received==0;

	if (silent) {
		UNLOCK_REPLIES(t);
#ifdef EXTRA_DEBUG
		DBG("DEBUG: final_response_handler: transaction silently dropped (%p)"
				", branch %d, last_received %d\n",t, r_buf->branch,
				 t->uac[r_buf->branch].last_received);
#endif
		put_on_wait( t );
		return;
	}
#ifdef EXTRA_DEBUG
	DBG("DEBUG: final_response_handler: stop retr. and send CANCEL (%p)\n", t);
#endif
	if ((r_buf->branch < MAX_BRANCHES) && /* r_buf->branch is always >=0 */
			(t->uac[r_buf->branch].last_received==0) &&
			(t->uac[r_buf->branch].request.buffer!=NULL) /* not a blind UAC */
	){
		/* no reply received */
#ifdef USE_DST_BLACKLIST
		if (r_buf->my_T
			&& r_buf->my_T->uas.request
			&& (r_buf->my_T->uas.request->REQ_METHOD &
					cfg_get(tm, tm_cfg, tm_blst_methods_add))
		)
			dst_blacklist_add( BLST_ERR_TIMEOUT, &r_buf->dst,
								r_buf->my_T->uas.request);
#endif
#ifdef USE_DNS_FAILOVER
		/* if this is an invite, the destination resolves to more IPs,
		 * and the transaction's end_of_life has not been reached yet,
		 * add another branch/uac */
		if (cfg_get(core, core_cfg, use_dns_failover)){
			now=get_ticks_raw();
			if ((s_ticks_t)(t->end_of_life-now)>0){
				branch_ret=add_uac_dns_fallback(t, t->uas.request,
													&t->uac[r_buf->branch], 0);
				prev_branch=-1;
				while((branch_ret>=0) && (branch_ret!=prev_branch)){
					prev_branch=branch_ret;
					branch_ret=t_send_branch(t, branch_ret, t->uas.request,
												0, 0);
				}
			}
		}
#endif
	}
	fake_reply(t, r_buf->branch, 408);
}



/* handles retransmissions and fr timers */
/* the following assumptions are made (to avoid deleting/re-adding the timer):
 *  retr_buf->retr_interval < (1<<(sizeof(ticks_t)*8-1))
 *  if retr_buf->retr_interval==0 => timer disabled
 *                            ==(ticks_t)-1 => retr. disabled (fr still working)
 *     retr_buf->retr_interval & (1<<(sizeof(ticks_t)*8-1)) => retr. & fr reset
 *     (we never reset only retr.: either both of them are reset, or retr. is
 *      disabled & fr is reset). In this case fr_origin will contain the
 *      "time" of the reset and the next retr. should occur at
 *      fr_origin+retr_interval (we also assume that we'll never reset retr.
 *      to a lower value than the current one)
 */
ticks_t retr_buf_handler(ticks_t ticks, struct timer_ln* tl, void *p)
{
	struct retr_buf* rbuf;
	ticks_t fr_remainder;
	ticks_t retr_remainder;
	ticks_t retr_interval;
	ticks_t new_retr_interval;
	struct cell *t;

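	/* recover the enclosing retr_buf from its embedded timer_ln: subtract
	 * the offset of the "timer" member from the timer_ln pointer
	 * (container_of-style pointer arithmetic) */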
	rbuf=(struct retr_buf*)
			((void*)tl-(void*)(&((struct retr_buf*)0)->timer));
	membar_depends(); /* to be on the safe side */
	t=rbuf->my_T;

#ifdef TIMER_DEBUG
	DBG("tm: timer retr_buf_handler @%d (%p -> %p -> %p)\n",
			ticks, tl, rbuf, t);
#endif
	if (unlikely(rbuf->flags & F_RB_DEL_TIMER)){
		/* timer marked for deletion */
		rbuf->t_active=0; /* mark it as removed */
		/* a membar is not really needed; in the very unlikely case that
		 * another process sees the old t_active value and tries to delete
		 * the timer again, it will be a no-op, since timer_del is safe in
		 * this case */
		return 0;
	}
	/* overflow safe check (should work ok for fr_intervals < max ticks_t/2) */
	if ((s_ticks_t)(rbuf->fr_expire-ticks)<=0){
		/* final response */
		rbuf->t_active=0; /* mark the timer as removed
							 (both timers are disabled);
							 there is a small race risk, but
							 nothing bad can happen */
		rbuf->flags|=F_RB_TIMEOUT;
		/* WARNING:  the next line depends on taking care not to start the
		 *           wait timer before finishing with t (if this is not
		 *           guaranteed then comment the timer_allow_del() line) */
		timer_allow_del(); /* [optional] allow timer_dels, since we're done
							  and there is no race risk */
		final_response_handler(rbuf, t);
		return 0;
	}else{
		/* 4 possible states: running (t1), t2, paused, disabled */
			if ((s_ticks_t)(rbuf->retr_expire-ticks)<=0){
				if (rbuf->flags & F_RB_RETR_DISABLED)
					goto disabled;
				/* retr_interval=min(2*ri, rt_t2), *p==2*ri */
				/* branchless version:
					#ifdef CC_SIGNED_RIGHT_SHIFT
						ri= rt_t2+((2*ri-rt_t2) &
						((signed)(2*ri-rt_t2)>>(sizeof(ticks_t)*8-1)));
					#else
						ri=rt_t2+((2*ri-rt_t2)& -(2*ri<rt_t2));
					#endif
				*/

				/* get the current interval from the timer param. */
				if ((rbuf->flags & F_RB_T2) ||
						(((ticks_t)(unsigned long)p)>RT_T2_TIMEOUT(rbuf))){
					retr_interval=RT_T2_TIMEOUT(rbuf);
					new_retr_interval=RT_T2_TIMEOUT(rbuf);
				}else{
					retr_interval=(ticks_t)(unsigned long)p;
					new_retr_interval=retr_interval<<1;
				}
#ifdef TIMER_DEBUG
				DBG("tm: timer: retr: new interval %d (max %d)\n",
						retr_interval, RT_T2_TIMEOUT(rbuf));
#endif
				/* we could race with the reply_received code, but the
				 * worst thing that can happen is to delay a reset_to_t2
				 * for crt_interval and send an extra retr. */
				rbuf->retr_expire=ticks+retr_interval;
				/* set retr_remainder to -1 on error (retr. disabled), or
				 * to retr_interval on success */
				retr_remainder=retransmission_handler(rbuf) | retr_interval;
				/* store the next retr. interval inside the timer struct,
				 * in the data member */
				tl->data=(void*)(unsigned long)(new_retr_interval);
			}else{
				retr_remainder=rbuf->retr_expire-ticks;
				DBG("tm: timer: retr: nothing to do, expire in %d\n",
						retr_remainder);
			}
	}
/* skip: */
	/* return the minimum of the next retransmission and the final response
	 * timeout (side benefit: it properly cancels the timer if ret==0 and
	 * sleeps for fr_remainder if retr. is canceled [==(ticks_t)-1]) */
	fr_remainder=rbuf->fr_expire-ticks; /* to be more precise use
											get_ticks_raw() instead of ticks
											(but make sure that
											crt. ticks < fr_expire) */
#ifdef TIMER_DEBUG
	DBG("tm: timer retr_buf_handler @%d (%p ->%p->%p) exiting min (%d, %d)\n",
			ticks, tl, rbuf, t, retr_remainder, fr_remainder);
#endif
#ifdef EXTRA_DEBUG
	if (retr_remainder==0 || fr_remainder==0){
		BUG("tm: timer retr_buf_handler: 0 remainder => disabling timer!: "
				"retr_remainder=%d, fr_remainder=%d\n", retr_remainder,
				fr_remainder);
	}
#endif
	if (retr_remainder<fr_remainder)
		return retr_remainder;
	else{
		/* hack to switch to the slow timer */
#ifdef TM_FAST_RETR_TIMER
		tl->flags&=~F_TIMER_FAST;
#endif
		return fr_remainder;
	}
disabled:
	return rbuf->fr_expire-ticks;
}
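
/*
 * Illustrative sketch only, kept under #if 0 so it is never compiled: the
 * retransmission back-off that retr_buf_handler() above implements via the
 * timer payload. The interval passed through tl->data is doubled after every
 * retransmission and the value actually used is capped at the transaction's
 * T2 timeout, so the effective schedule is T1, 2*T1, 4*T1, ..., T2, T2, ...
 * The helper name demo_next_retr_interval() is hypothetical and exists only
 * in this sketch.
 */
#if 0
static ticks_t demo_next_retr_interval(ticks_t crt_interval, ticks_t rt_t2)
{
	ticks_t doubled;

	/* min(2*crt_interval, rt_t2): the same cap that the handler applies
	 * when it compares the timer payload against RT_T2_TIMEOUT(rbuf) */
	doubled=crt_interval<<1;
	return (doubled>rt_t2)?rt_t2:doubled;
}
#endif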



ticks_t wait_handler(ticks_t ti, struct timer_ln *wait_tl, void* data)
{
	struct cell *p_cell;
	ticks_t ret;

	p_cell=(struct cell*)data;
#ifdef TIMER_DEBUG
	DBG("DEBUG: WAIT timer hit @%d for %p (timer_ln %p)\n",
			ti, p_cell, wait_tl);
#endif

#ifdef TM_DEL_UNREF
	/* stop the cancel timers if any are running */
	if ( is_invite(p_cell) ) cleanup_localcancel_timers( p_cell );
	/* remove the cell from the hash table */
	LOCK_HASH( p_cell->hash_index );
	remove_from_hash_table_unsafe(  p_cell );
	UNLOCK_HASH( p_cell->hash_index );
	p_cell->flags |= T_IN_AGONY;
	UNREF_FREE(p_cell);
	ret=0;
#else /* TM_DEL_UNREF */
	if (p_cell->flags & T_IN_AGONY){
		/* delayed delete */
		/* we call delete now without any locking on hash/ref_count;
		   we can do that because this delayed-delete path is only entered
		   after wait_handler has already removed the transaction from the
		   hash table and could not destroy it because some processes were
		   still using it; that means that the processes currently using
		   the transaction can unref it and no new processes can ref it --
		   we can safely wait until ref_count is zero without locking
		*/
		ret=delete_cell( p_cell, 0 /* don't unlock on return */ );
	}else {
		/* stop the cancel timers if any are running */
		if ( is_invite(p_cell) ) cleanup_localcancel_timers( p_cell );
		/* remove the cell from the hash table */
		LOCK_HASH( p_cell->hash_index );
		remove_from_hash_table_unsafe(  p_cell );
		p_cell->flags |= T_IN_AGONY;
		/* delete (returns with the HASH unlocked) */
		ret=delete_cell( p_cell, 1 /* unlock on return */ );
	}
#endif /* TM_DEL_UNREF */
	return ret;
}