tm: auto-correct timeout values in K compat mode
[sip-router] / modules / tm / timer.c
1 /*
2  * $Id$
3  *
4  *
5  * Copyright (C) 2001-2003 FhG Fokus
6  *
7  * This file is part of ser, a free SIP server.
8  *
9  * ser is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version
13  *
14  * For a license to use the ser software under conditions
15  * other than those described here, or to purchase support for this
16  * software, please contact iptel.org by e-mail at the following addresses:
17  *    info@iptel.org
18  *
19  * ser is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22  * GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License 
25  * along with this program; if not, write to the Free Software 
26  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
27  */
28
29
30 /* 
31   timer.c is where we implement TM timers. It has been designed
32   for high performance using some techniques of which timer users
33   need to be aware.
34
35         One technique is "fixed-timer-length". We maintain separate 
36         timer lists, all of them include elements of the same time
37         to fire. That allows *appending* new events to the list as
38         opposed to inserting them by time, which is costly due to
39         searching time spent in a mutex. The performance benefit is
40         noticeable. The limitation is you need a new timer list for
41         each new timer length.
42
43         Another technique is the timer process slices off expired elements
44         from the list in a mutex, but executes the timer after the mutex
45         is left. That saves time greatly as whichever process wants to
46         add/remove a timer, it does not have to wait until the current
47         list is processed. However, be aware the timers may hit in a delayed
48         manner; you have no guarantee in your process that after resetting a timer, 
49         it will no more hit. It might have been removed by timer process,
50     and is waiting to be executed.  The following example shows it:
51
52                         PROCESS1                                TIMER PROCESS
53
54         0.                                                              timer hits, it is removed from queue and
55                                                                         about to be executed
56         1.      process1 decides to
57                 reset the timer 
58         2.                                                              timer is executed now
59         3.      if the process1 naively
60                 thinks the timer could not 
61                 have been executed after 
62                 resetting the timer, it is
63                 WRONG -- it was (step 2.)
64
65         So be careful when writing the timer handlers. Currently defined timers 
66         don't hurt if they hit delayed, I hope at least. Retransmission timer 
	may result in a useless retransmission -- not too bad. FR timer not too
68         bad either as timer processing uses a REPLY mutex making it safe to other
69         processing affecting transaction state. Wait timer not bad either -- processes
70         putting a transaction on wait don't do anything with it anymore.
71
72                 Example when it does not hurt:
73
74                         P1                                              TIMER
75         0.                                                              RETR timer removed from list and
76                                                                         scheduled for execution
77         1. 200/BYE received->
78            reset RETR, put_on_wait
79         2.                                                              RETR timer executed -- too late but it does
80                                                                         not hurt
81         3.                                                              WAIT handler executed
82
83         The rule of thumb is don't touch data you put under a timer. Create data,
84     put them under a timer, and let them live until they are safely destroyed from
85     wait/delete timer.  The only safe place to manipulate the data is 
86     from timer process in which delayed timers cannot hit (all timers are
87     processed sequentially).
88
89         A "bad example" -- rewriting content of retransmission buffer
90         in an unprotected way is bad because a delayed retransmission timer might 
	hit. That's why our reply retransmission procedure is enclosed in 
92         a REPLY_LOCK.
93
94 */
95 /*
96  * History:
97  * --------
98  *  2003-06-27  timers are not unlinked if timerlist is 0 (andrei)
99  *  2004-02-13  t->is_invite, t->local, t->noisy_ctimer replaced;
100  *              timer_link.payload removed (bogdan)
101  *  2005-10-03  almost completely rewritten to use the new timers (andrei)
102  *  2005-12-12  on final response marked the rb as removed to avoid deleting
103  *              it from the timer handle; timer_allow_del()  (andrei)
104  *  2006-08-11  final_response_handler dns failover support for timeout-ed
105  *              invites (andrei)
106  *  2006-09-28  removed the 480 on fr_inv_timeout reply: on timeout always 
107  *               return a 408
108  *              set the corresponding "faked" failure route sip_msg->msg_flags 
109  *               on timeout or if the branch received a reply (andrei)
110  *  2007-03-15  TMCB_ONSEND callbacks support (andrei)
111  *  2007-05-29  delete on transaction ref_count==0 : removed the delete timer
112  *               (andrei)
113  * 2007-06-01  support for different retransmissions intervals per transaction;
114  *             added maximum inv. and non-inv. transaction life time (andrei)
115  */
116
117 #include "defs.h"
118
119
120
121 #include "config.h"
122 #include "h_table.h"
123 #include "timer.h"
124 #include "../../dprint.h"
125 #include "lock.h"
126 #include "t_stats.h"
127
128 #include "../../hash_func.h"
129 #include "../../dprint.h"
130 #include "../../config.h"
131 #include "../../parser/parser_f.h"
132 #include "../../ut.h"
133 #include "../../timer_ticks.h"
134 #include "../../compiler_opt.h" 
135 #include "../../sr_compat.h" 
136 #include "t_funcs.h"
137 #include "t_reply.h"
138 #include "t_cancel.h"
139 #include "t_hooks.h"
140 #ifdef USE_DNS_FAILOVER
141 #include "t_fwd.h" /* t_send_branch */
142 #include "../../cfg_core.h" /* cfg_get(core, core_cfg, use_dns_failover) */
143 #endif
144 #ifdef USE_DST_BLACKLIST
145 #include "../../dst_blacklist.h"
146 #endif
147
148
149
/* per-message ("user") timeout overrides; zeroed in tm_init_timers().
 * NOTE(review): presumably set from the script for the current message
 * and consulted when the transaction is created -- confirm against the
 * msgid_var users in t_funcs/h_table. */
struct msgid_var user_fr_timeout;          /* final response timeout override */
struct msgid_var user_fr_inv_timeout;      /* INVITE final response timeout override */
#ifdef TM_DIFF_RT_TIMEOUT
struct msgid_var user_rt_t1_timeout;       /* retransmission T1 override */
struct msgid_var user_rt_t2_timeout;       /* retransmission T2 override */
#endif
struct msgid_var user_inv_max_lifetime;    /* max INVITE transaction lifetime override */
struct msgid_var user_noninv_max_lifetime; /* max non-INVITE transaction lifetime override */
158
159
/* internal use, val should be unsigned or positive
 *  <= instead of < to get rid of the gcc warning when
 *  sizeof(cell_member)==sizeof(val) (Note that this limits the
 *  maximum value to max. type -1)
 * Checks that 'val' (in ticks) fits into the struct cell member
 * 'cell_member'; on overflow it logs an error naming the config option
 * 'cfg_name' (both in ms and in ticks) and jumps to the caller's
 * "error" label -- callers must provide one. */
#define SIZE_FIT_CHECK(cell_member, val, cfg_name) \
	if (MAX_UVAR_VALUE(((struct cell*)0)->cell_member) <= (val)){ \
		ERR("tm_init_timers: " cfg_name " too big: %lu (%lu ticks) " \
				"- max %lu (%lu ticks) \n", TICKS_TO_MS((unsigned long)(val)),\
				(unsigned long)(val), \
				TICKS_TO_MS(MAX_UVAR_VALUE(((struct cell*)0)->cell_member)), \
				MAX_UVAR_VALUE(((struct cell*)0)->cell_member)); \
		goto error; \
	} 
173
/* fix timer values to ticks
 * One-time startup initialization of the tm timer configuration:
 *  - in Kamailio compatibility mode, timeout values <=120 are assumed to
 *    have been given in seconds (the old K convention) and are
 *    auto-corrected to milliseconds (value*1000), with a warning
 *  - all ms values in default_tm_cfg are converted to internal ticks
 *  - 0 values are clamped to 1 tick (the minimum possible wait time)
 *  - each value is checked to fit into its struct cell member
 *  - the per-message override variables (user_*) are zeroed
 * Returns 0 on success, -1 if a value does not fit (see SIZE_FIT_CHECK). */
int tm_init_timers()
{
	if(sr_cfg_compat==SR_COMPAT_KAMAILIO) {
		/* auto-correct values that look like seconds (K compat mode) */
		if(default_tm_cfg.fr_timeout<=120) {
			LM_WARN("too small given fr_timer value: %ums (using T*1000)\n",
					default_tm_cfg.fr_timeout);
			default_tm_cfg.fr_timeout *= 1000;
		}
		if(default_tm_cfg.fr_inv_timeout<=120) {
			LM_WARN("too small given fr_inv_timer value: %ums (using T*1000)\n",
					default_tm_cfg.fr_inv_timeout);
			default_tm_cfg.fr_inv_timeout *= 1000;
		}
		if(default_tm_cfg.wait_timeout<=120) {
			LM_WARN("too small given wait_timer value: %ums (using T*1000)\n",
					default_tm_cfg.wait_timeout);
			default_tm_cfg.wait_timeout *= 1000;
		}
		if(default_tm_cfg.delete_timeout<=120) {
			LM_WARN("too small given delete_timer value: %ums (using T*1000)\n",
					default_tm_cfg.delete_timeout);
			default_tm_cfg.delete_timeout *= 1000;
		}
	}

	/* convert every configured value from ms to internal ticks */
	default_tm_cfg.fr_timeout=MS_TO_TICKS(default_tm_cfg.fr_timeout); 
	default_tm_cfg.fr_inv_timeout=MS_TO_TICKS(default_tm_cfg.fr_inv_timeout);
	default_tm_cfg.wait_timeout=MS_TO_TICKS(default_tm_cfg.wait_timeout);
	default_tm_cfg.delete_timeout=MS_TO_TICKS(default_tm_cfg.delete_timeout);
	default_tm_cfg.rt_t1_timeout=MS_TO_TICKS(default_tm_cfg.rt_t1_timeout);
	default_tm_cfg.rt_t2_timeout=MS_TO_TICKS(default_tm_cfg.rt_t2_timeout);
	default_tm_cfg.tm_max_inv_lifetime=MS_TO_TICKS(default_tm_cfg.tm_max_inv_lifetime);
	default_tm_cfg.tm_max_noninv_lifetime=MS_TO_TICKS(default_tm_cfg.tm_max_noninv_lifetime);
	/* fix 0 values to 1 tick (minimum possible wait time ) */
	if (default_tm_cfg.fr_timeout==0) default_tm_cfg.fr_timeout=1;
	if (default_tm_cfg.fr_inv_timeout==0) default_tm_cfg.fr_inv_timeout=1;
	if (default_tm_cfg.wait_timeout==0) default_tm_cfg.wait_timeout=1;
	if (default_tm_cfg.delete_timeout==0) default_tm_cfg.delete_timeout=1;
	if (default_tm_cfg.rt_t2_timeout==0) default_tm_cfg.rt_t2_timeout=1;
	if (default_tm_cfg.rt_t1_timeout==0) default_tm_cfg.rt_t1_timeout=1;
	if (default_tm_cfg.tm_max_inv_lifetime==0) default_tm_cfg.tm_max_inv_lifetime=1;
	if (default_tm_cfg.tm_max_noninv_lifetime==0) default_tm_cfg.tm_max_noninv_lifetime=1;
	
	/* size fit checks (each jumps to error: if the value overflows the
	 * corresponding struct cell member) */
	SIZE_FIT_CHECK(fr_timeout, default_tm_cfg.fr_timeout, "fr_timer");
	SIZE_FIT_CHECK(fr_inv_timeout, default_tm_cfg.fr_inv_timeout, "fr_inv_timer");
#ifdef TM_DIFF_RT_TIMEOUT
	SIZE_FIT_CHECK(rt_t1_timeout, default_tm_cfg.rt_t1_timeout, "retr_timer1");
	SIZE_FIT_CHECK(rt_t2_timeout, default_tm_cfg.rt_t2_timeout, "retr_timer2");
#endif
	SIZE_FIT_CHECK(end_of_life, default_tm_cfg.tm_max_inv_lifetime, "max_inv_lifetime");
	SIZE_FIT_CHECK(end_of_life, default_tm_cfg.tm_max_noninv_lifetime, "max_noninv_lifetime");
	
	/* reset the per-message override variables */
	memset(&user_fr_timeout, 0, sizeof(user_fr_timeout));
	memset(&user_fr_inv_timeout, 0, sizeof(user_fr_inv_timeout));
#ifdef TM_DIFF_RT_TIMEOUT
	memset(&user_rt_t1_timeout, 0, sizeof(user_rt_t1_timeout));
	memset(&user_rt_t2_timeout, 0, sizeof(user_rt_t2_timeout));
#endif
	memset(&user_inv_max_lifetime, 0, sizeof(user_inv_max_lifetime));
	memset(&user_noninv_max_lifetime, 0, sizeof(user_noninv_max_lifetime));
	
	DBG("tm: tm_init_timers: fr=%d fr_inv=%d wait=%d delete=%d t1=%d t2=%d"
			" max_inv_lifetime=%d max_noninv_lifetime=%d\n",
			default_tm_cfg.fr_timeout, default_tm_cfg.fr_inv_timeout,
			default_tm_cfg.wait_timeout, default_tm_cfg.delete_timeout,
			default_tm_cfg.rt_t1_timeout, default_tm_cfg.rt_t2_timeout,
			default_tm_cfg.tm_max_inv_lifetime, default_tm_cfg.tm_max_noninv_lifetime);
	return 0;
error:
	return -1;
}
247
/* internal macro for timer_fixup()
 * performs size fit check if the timer name matches
 * Expands to a plain if-statement (no trailing else), so call sites can
 * be chained with "else IF_IS_TIMER_NAME(...)".  On overflow
 * SIZE_FIT_CHECK jumps to the caller's "error" label.  Relies on the
 * caller's local 'name' (str*) and 't' (ticks_t) being in scope. */
#define IF_IS_TIMER_NAME(cell_member, cfg_name) \
	if ((name->len == sizeof(cfg_name)-1) && \
		(memcmp(name->s, cfg_name, sizeof(cfg_name)-1)==0)) { \
			SIZE_FIT_CHECK(cell_member, t, cfg_name); \
	}
256
/* fixup function for the timer values
 * (called by the configuration framework)
 * Converts *val in place from milliseconds to internal ticks, clamping
 * 0 to 1 tick, and -- for the timers that are cached inside struct
 * cell -- verifies the converted value fits into the target member.
 * @param handle - cfg framework group handle (unused here)
 * @param gname  - cfg group name (unused here)
 * @param name   - parameter name; selects which size check applies
 * @param val    - in: value in ms (cast through long); out: ticks
 * @return 0 on success, -1 if the value is too big for its field
 */
int timer_fixup(void *handle, str *gname, str *name, void **val)
{
	ticks_t t;

	t = MS_TO_TICKS((unsigned int)(long)(*val));
	/* fix 0 values to 1 tick (minimum possible wait time ) */
	if (t == 0) t = 1;

	/* size fix checks (IF_IS_TIMER_NAME expands to a chained if/else;
	 * names that match none of the entries need no size check) */
	IF_IS_TIMER_NAME(fr_timeout, "fr_timer")
	else IF_IS_TIMER_NAME(fr_inv_timeout, "fr_inv_timer")
#ifdef TM_DIFF_RT_TIMEOUT
	else IF_IS_TIMER_NAME(rt_t1_timeout, "retr_timer1")
	else IF_IS_TIMER_NAME(rt_t2_timeout, "retr_timer2")
#endif
	else IF_IS_TIMER_NAME(end_of_life, "max_inv_lifetime")
	else IF_IS_TIMER_NAME(end_of_life, "max_noninv_lifetime")

	*val = (void *)(long)t;
	return 0;

error:
	return -1;
}
284
285 /******************** handlers ***************************/
286
287
288 #ifndef TM_DEL_UNREF
289 /* returns number of ticks before retrying the del, or 0 if the del.
290  * was succesfull */
291 inline static ticks_t  delete_cell( struct cell *p_cell, int unlock )
292 {
293         /* there may still be FR/RETR timers, which have been reset
294            (i.e., time_out==TIMER_DELETED) but are stilled linked to
295            timer lists and must be removed from there before the
296            structures are released
297         */
298         unlink_timers( p_cell );
299         /* still in use ... don't delete */
300         if ( IS_REFFED_UNSAFE(p_cell) ) {
301                 if (unlock) UNLOCK_HASH(p_cell->hash_index);
302                 DBG("DEBUG: delete_cell %p: can't delete -- still reffed (%d)\n",
303                                 p_cell, p_cell->ref_count);
304                 /* delay the delete */
305                 /* TODO: change refcnts and delete on refcnt==0 */
306                 return cfg_get(tm, tm_cfg, delete_timeout);
307         } else {
308                 if (unlock) UNLOCK_HASH(p_cell->hash_index);
309 #ifdef EXTRA_DEBUG
310                 DBG("DEBUG: delete transaction %p\n", p_cell );
311 #endif
312                 free_cell( p_cell );
313                 return 0;
314         }
315 }
316 #endif /* TM_DEL_UNREF */
317
318
319
320
321 /* generate a fake reply
322  * it assumes the REPLY_LOCK is already held and returns unlocked */
323 static void fake_reply(struct cell *t, int branch, int code )
324 {
325         branch_bm_t cancel_bitmap;
326         short do_cancel_branch;
327         enum rps reply_status;
328
329         do_cancel_branch = is_invite(t) && prepare_cancel_branch(t, branch, 0);
330         /* mark branch as canceled */
331         t->uac[branch].request.flags|=F_RB_CANCELED;
332         if ( is_local(t) ) {
333                 reply_status=local_reply( t, FAKED_REPLY, branch, 
334                                           code, &cancel_bitmap );
335         } else {
336                 /* rely reply, but don't put on wait, we still need t
337                  * to send the cancels */
338                 reply_status=relay_reply( t, FAKED_REPLY, branch, code,
339                                           &cancel_bitmap, 0 );
340         }
341         /* now when out-of-lock do the cancel I/O */
342         if (do_cancel_branch) cancel_branch(t, branch, 0);
343         /* it's cleaned up on error; if no error occurred and transaction
344            completed regularly, I have to clean-up myself
345         */
346         if (reply_status == RPS_COMPLETED)
347                 put_on_wait(t);
348 }
349
350
351
/* return (ticks_t)-1 on error/disable and 0 on success */
/* Resends the message held in r_buf:
 *  - requests and local CANCELs are re-sent on the wire; if the send
 *    fails, a 503 is faked on the branch and (ticks_t)-1 is returned so
 *    the caller disables further retransmissions (FR keeps running)
 *  - everything else is treated as a reply and re-sent via
 *    t_retransmit_reply() */
inline static ticks_t retransmission_handler( struct retr_buf *r_buf )
{
#ifdef EXTRA_DEBUG
	/* a buffer of a dying transaction must never reach the RETR timer */
	if (r_buf->my_T->flags & T_IN_AGONY) {
		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
			" called from RETR timer (flags %x)\n",
			r_buf->my_T, r_buf->my_T->flags );
		abort();
	}	
#endif
	if ( r_buf->activ_type==TYPE_LOCAL_CANCEL 
		|| r_buf->activ_type==TYPE_REQUEST ) {
#ifdef EXTRA_DEBUG
			DBG("DEBUG: retransmission_handler : "
				"request resending (t=%p, %.9s ... )\n", 
				r_buf->my_T, r_buf->buffer);
#endif
			if (SEND_BUFFER( r_buf )==-1) {
				/* disable retr. timers => return -1 */
				fake_reply(r_buf->my_T, r_buf->branch, 503 );
				return (ticks_t)-1;
			}
#ifdef TMCB_ONSEND
			/* notify onsend callbacks; TMCB_RETR_F marks this send as a
			 * retransmission */
			if (unlikely(has_tran_tmcbs(r_buf->my_T, TMCB_REQUEST_SENT))) 
				run_onsend_callbacks(TMCB_REQUEST_SENT, r_buf, 
												0, 0, TMCB_RETR_F);
#endif
	} else {
#ifdef EXTRA_DEBUG
			DBG("DEBUG: retransmission_handler : "
				"reply resending (t=%p, %.9s ... )\n", 
				r_buf->my_T, r_buf->buffer);
#endif
			t_retransmit_reply(r_buf->my_T);
	}
	
	return 0;
}
391
392
393
/* Final response (FR) timeout handling for one branch (r_buf) of
 * transaction t: no final reply arrived in time.
 *  - local CANCEL buffers: nothing to do (retransmissions simply stop)
 *  - reply buffers (negative INVITE replies): put the transaction on wait
 *  - request buffers: if allowed, silently drop the transaction;
 *    otherwise optionally blacklist the destination, attempt a DNS
 *    failover branch, and fake a 408 on the branch.
 * Takes REPLY_LOCK; the lock is released either here (silent path) or
 * inside fake_reply(), which assumes it held and returns unlocked. */
inline static void final_response_handler(	struct retr_buf* r_buf,
											struct cell* t)
{
	int silent;
#ifdef USE_DNS_FAILOVER
	/*int i; 
	int added_branches;
	*/
	int branch_ret;
	int prev_branch;
	ticks_t now;
#endif

#	ifdef EXTRA_DEBUG
	/* a dying transaction must never reach the FR timer */
	if (t->flags & T_IN_AGONY) 
	{
		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
			" called from FR timer (flags %x)\n", t, t->flags);
		abort();
	}
#	endif
	/* FR for local cancels.... */
	if (r_buf->activ_type==TYPE_LOCAL_CANCEL)
	{
#ifdef TIMER_DEBUG
		DBG("DEBUG: final_response_handler: stop retr for Local Cancel\n");
#endif
		return;
	}
	/* FR for replies (negative INVITE replies) */
	if (r_buf->activ_type>0) {
#		ifdef EXTRA_DEBUG
		if (t->uas.request->REQ_METHOD!=METHOD_INVITE
			|| t->uas.status < 200 ) {
			LOG(L_CRIT, "BUG: final_response_handler: unknown type reply"
					" buffer\n");
			abort();
		}
#		endif
		put_on_wait( t );
		return;
	};

	/* lock reply processing to determine how to proceed reliably */
	LOCK_REPLIES( t );
	/* now it can be only a request retransmission buffer;
	   try if you can simply discard the local transaction 
	   state without compellingly removing it from the
	   world */
	silent=
		/* don't go silent if disallowed globally ... */
		cfg_get(tm, tm_cfg, noisy_ctimer)==0
		/* ... or for this particular transaction */
		&& has_noisy_ctimer(t) == 0
		/* not for UACs */
		&& !is_local(t)
		/* invites only */
		&& is_invite(t)
		/* parallel forking does not allow silent state discarding */
		&& t->nr_of_outgoings==1
		/* on_negativ reply handler not installed -- serial forking 
		 * could occur otherwise */
		&& t->on_negative==0
		/* the same for FAILURE callbacks */
		&& !has_tran_tmcbs( t, TMCB_ON_FAILURE_RO|TMCB_ON_FAILURE) 
		/* something received -- we will not be silent on error */
		&& t->uac[r_buf->branch].last_received==0;
	
	if (silent) {
		/* drop the transaction state without generating any reply */
		UNLOCK_REPLIES(t);
#ifdef EXTRA_DEBUG
		DBG("DEBUG: final_response_handler: transaction silently dropped (%p)"
				", branch %d, last_received %d\n",t, r_buf->branch,
				 t->uac[r_buf->branch].last_received);
#endif
		put_on_wait( t );
		return;
	}
#ifdef EXTRA_DEBUG
	DBG("DEBUG: final_response_handler:stop retr. and send CANCEL (%p)\n", t);
#endif
	if ((r_buf->branch < MAX_BRANCHES) && /* r_buf->branch is always >=0 */
			(t->uac[r_buf->branch].last_received==0) &&
			(t->uac[r_buf->branch].request.buffer!=NULL) /* not a blind UAC */
	){
		/* no reply received */
#ifdef USE_DST_BLACKLIST
		/* blacklist the unresponsive destination (only for the methods
		 * enabled via tm_blst_methods_add) */
		if (cfg_get(core, core_cfg, use_dst_blacklist)
			&& r_buf->my_T
			&& r_buf->my_T->uas.request
			&& (r_buf->my_T->uas.request->REQ_METHOD & cfg_get(tm, tm_cfg, tm_blst_methods_add))
		)
			dst_blacklist_add( BLST_ERR_TIMEOUT, &r_buf->dst,
						r_buf->my_T->uas.request);
#endif
#ifdef USE_DNS_FAILOVER
		/* if this is an invite, the destination resolves to more ips, and
		 *  it still hasn't passed more than fr_inv_timeout since we
		 *  started, add another branch/uac */
		if (cfg_get(core, core_cfg, use_dns_failover)){
			now=get_ticks_raw();
			/* overflow-safe "end_of_life still in the future" test */
			if ((s_ticks_t)(t->end_of_life-now)>0){
				branch_ret=add_uac_dns_fallback(t, t->uas.request,
															&t->uac[r_buf->branch], 0);
				prev_branch=-1;
				/* keep sending new branches until no more are added */
				while((branch_ret>=0) &&(branch_ret!=prev_branch)){
					prev_branch=branch_ret;
					branch_ret=t_send_branch(t, branch_ret, t->uas.request , 
														0, 0);
				}
			}
		}
#endif
	}
	/* fake a 408 on the branch (releases REPLY_LOCK) */
	fake_reply(t, r_buf->branch, 408);
}
510
511
512
513 /* handles retransmissions and fr timers */
514 /* the following assumption are made (to avoid deleting/re-adding the timer):
515  *  retr_buf->retr_interval < ( 1<<((sizeof(ticks_t)*8-1) )
516  *  if retr_buf->retr_interval==0 => timer disabled
517  *                            ==(ticks_t) -1 => retr. disabled (fr working)
518  *     retr_buf->retr_interval & (1 <<(sizeof(ticks_t)*8-1) => retr. & fr reset
519  *     (we never reset only retr, it's either reset both of them or retr 
520  *      disabled & reset fr). In this case the fr_origin will contain the 
521  *      "time" of the reset and next retr should occur at 
522  *      fr->origin+retr_interval (we also assume that we'll never reset retr
523  *      to a lower value then the current one)
524  */
/* Combined retransmission + final-response timer callback for a
 * retr_buf.  tl points into rbuf->timer (the rbuf is recovered via
 * pointer arithmetic); p carries the next retransmission interval (see
 * the assumptions documented above).
 * Returns the number of ticks until the next firing, or 0 to cancel
 * the timer (deletion requested or FR fired). */
ticks_t retr_buf_handler(ticks_t ticks, struct timer_ln* tl, void *p)
{
	struct retr_buf* rbuf ;
	ticks_t fr_remainder;
	ticks_t retr_remainder;
	ticks_t retr_interval;
	ticks_t new_retr_interval;
	struct cell *t;

	/* recover the enclosing retr_buf from the embedded timer link */
	rbuf=(struct  retr_buf*)
			((void*)tl-(void*)(&((struct retr_buf*)0)->timer));
	membar_depends(); /* to be on the safe side */
	t=rbuf->my_T;
	
#ifdef TIMER_DEBUG
	DBG("tm: timer retr_buf_handler @%d (%p -> %p -> %p)\n",
			ticks, tl, rbuf, t);
#endif
	if (unlikely(rbuf->flags & F_RB_DEL_TIMER)){
		/* timer marked for deletion */
		rbuf->t_active=0; /* mark it as removed */
		/* a membar is not really needed, in the very unlikely case that 
		 * another process will see old t_active's value and will try to 
		 * delete the timer again, but since timer_del it's safe in this cases
		 * it will be a no-op */
		return 0;
	}
	/* overflow safe check (should work ok for fr_intervals < max ticks_t/2) */
	if ((s_ticks_t)(rbuf->fr_expire-ticks)<=0){
		/* final response */
		rbuf->t_active=0; /* mark the timer as removed 
							 (both timers disabled)
							  a little race risk, but
							  nothing bad would happen */
		rbuf->flags|=F_RB_TIMEOUT;
		/* WARNING:  the next line depends on taking care not to start the 
		 *           wait timer before finishing with t (if this is not 
		 *           guaranteed then comment the timer_allow_del() line) */
		timer_allow_del(); /* [optional] allow timer_dels, since we're done
							  and there is no race risk */
		final_response_handler(rbuf, t);
		return 0;
	}else{
		/*  4 possible states running (t1), t2, paused, disabled */
			if ((s_ticks_t)(rbuf->retr_expire-ticks)<=0){
				if (rbuf->flags & F_RB_RETR_DISABLED)
					goto disabled;
				/* retr_interval= min (2*ri, rt_t2) , *p==2*ri*/
				/* no branch version: 
					#ifdef CC_SIGNED_RIGHT_SHIFT
						ri=  rt_t2+((2*ri-rt_t2) & 
						((signed)(2*ri-rt_t2)>>(sizeof(ticks_t)*8-1));
					#else
						ri=rt_t2+((2*ri-rt_t2)& -(2*ri<rt_t2));
					#endif
				*/
				
				/* get the  current interval from timer param. */
				if ((rbuf->flags & F_RB_T2) || 
						(((ticks_t)(unsigned long)p)>RT_T2_TIMEOUT(rbuf))){
					/* cap at T2 once reached or exceeded */
					retr_interval=RT_T2_TIMEOUT(rbuf);
					new_retr_interval=RT_T2_TIMEOUT(rbuf);
				}else{
					/* exponential backoff: next interval doubles */
					retr_interval=(ticks_t)(unsigned long)p;
					new_retr_interval=retr_interval<<1;
				}
#ifdef TIMER_DEBUG
				DBG("tm: timer: retr: new interval %d (max %d)\n", 
						retr_interval, RT_T2_TIMEOUT(rbuf));
#endif
				/* we could race with the reply_received code, but the 
				 * worst thing that can happen is to delay a reset_to_t2
				 * for crt_interval and send an extra retr.*/
				rbuf->retr_expire=ticks+retr_interval;
				/* set new interval to -1 on error, or retr_int. on success
				 * (retransmission_handler() returns 0 or (ticks_t)-1, so
				 * OR-ing either keeps retr_interval or saturates to -1) */
				retr_remainder=retransmission_handler(rbuf) | retr_interval;
				/* store the next retr. interval inside the timer struct,
				 * in the data member */
				tl->data=(void*)(unsigned long)(new_retr_interval);
			}else{
				/* retransmission not yet due */
				retr_remainder= rbuf->retr_expire-ticks;
				DBG("tm: timer: retr: nothing to do, expire in %d\n", 
						retr_remainder);
			}
	}
/* skip: */
	/* return minimum of the next retransmission handler and the 
	 * final response (side benefit: it properly cancels timer if ret==0 and
	 *  sleeps for fr_remainder if retr. is canceled [==(ticks_t)-1]) */
	fr_remainder=rbuf->fr_expire-ticks; /* to be more precise use
											get_ticks_raw() instead of ticks
											(but make sure that 
											crt. ticks < fr_expire */
#ifdef TIMER_DEBUG
	DBG("tm: timer retr_buf_handler @%d (%p ->%p->%p) exiting min (%d, %d)\n",
			ticks, tl, rbuf, t, retr_remainder, fr_remainder);
#endif
#ifdef EXTRA_DEBUG
	if  (retr_remainder==0 || fr_remainder==0){
		BUG("tm: timer retr_buf_handler: 0 remainder => disabling timer!: "
				"retr_remainder=%d, fr_remainder=%d\n", retr_remainder,
				fr_remainder);
	}
#endif
	if (retr_remainder<fr_remainder)
		return retr_remainder;
	else{
		/* hack to switch to the slow timer */
#ifdef TM_FAST_RETR_TIMER
		tl->flags&=~F_TIMER_FAST;
#endif
		return fr_remainder;
	}
disabled:
	/* retransmissions disabled: just sleep until the FR expires */
	return rbuf->fr_expire-ticks;
}
641
642
643
/* WAIT timer callback: fires when the transaction pointed to by 'data'
 * has finished its wait period and can be torn down.
 * With TM_DEL_UNREF the cell is removed from the hash table, marked
 * T_IN_AGONY and unref'd/freed; returns 0 (timer stops).
 * Without TM_DEL_UNREF the delete may have to be delayed: returns 0 on
 * success, or a retry delay in ticks if the cell is still referenced
 * (see delete_cell()). */
ticks_t wait_handler(ticks_t ti, struct timer_ln *wait_tl, void* data)
{
	struct cell *p_cell;
	ticks_t ret;

	p_cell=(struct cell*)data;
#ifdef TIMER_DEBUG
	DBG("DEBUG: WAIT timer hit @%d for %p (timer_lm %p)\n", 
			ti, p_cell, wait_tl);
#endif

#ifdef TM_DEL_UNREF
	/* stop cancel timers if any running */
	if ( is_invite(p_cell) ) cleanup_localcancel_timers( p_cell );
	/* remove the cell from the hash table */
	LOCK_HASH( p_cell->hash_index );
	remove_from_hash_table_unsafe(  p_cell );
	UNLOCK_HASH( p_cell->hash_index );
	p_cell->flags |= T_IN_AGONY;
	UNREF_FREE(p_cell);
	ret=0;
#else /* TM_DEL_UNREF */
	if (p_cell->flags & T_IN_AGONY){
		/* delayed delete */
		/* we call delete now without any locking on hash/ref_count;
		   we can do that because delete_handler is only entered after
		   the delete timer was installed from wait_handler, which
		   removed transaction from hash table and did not destroy it
		   because some processes were using it; that means that the
		   processes currently using the transaction can unref and no
		   new processes can ref -- we can wait until ref_count is
		   zero safely without locking
		*/
		ret=delete_cell( p_cell, 0 /* don't unlock on return */ );
	}else {
		/* stop cancel timers if any running */
		if ( is_invite(p_cell) ) cleanup_localcancel_timers( p_cell );
		/* remove the cell from the hash table */
		LOCK_HASH( p_cell->hash_index );
		remove_from_hash_table_unsafe(  p_cell );
		p_cell->flags |= T_IN_AGONY;
		/* delete (returns with UNLOCK-ed_HASH) */
		ret=delete_cell( p_cell, 1 /* unlock on return */ );
	}
#endif /* TM_DEL_UNREF */
	return ret;
}
691