/*
 * $Id$
 *
 * Copyright (C) 2001-2003 FhG Fokus
 *
 * This file is part of ser, a free SIP server.
 *
 * ser is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * For a license to use the ser software under conditions
 * other than those described here, or to purchase support for this
 * software, please contact iptel.org by e-mail at the following addresses:
 *    info@iptel.org
 *
 * ser is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * History
 * -------
 * 2003-03-06  200/INV to-tag list deallocation added;
 *             setting "kill_reason" moved in here -- it is moved
 *             from transaction state to a static var (jiri)
 * 2003-03-16  removed _TOTAG (jiri)
 * 2003-03-30  set_kr for requests only (jiri)
 * 2003-04-04  bug fix: REQ_IN callback not called for local
 *             UAC transactions (jiri)
 * 2003-09-12  timer_link->tg will be set only if EXTRA_DEBUG (andrei)
 * 2003-12-04  global callbacks replaced with callbacks per transaction;
 *             completion callback merged into them as LOCAL_COMPLETED (bogdan)
 * 2004-02-11  FIFO/CANCEL + alignments (hash=f(callid,cseq)) (uli+jiri)
 * 2004-02-13  t->is_invite and t->local replaced with flags;
 *             timer_link.payload removed (bogdan)
 * 2004-08-23  avp support added - move and remove avp list to/from
 *             transactions (bogdan)
 * 2006-08-11  dns failover support (andrei)
 * 2007-05-16  callbacks called on destroy (andrei)
 * 2007-06-06  don't allocate extra space for md5 if not used: syn_branch==1
 *             (andrei)
 * 2007-06-06  switched tm bucket list to a simpler and faster clist (andrei)
 */

#include <stdlib.h>


#include "../../mem/shm_mem.h"
#include "../../hash_func.h"
#include "../../dprint.h"
#include "../../md5utils.h"
#include "../../ut.h"
#include "../../globals.h"
#include "../../error.h"
#include "defs.h"
#include "t_reply.h"
#include "t_cancel.h"
#include "t_stats.h"
#include "h_table.h"
#include "../../fix_lumps.h" /* free_via_clen_lump */
#include "timer.h"
#include "uac.h" /* free_local_ack */


static enum kill_reason kr;

/* pointer to the big table where all the transaction data
   lives */
struct s_table*  _tm_table;


void reset_kr() {
	kr=0;
}

void set_kr( enum kill_reason _kr )
{
	kr|=_kr;
}


enum kill_reason get_kr() {
	return kr;
}


void lock_hash(int i)
{
	lock(&_tm_table->entries[i].mutex);
}


void unlock_hash(int i)
{
	unlock(&_tm_table->entries[i].mutex);
}
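
/* Usage sketch for the two helpers above (illustrative only; the slot index
 * is assumed to be already known to the caller, e.g. from t->hash_index):
 *
 *	lock_hash(hash_index);
 *	... walk or modify the cell list in _tm_table->entries[hash_index] ...
 *	unlock_hash(hash_index);
 *
 * i.e. any access to a slot's cell list is bracketed by that slot's lock
 * (hash_index being the slot computed from the transaction key,
 * hash=f(callid,cseq), see the history above).
 */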



#ifdef TM_HASH_STATS
unsigned int transaction_count( void )
{
	unsigned int i;
	unsigned int count;

	count=0;
	for (i=0; i<TABLE_ENTRIES; i++)
		count+=_tm_table->entries[i].cur_entries;
	return count;
}
#endif
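/* Note: transaction_count() reads each slot's cur_entries without taking the
 * slot lock, so the returned value is only an approximate snapshot, which
 * should be acceptable for statistics purposes. */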



void free_cell( struct cell* dead_cell )
{
	char *b;
	int i;
	struct sip_msg *rpl;
	struct totag_elem *tt, *foo;
	struct tm_callback *cbs, *cbs_tmp;

	release_cell_lock( dead_cell );
	if (unlikely(has_tran_tmcbs(dead_cell, TMCB_DESTROY)))
		run_trans_callbacks(TMCB_DESTROY, dead_cell, 0, 0, 0);

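	/* Everything below is freed with the *_unsafe variants under a single
	 * shm_lock()/shm_unlock() pair, so the shared memory lock is taken only
	 * once per transaction instead of once per freed fragment. */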
	shm_lock();
	/* UA Server */
	if ( dead_cell->uas.request )
		sip_msg_free_unsafe( dead_cell->uas.request );
	if ( dead_cell->uas.response.buffer )
		shm_free_unsafe( dead_cell->uas.response.buffer );

	/* callbacks */
	for( cbs=(struct tm_callback*)dead_cell->tmcb_hl.first ; cbs ; ) {
		cbs_tmp = cbs;
		cbs = cbs->next;
		if (cbs_tmp->release) {
			/* It is safer to release the shm memory lock, otherwise
			 * the release function would have to be aware of the
			 * lock state (Miklos)
			 */
			shm_unlock();
			cbs_tmp->release(cbs_tmp->param);
			shm_lock();
		}
		shm_free_unsafe( cbs_tmp );
	}

	/* UA Clients */
	for ( i =0 ; i<dead_cell->nr_of_outgoings;  i++ )
	{
		/* retransmission buffer */
		if ( (b=dead_cell->uac[i].request.buffer) )
			shm_free_unsafe( b );
		b=dead_cell->uac[i].local_cancel.buffer;
		if (b!=0 && b!=BUSY_BUFFER)
			shm_free_unsafe( b );
		rpl=dead_cell->uac[i].reply;
		if (rpl && rpl!=FAKED_REPLY && rpl->msg_flags&FL_SHM_CLONE) {
			sip_msg_free_unsafe( rpl );
		}
#ifdef USE_DNS_FAILOVER
		if (dead_cell->uac[i].dns_h.a){
			DBG("branch %d -> dns_h.srv (%.*s) ref=%d,"
					" dns_h.a (%.*s) ref=%d\n", i,
					dead_cell->uac[i].dns_h.srv?
						dead_cell->uac[i].dns_h.srv->name_len:0,
					dead_cell->uac[i].dns_h.srv?
						dead_cell->uac[i].dns_h.srv->name:"",
					dead_cell->uac[i].dns_h.srv?
						dead_cell->uac[i].dns_h.srv->refcnt.val:0,
					dead_cell->uac[i].dns_h.a->name_len,
					dead_cell->uac[i].dns_h.a->name,
					dead_cell->uac[i].dns_h.a->refcnt.val);
		}
		dns_srv_handle_put_shm_unsafe(&dead_cell->uac[i].dns_h);
#endif
	}

#ifdef WITH_AS_SUPPORT
	if (dead_cell->uac[0].local_ack)
		free_local_ack_unsafe(dead_cell->uac[0].local_ack);
#endif

	/* collected to-tags */
	tt=dead_cell->fwded_totags;
	while(tt) {
		foo=tt->next;
		shm_free_unsafe(tt->tag.s);
		shm_free_unsafe(tt);
		tt=foo;
	}

	/* free the avp lists */
	if (dead_cell->user_avps_from)
		destroy_avp_list_unsafe( &dead_cell->user_avps_from );
	if (dead_cell->user_avps_to)
		destroy_avp_list_unsafe( &dead_cell->user_avps_to );
	if (dead_cell->uri_avps_from)
		destroy_avp_list_unsafe( &dead_cell->uri_avps_from );
	if (dead_cell->uri_avps_to)
		destroy_avp_list_unsafe( &dead_cell->uri_avps_to );

	/* the cell's body */
	shm_free_unsafe( dead_cell );

	shm_unlock();
	t_stats_freed();
}



static inline void init_synonym_id( struct cell *t )
{
	struct sip_msg *p_msg;
	int size;
	char *c;
	unsigned int myrand;

	if (!syn_branch) {
		p_msg=t->uas.request;
		if (p_msg) {
			/* the char value of a proxied transaction is
			   calculated from the header fields forming the
			   transaction key
			*/
			char_msg_val( p_msg, t->md5 );
		} else {
			/* the char value for a UAC transaction is created
			   randomly -- UAC is an originating stateful element
			   which cannot be refreshed, so the value can be
			   anything
			*/
			/* HACK : not long enough */
			myrand=rand();
			c=t->md5;
			size=MD5_LEN;
			memset(c, '0', size );
			int2reverse_hex( &c, &size, myrand );
		}
	}
}

static inline void init_branches(struct cell *t)
{
	unsigned int i;
	struct ua_client *uac;

	for(i=0;i<MAX_BRANCHES;i++)
	{
		uac=&t->uac[i];
		uac->request.my_T = t;
		uac->request.branch = i;
		init_rb_timers(&uac->request);
		uac->local_cancel=uac->request;
#ifdef USE_DNS_FAILOVER
		dns_srv_handle_init(&uac->dns_h);
#endif
	}
}
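
/* Note: the structure assignment uac->local_cancel=uac->request above makes
 * the local CANCEL retransmission buffer start out as a copy of the request
 * buffer (same owning transaction, branch number and freshly initialized
 * timers); a real CANCEL buffer is attached later only if a CANCEL is
 * actually built (cf. the BUSY_BUFFER check in free_cell() above). */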


struct cell*  build_cell( struct sip_msg* p_msg )
{
	struct cell* new_cell;
	int          sip_msg_len;
	avp_list_t* old;
	struct tm_callback *cbs, *cbs_tmp;

	/* allocs a new cell */
	/* if syn_branch==0 add space for md5 (MD5_LEN - sizeof(struct cell.md5)) */
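	/* (The mask below is a branch-free way of writing the above: when
	 * syn_branch!=0, (syn_branch!=0)-1 is 0 and no extra space is added;
	 * when syn_branch==0 it is all ones, so the full extra md5 room,
	 * MD5_LEN minus the size of the md5 member, is added.) */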
	new_cell = (struct cell*)shm_malloc( sizeof( struct cell )+
			((MD5_LEN-sizeof(((struct cell*)0)->md5))&((syn_branch!=0)-1)) );
	if ( !new_cell ) {
		ser_error=E_OUT_OF_MEM;
		return NULL;
	}

	/* filling with 0 */
	memset( new_cell, 0, sizeof( struct cell ) );

	/* UAS */
	new_cell->uas.response.my_T=new_cell;
	init_rb_timers(&new_cell->uas.response);
	/* timers */
	init_cell_timers(new_cell);

	old = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI,
			&new_cell->uri_avps_from );
	new_cell->uri_avps_from = *old;
	*old = 0;

	old = set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI,
			&new_cell->uri_avps_to );
	new_cell->uri_avps_to = *old;
	*old = 0;

	old = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER,
			&new_cell->user_avps_from );
	new_cell->user_avps_from = *old;
	*old = 0;

	old = set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER,
			&new_cell->user_avps_to );
	new_cell->user_avps_to = *old;
	*old = 0;

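	/* Each set_avp_list() call above makes the transaction's list the
	 * active one for that class/track and returns a pointer to the list
	 * head that was active before; copying *old into the cell and then
	 * zeroing *old moves any AVPs created so far into the transaction. */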
	/* We can just store pointers to the domain avps in the transaction
	 * context, because they are read-only
	 */
	new_cell->domain_avps_from = get_avp_list(AVP_TRACK_FROM |
								AVP_CLASS_DOMAIN);
	new_cell->domain_avps_to = get_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN);

	/* run the REQUEST_IN callbacks, which may potentially want to parse
	 * some stuff, before the request is shmem-ized */
	if (p_msg && has_reqin_tmcbs())
			run_reqin_callbacks( new_cell, p_msg, p_msg->REQ_METHOD);

	if (p_msg) {
#ifndef POSTPONE_MSG_CLONING
		/* it makes no sense to clean the lumps when they are not cloned (Miklos) */

		/* clean possible previously added via/clen header lumps, or else they
		 * would get propagated into the failure routes */
		free_via_clen_lump(&p_msg->add_rm);
#endif
		new_cell->uas.request = sip_msg_cloner(p_msg,&sip_msg_len);
		if (!new_cell->uas.request)
			goto error;
		new_cell->uas.end_request=((char*)new_cell->uas.request)+sip_msg_len;
	}

	/* UAC */
	init_branches(new_cell);

	new_cell->relayed_reply_branch   = -1;
	/* new_cell->T_canceled = T_UNDEFINED; */

	init_synonym_id(new_cell);
	init_cell_lock(  new_cell );
	t_stats_created();
	return new_cell;

error:
	/* Other modules may have already registered some
	 * transaction callbacks and may also have allocated
	 * additional memory for their parameters,
	 * hence TMCB_DESTROY needs to be called. (Miklos)
	 */
	if (unlikely(has_tran_tmcbs(new_cell, TMCB_DESTROY)))
		run_trans_callbacks(TMCB_DESTROY, new_cell, 0, 0, 0);

	/* free the callback list */
	for( cbs=(struct tm_callback*)new_cell->tmcb_hl.first ; cbs ; ) {
		cbs_tmp = cbs;
		cbs = cbs->next;
		if (cbs_tmp->release) {
			cbs_tmp->release(cbs_tmp->param);
		}
		shm_free( cbs_tmp );
	}

	destroy_avp_list(&new_cell->user_avps_from);
	destroy_avp_list(&new_cell->user_avps_to);
	destroy_avp_list(&new_cell->uri_avps_from);
	destroy_avp_list(&new_cell->uri_avps_to);
	shm_free(new_cell);
	/* unlink the transaction AVP lists and link back the global AVP lists
	 * (bogdan) */
	reset_avps();
	return NULL;
}
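
/* Note on ownership (informational): build_cell() returns a cell that is not
 * yet linked into the hash table; the caller (the transaction lookup/creation
 * code) is expected to insert it into the proper slot under that slot's lock
 * and to manage its reference count. free_cell() above is only safe once the
 * transaction has been unlinked and no references to it remain. */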



/* Release all the data contained by the hash table. All the auxiliary
 * structures (locks, lists, etc.) are released as well. */
void free_hash_table(  )
{
	struct cell* p_cell;
	struct cell* tmp_cell;
	int    i;

	if (_tm_table)
	{
		/* remove the data contained by each entry */
		for( i = 0 ; i<TABLE_ENTRIES; i++)
		{
			release_entry_lock( (_tm_table->entries)+i );
			/* delete all synonyms at hash-collision-slot i */
			clist_foreach_safe(&_tm_table->entries[i], p_cell, tmp_cell,
							next_c){
				free_cell(p_cell);
			}
		}
		shm_free(_tm_table);
	}
}




/* Init the hash table: allocate it in shared memory, set up the locking and
 * the per-slot cell lists. Returns the table on success and 0 on error. */
struct s_table* init_hash_table()
{
	int              i;

	/* alloc the table */
	_tm_table= (struct s_table*)shm_malloc( sizeof( struct s_table ) );
	if ( !_tm_table) {
		LOG(L_ERR, "ERROR: init_hash_table: no shmem for TM table\n");
		goto error0;
	}

	memset( _tm_table, 0, sizeof (struct s_table ) );

	/* try first to allocate all the structures needed for syncing */
	if (lock_initialize()==-1)
		goto error1;

	/* init the entries */
	for(  i=0 ; i<TABLE_ENTRIES; i++ )
	{
		init_entry_lock( _tm_table, (_tm_table->entries)+i );
		_tm_table->entries[i].next_label = rand();
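		/* next_label is the per-slot counter used elsewhere in tm when
		 * assigning labels to new transactions in this slot; seeding it
		 * with rand() makes the labels start at an unpredictable value
		 * after each restart */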
		/* init cell list */
		clist_init(&_tm_table->entries[i], next_c, prev_c);
	}

	return  _tm_table;

error1:
	free_hash_table( );
error0:
	return 0;
}

