tm: extended the kemi callbacks after updates to the prototype
[sip-router] / modules / tm / t_fwd.c
1 /*
2  * Copyright (C) 2001-2003 FhG Fokus
3  *
4  * This file is part of Kamailio, a free SIP server.
5  *
6  * Kamailio is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version
10  *
11  * Kamailio is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
19  */
20
21 #include "defs.h"
22
23
24 #include "../../dprint.h"
25 #include "../../config.h"
26 #include "../../parser/parser_f.h"
27 #include "../../ut.h"
28 #include "../../timer.h"
29 #include "../../hash_func.h"
30 #include "../../globals.h"
31 #include "../../cfg_core.h"
32 #include "../../mem/mem.h"
33 #include "../../dset.h"
34 #include "../../action.h"
35 #include "../../data_lump.h"
36 #include "../../onsend.h"
37 #include "../../compiler_opt.h"
38 #include "../../route.h"
39 #include "../../script_cb.h"
40 #include "t_funcs.h"
41 #include "t_hooks.h"
42 #include "t_msgbuilder.h"
43 #include "ut.h"
44 #include "t_cancel.h"
45 #include "t_lookup.h"
46 #include "t_fwd.h"
47 #include "t_reply.h"
48 #include "h_table.h"
49 #include "../../fix_lumps.h"
50 #include "config.h"
51 #ifdef USE_DNS_FAILOVER
52 #include "../../dns_cache.h"
53 #include "../../cfg_core.h" /* cfg_get(core, core_cfg, use_dns_failover) */
54 #include "../../msg_translator.h"
55 #include "lw_parser.h"
56 #endif
57 #ifdef USE_DST_BLACKLIST
58 #include "../../dst_blacklist.h"
59 #endif
60 #include "../../atomic_ops.h" /* membar_depends() */
61 #include "../../kemi.h"
62
63
64 extern int tm_failure_exec_mode;
65 extern int tm_dns_reuse_rcv_socket;
66
67 static int goto_on_branch = 0, branch_route = 0;
68
69 void t_on_branch( unsigned int go_to )
70 {
71         struct cell *t = get_t();
72
73         /* in REPLY_ROUTE and FAILURE_ROUTE T will be set to current transaction;
74          * in REQUEST_ROUTE T will be set only if the transaction was already
75          * created; if not -> use the static variable */
76         if (!t || t==T_UNDEFINED ) {
77                 goto_on_branch=go_to;
78         } else {
79                 t->on_branch = go_to;
80         }
81 }
82
83 unsigned int get_on_branch(void)
84 {
85         return goto_on_branch;
86 }
87
88 void set_branch_route( unsigned int on_branch)
89 {
90         branch_route = on_branch;
91 }
92
93
94
95
96 /** prepares a new branch "buffer".
97  * Creates the buffer used in the branch rb, fills everything needed (
98  * the sending information: t->uac[branch].request.dst, branch buffer, uri,
99  * path vector a.s.o.) and runs the on_branch route.
100  * t->uac[branch].request.dst will be filled if next_hop !=0 with the result
101  * of the DNS resolution (next_hop, fproto and fsocket).
102  * If next_hop is 0, all the dst members except the send_flags are read-only
103  * (send_flags is updated) and are supposed to be pre-filled.
104  *
105  * @param t  - transaction
106  * @param i_req - corresponding sip_msg, must be non-null, flags might be
107  *                modified (on_branch route)
108  * @param branch - branch no
109  * @param uri
110  * @param path  - path vector (list of route like destination in text form,
111  *                 e.g.: "<sip:1.2.3.4;lr>, <sip:5.6.7.8;lr>")
112  * @param next_hop - uri of the next hop. If non 0 it will be used
113  *              for DNS resolution and the branch request.dst structure will
114  *              be filled. If 0 the branch must already have
115  *              a pre-filled valid request.dst.
116  * @param fsocket - forced send socket for forwarding.
117  * @param snd_flags - special flags for sending (see SND_F_* / snd_flags_t).
118  * @param fproto - forced proto for forwarding. Used only if next_hop!=0.
119  * @param flags - 0, UAC_DNS_FAILOVER_F or UAC_SKIP_BR_DST_F for now.
120  *
121  * @return  0 on success, < 0 (ser_error E***) on failure.
122  */
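/* Illustrative invocation (a sketch only; it mirrors the real call sites in
 * add_uac() and e2e_cancel_branch() below, variable names are placeholders):
 *
 *   if ((ret = prepare_new_uac(t, request, branch, uri, path, next_hop,
 *                   fsocket, snd_flags, proto, flags,
 *                   instance, ruid, location_ua)) < 0)
 *       ser_error = ret;  -- e.g. E_OUT_OF_MEM, E_BAD_ADDRESS, E_NO_SOCKET, E_CFG
 */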
123 static int prepare_new_uac( struct cell *t, struct sip_msg *i_req,
124                 int branch, str *uri, str* path,
125                 str* next_hop,
126                 struct socket_info* fsocket,
127                 snd_flags_t snd_flags,
128                 int fproto, int flags,
129                 str *instance, str *ruid,
130                 str *location_ua)
131 {
132         char *shbuf;
133         struct lump* add_rm_backup, *body_lumps_backup;
134         struct sip_uri parsed_uri_bak;
135         int ret;
136         unsigned int len;
137         int parsed_uri_ok_bak, free_new_uri;
138         str msg_uri_bak;
139         str dst_uri_bak;
140         int dst_uri_backed_up;
141         str path_bak;
142         int free_path;
143         str instance_bak;
144         int free_instance;
145         str ruid_bak;
146         int free_ruid;
147         str ua_bak;
148         int free_ua;
149         int backup_route_type;
150         int test_dst;
151         snd_flags_t fwd_snd_flags_bak;
152         snd_flags_t rpl_snd_flags_bak;
153         struct socket_info *force_send_socket_bak;
154         struct dest_info *dst;
155         struct run_act_ctx ctx;
156         struct run_act_ctx *bctx;
157         sr_kemi_eng_t *keng;
158
159         shbuf=0;
160         ret=E_UNSPEC;
161         msg_uri_bak.s=0; /* kill warnings */
162         msg_uri_bak.len=0;
163         parsed_uri_ok_bak=0;
164         free_new_uri=0;
165         dst_uri_bak.s=0;
166         dst_uri_bak.len=0;
167         dst_uri_backed_up=0;
168         path_bak.s=0;
169         path_bak.len=0;
170         free_path=0;
171         instance_bak.s=0;
172         instance_bak.len=0;
173         free_instance=0;
174         ruid_bak.s=0;
175         ruid_bak.len=0;
176         free_ruid=0;
177         ua_bak.s=0;
178         ua_bak.len=0;
179         free_ua=0;
180         dst=&t->uac[branch].request.dst;
181
182         /* ... we calculate branch ... */
183         if (!t_calc_branch(t, branch, i_req->add_to_branch_s,
184                                 &i_req->add_to_branch_len ))
185         {
186                 LM_ERR("branch computation failed\n");
187                 ret=E_UNSPEC;
188                 goto error00;
189         }
190
191         /* dup lumps
192          * TODO: clone lumps only if needed */
193         /* lumps can be set outside of the lock, make sure that we read
194          * the up-to-date values */
195         membar_depends();
196         add_rm_backup = i_req->add_rm;
197         body_lumps_backup = i_req->body_lumps;
198         if (unlikely(i_req->add_rm)){
199                 i_req->add_rm = dup_lump_list(i_req->add_rm);
200                 if (unlikely(i_req->add_rm==0)){
201                         ret=E_OUT_OF_MEM;
202                         goto error04;
203                 }
204         }
205         if (unlikely(i_req->body_lumps)){
206                 i_req->body_lumps = dup_lump_list(i_req->body_lumps);
207                 if (unlikely(i_req->body_lumps==0)){
208                         ret=E_OUT_OF_MEM;
209                         goto error04;
210                 }
211         }
212         /* backup uri & path: we need to change them so that build_req...()
213          * will use uri & path and not the ones in the original msg (i_req)
214          * => we must back them up so that we can restore them to the original
215          * value after building the send buffer */
216         msg_uri_bak=i_req->new_uri;
217         parsed_uri_bak=i_req->parsed_uri;
218         parsed_uri_ok_bak=i_req->parsed_uri_ok;
219         path_bak=i_req->path_vec;
220         instance_bak=i_req->instance;
221         ruid_bak=i_req->ruid;
222         ua_bak=i_req->location_ua;
223
224         if (unlikely(branch_route || has_tran_tmcbs(t, TMCB_REQUEST_FWDED))){
225                 /* dup uris, path a.s.o. if we have a branch route or callback */
226                 /* ... set ruri ... */
227                 /* if uri points to new_uri, it needs to be "fixed" so that we can
228                  * change msg->new_uri */
229                 if (uri==&i_req->new_uri)
230                         uri=&msg_uri_bak;
231                 i_req->parsed_uri_ok=0;
232                 i_req->new_uri.s=pkg_malloc(uri->len);
233                 if (unlikely(i_req->new_uri.s==0)){
234                         ret=E_OUT_OF_MEM;
235                         goto error03;
236                 }
237                 free_new_uri=1;
238                 memcpy(i_req->new_uri.s, uri->s, uri->len);
239                 i_req->new_uri.len=uri->len;
240
241                 /* update path_vec */
242                 /* if path points to msg path_vec, it needs to be "fixed" so that we
243                  * can change/update msg->path_vec */
244                 if (path==&i_req->path_vec)
245                         path=&path_bak;
246                 /* zero it first so that set_path_vector will work */
247                 i_req->path_vec.s=0;
248                 i_req->path_vec.len=0;
249                 if (unlikely(path)){
250                         if (unlikely(set_path_vector(i_req, path)<0)){
251                                 ret=E_OUT_OF_MEM;
252                                 goto error03;
253                         }
254                         free_path=1;
255                 }
256                 /* update instance */
257                 /* if instance points to msg instance, it needs to be "fixed" so that we
258                  * can change/update msg->instance */
259                 if (instance==&i_req->instance)
260                         instance=&instance_bak;
261                 /* zero it first so that set_instance will work */
262                 i_req->instance.s=0;
263                 i_req->instance.len=0;
264                 if (unlikely(instance)){
265                         if (unlikely(set_instance(i_req, instance)<0)){
266                                 ret=E_OUT_OF_MEM;
267                                 goto error03;
268                         }
269                         free_instance=1;
270                 }
271
272                 /* update ruid */
273                 /* if ruid points to msg ruid, it needs to be "fixed" so that we
274                  * can change/update msg->ruid */
275                 if (ruid==&i_req->ruid)
276                         ruid=&ruid_bak;
277                 /* zero it first so that set_ruid will work */
278                 i_req->ruid.s=0;
279                 i_req->ruid.len=0;
280                 if (unlikely(ruid)){
281                         if (unlikely(set_ruid(i_req, ruid)<0)){
282                                 ret=E_OUT_OF_MEM;
283                                 goto error03;
284                         }
285                         free_ruid=1;
286                 }
287
288                 /* update location_ua */
289                 /* if location_ua points to msg location_ua, it needs to be "fixed"
290                  * so that we can change/update msg->location_ua */
291                 if (location_ua==&i_req->location_ua)
292                         location_ua=&ua_bak;
293                 /* zero it first so that set_ua will work */
294                 i_req->location_ua.s=0;
295                 i_req->location_ua.len=0;
296                 if (unlikely(location_ua)){
297                         if (unlikely(set_ua(i_req, location_ua)<0)){
298                                 ret=E_OUT_OF_MEM;
299                                 goto error03;
300                         }
301                         free_ua=1;
302                 }
303
304                 /* backup dst uri & zero it */
305                 dst_uri_bak=i_req->dst_uri;
306                 dst_uri_backed_up=1;
307                 /* if next_hop points to dst_uri, it needs to be "fixed" so that we
308                  * can change msg->dst_uri */
309                 if (next_hop==&i_req->dst_uri)
310                         next_hop=&dst_uri_bak;
311                 /* zero it first so that set_dst_uri will work */
312                 i_req->dst_uri.s=0;
313                 i_req->dst_uri.len=0;
314                 if (likely(next_hop)){
315                         if(unlikely((flags & UAC_SKIP_BR_DST_F)==0)){
316                                 /* set dst uri to next_hop for the on_branch route */
317                                 if (unlikely(set_dst_uri(i_req, next_hop)<0)){
318                                         ret=E_OUT_OF_MEM;
319                                         goto error03;
320                                 }
321                         }
322                 }
323
324                 if (likely(branch_route)) {
325                         /* run branch_route actions if provided */
326                         backup_route_type = get_route_type();
327                         set_route_type(BRANCH_ROUTE);
328                         tm_ctx_set_branch_index(branch);
329                         /* no need to backup/set avp lists: the on_branch route is run
330                          * only in the main route context (e.g. t_relay() in the main
331                          * route) or in the failure route context (e.g. append_branch &
332                          * t_relay()) and in both cases the avp lists are properly set
333                          * Note: the branch route is not run on delayed dns failover
334                          * (for that to work one would have to set branch_route prior to
335                          * calling add_uac(...) and then reset it afterwards).
336                          */
337                         if (exec_pre_script_cb(i_req, BRANCH_CB_TYPE)>0) {
338                                 /* backup ireq msg send flags and force_send_socket */
339                                 fwd_snd_flags_bak=i_req->fwd_send_flags;
340                                 rpl_snd_flags_bak=i_req->rpl_send_flags;
341                                 force_send_socket_bak=i_req->force_send_socket;
342                                 /* set the new values */
343                                 i_req->fwd_send_flags=snd_flags; /* initial value */
344                                 set_force_socket(i_req, fsocket);
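                                /* if a KEMI scripting engine (e.g. app_lua, app_python) is
                                 * registered, the branch route is executed through the
                                 * engine's froute() callback with a freshly initialized
                                 * run_act_ctx, and the previous KEMI action context is
                                 * restored afterwards; otherwise the native cfg branch
                                 * route is run via run_top_route() */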
345                                 keng = sr_kemi_eng_get();
346                                 if(unlikely(keng!=NULL)) {
347                                         bctx = sr_kemi_act_ctx_get();
348                                         init_run_actions_ctx(&ctx);
349                                         sr_kemi_act_ctx_set(&ctx);
350                                         if(keng->froute(i_req, BRANCH_ROUTE,
351                                                         sr_kemi_cbname_lookup_idx(branch_route), NULL)<0) {
352                                                 LM_ERR("error running branch route kemi callback\n");
353                                         }
354                                         sr_kemi_act_ctx_set(bctx);
355                                 } else {
356                                         if (run_top_route(branch_rt.rlist[branch_route],
357                                                                 i_req, &ctx) < 0) {
358                                                 LM_DBG("negative return code in run_top_route\n");
359                                         }
360                                 }
361                                 /* update dst send_flags and send socket */
362                                 snd_flags=i_req->fwd_send_flags;
363                                 fsocket=i_req->force_send_socket;
364                                 /* restore ireq_msg force_send_socket & flags */
365                                 set_force_socket(i_req, force_send_socket_bak);
366                                 i_req->fwd_send_flags=fwd_snd_flags_bak;
367                                 i_req->rpl_send_flags=rpl_snd_flags_bak;
368                                 exec_post_script_cb(i_req, BRANCH_CB_TYPE);
369                                 /* if DROP was called in cfg, don't forward, jump to end */
370                                 if (unlikely(ctx.run_flags&DROP_R_F))
371                                 {
372                                         tm_ctx_set_branch_index(T_BR_UNDEFINED);
373                                         set_route_type(backup_route_type);
374                                         /* triggered by drop in CFG */
375                                         ret=E_CFG;
376                                         goto error03;
377                                 }
378                         }
379                         tm_ctx_set_branch_index(T_BR_UNDEFINED);
380                         set_route_type(backup_route_type);
381                 }
382
383                 /* run the specific callbacks for this transaction */
384                 if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_FWDED)))
385                         run_trans_callbacks( TMCB_REQUEST_FWDED , t, i_req, 0,
386                                         -i_req->REQ_METHOD);
387
388                 if (likely( !(flags & UAC_DNS_FAILOVER_F) && i_req->dst_uri.s &&
389                                         i_req->dst_uri.len)){
390                         /* no dns failover and non-empty dst_uri => use it as dst
391                          * (on dns failover dns_h will be non-empty => next_hop will be
392                          * ignored) */
393                         next_hop=&i_req->dst_uri;
394                 }
395                 /* no path vector initially, but it may have been set by the branch
396                  * route or the callbacks executed above */
397                 if(i_req->path_vec.s!=0 && free_path==0)
398                         free_path=1;
399         }else{
400                 /* no branch route and no TMCB_REQUEST_FWDED callback => set
401                  * msg uri and path to the new values (if needed) */
402                 if (unlikely((uri->s!=i_req->new_uri.s || uri->len!=i_req->new_uri.len)
403                                         && (i_req->new_uri.s!=0 ||
404                                                 uri->s!=i_req->first_line.u.request.uri.s ||
405                                                 uri->len!=i_req->first_line.u.request.uri.len) )){
406                         /* uri is different from i_req uri => replace i_req uri and force
407                          * uri re-parsing */
408                         i_req->new_uri=*uri;
409                         i_req->parsed_uri_ok=0;
410                 }
411                 if (unlikely(path && (i_req->path_vec.s!=path->s ||
412                                                 i_req->path_vec.len!=path->len))){
413                         i_req->path_vec=*path;
414                 }else if (unlikely(path==0 && i_req->path_vec.len!=0)){
415                         i_req->path_vec.s=0;
416                         i_req->path_vec.len=0;
417                 }
418                 if (unlikely(instance && (i_req->instance.s!=instance->s ||
419                                                 i_req->instance.len!=instance->len))){
420                         i_req->instance=*instance;
421                 }else if (unlikely(instance==0 && i_req->instance.len!=0)){
422                         i_req->instance.s=0;
423                         i_req->instance.len=0;
424                 }
425                 if (unlikely(ruid && (i_req->ruid.s!=ruid->s ||
426                                                 i_req->ruid.len!=ruid->len))){
427                         i_req->ruid=*ruid;
428                 }else if (unlikely(ruid==0 && i_req->ruid.len!=0)){
429                         i_req->ruid.s=0;
430                         i_req->ruid.len=0;
431                 }
432                 if (unlikely(location_ua && (i_req->location_ua.s!=location_ua->s ||
433                                                 i_req->location_ua.len!=location_ua->len))){
434                         i_req->location_ua=*location_ua;
435                 }else if (unlikely(location_ua==0 && i_req->location_ua.len!=0)){
436                         i_req->location_ua.s=0;
437                         i_req->location_ua.len=0;
438                 }
439         }
440
441         if (likely(next_hop!=0 || (flags & UAC_DNS_FAILOVER_F))){
442                 /* next_hop present => use it for dns resolution */
443 #ifdef USE_DNS_FAILOVER
444                 test_dst = (uri2dst2(&t->uac[branch].dns_h, dst, fsocket, snd_flags,
445                                         next_hop?next_hop:uri, fproto) == 0);
446 #else
447                 /* dst filled from the uri & request (send_socket) */
448                 test_dst = (uri2dst2(dst, fsocket, snd_flags,
449                                         next_hop?next_hop:uri, fproto)==0);
450 #endif
451                 if (test_dst){
452                         ret=E_BAD_ADDRESS;
453                         goto error01;
454                 }
455         }
456         /* else next_hop==0 =>
457          * no dst_uri / empty dst_uri and initial next_hop==0 =>
458          * dst is pre-filled with a valid dst => use the pre-filled dst */
459
460         /* set the on_reply, on_failure and on_branch_failure handlers for this branch to the handlers in the transaction */
461         t->uac[branch].on_reply = t->on_reply;
462         t->uac[branch].on_failure = t->on_failure;
463         t->uac[branch].on_branch_failure = t->on_branch_failure;
464
465         /* check if send_sock is ok */
466         if (t->uac[branch].request.dst.send_sock==0) {
467                 LM_ERR("can't fwd to af %d, proto %d "
468                                 " (no corresponding listening socket)\n",
469                                 dst->to.s.sa_family, dst->proto );
470                 ret=E_NO_SOCKET;
471                 goto error01;
472         }
473         /* ... and build it now */
474         shbuf=build_req_buf_from_sip_req( i_req, &len, dst, BUILD_IN_SHM);
475         if (!shbuf) {
476                 LM_ERR("could not build request\n");
477                 ret=E_OUT_OF_MEM;
478                 goto error01;
479         }
480 #ifdef DBG_MSG_QA
481         if (shbuf[len-1]==0) {
482                 LM_ERR("sanity check failed\n");
483                 abort();
484         }
485 #endif
486         /* things went well, move ahead and install new buffer! */
487         t->uac[branch].request.buffer=shbuf;
488         t->uac[branch].request.buffer_len=len;
489         t->uac[branch].uri.s=t->uac[branch].request.buffer+
490                 i_req->first_line.u.request.method.len+1;
491         t->uac[branch].uri.len=GET_RURI(i_req)->len;
492         if (unlikely(i_req->path_vec.s && i_req->path_vec.len)){
493                 t->uac[branch].path.s=shm_malloc(i_req->path_vec.len+1);
494                 if (unlikely(t->uac[branch].path.s==0)) {
495                         shm_free(shbuf);
496                         t->uac[branch].request.buffer=0;
497                         t->uac[branch].request.buffer_len=0;
498                         t->uac[branch].uri.s=0;
499                         t->uac[branch].uri.len=0;
500                         ret=E_OUT_OF_MEM;
501                         goto error01;
502                 }
503                 t->uac[branch].path.len=i_req->path_vec.len;
504                 t->uac[branch].path.s[i_req->path_vec.len]=0;
505                 memcpy( t->uac[branch].path.s, i_req->path_vec.s, i_req->path_vec.len);
506         }
507         if (unlikely(i_req->instance.s && i_req->instance.len)){
508                 t->uac[branch].instance.s=shm_malloc(i_req->instance.len+1);
509                 if (unlikely(t->uac[branch].instance.s==0)) {
510                         shm_free(shbuf);
511                         t->uac[branch].request.buffer=0;
512                         t->uac[branch].request.buffer_len=0;
513                         t->uac[branch].uri.s=0;
514                         t->uac[branch].uri.len=0;
515                         ret=E_OUT_OF_MEM;
516                         goto error01;
517                 }
518                 t->uac[branch].instance.len=i_req->instance.len;
519                 t->uac[branch].instance.s[i_req->instance.len]=0;
520                 memcpy( t->uac[branch].instance.s, i_req->instance.s, i_req->instance.len);
521         }
522         if (unlikely(i_req->ruid.s && i_req->ruid.len)){
523                 t->uac[branch].ruid.s=shm_malloc(i_req->ruid.len+1);
524                 if (unlikely(t->uac[branch].ruid.s==0)) {
525                         shm_free(shbuf);
526                         t->uac[branch].request.buffer=0;
527                         t->uac[branch].request.buffer_len=0;
528                         t->uac[branch].uri.s=0;
529                         t->uac[branch].uri.len=0;
530                         ret=E_OUT_OF_MEM;
531                         goto error01;
532                 }
533                 t->uac[branch].ruid.len=i_req->ruid.len;
534                 t->uac[branch].ruid.s[i_req->ruid.len]=0;
535                 memcpy( t->uac[branch].ruid.s, i_req->ruid.s, i_req->ruid.len);
536         }
537         if (unlikely(i_req->location_ua.s && i_req->location_ua.len)){
538                 t->uac[branch].location_ua.s=shm_malloc(i_req->location_ua.len+1);
539                 if (unlikely(t->uac[branch].location_ua.s==0)) {
540                         shm_free(shbuf);
541                         t->uac[branch].request.buffer=0;
542                         t->uac[branch].request.buffer_len=0;
543                         t->uac[branch].uri.s=0;
544                         t->uac[branch].uri.len=0;
545                         ret=E_OUT_OF_MEM;
546                         goto error01;
547                 }
548                 t->uac[branch].location_ua.len=i_req->location_ua.len;
549                 t->uac[branch].location_ua.s[i_req->location_ua.len]=0;
550                 memcpy( t->uac[branch].location_ua.s, i_req->location_ua.s,
551                                 i_req->location_ua.len);
552         }
553
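        /* note: one applied Record-Route lump means a single RR header was added
         * for this branch (TM_UAC_FLAG_RR); two lumps presumably correspond to
         * double record-routing (TM_UAC_FLAG_RR|TM_UAC_FLAG_R2) */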
554         len = count_applied_lumps(i_req->add_rm, HDR_RECORDROUTE_T);
555         if(len==1)
556                 t->uac[branch].flags = TM_UAC_FLAG_RR;
557         else if(len==2)
558                 t->uac[branch].flags = TM_UAC_FLAG_RR|TM_UAC_FLAG_R2;
559
560         ret=0;
561
562 error01:
563 error03:
564         /* free the temporary new_uri, path, instance, ruid, ua and dst_uri values */
565         if (unlikely(free_new_uri && i_req->new_uri.s)){
566                 pkg_free(i_req->new_uri.s);
567         }
568         if (unlikely(free_path)){
569                 reset_path_vector(i_req);
570         }
571         if (unlikely(free_instance)){
572                 reset_instance(i_req);
573         }
574         if (unlikely(free_ruid)){
575                 reset_ruid(i_req);
576         }
577         if (unlikely(free_ua)){
578                 reset_ua(i_req);
579         }
580         if (dst_uri_backed_up){
581                 reset_dst_uri(i_req); /* free dst_uri */
582                 i_req->dst_uri=dst_uri_bak;
583         }
584         /* restore the original new_uri, parsed_uri, path, instance, ruid and ua values */
585         i_req->new_uri=msg_uri_bak;
586         i_req->parsed_uri=parsed_uri_bak;
587         i_req->parsed_uri_ok=parsed_uri_ok_bak;
588         i_req->path_vec=path_bak;
589         i_req->instance=instance_bak;
590         i_req->ruid=ruid_bak;
591         i_req->location_ua=ua_bak;
592
593         /* Delete the duplicated lump lists; this will also delete
594          * all lumps created here, such as lumps created in per-branch
595          * routing sections, Via, and Content-Length headers created in
596          * build_req_buf_from_sip_req
597          */
598 error04:
599         free_duped_lump_list(i_req->add_rm);
600         free_duped_lump_list(i_req->body_lumps);
601         /* Restore the lists from backups */
602         i_req->add_rm = add_rm_backup;
603         i_req->body_lumps = body_lumps_backup;
604
605 error00:
606         return ret;
607 }
608
609 #ifdef USE_DNS_FAILOVER
610 /* Similar to print_uac_request(), but this function uses the outgoing message
611  * buffer of the failed branch to construct the new message in case of DNS
612  * failover.
613  *
614  * WARNING: only the first VIA header is replaced in the buffer, the rest
615  * of the message is untouched, thus, the send socket is corrected only in the
616  * VIA HF.
617  */
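/* As a sketch, the new buffer assembled below is:
 *
 *   shbuf = buf[0 .. old_via_begin) + new_via + buf[old_via_end .. buf_len)
 *
 * which is what the three memcpy() calls in this function implement.
 */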
618 static char *print_uac_request_from_buf( struct cell *t, struct sip_msg *i_req,
619                 int branch, str *uri, unsigned int *len, struct dest_info* dst,
620                 char *buf, short buf_len)
621 {
622         char *shbuf;
623         str branch_str;
624         char *via, *old_via_begin, *old_via_end;
625         unsigned int via_len;
626
627         shbuf=0;
628
629         /* ... we calculate branch ... */
630         if (!t_calc_branch(t, branch, i_req->add_to_branch_s,
631                                 &i_req->add_to_branch_len ))
632         {
633                 LM_ERR("branch computation failed\n");
634                 goto error00;
635         }
636         branch_str.s = i_req->add_to_branch_s;
637         branch_str.len = i_req->add_to_branch_len;
638
639         /* find the beginning of the first via header in the buffer */
640         old_via_begin = lw_find_via(buf, buf+buf_len);
641         if (!old_via_begin) {
642                 LM_ERR("beginning of via header not found\n");
643                 goto error00;
644         }
645         /* find the end of the first via header in the buffer */
646         old_via_end = lw_next_line(old_via_begin, buf+buf_len);
647         if (!old_via_end) {
648                 LM_ERR("end of via header not found\n");
649                 goto error00;
650         }
651
652         /* create the new VIA HF */
653         via = create_via_hf(&via_len, i_req, dst, &branch_str);
654         if (!via) {
655                 LM_ERR("via building failed\n");
656                 goto error00;
657         }
658
659         /* allocate memory for the new buffer */
660         *len = buf_len + via_len - (old_via_end - old_via_begin);
661         shbuf=(char *)shm_malloc(*len);
662         if (!shbuf) {
663                 ser_error=E_OUT_OF_MEM;
664                 LM_ERR("no shmem\n");
665                 goto error01;
666         }
667
668         /* construct the new buffer */
669         memcpy(shbuf, buf, old_via_begin-buf);
670         memcpy(shbuf+(old_via_begin-buf), via, via_len);
671         memcpy(shbuf+(old_via_begin-buf)+via_len, old_via_end, (buf+buf_len)-old_via_end);
672
673 #ifdef DBG_MSG_QA
674         if (shbuf[*len-1]==0) {
675                 LM_ERR("sanity check failed\n");
676                 abort();
677         }
678 #endif
679
680 error01:
681         pkg_free(via);
682 error00:
683         return shbuf;
684 }
685 #endif
686
687 /* introduce a new uac, which is blind -- it only creates the
688  * data structures and starts the FR timer, but that's it; it does
689  * not print or send anything anywhere; that is good
690  * for FIFO apps -- the transaction must look operational
691  * and FR must be ticking, whereas the request is "forwarded"
692  * using a non-SIP way and will be replied to the same way
693  */
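/* Hypothetical usage sketch from code that answers the request itself
 * (e.g. a FIFO/RPC application):
 *
 *   if (add_blind_uac() < 0)
 *       return -1;   -- no transaction context or too many branches
 *   ... hand the request over in a non-SIP way; the reply is injected
 *       later through the tm reply functions ...
 */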
694 int add_blind_uac( /*struct cell *t*/ )
695 {
696         unsigned short branch;
697         struct cell *t;
698
699         t=get_t();
700         if (t==T_UNDEFINED || !t ) {
701                 LM_ERR("no transaction context\n");
702                 return -1;
703         }
704
705         branch=t->nr_of_outgoings;
706         if (branch==sr_dst_max_branches) {
707                 LM_ERR("maximum number of branches exceeded\n");
708                 return -1;
709         }
710         /* make sure it will be replied */
711         t->flags |= T_NOISY_CTIMER_FLAG;
712         membar_write(); /* to allow lockless prepare_to_cancel() we want to be sure
713                                          * all the writes finished before updating branch number*/
714
715         t->uac[branch].flags |= TM_UAC_FLAG_BLIND;
716         t->nr_of_outgoings=(branch+1);
717         t->async_backup.blind_uac = branch;
718         /* ^^^ whenever we create a blind UAC, let's save the current branch;
719          * this is used in async tm processing, specifically to be able to route
720          * replies that were possibly in response to a request forwarded on this
721          * blind UAC; we still want replies to be processed as if it were a normal UAC */
722
723         /* start FR timer -- protocol set by default to PROTO_NONE,
724          * which means retransmission timer will not be started
725          */
726         if (start_retr(&t->uac[branch].request)!=0)
727                 LM_CRIT("start retr failed for %p\n", &t->uac[branch].request);
728         /* we are on a timer -- don't need to put on wait on script clean-up */
729         set_kr(REQ_FWDED);
730
731         return 1; /* success */
732 }
733
734 /** introduce a new uac to transaction.
735  *  It doesn't send a message yet -- a reply to it might interfere with the
736  *  process of adding multiple branches; on error returns <0 & sets ser_error
737  *  to the same value.
738  *  @param t - transaction
739  *  @param request - corresponding sip_msg, must be non-null, flags might be
740  *                   modified (on_branch route).
741  *  @param uri - uri used for the branch (must be non-null).
742  *  @param next_hop - next_hop in sip uri format. If null and proxy is null
743  *                    too, the uri will be used
744  *  @param path     - path vector (list of route like destinations in sip
745  *                     uri format, e.g.: "<sip:1.2.3.4;lr>, <sip:5.6.7.8;lr>").
746  *  @param proxy    - proxy structure. If non-null it takes precedence over
747  *                    next_hop/uri and it will be used for forwarding.
748  *  @param fsocket  - forced forward send socket (can be 0).
749  *  @param snd_flags - special send flags (see SND_F_* / snd_flags_t)
750  *  @param proto    - forced protocol for forwarding (overrides the protocol
751  *                    in next_hop/uri or proxy if != PROTO_NONE).
752  *  @param flags    - special flags passed to prepare_new_uac().
753  *                    @see prepare_new_uac().
754  *  @returns branch id (>=0) or error (<0)
755  */
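/* Illustrative call (a sketch only; see the real invocation in
 * add_uac_dns_fallback() below), forwarding to the request URI with no
 * forced socket, proxy or protocol:
 *
 *   branch = add_uac(t, request, GET_RURI(request), 0, 0, 0,
 *                   0, request->fwd_send_flags, PROTO_NONE, 0, 0, 0, 0);
 *   if (branch < 0)
 *       ... error, ser_error is already set by add_uac() ...
 */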
756 int add_uac( struct cell *t, struct sip_msg *request, str *uri,
757                 str* next_hop, str* path, struct proxy_l *proxy,
758                 struct socket_info* fsocket, snd_flags_t snd_flags,
759                 int proto, int flags, str *instance, str *ruid,
760                 str *location_ua)
761 {
762
763         int ret;
764         unsigned short branch;
765
766         branch=t->nr_of_outgoings;
767         if (branch==sr_dst_max_branches) {
768                 LM_ERR("maximum number of branches exceeded\n");
769                 ret=ser_error=E_TOO_MANY_BRANCHES;
770                 goto error;
771         }
772
773         /* check existing buffer -- rewriting should never occur */
774         if (t->uac[branch].request.buffer) {
775                 LM_CRIT("buffer rewrite attempt\n");
776                 ret=ser_error=E_BUG;
777                 goto error;
778         }
779
780         /* check DNS resolution */
781         if (proxy){
782                 /* dst filled from the proxy */
783                 init_dest_info(&t->uac[branch].request.dst);
784                 t->uac[branch].request.dst.proto=get_proto(proto, proxy->proto);
785                 proxy2su(&t->uac[branch].request.dst.to, proxy);
786                 /* fill dst send_sock */
787                 t->uac[branch].request.dst.send_sock =
788                         get_send_socket( request, &t->uac[branch].request.dst.to,
789                                         t->uac[branch].request.dst.proto);
790                 if (request)
791                         t->uac[branch].request.dst.send_flags=request->fwd_send_flags;
792                 else
793                         SND_FLAGS_INIT(&t->uac[branch].request.dst.send_flags);
794                 next_hop=0;
795         }else {
796                 next_hop= next_hop?next_hop:uri;
797         }
798
799         /* now message printing starts ... */
800         if (unlikely( (ret=prepare_new_uac(t, request, branch, uri, path,
801                                                 next_hop, fsocket, snd_flags,
802                                                 proto, flags, instance, ruid,
803                                                 location_ua)) < 0)){
804                 ser_error=ret;
805                 goto error01;
806         }
807         getbflagsval(0, &t->uac[branch].branch_flags);
808         membar_write(); /* to allow lockless ops (e.g. prepare_to_cancel()) we want
809                                          * to be sure everything above is fully written before
810                                          * updating branches no. */
811         t->nr_of_outgoings=(branch+1);
812
813         /* update stats */
814         if (proxy){
815                 proxy_mark(proxy, 1);
816         }
817         /* done! */
818         ret=branch;
819
820 error01:
821 error:
822         return ret;
823 }
824
825
826
827 #ifdef USE_DNS_FAILOVER
828 /* Similar to add_uac(), but this function uses the outgoing message buffer
829  * of the failed branch to construct the new message in case of DNS failover.
830  */
831 static int add_uac_from_buf( struct cell *t, struct sip_msg *request,
832                 str *uri, str* path,
833                 struct socket_info* fsocket,
834                 snd_flags_t send_flags,
835                 int proto,
836                 char *buf, short buf_len,
837                 str *instance, str *ruid,
838                 str *location_ua)
839 {
840
841         int ret;
842         unsigned short branch;
843         char *shbuf;
844         unsigned int len;
845
846         branch=t->nr_of_outgoings;
847         if (branch==sr_dst_max_branches) {
848                 LM_ERR("maximum number of branches exceeded\n");
849                 ret=ser_error=E_TOO_MANY_BRANCHES;
850                 goto error;
851         }
852
853         /* check existing buffer -- rewriting should never occur */
854         if (t->uac[branch].request.buffer) {
855                 LM_CRIT("buffer rewrite attempt\n");
856                 ret=ser_error=E_BUG;
857                 goto error;
858         }
859
860         if (uri2dst2(&t->uac[branch].dns_h, &t->uac[branch].request.dst,
861                                 fsocket, send_flags, uri, proto) == 0)
862         {
863                 ret=ser_error=E_BAD_ADDRESS;
864                 goto error;
865         }
866
867         /* check if send_sock is ok */
868         if (t->uac[branch].request.dst.send_sock==0) {
869                 LM_ERR("can't fwd to af %d, proto %d"
870                                 " (no corresponding listening socket)\n",
871                                 t->uac[branch].request.dst.to.s.sa_family,
872                                 t->uac[branch].request.dst.proto );
873                 ret=ser_error=E_NO_SOCKET;
874                 goto error;
875         }
876
877         /* now message printing starts ... */
878         shbuf=print_uac_request_from_buf( t, request, branch, uri,
879                         &len, &t->uac[branch].request.dst,
880                         buf, buf_len);
881         if (!shbuf) {
882                 ret=ser_error=E_OUT_OF_MEM;
883                 goto error;
884         }
885
886         /* things went well, move ahead and install new buffer! */
887         t->uac[branch].request.buffer=shbuf;
888         t->uac[branch].request.buffer_len=len;
889         t->uac[branch].uri.s=t->uac[branch].request.buffer+
890                 request->first_line.u.request.method.len+1;
891         t->uac[branch].uri.len=uri->len;
892         /* copy the path */
893         if (unlikely(path && path->s)){
894                 t->uac[branch].path.s=shm_malloc(path->len+1);
895                 if (unlikely(t->uac[branch].path.s==0)) {
896                         shm_free(shbuf);
897                         t->uac[branch].request.buffer=0;
898                         t->uac[branch].request.buffer_len=0;
899                         t->uac[branch].uri.s=0;
900                         t->uac[branch].uri.len=0;
901                         ret=ser_error=E_OUT_OF_MEM;
902                         goto error;
903                 }
904                 t->uac[branch].path.len=path->len;
905                 t->uac[branch].path.s[path->len]=0;
906                 memcpy( t->uac[branch].path.s, path->s, path->len);
907         }
908         /* copy the instance */
909         if (unlikely(instance && instance->s)){
910                 t->uac[branch].instance.s=shm_malloc(instance->len+1);
911                 if (unlikely(t->uac[branch].instance.s==0)) {
912                         shm_free(shbuf);
913                         t->uac[branch].request.buffer=0;
914                         t->uac[branch].request.buffer_len=0;
915                         t->uac[branch].uri.s=0;
916                         t->uac[branch].uri.len=0;
917                         ret=ser_error=E_OUT_OF_MEM;
918                         goto error;
919                 }
920                 t->uac[branch].instance.len=instance->len;
921                 t->uac[branch].instance.s[instance->len]=0;
922                 memcpy( t->uac[branch].instance.s, instance->s, instance->len);
923         }
924         /* copy the ruid */
925         if (unlikely(ruid && ruid->s)){
926                 t->uac[branch].ruid.s=shm_malloc(ruid->len+1);
927                 if (unlikely(t->uac[branch].ruid.s==0)) {
928                         shm_free(shbuf);
929                         t->uac[branch].request.buffer=0;
930                         t->uac[branch].request.buffer_len=0;
931                         t->uac[branch].uri.s=0;
932                         t->uac[branch].uri.len=0;
933                         ret=ser_error=E_OUT_OF_MEM;
934                         goto error;
935                 }
936                 t->uac[branch].ruid.len=ruid->len;
937                 t->uac[branch].ruid.s[ruid->len]=0;
938                 memcpy( t->uac[branch].ruid.s, ruid->s, ruid->len);
939         }
940         /* copy the location_ua */
941         if (unlikely(location_ua && location_ua->s)){
942                 t->uac[branch].location_ua.s=shm_malloc(location_ua->len+1);
943                 if (unlikely(t->uac[branch].location_ua.s==0)) {
944                         shm_free(shbuf);
945                         t->uac[branch].request.buffer=0;
946                         t->uac[branch].request.buffer_len=0;
947                         t->uac[branch].uri.s=0;
948                         t->uac[branch].uri.len=0;
949                         ret=ser_error=E_OUT_OF_MEM;
950                         goto error;
951                 }
952                 t->uac[branch].location_ua.len=location_ua->len;
953                 t->uac[branch].location_ua.s[location_ua->len]=0;
954                 memcpy( t->uac[branch].location_ua.s, location_ua->s, location_ua->len);
955         }
956
957         t->uac[branch].on_reply = t->on_reply;
958         t->uac[branch].on_failure = t->on_failure;
959         t->uac[branch].on_branch_failure = t->on_branch_failure;
960
961         membar_write(); /* to allow lockless ops (e.g. prepare_to_cancel()) we want
962                                          * to be sure everything above is fully written before
963                                          * updating branches no. */
964         t->nr_of_outgoings=(branch+1);
965
966         /* done! */
967         ret=branch;
968
969 error:
970         return ret;
971 }
972
973 /* introduce a new uac to transaction, based on old_uac and a possible
974  *  new ip address (if the dns name resolves to more ips). If no more
975  *   ips are found => returns -1.
976  *  returns its branch id (>=0)
977  *  or error (<0) and sets ser_error if needed; it doesn't send a message
978  *  yet -- a reply to it
979  *  might interfere with the process of adding multiple branches.
980  *  If lock_replies is 1, replies will be locked for t until the new branch
981  *  is added (to prevent add branches races). Use 0 if the reply lock is
982  *  already held, e.g. in failure route/handlers (WARNING: using 1 in a
983  *  failure route will cause a deadlock).
984  */
985 int add_uac_dns_fallback(struct cell *t, struct sip_msg* msg,
986                 struct ua_client* old_uac,
987                 int lock_replies)
988 {
989         int ret;
990
991         ret=-1;
992         if (cfg_get(core, core_cfg, use_dns_failover) &&
993                         !((t->flags & (T_DONT_FORK|T_DISABLE_FAILOVER)) ||
994                                 uac_dont_fork(old_uac)) &&
995                         dns_srv_handle_next(&old_uac->dns_h, 0)){
996                 if (lock_replies){
997                         /* use reply lock to guarantee nobody is adding a branch
998                          * in the same time */
999                         LOCK_REPLIES(t);
1000                         /* check again that we can fork */
1001                         if ((t->flags & T_DONT_FORK) || uac_dont_fork(old_uac)){
1002                                 UNLOCK_REPLIES(t);
1003                                 LM_DBG("no forking on => no new branches\n");
1004                                 return ret;
1005                         }
1006                 }
1007                 if (t->nr_of_outgoings >= sr_dst_max_branches){
1008                         LM_ERR("maximum number of branches exceeded\n");
1009                         if (lock_replies)
1010                                 UNLOCK_REPLIES(t);
1011                         ret=ser_error=E_TOO_MANY_BRANCHES;
1012                         return ret;
1013                 }
1014                 /* copy the dns handle into the new uac */
1015                 dns_srv_handle_cpy(&t->uac[t->nr_of_outgoings].dns_h,
1016                                 &old_uac->dns_h);
1017                 /* copy the onreply and onfailure routes */
1018                 t->uac[t->nr_of_outgoings].on_failure = old_uac->on_failure;
1019                 t->uac[t->nr_of_outgoings].on_reply = old_uac->on_reply;
1020                 t->uac[t->nr_of_outgoings].on_branch_failure = old_uac->on_branch_failure;
1021
1022                 if (cfg_get(tm, tm_cfg, reparse_on_dns_failover)){
1023                         /* Reuse the old buffer and only replace the via header.
1024                          * The drawback is that the send_socket is not corrected
1025                          * in the rest of the message, only in the VIA HF (Miklos) */
1026                         ret=add_uac_from_buf(t,  msg, &old_uac->uri,
1027                                         &old_uac->path,
1028                                         (old_uac->request.dst.send_flags.f & SND_F_FORCE_SOCKET)?
1029                                                 old_uac->request.dst.send_sock:
1030                                                 ((tm_dns_reuse_rcv_socket)?msg->rcv.bind_address:0),
1031                                         old_uac->request.dst.send_flags,
1032                                         old_uac->request.dst.proto,
1033                                         old_uac->request.buffer,
1034                                         old_uac->request.buffer_len,
1035                                         &old_uac->instance, &old_uac->ruid,
1036                                         &old_uac->location_ua);
1037                 } else {
1038                         /* add_uac will use dns_h => next_hop will be ignored.
1039                          * Unfortunately we can't reuse the old buffer, the branch id
1040                          *  must be changed and the send_socket might be different =>
1041                          *  re-create the whole uac */
1042                         ret=add_uac(t,  msg, &old_uac->uri, 0, &old_uac->path, 0,
1043                                         (old_uac->request.dst.send_flags.f & SND_F_FORCE_SOCKET)?
1044                                                 old_uac->request.dst.send_sock:
1045                                                 ((tm_dns_reuse_rcv_socket)?msg->rcv.bind_address:0),
1046                                         old_uac->request.dst.send_flags,
1047                                         old_uac->request.dst.proto, UAC_DNS_FAILOVER_F,
1048                                         &old_uac->instance, &old_uac->ruid,
1049                                         &old_uac->location_ua);
1050                 }
1051
1052                 if (ret<0){
1053                         /* failed, delete the copied dns_h */
1054                         dns_srv_handle_put(&t->uac[t->nr_of_outgoings].dns_h);
1055                 }
1056                 if (lock_replies){
1057                         UNLOCK_REPLIES(t);
1058                 }
1059         }
1060         return ret;
1061 }
1062
1063 #endif
1064
1065 int e2e_cancel_branch( struct sip_msg *cancel_msg, struct cell *t_cancel,
1066                 struct cell *t_invite, int branch )
1067 {
1068         int ret;
1069         char *shbuf;
1070         unsigned int len;
1071         snd_flags_t snd_flags;
1072
1073         ret=-1;
1074         if (t_cancel->uac[branch].request.buffer) {
1075                 LM_CRIT("buffer rewrite attempt\n");
1076                 ret=ser_error=E_BUG;
1077                 goto error;
1078         }
1079         if (t_invite->uac[branch].request.buffer==0){
1080                 /* inactive / deleted  branch */
1081                 goto error;
1082         }
1083         t_invite->uac[branch].request.flags|=F_RB_CANCELED;
1084
1085         /* note -- there is a gap in proxy stats -- we don't update
1086          * proxy stats with CANCEL (proxy->ok, proxy->tx, etc.)
1087          */
1088
1089         /* set same dst as the invite */
1090         t_cancel->uac[branch].request.dst=t_invite->uac[branch].request.dst;
1091         /* print */
1092         if (cfg_get(tm, tm_cfg, reparse_invite)) {
1093                 /* buffer is built locally from the INVITE which was sent out */
1094                 /* lumps can be set outside of the lock, make sure that we read
1095                  * the up-to-date values */
1096                 membar_depends();
1097                 if (cancel_msg->add_rm || cancel_msg->body_lumps) {
1098                         LM_WARN("CANCEL is built locally,"
1099                                         " thus lumps are not applied to the message!\n");
1100                 }
1101                 shbuf=build_local_reparse( t_invite, branch, &len, CANCEL,
1102                                 CANCEL_LEN, &t_invite->to
1103 #ifdef CANCEL_REASON_SUPPORT
1104                                 , 0
1105 #endif /* CANCEL_REASON_SUPPORT */
1106                                 );
1107                 if (unlikely(!shbuf)) {
1108                         LM_ERR("printing e2e cancel failed\n");
1109                         ret=ser_error=E_OUT_OF_MEM;
1110                         goto error;
1111                 }
1112                 /* install buffer */
1113                 t_cancel->uac[branch].request.buffer=shbuf;
1114                 t_cancel->uac[branch].request.buffer_len=len;
1115                 t_cancel->uac[branch].uri.s=t_cancel->uac[branch].request.buffer+
1116                         cancel_msg->first_line.u.request.method.len+1;
1117                 t_cancel->uac[branch].uri.len=t_invite->uac[branch].uri.len;
1118         } else {
1119                 SND_FLAGS_INIT(&snd_flags);
1120                 /* buffer is constructed from the received CANCEL with lumps applied */
1121                 /*  t_cancel...request.dst is already filled (see above) */
1122                 if (unlikely((ret=prepare_new_uac( t_cancel, cancel_msg, branch,
1123                                                         &t_invite->uac[branch].uri,
1124                                                         &t_invite->uac[branch].path,
1125                                                         0, 0, snd_flags, PROTO_NONE, 0,
1126                                                         NULL, NULL, NULL)) <0)){
1127                         ser_error=ret;
1128                         goto error;
1129                 }
1130         }
1131         /* success */
1132         ret=1;
1133
1134 error:
1135         return ret;
1136 }
1137
1138
1139
1140 #ifdef CANCEL_REASON_SUPPORT
1141 /** create a cancel reason structure packed into a single shm. block.
1142  * From a cause and a pointer to a str or cancel_msg, build a
1143  * packed cancel reason structure (CANCEL_REAS_PACKED_HDRS), using a
1144  * single memory allocation (so that it can be freed by a simple shm_free()).
1145  * @param cause - cancel cause, @see cancel_reason for more details.
1146  * @param data - depends on the cancel cause.
1147  * @return pointer to the shm packed cancel reason struct on success,
1148  *        0 on error
1149  */
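/* For example (assuming REASON_PREFIX is "Reason: SIP;cause=" and REASON_TEXT
 * is ";text="), cause=200 with the text "Call completed elsewhere" is packed
 * as the single header:
 *
 *   Reason: SIP;cause=200;text="Call completed elsewhere"\r\n
 */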
1150 static struct cancel_reason* cancel_reason_pack(short cause, void* data,
1151                 struct cell* t)
1152 {
1153         char* d;
1154         struct cancel_reason* cr;
1155         int reason_len;
1156         int code_len;
1157         struct hdr_field *reas1, *reas_last, *hdr;
1158         str* txt;
1159         struct sip_msg* e2e_cancel;
1160
1161         if (likely(cause != CANCEL_REAS_UNKNOWN)){
1162                 reason_len = 0;
1163                 txt = 0;
1164                 e2e_cancel = 0;
1165                 reas1 = 0;
1166                 reas_last = 0;
1167                 if (likely(cause == CANCEL_REAS_RCVD_CANCEL &&
1168                                         data && !(t->flags & T_NO_E2E_CANCEL_REASON))) {
1169                         /* parse the entire cancel, to get all the Reason headers */
1170                         e2e_cancel = data;
1171                         parse_headers(e2e_cancel, HDR_EOH_F, 0);
1172                         for(hdr=get_hdr(e2e_cancel, HDR_REASON_T), reas1=hdr;
1173                                         hdr; hdr=next_sibling_hdr(hdr)) {
1174                                 /* hdr->len includes CRLF */
1175                                 reason_len += hdr->len;
1176                                 reas_last=hdr;
1177                         }
1178                 } else if (likely(cause > 0 &&
1179                                         cfg_get(tm, tm_cfg, local_cancel_reason))){
1180                         txt = (str*) data;
1181                         /* Reason: SIP;cause=<reason->cause>[;text=<reason->u.text.s>] */
1182                         reason_len = REASON_PREFIX_LEN + USHORT2SBUF_MAX_LEN +
1183                                 ((txt && txt->s)?REASON_TEXT_LEN + 1 + txt->len + 1 : 0)
1184                                         + CRLF_LEN;
1185                 } else if (cause == CANCEL_REAS_PACKED_HDRS &&
1186                                 !(t->flags & T_NO_E2E_CANCEL_REASON) && data) {
1187                         txt = (str*) data;
1188                         reason_len = txt?txt->len:0;
1189                 } else if (unlikely(cause < CANCEL_REAS_MIN)) {
1190                         LM_CRIT("unhandled reason cause %d\n", cause);
1191                         goto error;
1192                 }
1193
1194                 if (unlikely(reason_len == 0))
1195                         return 0; /* nothing to do, no reason */
1196                 cr = shm_malloc(sizeof(struct cancel_reason) + reason_len);
1197                 if (unlikely(cr == 0))
1198                         goto error;
1199                 d = (char*)cr +sizeof(*cr);
1200                 cr->cause = CANCEL_REAS_PACKED_HDRS;
1201                 cr->u.packed_hdrs.s = d;
1202                 cr->u.packed_hdrs.len = reason_len;
1203
1204                 if (cause == CANCEL_REAS_RCVD_CANCEL) {
1205                         for(hdr=reas1; hdr; hdr=next_sibling_hdr(hdr)) {
1206                                 /* hdr->len includes CRLF */
1207                                 append_str(d, hdr->name.s, hdr->len);
1208                                 if (likely(hdr==reas_last))
1209                                         break;
1210                         }
1211                 } else if (likely(cause > 0)) {
1212                         append_str(d, REASON_PREFIX, REASON_PREFIX_LEN);
1213                         code_len=ushort2sbuf(cause, d, reason_len
1214                                         - (int)(d - (char*)cr - sizeof(*cr)));
1215                         if (unlikely(code_len==0)) {
1216                                 shm_free(cr);
1217                                 cr = 0;
1218                                 LM_CRIT("not enough space to write reason code\n");
1219                                 goto error;
1220                         }
1221                         d+=code_len;
1222                         if (txt && txt->s){
1223                                 append_str(d, REASON_TEXT, REASON_TEXT_LEN);
1224                                 *d='"'; d++;
1225                                 append_str(d, txt->s, txt->len);
1226                                 *d='"'; d++;
1227                         }
1228                         append_str(d, CRLF, CRLF_LEN);
1229                 } else if (cause == CANCEL_REAS_PACKED_HDRS) {
1230                         append_str(d, txt->s, txt->len);
1231                 }
1232                 return cr;
1233         }
1234 error:
1235         return 0;
1236 }
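/* Illustrative usage sketch (hypothetical helper, not used anywhere in tm):
 * packing a locally generated Reason header (numeric cause plus an optional
 * text part) and attaching it to the INVITE transaction the same way
 * e2e_cancel() does below. Kept under #if 0 so it is not compiled. */
#if 0
static void example_attach_local_reason(struct cell *t_invite)
{
	str txt = str_init("Temporarily Unavailable");
	struct cancel_reason *cr;

	/* builds "Reason: SIP;cause=480;text="..."" CRLF in one shm block */
	cr = cancel_reason_pack(480, &txt, t_invite);
	if (cr && atomic_cmpxchg_long((void*)&t_invite->uas.cancel_reas,
				0, (long)cr) != 0) {
		/* another process set the reason first => free our copy */
		shm_free(cr);
	}
}
#endif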
1237 #endif /* CANCEL_REASON_SUPPORT */
1238
1239
1240
1241 void e2e_cancel( struct sip_msg *cancel_msg,
1242                 struct cell *t_cancel, struct cell *t_invite )
1243 {
1244         branch_bm_t cancel_bm;
1245 #ifndef E2E_CANCEL_HOP_BY_HOP
1246         branch_bm_t tmp_bm;
1247 #elif defined (CANCEL_REASON_SUPPORT)
1248         struct cancel_reason* reason;
1249         int free_reason;
1250 #endif /* E2E_CANCEL_HOP_BY_HOP */
1251         int i;
1252         int lowest_error;
1253         int ret;
1254         struct tmcb_params tmcb;
1255
1256         cancel_bm=0;
1257         lowest_error=0;
1258
1259         if (unlikely(has_tran_tmcbs(t_invite, TMCB_E2ECANCEL_IN))){
1260                 INIT_TMCB_PARAMS(tmcb, cancel_msg, 0, cancel_msg->REQ_METHOD);
1261                 run_trans_callbacks_internal(&t_invite->tmcb_hl, TMCB_E2ECANCEL_IN,
1262                                 t_invite, &tmcb);
1263         }
1264         /* mark the transaction as canceled, so that no new messages are
1265          * forwarded on it and t_is_canceled() returns true.
1266          * WARNING: it's safe to do this without locks, at least for now (even
1267          * if a flag is accidentally reset in a race nothing bad will happen),
1268          * however this should be rechecked whenever new flags are added.
1269          */
1270         t_invite->flags|=T_CANCELED;
1271         /* first check if there are any branches */
1272         if (t_invite->nr_of_outgoings==0){
1273                 /* no branches yet => force a reply to the invite */
1274                 t_reply( t_invite, t_invite->uas.request, 487, CANCELED );
1275                 LM_DBG("e2e cancel -- no more pending branches\n");
1276                 t_reply( t_cancel, cancel_msg, 200, CANCEL_DONE );
1277                 return;
1278         }
1279
1280         /* determine which branches to cancel ... */
1281         prepare_to_cancel(t_invite, &cancel_bm, 0);
1282
1283         /* no branches to cancel (e.g., a suspended transaction with blind uac) */
1284         if (cancel_bm==0){
1285                 /* no outgoing branches yet => force a reply to the invite */
1286                 t_reply( t_invite, t_invite->uas.request, 487, CANCELED );
1287                 LM_DBG("e2e cancel -- no active branches\n");
1288                 t_reply( t_cancel, cancel_msg, 200, CANCEL_DONE );
1289                 return;
1290         }
1291
1292 #ifdef E2E_CANCEL_HOP_BY_HOP
1293         /* we don't need to set t_cancel label to be the same as t_invite if
1294          * we do hop by hop cancel. The cancel transaction will have a different
1295          * label, but this is not a problem since this transaction is only used to
1296          * send a reply back. The cancels sent upstream will be part of the invite
1297          * transaction (local_cancel retr. bufs) and they will be generated with
1298          * the same via as the invite.
1299          * Note however that setting t_cancel label the same as t_invite will work
1300          * too (the upstream cancel replies will properly match the t_invite
1301          * transaction and will not match the t_cancel because t_cancel will always
1302          * have 0 branches and we check for the branch number in
1303          * t_reply_matching() ).
1304          */
1305 #ifdef CANCEL_REASON_SUPPORT
1306         free_reason = 0;
1307         reason = 0;
1308         if (likely(t_invite->uas.cancel_reas == 0)){
1309                 reason = cancel_reason_pack(CANCEL_REAS_RCVD_CANCEL, cancel_msg,
1310                                 t_invite);
1311                 /* set if not already set */
1312                 if (unlikely(reason &&
1313                                         atomic_cmpxchg_long((void*)&t_invite->uas.cancel_reas,
1314                                                 0, (long)reason) != 0)) {
1315                         /* already set, failed to re-set it */
1316                         free_reason = 1;
1317                 }
1318         }
1319 #endif /* CANCEL_REASON_SUPPORT */
1320         for (i=0; i<t_invite->nr_of_outgoings; i++) {
1321                 if (cancel_bm & (1<<i)) {
1322                         /* it's safe to get the reply lock since e2e_cancel is
1323                          * called with the cancel as the "current" transaction so
1324                          * at most t_cancel REPLY_LOCK is held in this process =>
1325                          * no deadlock possibility */
1326                         ret=cancel_branch(
1327                                         t_invite,
1328                                         i,
1329 #ifdef CANCEL_REASON_SUPPORT
1330                                         reason,
1331 #endif /* CANCEL_REASON_SUPPORT */
1332                                         cfg_get(tm,tm_cfg, cancel_b_flags)
1333                                         | ((t_invite->uac[i].request.buffer==NULL)?
1334                                                 F_CANCEL_B_FAKE_REPLY:0) /* blind UAC? */
1335                                         );
1336                         if (ret<0) cancel_bm &= ~(1<<i);
1337                         if (ret<lowest_error) lowest_error=ret;
1338                 }
1339         }
1340 #ifdef CANCEL_REASON_SUPPORT
1341         if (unlikely(free_reason)) {
1342                 /* reason was not set as the global reason => free it */
1343                 shm_free(reason);
1344         }
1345 #endif /* CANCEL_REASON_SUPPORT */
1346 #else /* ! E2E_CANCEL_HOP_BY_HOP */
1347         /* fix label -- it must be the same for reply matching (the label is part
1348          * of the generated via branch for the cancels sent upstream and if it
1349          * were different from the one in the INVITE the transactions would not
1350          * match) */
1351         t_cancel->label=t_invite->label;
1352         t_cancel->nr_of_outgoings=t_invite->nr_of_outgoings;
1353         /* ... and install CANCEL UACs */
1354         for (i=0; i<t_invite->nr_of_outgoings; i++)
1355                 if ((cancel_bm & (1<<i)) && (t_invite->uac[i].last_received>=100)) {
1356                         ret=e2e_cancel_branch(cancel_msg, t_cancel, t_invite, i);
1357                         if (ret<0) cancel_bm &= ~(1<<i);
1358                         if (ret<lowest_error) lowest_error=ret;
1359                 }
1360
1361         /* send them out */
1362         for (i = 0; i < t_cancel->nr_of_outgoings; i++) {
1363                 if (cancel_bm & (1 << i)) {
1364                         if (t_invite->uac[i].last_received>=100){
1365                                 /* Provisional reply received on this branch, send CANCEL */
1366                                 /* we do need to stop the retr. timers if the request is not
1367                                  * an invite and since the stop_rb_retr() cost is lower than
1368                                  * the invite check we do it always --andrei */
1369                                 stop_rb_retr(&t_invite->uac[i].request);
1370                                 if (SEND_BUFFER(&t_cancel->uac[i].request) == -1) {
1371                                         LM_ERR("e2e cancel - send failed\n");
1372                                 }
1373                                 else{
1374                                         if (unlikely(has_tran_tmcbs(t_cancel, TMCB_REQUEST_SENT)))
1375                                                 run_trans_callbacks_with_buf(TMCB_REQUEST_SENT,
1376                                                                 &t_cancel->uac[i].request,
1377                                                                 cancel_msg, 0, TMCB_LOCAL_F);
1378                                 }
1379                                 if (start_retr( &t_cancel->uac[i].request )!=0)
1380                                         LM_CRIT("BUG: failed to start retr."
1381                                                         " for %p\n", &t_cancel->uac[i].request);
1382                         } else {
1383                                 /* No provisional response received, stop
1384                                  * retransmission timers */
1385                                 if (!(cfg_get(tm, tm_cfg, cancel_b_flags)
1386                                                         & F_CANCEL_B_FORCE_RETR))
1387                                         stop_rb_retr(&t_invite->uac[i].request);
1388                                 /* no need to stop fr, it will be stopped by relay_reply
1389                                  * put_on_wait -- andrei */
1390                                 /* Generate faked reply */
1391                                 if (cfg_get(tm, tm_cfg, cancel_b_flags) &
1392                                                 F_CANCEL_B_FAKE_REPLY){
1393                                         LOCK_REPLIES(t_invite);
1394                                         if (relay_reply(t_invite, FAKED_REPLY, i,
1395                                                                 487, &tmp_bm, 1) == RPS_ERROR) {
1396                                                 lowest_error = -1;
1397                                         }
1398                                 }
1399                         }
1400                 }
1401         }
1402 #endif /*E2E_CANCEL_HOP_BY_HOP */
1403
1404         /* if an error occurred, let upstream know (the final reply
1405          * will also move the transaction into wait state)
1406          */
1407         if (lowest_error<0) {
1408                 LM_ERR("cancel error\n");
1409                 /* if called from failure_route, make sure that the unsafe version
1410                  * is called (we are already holding the reply mutex for the cancel
1411                  * transaction).
1412                  */
1413                 if ((is_route_type(FAILURE_ROUTE)) && (t_cancel==get_t()))
1414                         t_reply_unsafe( t_cancel, cancel_msg, 500, "cancel error");
1415                 else
1416                         t_reply( t_cancel, cancel_msg, 500, "cancel error");
1417         } else if (cancel_bm) {
1418                 /* if there are pending branches, let upstream know we
1419                  * are working on it
1420                  */
1421                 LM_DBG("e2e cancel proceeding\n");
1422                 /* if called from failure_route, make sure that the unsafe version
1423          * is called (we are already holding the reply mutex for the cancel
1424                  * transaction).
1425                  */
1426                 if ((is_route_type(FAILURE_ROUTE)) && (t_cancel==get_t()))
1427                         t_reply_unsafe( t_cancel, cancel_msg, 200, CANCELING );
1428                 else
1429                         t_reply( t_cancel, cancel_msg, 200, CANCELING );
1430         } else {
1431                 /* if the transaction exists, but there are no more pending
1432                  * branches, tell upstream we're done
1433                  */
1434                 LM_DBG("e2e cancel -- no more pending branches\n");
1435                 /* if called from failure_route, make sure that the unsafe version
1436          * is called (we are already holding the reply mutex for the cancel
1437                  * transaction).
1438                  */
1439                 if ((is_route_type(FAILURE_ROUTE)) && (t_cancel==get_t()))
1440                         t_reply_unsafe( t_cancel, cancel_msg, 200, CANCEL_DONE );
1441                 else
1442                         t_reply( t_cancel, cancel_msg, 200, CANCEL_DONE );
1443         }
1444 }
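/* Illustrative sketch (hypothetical callbacks, not registered anywhere in
 * this file): how a module can get notified when e2e_cancel() above runs.
 * TMCB_E2ECANCEL_IN must be registered on the INVITE transaction, typically
 * from a TMCB_REQUEST_IN callback. Kept under #if 0 so it is not compiled. */
#if 0
static void example_e2e_cancel_cb(struct cell *t, int type,
		struct tmcb_params *ps)
{
	/* ps->req is the incoming CANCEL, t is the INVITE transaction */
	LM_DBG("CANCEL received for INVITE transaction %p\n", t);
}

static void example_request_in_cb(struct cell *t, int type,
		struct tmcb_params *ps)
{
	if (register_tmcb(0, t, TMCB_E2ECANCEL_IN, example_e2e_cancel_cb,
				0, 0) <= 0)
		LM_ERR("cannot register TMCB_E2ECANCEL_IN callback\n");
}
#endif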
1445
1446
1447
1448 /* sends one uac/branch buffer and falls back to other IPs if
1449  *  the destination resolves to several addresses.
1450  *  Takes care of starting timers a.s.o. (on send success).
1451  *  returns: -2 on error, -1 on drop, the current branch id on success,
1452  *   or a new branch id on send error/blacklist when failover is possible
1453  *    (ret>=0 && ret!=branch); see also the usage sketch after this function.
1454  *    if lock_replies is 1, the replies for t will be locked when adding
1455  *     new branches (to prevent races). Use 0 from failure routes or other
1456  *     places where the reply lock is already held, to avoid deadlocks. */
1457 int t_send_branch( struct cell *t, int branch, struct sip_msg* p_msg ,
1458                 struct proxy_l * proxy, int lock_replies)
1459 {
1460         struct ip_addr ip; /* debugging */
1461         int ret;
1462         struct ua_client* uac;
1463
1464         uac=&t->uac[branch];
1465         ret=branch;
1466         if (run_onsend(p_msg,   &uac->request.dst, uac->request.buffer,
1467                                 uac->request.buffer_len)==0){
1468                 /* disable the current branch: set a "fake" timeout
1469                  *  reply code but don't set uac->reply, to avoid overriding
1470                  *  a highly unlikely, perfectly timed fake reply (to a message
1471                  *   we never sent).
1472                  * (code=final reply && reply==0 => t_pick_branch won't ever pick it)*/
1473                 uac->last_received=408;
1474                 su2ip_addr(&ip, &uac->request.dst.to);
1475                 LM_DBG("onsend_route dropped msg. to %s:%d (%d)\n",
1476                                 ip_addr2a(&ip), su_getport(&uac->request.dst.to),
1477                                 uac->request.dst.proto);
1478 #ifdef USE_DNS_FAILOVER
1479                 /* if the destination resolves to more ips, add another
1480                  *  branch/uac */
1481                 if (cfg_get(core, core_cfg, use_dns_failover)){
1482                         ret=add_uac_dns_fallback(t, p_msg, uac, lock_replies);
1483                         if (ret>=0){
1484                                 su2ip_addr(&ip, &uac->request.dst.to);
1485                                 LM_DBG("send on branch %d failed "
1486                                                 "(onsend_route), trying another ip %s:%d (%d)\n",
1487                                                 branch, ip_addr2a(&ip),
1488                                                 su_getport(&uac->request.dst.to),
1489                                                 uac->request.dst.proto);
1490                                 /* success, return new branch */
1491                                 return ret;
1492                         }
1493                 }
1494 #endif /* USE_DNS_FAILOVER*/
1495                 return -1; /* drop, try next branch */
1496         }
1497 #ifdef USE_DST_BLACKLIST
1498         if (cfg_get(core, core_cfg, use_dst_blacklist)
1499                         && p_msg
1500                         && (p_msg->REQ_METHOD
1501                                 & cfg_get(tm, tm_cfg, tm_blst_methods_lookup))
1502                 ){
1503                 if (dst_is_blacklisted(&uac->request.dst, p_msg)){
1504                         su2ip_addr(&ip, &uac->request.dst.to);
1505                         LM_DBG("blacklisted destination: %s:%d (%d)\n",
1506                                         ip_addr2a(&ip), su_getport(&uac->request.dst.to),
1507                                         uac->request.dst.proto);
1508                         /* disable the current branch: set a "fake" timeout
1509                          *  reply code but don't set uac->reply, to avoid overriding
1510                          *  a highly unlikely, perfectly timed fake reply (to a message
1511                          *   we never sent).  (code=final reply && reply==0 =>
1512                          *   t_pick_branch won't ever pick it)*/
1513                         uac->last_received=408;
1514 #ifdef USE_DNS_FAILOVER
1515                         /* if the destination resolves to more ips, add another
1516                          *  branch/uac */
1517                         if (cfg_get(core, core_cfg, use_dns_failover)){
1518                                 ret=add_uac_dns_fallback(t, p_msg, uac, lock_replies);
1519                                 if (ret>=0){
1520                                         su2ip_addr(&ip, &uac->request.dst.to);
1521                                         LM_DBG("send on branch %d failed (blacklist),"
1522                                                         " trying another ip %s:%d (%d)\n", branch,
1523                                                         ip_addr2a(&ip), su_getport(&uac->request.dst.to),
1524                                                         uac->request.dst.proto);
1525                                         /* success, return new branch */
1526                                         return ret;
1527                                 }
1528                         }
1529 #endif /* USE_DNS_FAILOVER*/
1530                         return -1; /* don't send */
1531                 }
1532         }
1533 #endif /* USE_DST_BLACKLIST */
1534         if (SEND_BUFFER( &uac->request)==-1) {
1535                 /* disable the current branch: set a "fake" timeout
1536                  *  reply code but don't set uac->reply, to avoid overriding
1537                  *  a highly unlikely, perfectly timed fake reply (to a message
1538                  *  we never sent).
1539                  * (code=final reply && reply==0 => t_pick_branch won't ever pick it)*/
1540                 uac->last_received=408;
1541                 su2ip_addr(&ip, &uac->request.dst.to);
1542                 LM_DBG("send to %s:%d (%d) failed\n",
1543                                 ip_addr2a(&ip), su_getport(&uac->request.dst.to),
1544                                 uac->request.dst.proto);
1545 #ifdef USE_DST_BLACKLIST
1546                 dst_blacklist_add(BLST_ERR_SEND, &uac->request.dst, p_msg);
1547 #endif
1548 #ifdef USE_DNS_FAILOVER
1549                 /* if the destination resolves to more ips, add another
1550                  *  branch/uac */
1551                 if (cfg_get(core, core_cfg, use_dns_failover)){
1552                         ret=add_uac_dns_fallback(t, p_msg, uac, lock_replies);
1553                         if (ret>=0){
1554                                 /* success, return new branch */
1555                                 LM_DBG("send on branch %d failed, adding another"
1556                                                 " branch with another ip\n", branch);
1557                                 return ret;
1558                         }
1559                 }
1560 #endif
1561                 uac->icode = 908; /* internal code set to delivery failure */
1562                 LM_WARN("sending request on branch %d failed\n", branch);
1563                 if (proxy) { proxy->errors++; proxy->ok=0; }
1564                 if(tm_failure_exec_mode==1) {
1565                         LM_DBG("putting branch %d on hold\n", branch);
1566                         /* put on retransmission timer,
1567                          * but set proto to NONE, so it will not actually try to resend */
1568                         uac->request.dst.proto = PROTO_NONE;
1569                         /* reset last_received, 408 reply is faked by timer */
1570                         uac->last_received=0;
1571                         /* add to retransmission timer */
1572                         if (start_retr( &uac->request )!=0){
1573                                 LM_CRIT("BUG: retransmission already started for %p\n",
1574                                                 &uac->request);
1575                                 return -2;
1576                         }
1577                         return branch;
1578                 }
1579                 return -2;
1580         } else {
1581                 if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_SENT)))
1582                         run_trans_callbacks_with_buf(TMCB_REQUEST_SENT, &uac->request,
1583                                         p_msg, 0,0);
1584                 /* start retr. only if the send succeeded */
1585                 if (start_retr( &uac->request )!=0){
1586                         LM_CRIT("BUG: retransmission already started for: %p\n",
1587                                         &uac->request);
1588                         return -2;
1589                 }
1590         }
1591         return ret;
1592 }
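/* Illustrative sketch of the return value contract documented above
 * (hypothetical helper; the real consumer is the send loop in
 * t_forward_nonack() below). Kept under #if 0 so it is not compiled. */
#if 0
static int example_send_one_branch(struct cell *t, int i, struct sip_msg *msg,
		branch_bm_t *added_branches)
{
	int rc;

	rc = t_send_branch(t, i, msg, 0 /* proxy */, 1 /* lock replies */);
	if (rc == i)
		return 1;                       /* sent on the requested branch */
	if (rc >= 0) {
		*added_branches |= 1 << rc;     /* failover added branch rc instead */
		return 1;
	}
	return rc;                              /* -1: dropped, -2: send error */
}
#endif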
1593
1594
1595
1596 /* function returns:
1597  *       1 - forward successful
1598  *      -1 - error during forward
1599  */
1600 int t_forward_nonack( struct cell *t, struct sip_msg* p_msg,
1601                 struct proxy_l * proxy, int proto)
1602 {
1603         int branch_ret, lowest_ret;
1604         str current_uri;
1605         branch_bm_t     added_branches;
1606         int first_branch;
1607         int i, q;
1608         struct cell *t_invite;
1609         int success_branch;
1610         int try_new;
1611         int lock_replies;
1612         str dst_uri, path, instance, ruid, location_ua;
1613         struct socket_info* si;
1614         flag_t backup_bflags = 0;
1615         flag_t bflags = 0;
1616
1617
1618         /* make -Wall happy */
1619         current_uri.s=0;
1620
1621         getbflagsval(0, &backup_bflags);
1622
1623         if (t->flags & T_CANCELED) goto canceled;
1624
1625         if (p_msg->REQ_METHOD==METHOD_CANCEL) {
1626                 t_invite=t_lookupOriginalT(  p_msg );
1627                 if (t_invite!=T_NULL_CELL) {
1628                         e2e_cancel( p_msg, t, t_invite );
1629                         UNREF(t_invite);
1630                         /* it should be set to REQ_RPLD by e2e_cancel, which should
1631                          * send a final reply */
1632                         set_kr(REQ_FWDED);
1633                         return 1;
1634                 }
1635         }
1636
1637         /* if no more specific error code is known, use this */
1638         lowest_ret=E_UNSPEC;
1639         /* branches added */
1640         added_branches=0;
1641         /* branch to begin with */
1642         first_branch=t->nr_of_outgoings;
1643
1644         if (t->on_branch) {
1645                 /* tell add_uac that it should run branch route actions */
1646                 branch_route = t->on_branch;
1647                 /* save the branch route so that it
1648                  * can be used for adding branches later
1649                  */
1650                 t->on_branch_delayed = t->on_branch;
1651                 /* reset the flag before running the actions (so that it
1652                  * can be set again in branch_route if needed)
1653                  */
1654                 t_on_branch(0);
1655         } else {
1656                 branch_route = 0;
1657         }
1658
1659         /* on first-time forwarding, update the lumps */
1660         if (first_branch==0) {
1661                 /* update the shmem-ized msg with the lumps */
1662                 if ((is_route_type(REQUEST_ROUTE)) &&
1663                                 save_msg_lumps(t->uas.request, p_msg)) {
1664                         LM_ERR("failed to save the message lumps\n");
1665                         return -1;
1666                 }
1667         }
1668
1669         /* if ruri is not already consumed (by another invocation), use current
1670          * uri too. Else add only additional branches (which may be continuously
1671          * refilled).
1672          */
1673         if (ruri_get_forking_state()) {
1674                 try_new=1;
1675                 branch_ret=add_uac( t, p_msg, GET_RURI(p_msg), GET_NEXT_HOP(p_msg),
1676                                 &p_msg->path_vec, proxy, p_msg->force_send_socket,
1677                                 p_msg->fwd_send_flags, proto,
1678                                 (p_msg->dst_uri.len)?0:UAC_SKIP_BR_DST_F, &p_msg->instance,
1679                                 &p_msg->ruid, &p_msg->location_ua);
1680                 /* test if cancel was received meanwhile */
1681                 if (t->flags & T_CANCELED) goto canceled;
1682                 if (branch_ret>=0)
1683                         added_branches |= 1<<branch_ret;
1684                 else
1685                         lowest_ret=MIN_int(lowest_ret, branch_ret);
1686         } else try_new=0;
1687
1688         init_branch_iterator();
1689         while((current_uri.s=next_branch( &current_uri.len, &q, &dst_uri, &path,
1690                                         &bflags, &si, &ruid, &instance, &location_ua))) {
1691                 try_new++;
1692                 setbflagsval(0, bflags);
1693
1694                 branch_ret=add_uac( t, p_msg, &current_uri,
1695                                 (dst_uri.len) ? (&dst_uri) : &current_uri,
1696                                 &path, proxy, si, p_msg->fwd_send_flags,
1697                                 proto, (dst_uri.len)?0:UAC_SKIP_BR_DST_F, &instance,
1698                                 &ruid, &location_ua);
1699                 /* test if cancel was received meanwhile */
1700                 if (t->flags & T_CANCELED) goto canceled;
1701                 /* pick some of the errors in case things go wrong;
1702                  * note that picking lowest error is just as good as
1703                  * any other algorithm which picks any other negative
1704                  * branch result */
1705                 if (branch_ret>=0)
1706                         added_branches |= 1<<branch_ret;
1707                 else
1708                         lowest_ret=MIN_int(lowest_ret, branch_ret);
1709         }
1710         /* consume processed branches */
1711         clear_branches();
1712
1713         setbflagsval(0, backup_bflags);
1714
1715         /* update message flags, if changed in branch route */
1716         t->uas.request->flags = p_msg->flags;
1717
1718         /* don't forget to clear all branches processed so far */
1719
1720         /* things went wrong ... no new branch has been fwd-ed at all */
1721         if (added_branches==0) {
1722                 if (try_new==0) {
1723                         LM_ERR("no branches for forwarding\n");
1724                         /* either failed to add branches, or there were no more branches
1725                         */
1726                         ser_error=MIN_int(lowest_ret, E_CFG);
1727                         return -1;
1728                 }
1729                 if(lowest_ret!=E_CFG)
1730                         LM_ERR("failure to add branches\n");
1731                 ser_error=lowest_ret;
1732                 return lowest_ret;
1733         }
1734
1735         /* mark the first branch in this fwd step */
1736         t->uac[first_branch].flags |= TM_UAC_FLAG_FB;
1737
1738         ser_error=0; /* clear branch adding errors */
1739         /* send them out now */
1740         success_branch=0;
1741         lock_replies= ! ((is_route_type(FAILURE_ROUTE)) && (t==get_t()));
1742         for (i=first_branch; i<t->nr_of_outgoings; i++) {
1743                 if (added_branches & (1<<i)) {
1744
1745                         branch_ret=t_send_branch(t, i, p_msg , proxy, lock_replies);
1746                         if (branch_ret>=0){ /* some kind of success */
1747                                 if (branch_ret==i) { /* success */
1748                                         success_branch++;
1749                                         if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_OUT)))
1750                                                 run_trans_callbacks_with_buf( TMCB_REQUEST_OUT,
1751                                                                 &t->uac[i].request,
1752                                                                 p_msg, 0, -p_msg->REQ_METHOD);
1753                                 }
1754                                 else /* new branch added */
1755                                         added_branches |= 1<<branch_ret;
1756                         }
1757                 }
1758         }
1759         if (success_branch<=0) {
1760                 /* return always E_SEND for now
1761                  * (the real reason could be: denied by onsend routes, blacklisted,
1762                  *  send failed or any of the errors listed before + dns failed
1763                  *  when attempting dns failover) */
1764                 ser_error=E_SEND;
1765                 /* else return the last error (?) */
1766                 /* the caller should take care and delete the transaction */
1767                 return -1;
1768         }
1769         ser_error=0; /* clear branch send errors, we have overall success */
1770         set_kr(REQ_FWDED);
1771         return 1;
1772
1773 canceled:
1774         LM_DBG("no forwarding on a canceled transaction\n");
1775         /* reset processed branches */
1776         clear_branches();
1777         /* restore backup flags from initial env */
1778         setbflagsval(0, backup_bflags);
1779         /* update message flags, if changed in branch route */
1780         t->uas.request->flags = p_msg->flags;
1781         ser_error=E_CANCELED;
1782         return -1;
1783 }
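/* Illustrative sketch (hypothetical wrapper): the usual calling pattern for
 * t_forward_nonack() above -- create the transaction first, then forward to
 * whatever the R-URI/dset selects; this mirrors what t_forward_cancel() does
 * below for the stateful unmatched-CANCEL case. Kept under #if 0. */
#if 0
static int example_stateful_forward(struct sip_msg *msg)
{
	struct cell *t;

	if (t_newtran(msg) <= 0)
		return -1;              /* retransmission or error */
	t = get_t();
	/* no explicit proxy (0) and no forced protocol (PROTO_NONE) */
	return t_forward_nonack(t, msg, 0, PROTO_NONE);
}
#endif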
1784
1785
1786
1787 /* cancel handling/forwarding function
1788  * CANCELs with no matching transaction are handled according to
1789  * the unmatched_cancel config var: they are either forwarded statefully,
1790  * statelessly or dropped.
1791  * function returns:
1792  *       1 - forward successful
1793  *       0 - error, but do not reply
1794  *      <0 - error during forward
1795  * it also sets *tran if a transaction was created
1796  */
1797 int t_forward_cancel(struct sip_msg* p_msg , struct proxy_l * proxy, int proto,
1798                 struct cell** tran)
1799 {
1800         struct cell* t_invite;
1801         struct cell* t;
1802         int ret;
1803         int new_tran;
1804         struct dest_info dst;
1805         str host;
1806         unsigned short port;
1807         short comp;
1808
1809         t=0;
1810         /* handle cancels for which no transaction was created yet */
1811         if (cfg_get(tm, tm_cfg, unmatched_cancel)==UM_CANCEL_STATEFULL){
1812                 /* create cancel transaction */
1813                 new_tran=t_newtran(p_msg);
1814                 if (new_tran<=0 && new_tran!=E_SCRIPT){
1815                         if (new_tran==0)
1816                                 /* retransmission => do nothing */
1817                                 ret=1;
1818                         else
1819                                 /* some error => return it or DROP */
1820                                 ret=(ser_error==E_BAD_VIA && reply_to_via) ? 0: new_tran;
1821                         goto end;
1822                 }
1823                 t=get_t();
1824                 ret=t_forward_nonack(t, p_msg, proxy, proto);
1825                 goto end;
1826         }
1827
1828         t_invite=t_lookupOriginalT(  p_msg );
1829         if (t_invite!=T_NULL_CELL) {
1830                 /* create cancel transaction */
1831                 new_tran=t_newtran(p_msg);
1832                 if (new_tran<=0 && new_tran!=E_SCRIPT){
1833                         if (new_tran==0)
1834                                 /* retransmission => do nothing */
1835                                 ret=1;
1836                         else
1837                                 /* some error => return it or DROP */
1838                                 ret=(ser_error==E_BAD_VIA && reply_to_via) ? 0: new_tran;
1839                         UNREF(t_invite);
1840                         goto end;
1841                 }
1842                 t=get_t();
1843                 e2e_cancel( p_msg, t, t_invite );
1844                 UNREF(t_invite);
1845                 ret=1;
1846                 goto end;
1847         }else /* no corresponding INVITE transaction */
1848                 if (cfg_get(tm, tm_cfg, unmatched_cancel)==UM_CANCEL_DROP){
1849                         LM_DBG("non matching cancel dropped\n");
1850                         ret=1; /* do nothing -> drop */
1851                         goto end;
1852                 }else{
1853                         /* UM_CANCEL_STATELESS -> stateless forward */
1854                         LM_DBG("forwarding CANCEL statelessly\n");
1855                         if (proxy==0) {
1856                                 init_dest_info(&dst);
1857                                 dst.proto=proto;
1858                                 if (get_uri_send_info(GET_NEXT_HOP(p_msg), &host,
1859                                                         &port, &dst.proto, &comp)!=0){
1860                                         ret=E_BAD_ADDRESS;
1861                                         goto end;
1862                                 }
1863 #ifdef USE_COMP
1864                                 dst.comp=comp;
1865 #endif
1866                                 /* dst->send_sock not set, but forward_request
1867                                  * will take care of it */
1868                                 ret=forward_request(p_msg, &host, port, &dst);
1869                                 goto end;
1870                         } else {
1871                                 init_dest_info(&dst);
1872                                 dst.proto=get_proto(proto, proxy->proto);
1873                                 proxy2su(&dst.to, proxy);
1874                                 /* dst->send_sock not set, but forward_request
1875                                  * will take care of it */
1876                                 ret=forward_request( p_msg , 0, 0, &dst) ;
1877                                 goto end;
1878                         }
1879                 }
1880 end:
1881         if (tran)
1882                 *tran=t;
1883         return ret;
1884 }
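/* Illustrative sketch (hypothetical caller): driving t_forward_cancel() above.
 * Depending on the tm "unmatched_cancel" parameter it forwards the CANCEL
 * statefully, statelessly or drops it when no matching INVITE transaction is
 * found; *t is set only when a CANCEL transaction was created. Kept under
 * #if 0 so it is not compiled. */
#if 0
static int example_handle_cancel(struct sip_msg *msg, struct proxy_l *proxy,
		int proto)
{
	struct cell *t = 0;
	int ret;

	ret = t_forward_cancel(msg, proxy, proto, &t);
	if (ret < 0)
		LM_ERR("forwarding CANCEL failed (%d)\n", ret);
	return ret;
}
#endif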
1885
1886 /* Relays a CANCEL request if a corresponding INVITE transaction
1887  * can be found. The function is supposed to be used at the very
1888  * beginning of the script with reparse_invite=1 module parameter.
1889  *
1890  * return value:
1891  *    0: the CANCEL was successfully relayed
1892  *       (or error occurred but reply cannot be sent) => DROP
1893  *    1: no corresponding INVITE transaction exists
1894  *   <0: corresponding INVITE transaction exists but error occurred
1895  */
1896 int t_relay_cancel(struct sip_msg* p_msg)
1897 {
1898         struct cell* t_invite;
1899         struct cell* t;
1900         int ret;
1901         int new_tran;
1902
1903         t_invite=t_lookupOriginalT(  p_msg );
1904         if (t_invite!=T_NULL_CELL) {
1905                 /* create cancel transaction */
1906                 new_tran=t_newtran(p_msg);
1907                 if (new_tran<=0 && new_tran!=E_SCRIPT){
1908                         if (new_tran==0)
1909                                 /* retransmission => DROP, t_newtran() takes care about it */
1910                                 ret=0;
1911                         else
1912                                 /* some error => return it or DROP */
1913                                 ret=(ser_error==E_BAD_VIA && reply_to_via) ? 0: new_tran;
1914                         UNREF(t_invite);
1915                         goto end;
1916                 }
1917                 t=get_t();
1918                 e2e_cancel( p_msg, t, t_invite );
1919                 UNREF(t_invite);
1920                 /* return 0 to stop the script processing */
1921                 ret=0;
1922                 goto end;
1923
1924         } else {
1925                 /* no corresponding INVITE transaction found */
1926                 ret=1;
1927         }
1928 end:
1929         return ret;
1930 }
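/* Illustrative sketch (hypothetical wrapper; the real config/KEMI exports
 * live elsewhere in the module): mapping the t_relay_cancel() return values
 * above to script semantics -- 0 stops script processing, 1 means "no
 * matching INVITE, keep routing". Kept under #if 0 so it is not compiled. */
#if 0
static int example_ki_relay_cancel(struct sip_msg *msg)
{
	int rc;

	rc = t_relay_cancel(msg);
	if (rc == 0)
		return 0;       /* relayed (or reply impossible) => drop/stop */
	if (rc > 0)
		return 1;       /* no corresponding INVITE => continue routing */
	return rc;          /* error on an existing INVITE transaction */
}
#endif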
1931
1932 /* WARNING: doesn't work from failure route (deadlock; uses t_relay_to(), which
1933  *  is not failure route safe) */
1934 int t_replicate(struct sip_msg *p_msg,  struct proxy_l *proxy, int proto )
1935 {
1936         /* this is a quite tricky hack -- we just take the message
1937          * as is, including Route-s, Record-route-s and Vias,
1938          * forward it downstream and prevent the received replies
1939          * from being relayed by setting the replication/local_trans bit;
1940          *
1941          * nevertheless, it should be good enough for the primary
1942          * customer of this function, REGISTER replication;
1943          * if we later want to do it thoroughly, we need to
1944          * introduce delete lumps for all the header fields above
1945          * */
1946         return t_relay_to(p_msg, proxy, proto, 1 /* replicate */);
1947 }
1948
1949 /* fixup function for reparse_on_dns_failover modparam */
1950 int reparse_on_dns_failover_fixup(void *handle, str *gname, str *name, void **val)
1951 {
1952 #ifdef USE_DNS_FAILOVER
1953         if ((int)(long)(*val) && mhomed) {
1954                 LM_WARN("reparse_on_dns_failover is enabled on"
1955                                 " a multihomed host -- check the readme of tm module!\n");
1956         }
1957 #endif
1958         return 0;
1959 }