src/modules/tm/t_fwd.c (sip-router / Kamailio), commit ac55e3a4f843575a21a2305ebc828156558c7ca0
1 /*
2  * Copyright (C) 2001-2003 FhG Fokus
3  *
4  * This file is part of Kamailio, a free SIP server.
5  *
6  * Kamailio is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version
10  *
11  * Kamailio is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
19  */
20
21 #include "defs.h"
22
23
24 #include "../../core/dprint.h"
25 #include "../../core/config.h"
26 #include "../../core/parser/parser_f.h"
27 #include "../../core/ut.h"
28 #include "../../core/timer.h"
29 #include "../../core/hash_func.h"
30 #include "../../core/globals.h"
31 #include "../../core/cfg_core.h"
32 #include "../../core/mem/mem.h"
33 #include "../../core/dset.h"
34 #include "../../core/action.h"
35 #include "../../core/data_lump.h"
36 #include "../../core/onsend.h"
37 #include "../../core/compiler_opt.h"
38 #include "../../core/route.h"
39 #include "../../core/script_cb.h"
40 #include "t_funcs.h"
41 #include "t_hooks.h"
42 #include "t_msgbuilder.h"
43 #include "ut.h"
44 #include "t_cancel.h"
45 #include "t_lookup.h"
46 #include "t_fwd.h"
47 #include "t_reply.h"
48 #include "h_table.h"
49 #include "../../core/fix_lumps.h"
50 #include "config.h"
51 #ifdef USE_DNS_FAILOVER
52 #include "../../core/dns_cache.h"
53 #include "../../core/cfg_core.h" /* cfg_get(core, core_cfg, use_dns_failover) */
54 #include "../../core/msg_translator.h"
55 #include "lw_parser.h"
56 #endif
57 #ifdef USE_DST_BLACKLIST
58 #include "../../core/dst_blacklist.h"
59 #endif
60 #include "../../core/atomic_ops.h" /* membar_depends() */
61 #include "../../core/kemi.h"
62
63
64 extern int tm_failure_exec_mode;
65 extern int tm_dns_reuse_rcv_socket;
66
67 static int goto_on_branch = 0, branch_route = 0;
68
69 void t_on_branch( unsigned int go_to )
70 {
71         struct cell *t = get_t();
72
73         /* in REPLY_ROUTE and FAILURE_ROUTE T will be set to current transaction;
74          * in REQUEST_ROUTE T will be set only if the transaction was already
75          * created; if not -> use the static variable */
76         if (!t || t==T_UNDEFINED ) {
77                 goto_on_branch=go_to;
78         } else {
79                 t->on_branch = go_to;
80         }
81 }
82
83 unsigned int get_on_branch(void)
84 {
85         return goto_on_branch;
86 }
87
88 void set_branch_route( unsigned int on_branch)
89 {
90         branch_route = on_branch;
91 }
92
93
94
95
96 /** prepares a new branch "buffer".
97  * Creates the buffer used in the branch rb, fills everything needed (
98  * the sending information: t->uac[branch].request.dst, branch buffer, uri
99  * path vector a.s.o.) and runs the on_branch route.
100  * t->uac[branch].request.dst will be filled if next_hop !=0 with the result
101  * of the DNS resolution (next_hop, fproto and fsocket).
102  * If next_hop is 0, all the dst members except send_flags are read-only
103  * (send_flags is updated) and are supposed to be pre-filled.
104  *
105  * @param t  - transaction
106  * @param i_req - corresponding sip_msg, must be non-null; flags might
107  *                be modified (on_branch route)
108  * @param branch - branch no
109  * @param uri - uri used for the branch (must be non-null).
110  * @param path  - path vector (list of route like destination in text form,
111  *                 e.g.: "<sip:1.2.3.4;lr>, <sip:5.6.7.8;lr>")
112  * @param next_hop - uri of the next hop. If non-null it will be used
113  *              for DNS resolution and the branch request.dst structure will
114  *              be filled. If 0 the branch must already have
115  *              a pre-filled valid request.dst.
116  * @param fsocket - forced send socket for forwarding.
117  * @param send_flags - special flags for sending (see SND_F_* / snd_flags_t).
118  * @param fproto - forced proto for forwarding. Used only if next_hop!=0.
119  * @param flags - 0, UAC_DNS_FAILOVER_F or UAC_SKIP_BR_DST_F for now.
120  *
121  * @return  0 on success, < 0 (ser_error E***) on failure.
122  */
123 static int prepare_new_uac( struct cell *t, struct sip_msg *i_req,
124                 int branch, str *uri, str* path,
125                 str* next_hop,
126                 struct socket_info* fsocket,
127                 snd_flags_t snd_flags,
128                 int fproto, int flags,
129                 str *instance, str *ruid,
130                 str *location_ua)
131 {
132         char *shbuf;
133         struct lump* add_rm_backup, *body_lumps_backup;
134         struct sip_uri parsed_uri_bak;
135         int ret;
136         unsigned int len;
137         int parsed_uri_ok_bak, free_new_uri;
138         str msg_uri_bak;
139         str dst_uri_bak;
140         int dst_uri_backed_up;
141         str path_bak;
142         int free_path;
143         str instance_bak;
144         int free_instance;
145         str ruid_bak;
146         int free_ruid;
147         str ua_bak;
148         int free_ua;
149         int backup_route_type;
150         int test_dst;
151         snd_flags_t fwd_snd_flags_bak;
152         snd_flags_t rpl_snd_flags_bak;
153         struct socket_info *force_send_socket_bak;
154         struct dest_info *dst;
155         struct run_act_ctx ctx;
156         struct run_act_ctx *bctx;
157         sr_kemi_eng_t *keng;
158
159         shbuf=0;
160         ret=E_UNSPEC;
161         msg_uri_bak.s=0; /* kill warnings */
162         msg_uri_bak.len=0;
163         parsed_uri_ok_bak=0;
164         free_new_uri=0;
165         dst_uri_bak.s=0;
166         dst_uri_bak.len=0;
167         dst_uri_backed_up=0;
168         path_bak.s=0;
169         path_bak.len=0;
170         free_path=0;
171         instance_bak.s=0;
172         instance_bak.len=0;
173         free_instance=0;
174         ruid_bak.s=0;
175         ruid_bak.len=0;
176         free_ruid=0;
177         ua_bak.s=0;
178         ua_bak.len=0;
179         free_ua=0;
180         dst=&t->uac[branch].request.dst;
181
182         /* ... we calculate branch ... */
183         if (!t_calc_branch(t, branch, i_req->add_to_branch_s,
184                                 &i_req->add_to_branch_len ))
185         {
186                 LM_ERR("branch computation failed\n");
187                 ret=E_UNSPEC;
188                 goto error00;
189         }
190
191         /* dup lumps
192          * TODO: clone lumps only if needed */
193         /* lumps can be set outside of the lock, make sure that we read
194          * the up-to-date values */
195         membar_depends();
196         add_rm_backup = i_req->add_rm;
197         body_lumps_backup = i_req->body_lumps;
198         if (unlikely(i_req->add_rm)){
199                 i_req->add_rm = dup_lump_list(i_req->add_rm);
200                 if (unlikely(i_req->add_rm==0)){
201                         ret=E_OUT_OF_MEM;
202                         goto error04;
203                 }
204         }
205         if (unlikely(i_req->body_lumps)){
206                 i_req->body_lumps = dup_lump_list(i_req->body_lumps);
207                 if (unlikely(i_req->body_lumps==0)){
208                         ret=E_OUT_OF_MEM;
209                         goto error04;
210                 }
211         }
212         /* backup uri & path: we need to change them so that build_req...()
213          * will use uri & path and not the ones in the original msg (i_req)
214          * => we must back them up so that we can restore them to the original
215          * value after building the send buffer */
216         msg_uri_bak=i_req->new_uri;
217         parsed_uri_bak=i_req->parsed_uri;
218         parsed_uri_ok_bak=i_req->parsed_uri_ok;
219         path_bak=i_req->path_vec;
220         instance_bak=i_req->instance;
221         ruid_bak=i_req->ruid;
222         ua_bak=i_req->location_ua;
223
224         if (unlikely(branch_route || has_tran_tmcbs(t, TMCB_REQUEST_FWDED))){
225                 /* dup uris, path a.s.o. if we have a branch route or callback */
226                 /* ... set ruri ... */
227                 /* if uri points to new_uri, it needs to be "fixed" so that we can
228                  * change msg->new_uri */
229                 if (uri==&i_req->new_uri)
230                         uri=&msg_uri_bak;
231                 i_req->parsed_uri_ok=0;
232                 i_req->new_uri.s=pkg_malloc(uri->len);
233                 if (unlikely(i_req->new_uri.s==0)){
234                         ret=E_OUT_OF_MEM;
235                         goto error03;
236                 }
237                 free_new_uri=1;
238                 memcpy(i_req->new_uri.s, uri->s, uri->len);
239                 i_req->new_uri.len=uri->len;
240
241                 /* update path_vec */
242                 /* if path points to msg path_vec, it needs to be "fixed" so that we
243                  * can change/update msg->path_vec */
244                 if (path==&i_req->path_vec)
245                         path=&path_bak;
246                 /* zero it first so that set_path_vector will work */
247                 i_req->path_vec.s=0;
248                 i_req->path_vec.len=0;
249                 if (unlikely(path)){
250                         if (unlikely(set_path_vector(i_req, path)<0)){
251                                 ret=E_OUT_OF_MEM;
252                                 goto error03;
253                         }
254                         free_path=1;
255                 }
256                 /* update instance */
257                 /* if instance points to msg instance, it needs to be "fixed" so that we
258                  * can change/update msg->instance */
259                 if (instance==&i_req->instance)
260                         instance=&instance_bak;
261                 /* zero it first so that set_instance will work */
262                 i_req->instance.s=0;
263                 i_req->instance.len=0;
264                 if (unlikely(instance)){
265                         if (unlikely(set_instance(i_req, instance)<0)){
266                                 ret=E_OUT_OF_MEM;
267                                 goto error03;
268                         }
269                         free_instance=1;
270                 }
271
272                 /* update ruid */
273                 /* if ruid points to msg ruid, it needs to be "fixed" so that we
274                  * can change/update msg->ruid */
275                 if (ruid==&i_req->ruid)
276                         ruid=&ruid_bak;
277                 /* zero it first so that set_ruid will work */
278                 i_req->ruid.s=0;
279                 i_req->ruid.len=0;
280                 if (unlikely(ruid)){
281                         if (unlikely(set_ruid(i_req, ruid)<0)){
282                                 ret=E_OUT_OF_MEM;
283                                 goto error03;
284                         }
285                         free_ruid=1;
286                 }
287
288                 /* update location_ua */
289                 /* if location_ua points to msg location_ua, it needs to be "fixed"
290                  * so that we can change/update msg->location_ua */
291                 if (location_ua==&i_req->location_ua)
292                         location_ua=&ua_bak;
293                 /* zero it first so that set_ua will work */
294                 i_req->location_ua.s=0;
295                 i_req->location_ua.len=0;
296                 if (unlikely(location_ua)){
297                         if (unlikely(set_ua(i_req, location_ua)<0)){
298                                 ret=E_OUT_OF_MEM;
299                                 goto error03;
300                         }
301                         free_ua=1;
302                 }
303
304                 /* backup dst uri & zero it */
305                 dst_uri_bak=i_req->dst_uri;
306                 dst_uri_backed_up=1;
307                 /* if next_hop points to dst_uri, it needs to be "fixed" so that we
308                  * can change msg->dst_uri */
309                 if (next_hop==&i_req->dst_uri)
310                         next_hop=&dst_uri_bak;
311                 /* zero it first so that set_dst_uri will work */
312                 i_req->dst_uri.s=0;
313                 i_req->dst_uri.len=0;
314                 if (likely(next_hop)){
315                         if(unlikely((flags & UAC_SKIP_BR_DST_F)==0)){
316                                 /* set dst uri to next_hop for the on_branch route */
317                                 if (unlikely(set_dst_uri(i_req, next_hop)<0)){
318                                         ret=E_OUT_OF_MEM;
319                                         goto error03;
320                                 }
321                         }
322                 }
323
324                 if (likely(branch_route)) {
325                         /* run branch_route actions if provided */
326                         backup_route_type = get_route_type();
327                         set_route_type(BRANCH_ROUTE);
328                         tm_ctx_set_branch_index(branch);
329                         /* no need to backup/set avp lists: the on_branch route is run
330                          * only in the main route context (e.g. t_relay() in the main
331                          * route) or in the failure route context (e.g. append_branch &
332                          * t_relay()) and in both cases the avp lists are properly set
333                          * Note: the branch route is not run on delayed dns failover
334                          * (for that to work one would have to set branch_route prior to
335                          * calling add_uac(...) and then reset it afterwards).
336                          */
337                         if (exec_pre_script_cb(i_req, BRANCH_CB_TYPE)>0) {
338                                 /* backup ireq msg send flags and force_send_socket*/
339                                 fwd_snd_flags_bak=i_req->fwd_send_flags;
340                                 rpl_snd_flags_bak=i_req->rpl_send_flags;
341                                 force_send_socket_bak=i_req->force_send_socket;
342                                 /* set the new values */
343                                 i_req->fwd_send_flags=snd_flags /* initial value */;
344                                 set_force_socket(i_req, fsocket);
345                                 keng = sr_kemi_eng_get();
346                                 if(unlikely(keng!=NULL)) {
347                                         bctx = sr_kemi_act_ctx_get();
348                                         init_run_actions_ctx(&ctx);
349                                         sr_kemi_act_ctx_set(&ctx);
350                                         if(keng->froute(i_req, BRANCH_ROUTE,
351                                                         sr_kemi_cbname_lookup_idx(branch_route), NULL)<0) {
352                                                 LM_ERR("error running branch route kemi callback\n");
353                                         }
354                                         sr_kemi_act_ctx_set(bctx);
355                                 } else {
356                                         if (run_top_route(branch_rt.rlist[branch_route],
357                                                                 i_req, &ctx) < 0) {
358                                                 LM_DBG("negative return code in run_top_route\n");
359                                         }
360                                 }
361                                 /* update dst send_flags  and send socket*/
362                                 snd_flags=i_req->fwd_send_flags;
363                                 fsocket=i_req->force_send_socket;
364                                 /* restore ireq_msg force_send_socket & flags */
365                                 set_force_socket(i_req, force_send_socket_bak);
366                                 i_req->fwd_send_flags=fwd_snd_flags_bak;
367                                 i_req->rpl_send_flags=rpl_snd_flags_bak;
368                                 exec_post_script_cb(i_req, BRANCH_CB_TYPE);
369                                 /* if DROP was called in cfg, don't forward, jump to end */
370                                 if (unlikely(ctx.run_flags&DROP_R_F))
371                                 {
372                                         tm_ctx_set_branch_index(T_BR_UNDEFINED);
373                                         set_route_type(backup_route_type);
374                                         /* triggered by drop in CFG */
375                                         ret=E_CFG;
376                                         goto error03;
377                                 }
378                         }
379                         tm_ctx_set_branch_index(T_BR_UNDEFINED);
380                         set_route_type(backup_route_type);
381                 }
382
383                 /* run the specific callbacks for this transaction */
384                 if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_FWDED)))
385                         run_trans_callbacks( TMCB_REQUEST_FWDED , t, i_req, 0,
386                                         -i_req->REQ_METHOD);
387
388                 if (likely( !(flags & UAC_DNS_FAILOVER_F) && i_req->dst_uri.s &&
389                                         i_req->dst_uri.len)){
390                         /* no dns failover and non-empty dst_uri => use it as dst
391                          * (on dns failover dns_h will be non-empty => next_hop will be
392                          * ignored) */
393                         next_hop=&i_req->dst_uri;
394                 }
395                 /* no path vector initially, but one may have been set by the branch
396                  * route or the callbacks executed above */
397                 if(i_req->path_vec.s!=0 && free_path==0)
398                         free_path=1;
399         }else{
400                 /* no branch route and no TMCB_REQUEST_FWDED callback => set
401                  * msg uri and path to the new values (if needed) */
402                 if (unlikely((uri->s!=i_req->new_uri.s || uri->len!=i_req->new_uri.len)
403                                         && (i_req->new_uri.s!=0 ||
404                                                 uri->s!=i_req->first_line.u.request.uri.s ||
405                                                 uri->len!=i_req->first_line.u.request.uri.len) )){
406                         /* uri is different from i_req uri => replace i_req uri and force
407                          * uri re-parsing */
408                         i_req->new_uri=*uri;
409                         i_req->parsed_uri_ok=0;
410                 }
411                 if (unlikely(path && (i_req->path_vec.s!=path->s ||
412                                                 i_req->path_vec.len!=path->len))){
413                         i_req->path_vec=*path;
414                 }else if (unlikely(path==0 && i_req->path_vec.len!=0)){
415                         i_req->path_vec.s=0;
416                         i_req->path_vec.len=0;
417                 }
418                 if (unlikely(instance && (i_req->instance.s!=instance->s ||
419                                                 i_req->instance.len!=instance->len))){
420                         i_req->instance=*instance;
421                 }else if (unlikely(instance==0 && i_req->instance.len!=0)){
422                         i_req->instance.s=0;
423                         i_req->instance.len=0;
424                 }
425                 if (unlikely(ruid && (i_req->ruid.s!=ruid->s ||
426                                                 i_req->ruid.len!=ruid->len))){
427                         i_req->ruid=*ruid;
428                 }else if (unlikely(ruid==0 && i_req->ruid.len!=0)){
429                         i_req->ruid.s=0;
430                         i_req->ruid.len=0;
431                 }
432                 if (unlikely(location_ua && (i_req->location_ua.s!=location_ua->s ||
433                                                 i_req->location_ua.len!=location_ua->len))){
434                         i_req->location_ua=*location_ua;
435                 }else if (unlikely(location_ua==0 && i_req->location_ua.len!=0)){
436                         i_req->location_ua.s=0;
437                         i_req->location_ua.len=0;
438                 }
439         }
440
441         if (likely(next_hop!=0 || (flags & UAC_DNS_FAILOVER_F))){
442                 /* next_hop present => use it for dns resolution */
443 #ifdef USE_DNS_FAILOVER
444                 test_dst = (uri2dst2(&t->uac[branch].dns_h, dst, fsocket, snd_flags,
445                                         next_hop?next_hop:uri, fproto) == 0);
446 #else
447                 /* dst filled from the uri & request (send_socket) */
448                 test_dst = (uri2dst2(dst, fsocket, snd_flags,
449                                         next_hop?next_hop:uri, fproto)==0);
450 #endif
451                 if (test_dst){
452                         ret=E_BAD_ADDRESS;
453                         goto error01;
454                 }
455         }
456         /* else next_hop==0 =>
457          * no dst_uri / empty dst_uri and initial next_hop==0 =>
458          * dst is pre-filled with a valid dst => use the pre-filled dst */
459
460         /* set this branch's on_reply, on_failure and on_branch_failure handlers from the transaction */
461         t->uac[branch].on_reply = t->on_reply;
462         t->uac[branch].on_failure = t->on_failure;
463         t->uac[branch].on_branch_failure = t->on_branch_failure;
464
465         /* check if send_sock is ok */
466         if (t->uac[branch].request.dst.send_sock==0) {
467                 LM_ERR("can't fwd to af %d, proto %d "
468                                 " (no corresponding listening socket)\n",
469                                 dst->to.s.sa_family, dst->proto );
470                 ret=E_NO_SOCKET;
471                 goto error01;
472         }
473         /* ... and build it now */
474         shbuf=build_req_buf_from_sip_req( i_req, &len, dst, BUILD_IN_SHM);
475         if (!shbuf) {
476                 LM_ERR("could not build request\n");
477                 ret=E_OUT_OF_MEM;
478                 goto error01;
479         }
480 #ifdef DBG_MSG_QA
481         if (shbuf[len-1]==0) {
482                 LM_ERR("sanity check failed\n");
483                 abort();
484         }
485 #endif
486         /* things went well, move ahead and install new buffer! */
487         t->uac[branch].request.buffer=shbuf;
488         t->uac[branch].request.buffer_len=len;
489         t->uac[branch].uri.s=t->uac[branch].request.buffer+
490                 i_req->first_line.u.request.method.len+1;
491         t->uac[branch].uri.len=GET_RURI(i_req)->len;
492         if (unlikely(i_req->path_vec.s && i_req->path_vec.len)){
493                 t->uac[branch].path.s=shm_malloc(i_req->path_vec.len+1);
494                 if (unlikely(t->uac[branch].path.s==0)) {
495                         shm_free(shbuf);
496                         t->uac[branch].request.buffer=0;
497                         t->uac[branch].request.buffer_len=0;
498                         t->uac[branch].uri.s=0;
499                         t->uac[branch].uri.len=0;
500                         ret=E_OUT_OF_MEM;
501                         goto error01;
502                 }
503                 t->uac[branch].path.len=i_req->path_vec.len;
504                 t->uac[branch].path.s[i_req->path_vec.len]=0;
505                 memcpy( t->uac[branch].path.s, i_req->path_vec.s, i_req->path_vec.len);
506         }
507         if (unlikely(i_req->instance.s && i_req->instance.len)){
508                 t->uac[branch].instance.s=shm_malloc(i_req->instance.len+1);
509                 if (unlikely(t->uac[branch].instance.s==0)) {
510                         shm_free(shbuf);
511                         t->uac[branch].request.buffer=0;
512                         t->uac[branch].request.buffer_len=0;
513                         t->uac[branch].uri.s=0;
514                         t->uac[branch].uri.len=0;
515                         ret=E_OUT_OF_MEM;
516                         goto error01;
517                 }
518                 t->uac[branch].instance.len=i_req->instance.len;
519                 t->uac[branch].instance.s[i_req->instance.len]=0;
520                 memcpy( t->uac[branch].instance.s, i_req->instance.s, i_req->instance.len);
521         }
522         if (unlikely(i_req->ruid.s && i_req->ruid.len)){
523                 t->uac[branch].ruid.s=shm_malloc(i_req->ruid.len+1);
524                 if (unlikely(t->uac[branch].ruid.s==0)) {
525                         shm_free(shbuf);
526                         t->uac[branch].request.buffer=0;
527                         t->uac[branch].request.buffer_len=0;
528                         t->uac[branch].uri.s=0;
529                         t->uac[branch].uri.len=0;
530                         ret=E_OUT_OF_MEM;
531                         goto error01;
532                 }
533                 t->uac[branch].ruid.len=i_req->ruid.len;
534                 t->uac[branch].ruid.s[i_req->ruid.len]=0;
535                 memcpy( t->uac[branch].ruid.s, i_req->ruid.s, i_req->ruid.len);
536         }
537         if (unlikely(i_req->location_ua.s && i_req->location_ua.len)){
538                 t->uac[branch].location_ua.s=shm_malloc(i_req->location_ua.len+1);
539                 if (unlikely(t->uac[branch].location_ua.s==0)) {
540                         shm_free(shbuf);
541                         t->uac[branch].request.buffer=0;
542                         t->uac[branch].request.buffer_len=0;
543                         t->uac[branch].uri.s=0;
544                         t->uac[branch].uri.len=0;
545                         ret=E_OUT_OF_MEM;
546                         goto error01;
547                 }
548                 t->uac[branch].location_ua.len=i_req->location_ua.len;
549                 t->uac[branch].location_ua.s[i_req->location_ua.len]=0;
550                 memcpy( t->uac[branch].location_ua.s, i_req->location_ua.s,
551                                 i_req->location_ua.len);
552         }
553
554         len = count_applied_lumps(i_req->add_rm, HDR_RECORDROUTE_T);
555         if(len==1)
556                 t->uac[branch].flags = TM_UAC_FLAG_RR;
557         else if(len==2)
558                 t->uac[branch].flags = TM_UAC_FLAG_RR|TM_UAC_FLAG_R2;
559
560         ret=0;
561
562 error01:
563 error03:
564         /* restore the new_uri & path from the backup */
565         if (unlikely(free_new_uri && i_req->new_uri.s)){
566                 pkg_free(i_req->new_uri.s);
567         }
568         if (unlikely(free_path)){
569                 reset_path_vector(i_req);
570         }
571         if (unlikely(free_instance)){
572                 reset_instance(i_req);
573         }
574         if (unlikely(free_ruid)){
575                 reset_ruid(i_req);
576         }
577         if (unlikely(free_ua)){
578                 reset_ua(i_req);
579         }
580         if (dst_uri_backed_up){
581                 reset_dst_uri(i_req); /* free dst_uri */
582                 i_req->dst_uri=dst_uri_bak;
583         }
584         /* restore original new_uri and path values */
585         i_req->new_uri=msg_uri_bak;
586         i_req->parsed_uri=parsed_uri_bak;
587         i_req->parsed_uri_ok=parsed_uri_ok_bak;
588         i_req->path_vec=path_bak;
589         i_req->instance=instance_bak;
590         i_req->ruid=ruid_bak;
591         i_req->location_ua=ua_bak;
592
593         /* Delete the duplicated lump lists, this will also delete
594          * all lumps created here, such as lumps created in per-branch
595          * routing sections, Via, and Content-Length headers created in
596          * build_req_buf_from_sip_req
597          */
598 error04:
599         free_duped_lump_list(i_req->add_rm);
600         free_duped_lump_list(i_req->body_lumps);
601         /* Restore the lists from backups */
602         i_req->add_rm = add_rm_backup;
603         i_req->body_lumps = body_lumps_backup;
604
605 error00:
606         return ret;
607 }
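/* Illustrative sketch (hypothetical, not part of the module, compiled out):
 * the calling pattern for prepare_new_uac() as used by add_uac() and
 * e2e_cancel_branch() below -- prepare the branch buffer first, and only
 * advance t->nr_of_outgoings once it succeeded. The helper name is made up
 * for illustration. */
#if 0
static int example_prepare_branch(struct cell *t, struct sip_msg *req,
		int branch, str *ruri)
{
	snd_flags_t snd_flags;
	int ret;

	SND_FLAGS_INIT(&snd_flags);
	/* resolve the r-uri itself (next_hop==ruri) and fill request.dst */
	ret = prepare_new_uac(t, req, branch, ruri, 0 /* path */, ruri,
			0 /* fsocket */, snd_flags, PROTO_NONE, 0 /* flags */,
			0 /* instance */, 0 /* ruid */, 0 /* location_ua */);
	if (ret < 0)
		return ret; /* branch buffer not installed */
	return 0;
}
#endif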
608
609 #ifdef USE_DNS_FAILOVER
610 /* Similar to print_uac_request(), but this function uses the outgoing message
611  * buffer of the failed branch to construct the new message in case of DNS
612  * failover.
613  *
614  * WARNING: only the first VIA header is replaced in the buffer; the rest
615  * of the message is untouched, thus the send socket is corrected only in the
616  * VIA HF.
617  */
618 static char *print_uac_request_from_buf( struct cell *t, struct sip_msg *i_req,
619                 int branch, str *uri, unsigned int *len, struct dest_info* dst,
620                 char *buf, short buf_len)
621 {
622         char *shbuf;
623         str branch_str;
624         char *via, *old_via_begin, *old_via_end;
625         unsigned int via_len;
626
627         shbuf=0;
628
629         /* ... we calculate branch ... */
630         if (!t_calc_branch(t, branch, i_req->add_to_branch_s,
631                                 &i_req->add_to_branch_len ))
632         {
633                 LM_ERR("branch computation failed\n");
634                 goto error00;
635         }
636         branch_str.s = i_req->add_to_branch_s;
637         branch_str.len = i_req->add_to_branch_len;
638
639         /* find the beginning of the first via header in the buffer */
640         old_via_begin = lw_find_via(buf, buf+buf_len);
641         if (!old_via_begin) {
642                 LM_ERR("beginning of via header not found\n");
643                 goto error00;
644         }
645         /* find the end of the first via header in the buffer */
646         old_via_end = lw_next_line(old_via_begin, buf+buf_len);
647         if (!old_via_end) {
648                 LM_ERR("end of via header not found\n");
649                 goto error00;
650         }
651
652         /* create the new VIA HF */
653         via = create_via_hf(&via_len, i_req, dst, &branch_str);
654         if (!via) {
655                 LM_ERR("via building failed\n");
656                 goto error00;
657         }
658
659         /* allocate memory for the new buffer */
660         *len = buf_len + via_len - (old_via_end - old_via_begin);
661         shbuf=(char *)shm_malloc(*len);
662         if (!shbuf) {
663                 ser_error=E_OUT_OF_MEM;
664                 LM_ERR("no shmem\n");
665                 goto error01;
666         }
667
668         /* construct the new buffer */
669         memcpy(shbuf, buf, old_via_begin-buf);
670         memcpy(shbuf+(old_via_begin-buf), via, via_len);
671         memcpy(shbuf+(old_via_begin-buf)+via_len, old_via_end, (buf+buf_len)-old_via_end);
672
673 #ifdef DBG_MSG_QA
674         if (shbuf[*len-1]==0) {
675                 LM_ERR("sanity check failed\n");
676                 abort();
677         }
678 #endif
679
680 error01:
681         pkg_free(via);
682 error00:
683         return shbuf;
684 }
685 #endif
686
687 /* introduce a new uac which is blind -- it only creates the
688  * data structures and starts the FR timer, but that's it; it does
689  * not print or send anything anywhere; that is good
690  * for FIFO apps -- the transaction must look operational
691  * and FR must be ticking, whereas the request is "forwarded"
692  * by non-SIP means and will be replied to the same way
693  */
694 int add_blind_uac( /*struct cell *t*/ )
695 {
696         unsigned short branch;
697         struct cell *t;
698
699         t=get_t();
700         if (t==T_UNDEFINED || !t ) {
701                 LM_ERR("no transaction context\n");
702                 return -1;
703         }
704
705         branch=t->nr_of_outgoings;
706         if (branch==sr_dst_max_branches) {
707                 LM_ERR("maximum number of branches exceeded\n");
708                 return -1;
709         }
710         /* make sure it will be replied */
711         t->flags |= T_NOISY_CTIMER_FLAG;
712         membar_write(); /* to allow lockless prepare_to_cancel() we want to be sure
713                                          * all the writes finished before updating branch number*/
714
715         t->uac[branch].flags |= TM_UAC_FLAG_BLIND;
716         t->nr_of_outgoings=(branch+1);
717         t->async_backup.blind_uac = branch;
718         /* ^^^ whenever we create a blind UAC, save the current branch;
719          * this is used in async tm processing, specifically to be able to route replies
720          * that were possibly in response to a request forwarded on this blind UAC --
721          * we still want such replies to be processed as if it were a normal UAC */
722
723         /* start FR timer -- protocol set by default to PROTO_NONE,
724          * which means retransmission timer will not be started
725          */
726         if (start_retr(&t->uac[branch].request)!=0)
727                 LM_CRIT("start retr failed for %p\n", &t->uac[branch].request);
728         /* we are on a timer -- don't need to put on wait on script clean-up */
729         set_kr(REQ_FWDED);
730
731         return 1; /* success */
732 }
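/* Illustrative sketch (hypothetical, not part of the module, compiled out):
 * how a caller with an existing transaction might create a blind branch for
 * async/FIFO style handling; the final reply is later injected via the tm
 * reply API. The helper name is made up for illustration. */
#if 0
static int example_suspend_on_blind_uac(void)
{
	struct cell *t;

	t = get_t();
	if (t == T_UNDEFINED || !t)
		return -1;
	if (add_blind_uac() < 0) /* returns 1 on success, -1 on error */
		return -1;
	/* the blind branch index is saved in t->async_backup.blind_uac */
	return t->async_backup.blind_uac;
}
#endif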
733
734 /** introduce a new uac to transaction.
735  *  It doesn't send a message yet -- a reply to it might interfere with the
736  *  process of adding multiple branches. On error returns <0 and sets ser_error
737  *  to the same value.
738  *  @param t - transaction
739  *  @param request - corresponding sip_msg, must be non-null; flags might be
740  *                   modified (on_branch route).
741  *  @param uri - uri used for the branch (must be non-null).
742  *  @param next_hop - next_hop in sip uri format. If null and proxy is null
743  *                    too, the uri will be used
744  *  @param path     - path vector (list of route like destinations in sip
745  *                     uri format, e.g.: "<sip:1.2.3.4;lr>, <sip:5.6.7.8;lr>").
746  *  @param proxy    - proxy structure. If non-null it takes precedence over
747  *                    next_hop/uri and it will be used for forwarding.
748  *  @param fsocket  - forced forward send socket (can be 0).
749  *  @param snd_flags - special send flags (see SND_F_* / snd_flags_t)
750  *  @param proto    - forced protocol for forwarding (overrides the protocol
751  *                    in next_hop/uri or proxy if != PROTO_NONE).
752  *  @param flags    - special flags passed to prepare_new_uac().
753  *                    @see prepare_new_uac().
754  *  @returns branch id (>=0) or error (<0)
755  */
756 int add_uac( struct cell *t, struct sip_msg *request, str *uri,
757                 str* next_hop, str* path, struct proxy_l *proxy,
758                 struct socket_info* fsocket, snd_flags_t snd_flags,
759                 int proto, int flags, str *instance, str *ruid,
760                 str *location_ua)
761 {
762
763         int ret;
764         unsigned short branch;
765
766         branch=t->nr_of_outgoings;
767         if (branch==sr_dst_max_branches) {
768                 LM_ERR("maximum number of branches exceeded\n");
769                 ret=ser_error=E_TOO_MANY_BRANCHES;
770                 goto error;
771         }
772
773         /* check existing buffer -- rewriting should never occur */
774         if (t->uac[branch].request.buffer) {
775                 LM_CRIT("buffer rewrite attempt\n");
776                 ret=ser_error=E_BUG;
777                 goto error;
778         }
779
780         /* check DNS resolution */
781         if (proxy){
782                 /* dst filled from the proxy */
783                 init_dest_info(&t->uac[branch].request.dst);
784                 t->uac[branch].request.dst.proto=get_proto(proto, proxy->proto);
785                 proxy2su(&t->uac[branch].request.dst.to, proxy);
786                 /* fill dst send_sock */
787                 t->uac[branch].request.dst.send_sock =
788                         get_send_socket( request, &t->uac[branch].request.dst.to,
789                                         t->uac[branch].request.dst.proto);
790                 if (request)
791                         t->uac[branch].request.dst.send_flags=request->fwd_send_flags;
792                 else
793                         SND_FLAGS_INIT(&t->uac[branch].request.dst.send_flags);
794                 next_hop=0;
795         }else {
796                 next_hop= next_hop?next_hop:uri;
797         }
798
799         /* now message printing starts ... */
800         if (unlikely( (ret=prepare_new_uac(t, request, branch, uri, path,
801                                                 next_hop, fsocket, snd_flags,
802                                                 proto, flags, instance, ruid,
803                                                 location_ua)) < 0)){
804                 ser_error=ret;
805                 goto error01;
806         }
807         getbflagsval(0, &t->uac[branch].branch_flags);
808         membar_write(); /* to allow lockless ops (e.g. prepare_to_cancel()) we want
809                                          * to be sure everything above is fully written before
810                                          * updating branches no. */
811         t->nr_of_outgoings=(branch+1);
812
813         /* update stats */
814         if (proxy){
815                 proxy_mark(proxy, 1);
816         }
817         /* done! */
818         ret=branch;
819
820 error01:
821 error:
822         return ret;
823 }
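/* Illustrative sketch (hypothetical, not part of the module, compiled out):
 * adding one extra branch to an already created transaction for a given
 * request uri, letting the uri double as next hop; all optional parameters
 * are left unset. The helper name is made up for illustration. */
#if 0
static int example_add_one_branch(struct cell *t, struct sip_msg *req, str *ruri)
{
	snd_flags_t snd_flags;
	int branch;

	SND_FLAGS_INIT(&snd_flags);
	branch = add_uac(t, req, ruri, 0 /* next_hop: fall back to uri */,
			0 /* path */, 0 /* proxy */, 0 /* forced socket */,
			snd_flags, PROTO_NONE, 0 /* flags */,
			0 /* instance */, 0 /* ruid */, 0 /* location_ua */);
	/* on error (<0) ser_error is already set by add_uac() */
	return branch;
}
#endif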
824
825
826
827 #ifdef USE_DNS_FAILOVER
828 /* Similar to add_uac(), but this function uses the outgoing message buffer
829  * of the failed branch to construct the new message in case of DNS failover.
830  */
831 static int add_uac_from_buf( struct cell *t, struct sip_msg *request,
832                 str *uri, str* path,
833                 struct socket_info* fsocket,
834                 snd_flags_t send_flags,
835                 int proto,
836                 char *buf, short buf_len,
837                 str *instance, str *ruid,
838                 str *location_ua)
839 {
840
841         int ret;
842         unsigned short branch;
843         char *shbuf;
844         unsigned int len;
845
846         branch=t->nr_of_outgoings;
847         if (branch==sr_dst_max_branches) {
848                 LM_ERR("maximum number of branches exceeded\n");
849                 ret=ser_error=E_TOO_MANY_BRANCHES;
850                 goto error;
851         }
852
853         /* check existing buffer -- rewriting should never occur */
854         if (t->uac[branch].request.buffer) {
855                 LM_CRIT("buffer rewrite attempt\n");
856                 ret=ser_error=E_BUG;
857                 goto error;
858         }
859
860         if (uri2dst2(&t->uac[branch].dns_h, &t->uac[branch].request.dst,
861                                 fsocket, send_flags, uri, proto) == 0)
862         {
863                 ret=ser_error=E_BAD_ADDRESS;
864                 goto error;
865         }
866
867         /* check if send_sock is ok */
868         if (t->uac[branch].request.dst.send_sock==0) {
869                 LM_ERR("can't fwd to af %d, proto %d"
870                                 " (no corresponding listening socket)\n",
871                                 t->uac[branch].request.dst.to.s.sa_family,
872                                 t->uac[branch].request.dst.proto );
873                 ret=ser_error=E_NO_SOCKET;
874                 goto error;
875         }
876
877         /* now message printing starts ... */
878         shbuf=print_uac_request_from_buf( t, request, branch, uri,
879                         &len, &t->uac[branch].request.dst,
880                         buf, buf_len);
881         if (!shbuf) {
882                 ret=ser_error=E_OUT_OF_MEM;
883                 goto error;
884         }
885
886         /* things went well, move ahead and install new buffer! */
887         t->uac[branch].request.buffer=shbuf;
888         t->uac[branch].request.buffer_len=len;
889         t->uac[branch].uri.s=t->uac[branch].request.buffer+
890                 request->first_line.u.request.method.len+1;
891         t->uac[branch].uri.len=uri->len;
892         /* copy the path */
893         if (unlikely(path && path->s)){
894                 t->uac[branch].path.s=shm_malloc(path->len+1);
895                 if (unlikely(t->uac[branch].path.s==0)) {
896                         shm_free(shbuf);
897                         t->uac[branch].request.buffer=0;
898                         t->uac[branch].request.buffer_len=0;
899                         t->uac[branch].uri.s=0;
900                         t->uac[branch].uri.len=0;
901                         ret=ser_error=E_OUT_OF_MEM;
902                         goto error;
903                 }
904                 t->uac[branch].path.len=path->len;
905                 t->uac[branch].path.s[path->len]=0;
906                 memcpy( t->uac[branch].path.s, path->s, path->len);
907         }
908         /* copy the instance */
909         if (unlikely(instance && instance->s)){
910                 t->uac[branch].instance.s=shm_malloc(instance->len+1);
911                 if (unlikely(t->uac[branch].instance.s==0)) {
912                         shm_free(shbuf);
913                         t->uac[branch].request.buffer=0;
914                         t->uac[branch].request.buffer_len=0;
915                         t->uac[branch].uri.s=0;
916                         t->uac[branch].uri.len=0;
917                         ret=ser_error=E_OUT_OF_MEM;
918                         goto error;
919                 }
920                 t->uac[branch].instance.len=instance->len;
921                 t->uac[branch].instance.s[instance->len]=0;
922                 memcpy( t->uac[branch].instance.s, instance->s, instance->len);
923         }
924         /* copy the ruid */
925         if (unlikely(ruid && ruid->s)){
926                 t->uac[branch].ruid.s=shm_malloc(ruid->len+1);
927                 if (unlikely(t->uac[branch].ruid.s==0)) {
928                         shm_free(shbuf);
929                         t->uac[branch].request.buffer=0;
930                         t->uac[branch].request.buffer_len=0;
931                         t->uac[branch].uri.s=0;
932                         t->uac[branch].uri.len=0;
933                         ret=ser_error=E_OUT_OF_MEM;
934                         goto error;
935                 }
936                 t->uac[branch].ruid.len=ruid->len;
937                 t->uac[branch].ruid.s[ruid->len]=0;
938                 memcpy( t->uac[branch].ruid.s, ruid->s, ruid->len);
939         }
940         /* copy the location_ua */
941         if (unlikely(location_ua && location_ua->s)){
942                 t->uac[branch].location_ua.s=shm_malloc(location_ua->len+1);
943                 if (unlikely(t->uac[branch].location_ua.s==0)) {
944                         shm_free(shbuf);
945                         t->uac[branch].request.buffer=0;
946                         t->uac[branch].request.buffer_len=0;
947                         t->uac[branch].uri.s=0;
948                         t->uac[branch].uri.len=0;
949                         ret=ser_error=E_OUT_OF_MEM;
950                         goto error;
951                 }
952                 t->uac[branch].location_ua.len=location_ua->len;
953                 t->uac[branch].location_ua.s[location_ua->len]=0;
954                 memcpy( t->uac[branch].location_ua.s, location_ua->s, location_ua->len);
955         }
956
957         t->uac[branch].on_reply = t->on_reply;
958         t->uac[branch].on_failure = t->on_failure;
959         t->uac[branch].on_branch_failure = t->on_branch_failure;
960
961         membar_write(); /* to allow lockless ops (e.g. prepare_to_cancel()) we want
962                                          * to be sure everything above is fully written before
963                                          * updating branches no. */
964         t->nr_of_outgoings=(branch+1);
965
966         /* done! */
967         ret=branch;
968
969 error:
970         return ret;
971 }
972
973 /* introduce a new uac to the transaction, based on old_uac and a possible
974  *  new ip address (if the dns name resolves to more ips). If no more
975  *  ips are found => returns -1.
976  *  Returns the branch id (>=0)
977  *  or an error (<0) and sets ser_error if needed; it doesn't send a message
978  *  yet -- a reply to it
979  *  might interfere with the process of adding multiple branches.
980  *  If lock_replies is 1, replies will be locked for t until the new branch
981  *  is added (to prevent races when adding branches). Use 0 if the reply lock
982  *  is already held, e.g. in failure route/handlers (WARNING: using 1 in a
983  *  failure route will cause a deadlock).
984  */
985 int add_uac_dns_fallback(struct cell *t, struct sip_msg* msg,
986                 struct ua_client* old_uac,
987                 int lock_replies)
988 {
989         int ret;
990
991         ret=-1;
992         if (cfg_get(core, core_cfg, use_dns_failover) &&
993                         !((t->flags & (T_DONT_FORK|T_DISABLE_FAILOVER)) ||
994                                 uac_dont_fork(old_uac)) &&
995                         dns_srv_handle_next(&old_uac->dns_h, 0)){
996                 if (lock_replies){
997                         /* use reply lock to guarantee nobody is adding a branch
998                          * in the same time */
999                         LOCK_REPLIES(t);
1000                         /* check again that we can fork */
1001                         if ((t->flags & T_DONT_FORK) || uac_dont_fork(old_uac)){
1002                                 UNLOCK_REPLIES(t);
1003                                 LM_DBG("no forking on => no new branches\n");
1004                                 return ret;
1005                         }
1006                 }
1007                 if (t->nr_of_outgoings >= sr_dst_max_branches){
1008                         LM_ERR("maximum number of branches exceeded\n");
1009                         if (lock_replies)
1010                                 UNLOCK_REPLIES(t);
1011                         ret=ser_error=E_TOO_MANY_BRANCHES;
1012                         return ret;
1013                 }
1014                 /* copy the dns handle into the new uac */
1015                 dns_srv_handle_cpy(&t->uac[t->nr_of_outgoings].dns_h,
1016                                 &old_uac->dns_h);
1017                 /* copy the onreply and onfailure routes */
1018                 t->uac[t->nr_of_outgoings].on_failure = old_uac->on_failure;
1019                 t->uac[t->nr_of_outgoings].on_reply = old_uac->on_reply;
1020                 t->uac[t->nr_of_outgoings].on_branch_failure = old_uac->on_branch_failure;
1021                 /* copy branch flags */
1022                 t->uac[t->nr_of_outgoings].branch_flags = old_uac->branch_flags;
1023
1024                 if (cfg_get(tm, tm_cfg, reparse_on_dns_failover)){
1025                         /* Reuse the old buffer and only replace the via header.
1026                          * The drawback is that the send_socket is not corrected
1027                          * in the rest of the message, only in the VIA HF (Miklos) */
1028                         ret=add_uac_from_buf(t,  msg, &old_uac->uri,
1029                                         &old_uac->path,
1030                                         (old_uac->request.dst.send_flags.f & SND_F_FORCE_SOCKET)?
1031                                                 old_uac->request.dst.send_sock:
1032                                                 ((tm_dns_reuse_rcv_socket)?msg->rcv.bind_address:0),
1033                                         old_uac->request.dst.send_flags,
1034                                         old_uac->request.dst.proto,
1035                                         old_uac->request.buffer,
1036                                         old_uac->request.buffer_len,
1037                                         &old_uac->instance, &old_uac->ruid,
1038                                         &old_uac->location_ua);
1039                 } else {
1040                         /* add_uac will use dns_h => next_hop will be ignored.
1041                          * Unfortunately we can't reuse the old buffer, the branch id
1042                          *  must be changed and the send_socket might be different =>
1043                          *  re-create the whole uac */
1044                         ret=add_uac(t,  msg, &old_uac->uri, 0, &old_uac->path, 0,
1045                                         (old_uac->request.dst.send_flags.f & SND_F_FORCE_SOCKET)?
1046                                                 old_uac->request.dst.send_sock:
1047                                                 ((tm_dns_reuse_rcv_socket)?msg->rcv.bind_address:0),
1048                                         old_uac->request.dst.send_flags,
1049                                         old_uac->request.dst.proto, UAC_DNS_FAILOVER_F,
1050                                         &old_uac->instance, &old_uac->ruid,
1051                                         &old_uac->location_ua);
1052                 }
1053
1054                 if (ret<0){
1055                         /* failed, delete the copied dns_h */
1056                         dns_srv_handle_put(&t->uac[t->nr_of_outgoings].dns_h);
1057                 }
1058                 if (lock_replies){
1059                         UNLOCK_REPLIES(t);
1060                 }
1061         }
1062         return ret;
1063 }
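/* Illustrative sketch (hypothetical, not part of the module, compiled out):
 * a failure handler that already holds the reply lock trying a DNS failover
 * branch for the branch that just failed; lock_replies must therefore be 0
 * here (passing 1 from a failure route would deadlock, as noted above). */
#if 0
static int example_dns_failover(struct cell *t, struct sip_msg *msg,
		int failed_branch)
{
	int new_branch;

	new_branch = add_uac_dns_fallback(t, msg, &t->uac[failed_branch], 0);
	/* <0: no more ips or error (ser_error may be set), >=0: new branch id */
	return new_branch;
}
#endif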
1064
1065 #endif
1066
1067 int e2e_cancel_branch( struct sip_msg *cancel_msg, struct cell *t_cancel,
1068                 struct cell *t_invite, int branch )
1069 {
1070         int ret;
1071         char *shbuf;
1072         unsigned int len;
1073         snd_flags_t snd_flags;
1074
1075         ret=-1;
1076         if (t_cancel->uac[branch].request.buffer) {
1077                 LM_CRIT("buffer rewrite attempt\n");
1078                 ret=ser_error=E_BUG;
1079                 goto error;
1080         }
1081         if (t_invite->uac[branch].request.buffer==0){
1082                 /* inactive / deleted  branch */
1083                 goto error;
1084         }
1085         t_invite->uac[branch].request.flags|=F_RB_CANCELED;
1086
1087         /* note -- there is a gap in proxy stats -- we don't update
1088          * proxy stats with CANCEL (proxy->ok, proxy->tx, etc.)
1089          */
1090
1091         /* set same dst as the invite */
1092         t_cancel->uac[branch].request.dst=t_invite->uac[branch].request.dst;
1093         /* print */
1094         if (cfg_get(tm, tm_cfg, reparse_invite)) {
1095                 /* buffer is built locally from the INVITE which was sent out */
1096                 /* lumps can be set outside of the lock, make sure that we read
1097                  * the up-to-date values */
1098                 membar_depends();
1099                 if (cancel_msg->add_rm || cancel_msg->body_lumps) {
1100                         LM_WARN("CANCEL is built locally,"
1101                                         " thus lumps are not applied to the message!\n");
1102                 }
1103                 shbuf=build_local_reparse( t_invite, branch, &len, CANCEL,
1104                                 CANCEL_LEN, &t_invite->to
1105 #ifdef CANCEL_REASON_SUPPORT
1106                                 , 0
1107 #endif /* CANCEL_REASON_SUPPORT */
1108                                 );
1109                 if (unlikely(!shbuf)) {
1110                         LM_ERR("printing e2e cancel failed\n");
1111                         ret=ser_error=E_OUT_OF_MEM;
1112                         goto error;
1113                 }
1114                 /* install buffer */
1115                 t_cancel->uac[branch].request.buffer=shbuf;
1116                 t_cancel->uac[branch].request.buffer_len=len;
1117                 t_cancel->uac[branch].uri.s=t_cancel->uac[branch].request.buffer+
1118                         cancel_msg->first_line.u.request.method.len+1;
1119                 t_cancel->uac[branch].uri.len=t_invite->uac[branch].uri.len;
1120         } else {
1121                 SND_FLAGS_INIT(&snd_flags);
1122                 /* buffer is constructed from the received CANCEL with lumps applied */
1123                 /*  t_cancel...request.dst is already filled (see above) */
1124                 if (unlikely((ret=prepare_new_uac( t_cancel, cancel_msg, branch,
1125                                                         &t_invite->uac[branch].uri,
1126                                                         &t_invite->uac[branch].path,
1127                                                         0, 0, snd_flags, PROTO_NONE, 0,
1128                                                         NULL, NULL, NULL)) <0)){
1129                         ser_error=ret;
1130                         goto error;
1131                 }
1132         }
1133         /* success */
1134         ret=1;
1135
1136 error:
1137         return ret;
1138 }
1139
1140
1141
1142 #ifdef CANCEL_REASON_SUPPORT
1143 /** create a cancel reason structure packed into a single shm block.
1144  * From a cause and a pointer to a str or cancel_msg, build a
1145  * packed cancel reason structure (CANCEL_REAS_PACKED_HDRS), using a
1146  * single memory allocation (so that it can be freed by a simple shm_free()).
1147  * @param cause - cancel cause, @see cancel_reason for more details.
1148  * @param data - depends on the cancel cause.
1149  * @return pointer to the shm-packed cancel reason struct on success,
1150  *        0 on error
1151  */
1152 static struct cancel_reason* cancel_reason_pack(short cause, void* data,
1153                 struct cell* t)
1154 {
1155         char* d;
1156         struct cancel_reason* cr;
1157         int reason_len;
1158         int code_len;
1159         struct hdr_field *reas1, *reas_last, *hdr;
1160         str* txt;
1161         struct sip_msg* e2e_cancel;
1162
1163         if (likely(cause != CANCEL_REAS_UNKNOWN)){
1164                 reason_len = 0;
1165                 txt = 0;
1166                 e2e_cancel = 0;
1167                 reas1 = 0;
1168                 reas_last = 0;
1169                 if (likely(cause == CANCEL_REAS_RCVD_CANCEL &&
1170                                         data && !(t->flags & T_NO_E2E_CANCEL_REASON))) {
1171                         /* parse the entire cancel, to get all the Reason headers */
1172                         e2e_cancel = data;
1173                         if(parse_headers(e2e_cancel, HDR_EOH_F, 0)==-1) {
1174                                 LM_ERR("failed to parse headers\n");
1175                                 goto error;
1176                         }
1177                         for(hdr=get_hdr(e2e_cancel, HDR_REASON_T), reas1=hdr;
1178                                         hdr; hdr=next_sibling_hdr(hdr)) {
1179                                 /* hdr->len includes CRLF */
1180                                 reason_len += hdr->len;
1181                                 reas_last=hdr;
1182                         }
1183                 } else if (likely(cause > 0 &&
1184                                         cfg_get(tm, tm_cfg, local_cancel_reason))){
1185                         txt = (str*) data;
1186                         /* Reason: SIP;cause=<reason->cause>[;text=<reason->u.text.s>] */
1187                         reason_len = REASON_PREFIX_LEN + USHORT2SBUF_MAX_LEN +
1188                                 ((txt && txt->s)?REASON_TEXT_LEN + 1 + txt->len + 1 : 0)
1189                                         + CRLF_LEN;
1190                 } else if (cause == CANCEL_REAS_PACKED_HDRS &&
1191                                 !(t->flags & T_NO_E2E_CANCEL_REASON) && data) {
1192                         txt = (str*) data;
1193                         reason_len = txt?txt->len:0;
1194                 } else if (unlikely(cause < CANCEL_REAS_MIN)) {
1195                         LM_CRIT("unhandled reason cause %d\n", cause);
1196                         goto error;
1197                 }
1198
1199                 if (unlikely(reason_len == 0))
1200                         return 0; /* nothing to do, no reason */
1201                 cr = shm_malloc(sizeof(struct cancel_reason) + reason_len);
1202                 if (unlikely(cr == 0))
1203                         goto error;
1204                 d = (char*)cr +sizeof(*cr);
1205                 cr->cause = CANCEL_REAS_PACKED_HDRS;
1206                 cr->u.packed_hdrs.s = d;
1207                 cr->u.packed_hdrs.len = reason_len;
1208
1209                 if (cause == CANCEL_REAS_RCVD_CANCEL) {
1210                         for(hdr=reas1; hdr; hdr=next_sibling_hdr(hdr)) {
1211                                 /* hdr->len includes CRLF */
1212                                 append_str(d, hdr->name.s, hdr->len);
1213                                 if (likely(hdr==reas_last))
1214                                         break;
1215                         }
1216                 } else if (likely(cause > 0)) {
1217                         append_str(d, REASON_PREFIX, REASON_PREFIX_LEN);
1218                         code_len=ushort2sbuf(cause, d, reason_len
1219                                         - (int)(d - (char*)cr - sizeof(*cr)));
1220                         if (unlikely(code_len==0)) {
1221                                 shm_free(cr);
1222                                 cr = 0;
1223                                 LM_CRIT("not enough space to write reason code\n");
1224                                 goto error;
1225                         }
1226                         d+=code_len;
1227                         if (txt && txt->s){
1228                                 append_str(d, REASON_TEXT, REASON_TEXT_LEN);
1229                                 *d='"'; d++;
1230                                 append_str(d, txt->s, txt->len);
1231                                 *d='"'; d++;
1232                         }
1233                         append_str(d, CRLF, CRLF_LEN);
1234                 } else if (cause == CANCEL_REAS_PACKED_HDRS) {
1235                         append_str(d, txt->s, txt->len);
1236                 }
1237                 return cr;
1238         }
1239 error:
1240         return 0;
1241 }
1242 #endif /* CANCEL_REASON_SUPPORT */
1243
1244
1245
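/** cancel all the pending branches of an INVITE transaction.
 * Marks t_invite as canceled, runs the TMCB_E2ECANCEL_IN callbacks and cancels
 * every branch selected by prepare_to_cancel() (hop-by-hop via cancel_branch()
 * when E2E_CANCEL_HOP_BY_HOP is defined, otherwise by building CANCEL uacs on
 * t_cancel). A reply is sent on t_cancel; if no branch exists at all, a 487 is
 * forced on the INVITE and a 200 on the CANCEL.
 */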
1246 void e2e_cancel( struct sip_msg *cancel_msg,
1247                 struct cell *t_cancel, struct cell *t_invite )
1248 {
1249         branch_bm_t cancel_bm;
1250 #ifndef E2E_CANCEL_HOP_BY_HOP
1251         branch_bm_t tmp_bm;
1252 #elif defined (CANCEL_REASON_SUPPORT)
1253         struct cancel_reason* reason;
1254         int free_reason;
1255 #endif /* E2E_CANCEL_HOP_BY_HOP */
1256         int i;
1257         int lowest_error;
1258         int ret;
1259         struct tmcb_params tmcb;
1260
1261         cancel_bm=0;
1262         lowest_error=0;
1263
1264         if (unlikely(has_tran_tmcbs(t_invite, TMCB_E2ECANCEL_IN))){
1265                 INIT_TMCB_PARAMS(tmcb, cancel_msg, 0, cancel_msg->REQ_METHOD);
1266                 run_trans_callbacks_internal(&t_invite->tmcb_hl, TMCB_E2ECANCEL_IN,
1267                                 t_invite, &tmcb);
1268         }
1269         /* mark the transaction as canceled, so that no new messages are forwarded
1270          * on it and t_is_canceled() returns true.
1271          * WARNING: it's safe to do it without locks, at least for now (in a race,
1272          * even if a flag is unintentionally reset, nothing bad will happen);
1273          * however this should be rechecked whenever new flags are added.
1274          */
1275         t_invite->flags|=T_CANCELED;
1276         /* first check if there are any branches */
1277         if (t_invite->nr_of_outgoings==0){
1278                 /* no branches yet => force a reply to the invite */
1279                 t_reply( t_invite, t_invite->uas.request, 487, CANCELED );
1280                 LM_DBG("e2e cancel -- no more pending branches\n");
1281                 t_reply( t_cancel, cancel_msg, 200, CANCEL_DONE );
1282                 return;
1283         }
1284
1285         /* determine which branches to cancel ... */
1286         prepare_to_cancel(t_invite, &cancel_bm, 0);
1287
1288         /* no branches to cancel (e.g., a suspended transaction with blind uac) */
1289         if (cancel_bm==0){
1290                 /* no outgoing branches yet => force a reply to the invite */
1291                 t_reply( t_invite, t_invite->uas.request, 487, CANCELED );
1292                 LM_DBG("e2e cancel -- no active branches\n");
1293                 t_reply( t_cancel, cancel_msg, 200, CANCEL_DONE );
1294                 return;
1295         }
1296
1297 #ifdef E2E_CANCEL_HOP_BY_HOP
1298         /* we don't need to set t_cancel label to be the same as t_invite if
1299          * we do hop by hop cancel. The cancel transaction will have a different
1300          * label, but this is not a problem since this transaction is only used to
1301          * send a reply back. The cancels sent upstream will be part of the invite
1302          * transaction (local_cancel retr. bufs) and they will be generated with
1303          * the same via as the invite.
1304          * Note however that setting t_cancel label the same as t_invite will work
1305          * too (the upstream cancel replies will properly match the t_invite
1306          * transaction and will not match the t_cancel because t_cancel will always
1307          * have 0 branches and we check for the branch number in
1308          * t_reply_matching() ).
1309          */
1310 #ifdef CANCEL_REASON_SUPPORT
1311         free_reason = 0;
1312         reason = 0;
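	/* pack the Reason info once and publish it on t_invite with a
	 * compare-and-swap, so concurrent CANCELs neither overwrite nor leak it;
	 * if we lose the race, the local copy is freed below (free_reason) */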
1313         if (likely(t_invite->uas.cancel_reas == 0)){
1314                 reason = cancel_reason_pack(CANCEL_REAS_RCVD_CANCEL, cancel_msg,
1315                                 t_invite);
1316                 /* set if not already set */
1317                 if (unlikely(reason &&
1318                                         atomic_cmpxchg_long((void*)&t_invite->uas.cancel_reas,
1319                                                 0, (long)reason) != 0)) {
1320                         /* already set, failed to re-set it */
1321                         free_reason = 1;
1322                 }
1323         }
1324 #endif /* CANCEL_REASON_SUPPORT */
1325         for (i=0; i<t_invite->nr_of_outgoings; i++) {
1326                 if (cancel_bm & (1<<i)) {
1327                         /* it's safe to get the reply lock since e2e_cancel is
1328                          * called with the cancel as the "current" transaction so
1329                          * at most t_cancel REPLY_LOCK is held in this process =>
1330                          * no deadlock possibility */
1331                         ret=cancel_branch(
1332                                         t_invite,
1333                                         i,
1334 #ifdef CANCEL_REASON_SUPPORT
1335                                         reason,
1336 #endif /* CANCEL_REASON_SUPPORT */
1337                                         cfg_get(tm,tm_cfg, cancel_b_flags)
1338                                         | ((t_invite->uac[i].request.buffer==NULL)?
1339                                                 F_CANCEL_B_FAKE_REPLY:0) /* blind UAC? */
1340                                         );
1341                         if (ret<0) cancel_bm &= ~(1<<i);
1342                         if (ret<lowest_error) lowest_error=ret;
1343                 }
1344         }
1345 #ifdef CANCEL_REASON_SUPPORT
1346         if (unlikely(free_reason)) {
1347                 /* reason was not set as the global reason => free it */
1348                 shm_free(reason);
1349         }
1350 #endif /* CANCEL_REASON_SUPPORT */
1351 #else /* ! E2E_CANCEL_HOP_BY_HOP */
1352         /* fix the label -- it must be the same for reply matching (the label is
1353          * part of the generated via branch for the cancels sent upstream and if it
1354          * were different from the one in the INVITE the transactions would not
1355          * match) */
1356         t_cancel->label=t_invite->label;
1357         t_cancel->nr_of_outgoings=t_invite->nr_of_outgoings;
1358         /* ... and install CANCEL UACs */
1359         for (i=0; i<t_invite->nr_of_outgoings; i++)
1360                 if ((cancel_bm & (1<<i)) && (t_invite->uac[i].last_received>=100)) {
1361                         ret=e2e_cancel_branch(cancel_msg, t_cancel, t_invite, i);
1362                         if (ret<0) cancel_bm &= ~(1<<i);
1363                         if (ret<lowest_error) lowest_error=ret;
1364                 }
1365
1366         /* send them out */
1367         for (i = 0; i < t_cancel->nr_of_outgoings; i++) {
1368                 if (cancel_bm & (1 << i)) {
1369                         if (t_invite->uac[i].last_received>=100){
1370                                 /* Provisional reply received on this branch, send CANCEL */
1371                                 /* we need to stop the retr. timers if the request is not
1372                                  * an invite; since the stop_rb_retr() cost is lower than
1373                                  * the invite check, we do it unconditionally --andrei */
1374                                 stop_rb_retr(&t_invite->uac[i].request);
1375                                 if (SEND_BUFFER(&t_cancel->uac[i].request) == -1) {
1376                                         LM_ERR("e2e cancel - send failed\n");
1377                                 }
1378                                 else{
1379                                         if (unlikely(has_tran_tmcbs(t_cancel, TMCB_REQUEST_SENT)))
1380                                                 run_trans_callbacks_with_buf(TMCB_REQUEST_SENT,
1381                                                                 &t_cancel->uac[i].request,
1382                                                                 cancel_msg, 0, TMCB_LOCAL_F);
1383                                 }
1384                                 if (start_retr( &t_cancel->uac[i].request )!=0)
1385                                         LM_CRIT("BUG: failed to start retr."
1386                                                         " for %p\n", &t_cancel->uac[i].request);
1387                         } else {
1388                                 /* No provisional response received, stop
1389                                  * retransmission timers */
1390                                 if (!(cfg_get(tm, tm_cfg, cancel_b_flags)
1391                                                         & F_CANCEL_B_FORCE_RETR))
1392                                         stop_rb_retr(&t_invite->uac[i].request);
1393                                 /* no need to stop fr, it will be stopped by relay_reply
1394                                  * put_on_wait -- andrei */
1395                                 /* Generate faked reply */
1396                                 if (cfg_get(tm, tm_cfg, cancel_b_flags) &
1397                                                 F_CANCEL_B_FAKE_REPLY){
1398                                         LOCK_REPLIES(t_invite);
1399                                         if (relay_reply(t_invite, FAKED_REPLY, i,
1400                                                                 487, &tmp_bm, 1) == RPS_ERROR) {
1401                                                 lowest_error = -1;
1402                                         }
1403                                 }
1404                         }
1405                 }
1406         }
1407 #endif /*E2E_CANCEL_HOP_BY_HOP */
1408
1409         /* if an error occurred, let upstream know (the final reply
1410          * will also move the transaction to the wait state)
1411          */
1412         if (lowest_error<0) {
1413                 LM_ERR("cancel error\n");
1414                 /* if called from failure_route, make sure that the unsafe version
1415                  * is called (we are already holding the reply mutex for the cancel
1416                  * transaction).
1417                  */
1418                 if ((is_route_type(FAILURE_ROUTE)) && (t_cancel==get_t()))
1419                         t_reply_unsafe( t_cancel, cancel_msg, 500, "cancel error");
1420                 else
1421                         t_reply( t_cancel, cancel_msg, 500, "cancel error");
1422         } else if (cancel_bm) {
1423                 /* if there are pending branches, let upstream know we
1424                  * are working on it
1425                  */
1426                 LM_DBG("e2e cancel proceeding\n");
1427                 /* if called from failure_route, make sure that the unsafe version
1428                  * is called (we are already holding the reply mutex for the cancel
1429                  * transaction).
1430                  */
1431                 if ((is_route_type(FAILURE_ROUTE)) && (t_cancel==get_t()))
1432                         t_reply_unsafe( t_cancel, cancel_msg, 200, CANCELING );
1433                 else
1434                         t_reply( t_cancel, cancel_msg, 200, CANCELING );
1435         } else {
1436                 /* if the transaction exists, but there are no more pending
1437                  * branches, tell upstream we're done
1438                  */
1439                 LM_DBG("e2e cancel -- no more pending branches\n");
1440                 /* if called from failure_route, make sure that the unsafe version
1441                  * is called (we are already holding the reply mutex for the cancel
1442                  * transaction).
1443                  */
1444                 if ((is_route_type(FAILURE_ROUTE)) && (t_cancel==get_t()))
1445                         t_reply_unsafe( t_cancel, cancel_msg, 200, CANCEL_DONE );
1446                 else
1447                         t_reply( t_cancel, cancel_msg, 200, CANCEL_DONE );
1448         }
1449 }
1450
1451
1452
1453 /* sends one uac/branch buffer and falls back to other IPs if
1454  *  the destination resolves to several addresses.
1455  *  Takes care of starting timers etc. (on send success)
1456  *  returns: -2 on error, -1 on drop,  current branch id on success,
1457  *   new branch id on send error/blacklist, when failover is possible
1458  *    (ret>=0 && ret!=branch)
1459  *    if lock_replies is 1, the replies for t will be locked when adding
1460  *     new branches (to prevent races). Use 0 from failure routes or other
1461  *     places where the reply lock is already held, to avoid deadlocks. */
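/* Send path overview: run the onsend route, check the destination blacklist,
 * then send the buffer; at each failure point DNS failover (if enabled) may
 * add a replacement branch via add_uac_dns_fallback(). */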
1462 int t_send_branch( struct cell *t, int branch, struct sip_msg* p_msg ,
1463                 struct proxy_l * proxy, int lock_replies)
1464 {
1465         struct ip_addr ip; /* debugging */
1466         int ret;
1467         struct ua_client* uac;
1468
1469         uac=&t->uac[branch];
1470         ret=branch;
1471         if (run_onsend(p_msg,   &uac->request.dst, uac->request.buffer,
1472                                 uac->request.buffer_len)==0){
1473                 /* disable the current branch: set a "fake" timeout
1474                  *  reply code but don't set uac->reply, to avoid overriding
1475                  *  a highly unlikely, perfectly timed fake reply (to a message
1476                  *   we never sent).
1477                  * (code=final reply && reply==0 => t_pick_branch won't ever pick it)*/
1478                 uac->last_received=408;
1479                 su2ip_addr(&ip, &uac->request.dst.to);
1480                 LM_DBG("onsend_route dropped msg. to %s:%d (%d)\n",
1481                                 ip_addr2a(&ip), su_getport(&uac->request.dst.to),
1482                                 uac->request.dst.proto);
1483 #ifdef USE_DNS_FAILOVER
1484                 /* if the destination resolves to more ips, add another
1485                  *  branch/uac */
1486                 if (cfg_get(core, core_cfg, use_dns_failover)){
1487                         ret=add_uac_dns_fallback(t, p_msg, uac, lock_replies);
1488                         if (ret>=0){
1489                                 su2ip_addr(&ip, &uac->request.dst.to);
1490                                 LM_DBG("send on branch %d failed "
1491                                                 "(onsend_route), trying another ip %s:%d (%d)\n",
1492                                                 branch, ip_addr2a(&ip),
1493                                                 su_getport(&uac->request.dst.to),
1494                                                 uac->request.dst.proto);
1495                                 /* success, return new branch */
1496                                 return ret;
1497                         }
1498                 }
1499 #endif /* USE_DNS_FAILOVER*/
1500                 return -1; /* drop, try next branch */
1501         }
1502 #ifdef USE_DST_BLACKLIST
1503         if (cfg_get(core, core_cfg, use_dst_blacklist)
1504                         && p_msg
1505                         && (p_msg->REQ_METHOD
1506                                 & cfg_get(tm, tm_cfg, tm_blst_methods_lookup))
1507                 ){
1508                 if (dst_is_blacklisted(&uac->request.dst, p_msg)){
1509                         su2ip_addr(&ip, &uac->request.dst.to);
1510                         LM_DBG("blacklisted destination: %s:%d (%d)\n",
1511                                         ip_addr2a(&ip), su_getport(&uac->request.dst.to),
1512                                         uac->request.dst.proto);
1513                         /* disable the current branch: set a "fake" timeout
1514                          *  reply code but don't set uac->reply, to avoid overriding
1515                          *  a highly unlikely, perfectly timed fake reply (to a message
1516                          *   we never sent).  (code=final reply && reply==0 =>
1517                          *   t_pick_branch won't ever pick it)*/
1518                         uac->last_received=408;
1519 #ifdef USE_DNS_FAILOVER
1520                         /* if the destination resolves to more ips, add another
1521                          *  branch/uac */
1522                         if (cfg_get(core, core_cfg, use_dns_failover)){
1523                                 ret=add_uac_dns_fallback(t, p_msg, uac, lock_replies);
1524                                 if (ret>=0){
1525                                         su2ip_addr(&ip, &uac->request.dst.to);
1526                                         LM_DBG("send on branch %d failed (blacklist),"
1527                                                         " trying another ip %s:%d (%d)\n", branch,
1528                                                         ip_addr2a(&ip), su_getport(&uac->request.dst.to),
1529                                                         uac->request.dst.proto);
1530                                         /* success, return new branch */
1531                                         return ret;
1532                                 }
1533                         }
1534 #endif /* USE_DNS_FAILOVER*/
1535                         return -1; /* don't send */
1536                 }
1537         }
1538 #endif /* USE_DST_BLACKLIST */
1539         if (SEND_BUFFER( &uac->request)==-1) {
1540                 /* disable the current branch: set a "fake" timeout
1541                  *  reply code but don't set uac->reply, to avoid overriding
1542                  *  a highly unlikely, perfectly timed fake reply (to a message
1543                  *  we never sent).
1544                  * (code=final reply && reply==0 => t_pick_branch won't ever pick it)*/
1545                 uac->last_received=408;
1546                 su2ip_addr(&ip, &uac->request.dst.to);
1547                 LM_DBG("send to %s:%d (%d) failed\n",
1548                                 ip_addr2a(&ip), su_getport(&uac->request.dst.to),
1549                                 uac->request.dst.proto);
1550 #ifdef USE_DST_BLACKLIST
1551                 dst_blacklist_add(BLST_ERR_SEND, &uac->request.dst, p_msg);
1552 #endif
1553 #ifdef USE_DNS_FAILOVER
1554                 /* if the destination resolves to more ips, add another
1555                  *  branch/uac */
1556                 if (cfg_get(core, core_cfg, use_dns_failover)){
1557                         ret=add_uac_dns_fallback(t, p_msg, uac, lock_replies);
1558                         if (ret>=0){
1559                                 /* success, return new branch */
1560                                 LM_DBG("send on branch %d failed, adding another"
1561                                                 " branch with another ip\n", branch);
1562                                 return ret;
1563                         }
1564                 }
1565 #endif
1566                 uac->icode = 908; /* internal code set to delivery failure */
1567                 LM_WARN("sending request on branch %d failed\n", branch);
1568                 if (proxy) { proxy->errors++; proxy->ok=0; }
1569                 if(tm_failure_exec_mode==1) {
1570                         LM_DBG("putting branch %d on hold\n", branch);
1571                         /* put on retransmission timer,
1572                          * but set proto to NONE, so actually it is not trying to resend */
1573                         uac->request.dst.proto = PROTO_NONE;
1574                         /* reset last_received, 408 reply is faked by timer */
1575                         uac->last_received=0;
1576                         /* add to retransmission timer */
1577                         if (start_retr( &uac->request )!=0){
1578                                 LM_CRIT("BUG: retransmission already started for %p\n",
1579                                                 &uac->request);
1580                                 return -2;
1581                         }
1582                         return branch;
1583                 }
1584                 return -2;
1585         } else {
1586                 if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_SENT)))
1587                         run_trans_callbacks_with_buf(TMCB_REQUEST_SENT, &uac->request,
1588                                         p_msg, 0,0);
1589                 /* start retr. only if the send succeeded */
1590                 if (start_retr( &uac->request )!=0){
1591                         LM_CRIT("BUG: retransmission already started for: %p\n",
1592                                         &uac->request);
1593                         return -2;
1594                 }
1595         }
1596         return ret;
1597 }
1598
1599
1600
1601 /* function returns:
1602  *       1 - forward successful
1603  *      -1 - error during forward
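 * Flow overview: CANCELs are diverted to e2e_cancel(); on the first
 * forwarding step the request lumps are saved, then one uac is added per
 * R-URI/appended branch and every added branch is sent with t_send_branch().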
1604  */
1605 int t_forward_nonack( struct cell *t, struct sip_msg* p_msg,
1606                 struct proxy_l * proxy, int proto)
1607 {
1608         int branch_ret, lowest_ret;
1609         str current_uri;
1610         branch_bm_t     added_branches;
1611         int first_branch;
1612         int i, q;
1613         struct cell *t_invite;
1614         int success_branch;
1615         int try_new;
1616         int lock_replies;
1617         str dst_uri, path, instance, ruid, location_ua;
1618         struct socket_info* si;
1619         flag_t backup_bflags = 0;
1620         flag_t bflags = 0;
1621
1622
1623         /* make -Wall happy */
1624         current_uri.s=0;
1625
1626         getbflagsval(0, &backup_bflags);
1627
1628         if (t->flags & T_CANCELED) goto canceled;
1629
1630         if (p_msg->REQ_METHOD==METHOD_CANCEL) {
1631                 t_invite=t_lookupOriginalT(  p_msg );
1632                 if (t_invite!=T_NULL_CELL) {
1633                         e2e_cancel( p_msg, t, t_invite );
1634                         UNREF(t_invite);
1635                         /* it should be set to REQ_RPLD by e2e_cancel, which should
1636                          * send a final reply */
1637                         set_kr(REQ_FWDED);
1638                         return 1;
1639                 }
1640         }
1641
1642         /* if no more specific error code is known, use this */
1643         lowest_ret=E_UNSPEC;
1644         /* branches added */
1645         added_branches=0;
1646         /* branch to begin with */
1647         first_branch=t->nr_of_outgoings;
1648
1649         if (t->on_branch) {
1650                 /* tell add_uac that it should run branch route actions */
1651                 branch_route = t->on_branch;
1652                 /* save the branch route so that it
1653                  * can be used for adding branches later
1654                  */
1655                 t->on_branch_delayed = t->on_branch;
1656                 /* reset the flag before running the actions (so that it
1657                  * can be set again in branch_route if needed)
1658                  */
1659                 t_on_branch(0);
1660         } else {
1661                 branch_route = 0;
1662         }
1663
1664         /* on first-time forwarding, update the lumps */
1665         if (first_branch==0) {
1666                 /* update the shmem-ized msg with the lumps */
1667                 if ((is_route_type(REQUEST_ROUTE)) &&
1668                                 save_msg_lumps(t->uas.request, p_msg)) {
1669                         LM_ERR("failed to save the message lumps\n");
1670                         return -1;
1671                 }
1672         }
1673
1674         /* if the ruri is not already consumed (by another invocation), use the
1675          * current uri too; otherwise add only the additional branches (which may
1676          * be continuously refilled).
1677          */
1678         if (ruri_get_forking_state()) {
1679                 try_new=1;
1680                 branch_ret=add_uac( t, p_msg, GET_RURI(p_msg), GET_NEXT_HOP(p_msg),
1681                                 &p_msg->path_vec, proxy, p_msg->force_send_socket,
1682                                 p_msg->fwd_send_flags, proto,
1683                                 (p_msg->dst_uri.len)?0:UAC_SKIP_BR_DST_F, &p_msg->instance,
1684                                 &p_msg->ruid, &p_msg->location_ua);
1685                 /* test if cancel was received meanwhile */
1686                 if (t->flags & T_CANCELED) goto canceled;
1687                 if (branch_ret>=0)
1688                         added_branches |= 1<<branch_ret;
1689                 else
1690                         lowest_ret=MIN_int(lowest_ret, branch_ret);
1691         } else try_new=0;
1692
1693         init_branch_iterator();
1694         while((current_uri.s=next_branch( &current_uri.len, &q, &dst_uri, &path,
1695                                         &bflags, &si, &ruid, &instance, &location_ua))) {
1696                 try_new++;
1697                 setbflagsval(0, bflags);
1698
1699                 branch_ret=add_uac( t, p_msg, &current_uri,
1700                                 (dst_uri.len) ? (&dst_uri) : &current_uri,
1701                                 &path, proxy, si, p_msg->fwd_send_flags,
1702                                 proto, (dst_uri.len)?0:UAC_SKIP_BR_DST_F, &instance,
1703                                 &ruid, &location_ua);
1704                 /* test if cancel was received meanwhile */
1705                 if (t->flags & T_CANCELED) goto canceled;
1706                 /* pick some of the errors in case things go wrong;
1707                  * note that picking lowest error is just as good as
1708                  * any other algorithm which picks any other negative
1709                  * branch result */
1710                 if (branch_ret>=0)
1711                         added_branches |= 1<<branch_ret;
1712                 else
1713                         lowest_ret=MIN_int(lowest_ret, branch_ret);
1714         }
1715         /* consume processed branches */
1716         clear_branches();
1717
1718         setbflagsval(0, backup_bflags);
1719
1720         /* update message flags, if changed in branch route */
1721         t->uas.request->flags = p_msg->flags;
1722
1723         /* don't forget to clear all branches processed so far */
1724
1725         /* things went wrong ... no new branch has been fwd-ed at all */
1726         if (added_branches==0) {
1727                 if (try_new==0) {
1728                         LM_ERR("no branches for forwarding\n");
1729                         /* either failed to add branches, or there were no more branches
1730                         */
1731                         ser_error=MIN_int(lowest_ret, E_CFG);
1732                         return -1;
1733                 }
1734                 if(lowest_ret!=E_CFG)
1735                         LM_ERR("failure to add branches\n");
1736                 ser_error=lowest_ret;
1737                 return lowest_ret;
1738         }
1739
1740         /* mark the first branch in this fwd step */
1741         t->uac[first_branch].flags |= TM_UAC_FLAG_FB;
1742
1743         ser_error=0; /* clear branch adding errors */
1744         /* send them out now */
1745         success_branch=0;
1746         lock_replies= ! ((is_route_type(FAILURE_ROUTE)) && (t==get_t()));
1747         for (i=first_branch; i<t->nr_of_outgoings; i++) {
1748                 if (added_branches & (1<<i)) {
1749
1750                         branch_ret=t_send_branch(t, i, p_msg , proxy, lock_replies);
1751                         if (branch_ret>=0){ /* some kind of success */
1752                                 if (branch_ret==i) { /* success */
1753                                         success_branch++;
1754                                         if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_OUT)))
1755                                                 run_trans_callbacks_with_buf( TMCB_REQUEST_OUT,
1756                                                                 &t->uac[i].request,
1757                                                                 p_msg, 0, -p_msg->REQ_METHOD);
1758                                 }
1759                                 else /* new branch added */
1760                                         added_branches |= 1<<branch_ret;
1761                         }
1762                 }
1763         }
1764         if (success_branch<=0) {
1765                 /* return always E_SEND for now
1766                  * (the real reason could be: denied by onsend routes, blacklisted,
1767                  *  send failed or any of the errors listed before + dns failed
1768                  *  when attempting dns failover) */
1769                 ser_error=E_SEND;
1770                 /* else return the last error (?) */
1771                 /* the caller should take care and delete the transaction */
1772                 return -1;
1773         }
1774         ser_error=0; /* clear branch send errors, we have overall success */
1775         set_kr(REQ_FWDED);
1776         return 1;
1777
1778 canceled:
1779         LM_DBG("no forwarding on a canceled transaction\n");
1780         /* reset processed branches */
1781         clear_branches();
1782         /* restore backup flags from initial env */
1783         setbflagsval(0, backup_bflags);
1784         /* update message flags, if changed in branch route */
1785         t->uas.request->flags = p_msg->flags;
1786         ser_error=E_CANCELED;
1787         return -1;
1788 }
1789
1790
1791
1792 /* cancel handling/forwarding function
1793  * CANCELs with no matching transaction are handled according to
1794  * the unmatched_cancel config var: they are either forwarded statefully,
1795  * statelessly or dropped.
1796  * function returns:
1797  *       1 - forward successful
1798  *       0 - error, but do not reply
1799  *      <0 - error during forward
1800  * it also sets *tran if a transaction was created
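 * (the stateful/stateless/drop modes correspond to the UM_CANCEL_STATEFULL,
 * UM_CANCEL_DROP and UM_CANCEL_STATELESS cases tested below; the script-level
 * values come from the unmatched_cancel module parameter)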
1801  */
1802 int t_forward_cancel(struct sip_msg* p_msg , struct proxy_l * proxy, int proto,
1803                 struct cell** tran)
1804 {
1805         struct cell* t_invite;
1806         struct cell* t;
1807         int ret;
1808         int new_tran;
1809         struct dest_info dst;
1810         str host;
1811         unsigned short port;
1812         short comp;
1813
1814         t=0;
1815         /* handle cancels for which no transaction was created yet */
1816         if (cfg_get(tm, tm_cfg, unmatched_cancel)==UM_CANCEL_STATEFULL){
1817                 /* create cancel transaction */
1818                 new_tran=t_newtran(p_msg);
1819                 if (new_tran<=0 && new_tran!=E_SCRIPT){
1820                         if (new_tran==0)
1821                                 /* retransmission => do nothing */
1822                                 ret=1;
1823                         else
1824                                 /* some error => return it or DROP */
1825                                 ret=(ser_error==E_BAD_VIA && reply_to_via) ? 0: new_tran;
1826                         goto end;
1827                 }
1828                 t=get_t();
1829                 ret=t_forward_nonack(t, p_msg, proxy, proto);
1830                 goto end;
1831         }
1832
1833         t_invite=t_lookupOriginalT(  p_msg );
1834         if (t_invite!=T_NULL_CELL) {
1835                 /* create cancel transaction */
1836                 new_tran=t_newtran(p_msg);
1837                 if (new_tran<=0 && new_tran!=E_SCRIPT){
1838                         if (new_tran==0)
1839                                 /* retransmission => do nothing */
1840                                 ret=1;
1841                         else
1842                                 /* some error => return it or DROP */
1843                                 ret=(ser_error==E_BAD_VIA && reply_to_via) ? 0: new_tran;
1844                         UNREF(t_invite);
1845                         goto end;
1846                 }
1847                 t=get_t();
1848                 e2e_cancel( p_msg, t, t_invite );
1849                 UNREF(t_invite);
1850                 ret=1;
1851                 goto end;
1852         }else /* no corresponding INVITE transaction */
1853                 if (cfg_get(tm, tm_cfg, unmatched_cancel)==UM_CANCEL_DROP){
1854                         LM_DBG("non matching cancel dropped\n");
1855                         ret=1; /* do nothing -> drop */
1856                         goto end;
1857                 }else{
1858                         /* UM_CANCEL_STATELESS -> stateless forward */
1859                         LM_DBG("forwarding CANCEL statelessly\n");
1860                         if (proxy==0) {
1861                                 init_dest_info(&dst);
1862                                 dst.proto=proto;
1863                                 if (get_uri_send_info(GET_NEXT_HOP(p_msg), &host,
1864                                                         &port, &dst.proto, &comp)!=0){
1865                                         ret=E_BAD_ADDRESS;
1866                                         goto end;
1867                                 }
1868 #ifdef USE_COMP
1869                                 dst.comp=comp;
1870 #endif
1871                                 /* dst->send_sock not set, but forward_request
1872                                  * will take care of it */
1873                                 ret=forward_request(p_msg, &host, port, &dst);
1874                                 goto end;
1875                         } else {
1876                                 init_dest_info(&dst);
1877                                 dst.proto=get_proto(proto, proxy->proto);
1878                                 proxy2su(&dst.to, proxy);
1879                                 /* dst->send_sock not set, but forward_request
1880                                  * will take care of it */
1881                                 ret=forward_request( p_msg , 0, 0, &dst) ;
1882                                 goto end;
1883                         }
1884                 }
1885 end:
1886         if (tran)
1887                 *tran=t;
1888         return ret;
1889 }
1890
1891 /* Relays a CANCEL request if a corresponding INVITE transaction
1892  * can be found. The function is supposed to be used at the very
1893  * beginning of the script, with the reparse_invite=1 module parameter.
1894  *
1895  * return value:
1896  *    0: the CANCEL was successfully relayed
1897  *       (or an error occurred but no reply can be sent) => DROP
1898  *    1: no corresponding INVITE transaction exists
1899  *   <0: a corresponding INVITE transaction exists but an error occurred
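 *
 * Usage sketch (illustrative kamailio.cfg snippet; see the tm module README
 * for the exact recommended form):
 *   modparam("tm", "reparse_invite", 1)
 *   ...
 *   if (method == "CANCEL") {
 *       if (!t_relay_cancel()) exit;  # error; a relayed CANCEL drops implicitly
 *       # returned true -> no matching INVITE transaction, continue routing
 *   }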
1900  */
1901 int t_relay_cancel(struct sip_msg* p_msg)
1902 {
1903         struct cell* t_invite;
1904         struct cell* t;
1905         int ret;
1906         int new_tran;
1907
1908         t_invite=t_lookupOriginalT(  p_msg );
1909         if (t_invite!=T_NULL_CELL) {
1910                 /* create cancel transaction */
1911                 new_tran=t_newtran(p_msg);
1912                 if (new_tran<=0 && new_tran!=E_SCRIPT){
1913                         if (new_tran==0)
1914                         /* retransmission => DROP, t_newtran() takes care of it */
1915                                 ret=0;
1916                         else
1917                                 /* some error => return it or DROP */
1918                                 ret=(ser_error==E_BAD_VIA && reply_to_via) ? 0: new_tran;
1919                         UNREF(t_invite);
1920                         goto end;
1921                 }
1922                 t=get_t();
1923                 e2e_cancel( p_msg, t, t_invite );
1924                 UNREF(t_invite);
1925                 /* return 0 to stop the script processing */
1926                 ret=0;
1927                 goto end;
1928
1929         } else {
1930                 /* no corresponding INVITE transaction found */
1931                 ret=1;
1932         }
1933 end:
1934         return ret;
1935 }
1936
1937 /* WARNING: doesn't work from failure route (deadlock, uses t_relay_to which
1938  *  is failure route unsafe) */
1939 int t_replicate(struct sip_msg *p_msg,  struct proxy_l *proxy, int proto )
1940 {
1941         /* this is a quite tricky hack -- we just take the message
1942          * as is, including Route-s, Record-route-s and Vias,
1943          * forward it downstream and prevent the received replies
1944          * from being relayed by setting the replication/local_trans bit;
1945          *
1946          * nevertheless, it should be good enough for the primary
1947          * customer of this function, REGISTER replication.
1948          * If we later want to do it thoroughly, we need to
1949          * introduce delete lumps for all the header fields above.
1950          */
1951         return t_relay_to(p_msg, proxy, proto, 1 /* replicate */);
1952 }
1953
1954 /* fixup function for reparse_on_dns_failover modparam */
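/* e.g. modparam("tm", "reparse_on_dns_failover", 0) in kamailio.cfg turns the
 * reparsing off (illustrative value; see the tm module README for details) */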
1955 int reparse_on_dns_failover_fixup(void *handle, str *gname, str *name, void **val)
1956 {
1957 #ifdef USE_DNS_FAILOVER
1958         if ((int)(long)(*val) && mhomed) {
1959                 LM_WARN("reparse_on_dns_failover is enabled on"
1960                                 " a multihomed host -- check the readme of tm module!\n");
1961         }
1962 #endif
1963         return 0;
1964 }