/*
 * Copyright (C) 2001-2003 FhG Fokus
 *
 * This file is part of Kamailio, a free SIP server.
 *
 * Kamailio is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Kamailio is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "../../core/dprint.h"
#include "../../core/config.h"
#include "../../core/parser/parser_f.h"
#include "../../core/ut.h"
#include "../../core/timer.h"
#include "../../core/hash_func.h"
#include "../../core/globals.h"
#include "../../core/cfg_core.h"
#include "../../core/mem/mem.h"
#include "../../core/dset.h"
#include "../../core/action.h"
#include "../../core/data_lump.h"
#include "../../core/onsend.h"
#include "../../core/compiler_opt.h"
#include "../../core/route.h"
#include "../../core/script_cb.h"
#include "t_msgbuilder.h"
#include "../../core/fix_lumps.h"
#ifdef USE_DNS_FAILOVER
#include "../../core/dns_cache.h"
#include "../../core/cfg_core.h" /* cfg_get(core, core_cfg, use_dns_failover) */
#include "../../core/msg_translator.h"
#include "lw_parser.h"
#endif
#ifdef USE_DST_BLACKLIST
#include "../../core/dst_blacklist.h"
#endif
#include "../../core/atomic_ops.h" /* membar_depends() */
#include "../../core/kemi.h"
extern int tm_failure_exec_mode;
extern int tm_dns_reuse_rcv_socket;

static int goto_on_branch = 0, branch_route = 0;

void t_on_branch( unsigned int go_to )
{
	struct cell *t = get_t();

	/* in REPLY_ROUTE and FAILURE_ROUTE T will be set to the current
	 * transaction; in REQUEST_ROUTE T will be set only if the transaction
	 * was already created; if not -> use the static variable */
	if (!t || t==T_UNDEFINED ) {
		goto_on_branch=go_to;
	} else {
		t->on_branch=go_to;
	}
}

unsigned int get_on_branch(void)
{
	return goto_on_branch;
}

void set_branch_route( unsigned int on_branch)
{
	branch_route = on_branch;
}
/** prepares a new branch "buffer".
 * Creates the buffer used in the branch rb, fills everything needed (
 * the sending information: t->uac[branch].request.dst, branch buffer, uri,
 * path vector a.s.o.) and runs the on_branch route.
 * t->uac[branch].request.dst will be filled, if next_hop != 0, with the
 * result of the DNS resolution (next_hop, fproto and fsocket).
 * If next_hop is 0 all the dst members except the send_flags are read-only
 * (send_flags is updated) and are supposed to be pre-filled.
 *
 * @param t - transaction
 * @param i_req - corresponding sip_msg, must be non-null, flags might
 *                be modified (on_branch route).
 * @param branch - branch no
 * @param uri - uri used for the branch (must be non-null).
 * @param path - path vector (list of route-like destinations in text form,
 *               e.g.: "<sip:1.2.3.4;lr>, <sip:5.6.7.8;lr>")
 * @param next_hop - uri of the next hop. If not 0 it will be used
 *                   for DNS resolution and the branch request.dst structure
 *                   will be filled. If 0 the branch must already have
 *                   a pre-filled valid request.dst.
 * @param fsocket - forced send socket for forwarding.
 * @param snd_flags - special flags for sending (see SND_F_* / snd_flags_t).
 * @param fproto - forced proto for forwarding. Used only if next_hop!=0.
 * @param flags - 0, UAC_DNS_FAILOVER_F or UAC_SKIP_BR_DST_F for now.
 * @param instance - sip instance value for the branch (can be 0).
 * @param ruid - location record internal unique id (can be 0).
 * @param location_ua - location user-agent value (can be 0).
 *
 * @return 0 on success, < 0 (ser_error E***) on failure.
 */
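/* Hypothetical invocations (cf. the real calls in add_uac() and
 * e2e_cancel_branch() below):
 *   - dns-resolved branch:
 *       prepare_new_uac(t, msg, b, &uri, &path, &next_hop, fsock,
 *                       snd_flags, proto, UAC_DNS_FAILOVER_F,
 *                       NULL, NULL, NULL);
 *   - pre-filled request.dst (next_hop==0):
 *       prepare_new_uac(t, msg, b, &uri, &path, 0, 0, snd_flags,
 *                       PROTO_NONE, 0, NULL, NULL, NULL);
 */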
static int prepare_new_uac( struct cell *t, struct sip_msg *i_req,
									int branch, str *uri, str* path,
									str* next_hop,
									struct socket_info* fsocket,
									snd_flags_t snd_flags,
									int fproto, int flags,
									str *instance, str *ruid,
									str *location_ua)
{
	char *shbuf;
	struct lump* add_rm_backup, *body_lumps_backup;
	struct sip_uri parsed_uri_bak;
	int ret;
	unsigned int len;
	int parsed_uri_ok_bak, free_new_uri;
	str msg_uri_bak;
	str dst_uri_bak;
	int dst_uri_backed_up;
	str path_bak, instance_bak, ruid_bak, ua_bak;
	int free_path, free_instance, free_ruid, free_ua;
	int backup_route_type;
	int test_dst;
	snd_flags_t fwd_snd_flags_bak;
	snd_flags_t rpl_snd_flags_bak;
	struct socket_info *force_send_socket_bak;
	struct dest_info *dst;
	struct run_act_ctx ctx;
	struct run_act_ctx *bctx;
	sr_kemi_eng_t *keng;

	ret=-1;
	shbuf=0;
	msg_uri_bak.s=0; /* kill warnings */
	msg_uri_bak.len=0;
	parsed_uri_ok_bak=0;
	free_new_uri=0;
	dst_uri_backed_up=0;
	path_bak.s=0; path_bak.len=0;
	free_path=0;
	instance_bak.s=0; instance_bak.len=0;
	free_instance=0;
	ruid_bak.s=0; ruid_bak.len=0;
	free_ruid=0;
	ua_bak.s=0; ua_bak.len=0;
	free_ua=0;
	dst=&t->uac[branch].request.dst;

	/* ... we calculate branch ... */
	if (!t_calc_branch(t, branch, i_req->add_to_branch_s,
			&i_req->add_to_branch_len ))
	{
		LM_ERR("branch computation failed\n");
		return E_UNSPEC;
	}

	/* TODO: clone lumps only if needed */
	/* lumps can be set outside of the lock, make sure that we read
	 * the up-to-date values */
	membar_depends();
	add_rm_backup = i_req->add_rm;
	body_lumps_backup = i_req->body_lumps;
	if (unlikely(i_req->add_rm)){
		i_req->add_rm = dup_lump_list(i_req->add_rm);
		if (unlikely(i_req->add_rm==0)){
			ret=E_OUT_OF_MEM;
			goto error04;
		}
	}
	if (unlikely(i_req->body_lumps)){
		i_req->body_lumps = dup_lump_list(i_req->body_lumps);
		if (unlikely(i_req->body_lumps==0)){
			ret=E_OUT_OF_MEM;
			goto error04;
		}
	}

	/* backup uri & path: we need to change them so that build_req...()
	 * will use uri & path and not the ones in the original msg (i_req)
	 * => we must back them up so that we can restore them to the original
	 * value after building the send buffer */
	msg_uri_bak=i_req->new_uri;
	parsed_uri_bak=i_req->parsed_uri;
	parsed_uri_ok_bak=i_req->parsed_uri_ok;
	path_bak=i_req->path_vec;
	instance_bak=i_req->instance;
	ruid_bak=i_req->ruid;
	ua_bak=i_req->location_ua;
	if (unlikely(branch_route || has_tran_tmcbs(t, TMCB_REQUEST_FWDED))){
		/* dup uris, path a.s.o. if we have a branch route or callback */
		/* ... set ruri ... */
		/* if uri points to new_uri, it needs to be "fixed" so that we can
		 * change msg->new_uri */
		if (uri==&i_req->new_uri)
			uri=&msg_uri_bak;
		i_req->parsed_uri_ok=0;
		i_req->new_uri.s=pkg_malloc(uri->len);
		if (unlikely(i_req->new_uri.s==0)){
			ret=E_OUT_OF_MEM;
			goto error01;
		}
		free_new_uri=1;
		memcpy(i_req->new_uri.s, uri->s, uri->len);
		i_req->new_uri.len=uri->len;

		/* update path_vec */
		/* if path points to msg path_vec, it needs to be "fixed" so that we
		 * can change/update msg->path_vec */
		if (path==&i_req->path_vec)
			path=&path_bak;
		/* zero it first so that set_path_vector will work */
		i_req->path_vec.s=0;
		i_req->path_vec.len=0;
		if (unlikely(set_path_vector(i_req, path)<0)){
			ret=E_OUT_OF_MEM;
			goto error01;
		}
		free_path=1;

		/* update instance */
		/* if instance points to msg instance, it needs to be "fixed" so
		 * that we can change/update msg->instance */
		if (instance==&i_req->instance)
			instance=&instance_bak;
		/* zero it first so that set_instance will work */
		i_req->instance.s=0;
		i_req->instance.len=0;
		if (unlikely(instance)){
			if (unlikely(set_instance(i_req, instance)<0)){
				ret=E_OUT_OF_MEM;
				goto error01;
			}
			free_instance=1;
		}

		/* update ruid */
		/* if ruid points to msg ruid, it needs to be "fixed" so that we
		 * can change/update msg->ruid */
		if (ruid==&i_req->ruid)
			ruid=&ruid_bak;
		/* zero it first so that set_ruid will work */
		i_req->ruid.s=0;
		i_req->ruid.len=0;
		if (unlikely(ruid)){
			if (unlikely(set_ruid(i_req, ruid)<0)){
				ret=E_OUT_OF_MEM;
				goto error01;
			}
			free_ruid=1;
		}

		/* update location_ua */
		/* if location_ua points to msg location_ua, it needs to be "fixed"
		 * so that we can change/update msg->location_ua */
		if (location_ua==&i_req->location_ua)
			location_ua=&ua_bak;
		/* zero it first so that set_ua will work */
		i_req->location_ua.s=0;
		i_req->location_ua.len=0;
		if (unlikely(location_ua)){
			if (unlikely(set_ua(i_req, location_ua)<0)){
				ret=E_OUT_OF_MEM;
				goto error01;
			}
			free_ua=1;
		}
		/* backup dst uri & zero it */
		dst_uri_bak=i_req->dst_uri;
		dst_uri_backed_up=1;
		/* if next_hop points to dst_uri, it needs to be "fixed" so that we
		 * can change msg->dst_uri */
		if (next_hop==&i_req->dst_uri)
			next_hop=&dst_uri_bak;
		/* zero it first so that set_dst_uri will work */
		i_req->dst_uri.s=0;
		i_req->dst_uri.len=0;
		if (likely(next_hop)){
			if(unlikely((flags & UAC_SKIP_BR_DST_F)==0)){
				/* set dst uri to next_hop for the on_branch route */
				if (unlikely(set_dst_uri(i_req, next_hop)<0)){
					ret=E_OUT_OF_MEM;
					goto error01;
				}
			}
		}
		if (likely(branch_route)) {
			/* run branch_route actions if provided */
			backup_route_type = get_route_type();
			set_route_type(BRANCH_ROUTE);
			tm_ctx_set_branch_index(branch);
			/* no need to backup/set avp lists: the on_branch route is run
			 * only in the main route context (e.g. t_relay() in the main
			 * route) or in the failure route context (e.g. append_branch &
			 * t_relay()) and in both cases the avp lists are properly set.
			 * Note: the branch route is not run on delayed dns failover
			 * (for that to work one would have to set branch_route prior to
			 * calling add_uac(...) and then reset it afterwards).
			 */
			if (exec_pre_script_cb(i_req, BRANCH_CB_TYPE)>0) {
				/* backup ireq msg send flags and force_send_socket */
				fwd_snd_flags_bak=i_req->fwd_send_flags;
				rpl_snd_flags_bak=i_req->rpl_send_flags;
				force_send_socket_bak=i_req->force_send_socket;
				/* set the new values */
				i_req->fwd_send_flags=snd_flags; /* initial value */
				set_force_socket(i_req, fsocket);
				keng = sr_kemi_eng_get();
				if(unlikely(keng!=NULL)) {
					bctx = sr_kemi_act_ctx_get();
					init_run_actions_ctx(&ctx);
					sr_kemi_act_ctx_set(&ctx);
					if(keng->froute(i_req, BRANCH_ROUTE,
							sr_kemi_cbname_lookup_idx(branch_route),
							NULL)<0) {
						LM_ERR("error running branch route kemi callback\n");
					}
					sr_kemi_act_ctx_set(bctx);
				} else {
					if (run_top_route(branch_rt.rlist[branch_route],
								i_req, &ctx) < 0) {
						LM_DBG("negative return code in run_top_route\n");
					}
				}
				/* update dst send_flags and send socket */
				snd_flags=i_req->fwd_send_flags;
				fsocket=i_req->force_send_socket;
				/* restore ireq_msg force_send_socket & flags */
				set_force_socket(i_req, force_send_socket_bak);
				i_req->fwd_send_flags=fwd_snd_flags_bak;
				i_req->rpl_send_flags=rpl_snd_flags_bak;
				exec_post_script_cb(i_req, BRANCH_CB_TYPE);
				/* if DROP was called in cfg, don't forward, jump to end */
				if (unlikely(ctx.run_flags&DROP_R_F))
				{
					tm_ctx_set_branch_index(T_BR_UNDEFINED);
					set_route_type(backup_route_type);
					/* triggered by drop in CFG */
					ret=E_CFG;
					goto error01;
				}
			}
			tm_ctx_set_branch_index(T_BR_UNDEFINED);
			set_route_type(backup_route_type);
		}
		/* run the specific callbacks for this transaction */
		if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_FWDED)))
			run_trans_callbacks( TMCB_REQUEST_FWDED , t, i_req, 0,
									-i_req->REQ_METHOD);

		if (likely( !(flags & UAC_DNS_FAILOVER_F) && i_req->dst_uri.s &&
					i_req->dst_uri.len)){
			/* no dns failover and non-empty dst_uri => use it as dst
			 * (on dns failover dns_h will be non-empty => next_hop will be
			 * ignored) */
			next_hop=&i_req->dst_uri;
		}
		/* no path vector initially, but now it is set after branch route
		 * and callbacks execution */
		if(i_req->path_vec.s!=0 && free_path==0)
			free_path=1;
	}else{
		/* no branch route and no TMCB_REQUEST_FWDED callback => set
		 * msg uri and path to the new values (if needed) */
		if (unlikely((uri->s!=i_req->new_uri.s || uri->len!=i_req->new_uri.len)
					&& (i_req->new_uri.s!=0 ||
						uri->s!=i_req->first_line.u.request.uri.s ||
						uri->len!=i_req->first_line.u.request.uri.len) )){
			/* uri is different from i_req uri => replace i_req uri and force
			 * uri reparsing */
			i_req->new_uri=*uri;
			i_req->parsed_uri_ok=0;
		}
		if (unlikely(path && (i_req->path_vec.s!=path->s ||
							i_req->path_vec.len!=path->len))){
			i_req->path_vec=*path;
		}else if (unlikely(path==0 && i_req->path_vec.len!=0)){
			i_req->path_vec.s=0;
			i_req->path_vec.len=0;
		}
		if (unlikely(instance && (i_req->instance.s!=instance->s ||
							i_req->instance.len!=instance->len))){
			i_req->instance=*instance;
		}else if (unlikely(instance==0 && i_req->instance.len!=0)){
			i_req->instance.s=0;
			i_req->instance.len=0;
		}
		if (unlikely(ruid && (i_req->ruid.s!=ruid->s ||
							i_req->ruid.len!=ruid->len))){
			i_req->ruid=*ruid;
		}else if (unlikely(ruid==0 && i_req->ruid.len!=0)){
			i_req->ruid.s=0;
			i_req->ruid.len=0;
		}
		if (unlikely(location_ua && (i_req->location_ua.s!=location_ua->s ||
							i_req->location_ua.len!=location_ua->len))){
			i_req->location_ua=*location_ua;
		}else if (unlikely(location_ua==0 && i_req->location_ua.len!=0)){
			i_req->location_ua.s=0;
			i_req->location_ua.len=0;
		}
	}
	if (likely(next_hop!=0 || (flags & UAC_DNS_FAILOVER_F))){
		/* next_hop present => use it for dns resolution */
#ifdef USE_DNS_FAILOVER
		test_dst = (uri2dst2(&t->uac[branch].dns_h, dst, fsocket, snd_flags,
							next_hop?next_hop:uri, fproto) == 0);
#else
		/* dst filled from the uri & request (send_socket) */
		test_dst = (uri2dst2(dst, fsocket, snd_flags,
							next_hop?next_hop:uri, fproto)==0);
#endif
		if (unlikely(test_dst)){
			ret=E_BAD_ADDRESS;
			goto error01;
		}
	}
	/* else next_hop==0 =>
	 * no dst_uri / empty dst_uri and initial next_hop==0 =>
	 * dst is pre-filled with a valid dst => use the pre-filled dst */

	/* Set on_reply and on_negative handlers for this branch to the handlers
	 * in the transaction */
	t->uac[branch].on_reply = t->on_reply;
	t->uac[branch].on_failure = t->on_failure;
	t->uac[branch].on_branch_failure = t->on_branch_failure;

	/* check if send_sock is ok */
	if (t->uac[branch].request.dst.send_sock==0) {
		LM_ERR("can't fwd to af %d, proto %d"
				" (no corresponding listening socket)\n",
				dst->to.s.sa_family, dst->proto );
		ret=E_NO_SOCKET;
		goto error01;
	}

	/* ... and build it now */
	shbuf=build_req_buf_from_sip_req( i_req, &len, dst, BUILD_IN_SHM);
	if (!shbuf) {
		LM_ERR("could not build request\n");
		ret=E_OUT_OF_MEM;
		goto error01;
	}
	/* sanity check */
	if (shbuf[len-1]==0) {
		LM_ERR("sanity check failed\n");
		abort();
	}
	/* things went well, move ahead and install new buffer! */
	t->uac[branch].request.buffer=shbuf;
	t->uac[branch].request.buffer_len=len;
	t->uac[branch].uri.s=t->uac[branch].request.buffer+
							i_req->first_line.u.request.method.len+1;
	t->uac[branch].uri.len=GET_RURI(i_req)->len;
	if (unlikely(i_req->path_vec.s && i_req->path_vec.len)){
		t->uac[branch].path.s=shm_malloc(i_req->path_vec.len+1);
		if (unlikely(t->uac[branch].path.s==0)) {
			shm_free(shbuf);
			t->uac[branch].request.buffer=0;
			t->uac[branch].request.buffer_len=0;
			t->uac[branch].uri.s=0;
			t->uac[branch].uri.len=0;
			ret=E_OUT_OF_MEM;
			goto error01;
		}
		t->uac[branch].path.len=i_req->path_vec.len;
		t->uac[branch].path.s[i_req->path_vec.len]=0;
		memcpy( t->uac[branch].path.s, i_req->path_vec.s,
				i_req->path_vec.len);
	}
	if (unlikely(i_req->instance.s && i_req->instance.len)){
		t->uac[branch].instance.s=shm_malloc(i_req->instance.len+1);
		if (unlikely(t->uac[branch].instance.s==0)) {
			shm_free(shbuf);
			t->uac[branch].request.buffer=0;
			t->uac[branch].request.buffer_len=0;
			t->uac[branch].uri.s=0;
			t->uac[branch].uri.len=0;
			ret=E_OUT_OF_MEM;
			goto error01;
		}
		t->uac[branch].instance.len=i_req->instance.len;
		t->uac[branch].instance.s[i_req->instance.len]=0;
		memcpy( t->uac[branch].instance.s, i_req->instance.s,
				i_req->instance.len);
	}
	if (unlikely(i_req->ruid.s && i_req->ruid.len)){
		t->uac[branch].ruid.s=shm_malloc(i_req->ruid.len+1);
		if (unlikely(t->uac[branch].ruid.s==0)) {
			shm_free(shbuf);
			t->uac[branch].request.buffer=0;
			t->uac[branch].request.buffer_len=0;
			t->uac[branch].uri.s=0;
			t->uac[branch].uri.len=0;
			ret=E_OUT_OF_MEM;
			goto error01;
		}
		t->uac[branch].ruid.len=i_req->ruid.len;
		t->uac[branch].ruid.s[i_req->ruid.len]=0;
		memcpy( t->uac[branch].ruid.s, i_req->ruid.s, i_req->ruid.len);
	}
	if (unlikely(i_req->location_ua.s && i_req->location_ua.len)){
		t->uac[branch].location_ua.s=shm_malloc(i_req->location_ua.len+1);
		if (unlikely(t->uac[branch].location_ua.s==0)) {
			shm_free(shbuf);
			t->uac[branch].request.buffer=0;
			t->uac[branch].request.buffer_len=0;
			t->uac[branch].uri.s=0;
			t->uac[branch].uri.len=0;
			ret=E_OUT_OF_MEM;
			goto error01;
		}
		t->uac[branch].location_ua.len=i_req->location_ua.len;
		t->uac[branch].location_ua.s[i_req->location_ua.len]=0;
		memcpy( t->uac[branch].location_ua.s, i_req->location_ua.s,
				i_req->location_ua.len);
	}
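	/* remember how many Record-Route headers were added to this request:
	 * one applied RR lump => single Record-Route (TM_UAC_FLAG_RR), two
	 * lumps => double Record-Route (e.g. when bridging between different
	 * sockets/transports); TM_UAC_FLAG_R2 marks the second header */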
	len = count_applied_lumps(i_req->add_rm, HDR_RECORDROUTE_T);
	if(len==1)
		t->uac[branch].flags = TM_UAC_FLAG_RR;
	else if(len==2)
		t->uac[branch].flags = TM_UAC_FLAG_RR|TM_UAC_FLAG_R2;

	/* success */
	ret=0;
error01:
	/* restore the new_uri & path from the backup */
	if (unlikely(free_new_uri && i_req->new_uri.s)){
		pkg_free(i_req->new_uri.s);
	}
	if (unlikely(free_path)){
		reset_path_vector(i_req);
	}
	if (unlikely(free_instance)){
		reset_instance(i_req);
	}
	if (unlikely(free_ruid)){
		reset_ruid(i_req);
	}
	if (unlikely(free_ua)){
		reset_ua(i_req);
	}
	if (dst_uri_backed_up){
		reset_dst_uri(i_req); /* free dst_uri */
		i_req->dst_uri=dst_uri_bak;
	}
	/* restore original new_uri and path values */
	i_req->new_uri=msg_uri_bak;
	i_req->parsed_uri=parsed_uri_bak;
	i_req->parsed_uri_ok=parsed_uri_ok_bak;
	i_req->path_vec=path_bak;
	i_req->instance=instance_bak;
	i_req->ruid=ruid_bak;
	i_req->location_ua=ua_bak;

	/* Delete the duplicated lump lists, this will also delete
	 * all lumps created here, such as lumps created in per-branch
	 * routing sections, Via, and Content-Length headers created in
	 * build_req_buf_from_sip_req */
error04:
	free_duped_lump_list(i_req->add_rm);
	free_duped_lump_list(i_req->body_lumps);
	/* Restore the lists from backups */
	i_req->add_rm = add_rm_backup;
	i_req->body_lumps = body_lumps_backup;

	return ret;
}
#ifdef USE_DNS_FAILOVER
/* Similar to print_uac_request(), but this function uses the outgoing
 * message buffer of the failed branch to construct the new message in case
 * of DNS failover.
 *
 * WARNING: only the first VIA header is replaced in the buffer, the rest
 * of the message is untouched, thus, the send socket is corrected only in
 * the VIA HF.
 */
static char *print_uac_request_from_buf( struct cell *t, struct sip_msg *i_req,
		int branch, str *uri, unsigned int *len, struct dest_info* dst,
		char *buf, short buf_len)
{
	char *shbuf;
	char *via, *old_via_begin, *old_via_end;
	unsigned int via_len;
	str branch_str;

	via=0;

	/* ... we calculate branch ... */
	if (!t_calc_branch(t, branch, i_req->add_to_branch_s,
			&i_req->add_to_branch_len ))
	{
		LM_ERR("branch computation failed\n");
		goto error;
	}
	branch_str.s = i_req->add_to_branch_s;
	branch_str.len = i_req->add_to_branch_len;

	/* find the beginning of the first via header in the buffer */
	old_via_begin = lw_find_via(buf, buf+buf_len);
	if (!old_via_begin) {
		LM_ERR("beginning of via header not found\n");
		goto error;
	}
	/* find the end of the first via header in the buffer */
	old_via_end = lw_next_line(old_via_begin, buf+buf_len);
	if (!old_via_end) {
		LM_ERR("end of via header not found\n");
		goto error;
	}

	/* create the new VIA HF */
	via = create_via_hf(&via_len, i_req, dst, &branch_str);
	if (!via) {
		LM_ERR("via building failed\n");
		goto error;
	}

	/* allocate memory for the new buffer */
	*len = buf_len + via_len - (old_via_end - old_via_begin);
	shbuf=(char *)shm_malloc(*len);
	if (!shbuf) {
		ser_error=E_OUT_OF_MEM;
		LM_ERR("no shmem\n");
		goto error;
	}

	/* construct the new buffer */
	memcpy(shbuf, buf, old_via_begin-buf);
	memcpy(shbuf+(old_via_begin-buf), via, via_len);
	memcpy(shbuf+(old_via_begin-buf)+via_len, old_via_end,
			(buf+buf_len)-old_via_end);
	pkg_free(via);

	/* sanity check */
	if (shbuf[*len-1]==0) {
		LM_ERR("sanity check failed\n");
		abort();
	}
	return shbuf;

error:
	if (via) pkg_free(via);
	return 0;
}
#endif /* USE_DNS_FAILOVER */
/* introduce a new uac, which is blind -- it only creates the
 * data structures and starts FR timer, but that's it; it does
 * not print messages and does not send anything anywhere; that is good
 * for FIFO apps -- the transaction must look operational
 * and FR must be ticking, whereas the request is "forwarded"
 * using a non-SIP way and will be replied the same way
 */
int add_blind_uac( /*struct cell *t*/ )
{
	unsigned short branch;
	struct cell *t;

	t=get_t();
	if (t==T_UNDEFINED || !t ) {
		LM_ERR("no transaction context\n");
		return -1;
	}

	branch=t->nr_of_outgoings;
	if (branch==sr_dst_max_branches) {
		LM_ERR("maximum number of branches exceeded\n");
		return -1;
	}
	/* make sure it will be replied */
	t->flags |= T_NOISY_CTIMER_FLAG;
	membar_write(); /* to allow lockless prepare_to_cancel() we want to be
					 * sure all the writes finished before updating the
					 * branch number */

	t->uac[branch].flags |= TM_UAC_FLAG_BLIND;
	t->nr_of_outgoings=(branch+1);
	t->async_backup.blind_uac = branch;
	/* ^^^ whenever we create a blind UAC, let's save the current branch;
	 * this is used in async tm processing specifically to be able to route
	 * replies that were possibly in response to a request forwarded on this
	 * blind UAC... we still want replies to be processed as if it were a
	 * normal UAC */

	/* start FR timer -- protocol set by default to PROTO_NONE,
	 * which means the retransmission timer will not be started
	 */
	if (start_retr(&t->uac[branch].request)!=0)
		LM_CRIT("start retr failed for %p\n", &t->uac[branch].request);
	/* we are on a timer -- don't need to put on wait on script clean-up */
	return 1; /* success */
}
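/* Note: blind UACs are used e.g. by the tm async/suspend support (see
 * t->async_backup.blind_uac above): the transaction stays alive with its
 * FR timer ticking while the request is handled outside of the SIP
 * forwarding path. */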
/** introduce a new uac to the transaction.
 * It doesn't send a message yet -- a reply to it might interfere with the
 * process of adding multiple branches. On error returns <0 and sets
 * ser_error.
 *
 * @param t - transaction
 * @param request - corresponding sip_msg, must be non-null, flags might be
 *                  modified (on_branch route).
 * @param uri - uri used for the branch (must be non-null).
 * @param next_hop - next_hop in sip uri format. If null and proxy is null
 *                   too, the uri will be used.
 * @param path - path vector (list of route-like destinations in sip
 *               uri format, e.g.: "<sip:1.2.3.4;lr>, <sip:5.6.7.8;lr>").
 * @param proxy - proxy structure. If non-null it takes precedence over
 *                next_hop/uri and it will be used for forwarding.
 * @param fsocket - forced forward send socket (can be 0).
 * @param snd_flags - special send flags (see SND_F_* / snd_flags_t).
 * @param proto - forced protocol for forwarding (overrides the protocol
 *                in next_hop/uri or proxy if != PROTO_NONE).
 * @param flags - special flags passed to prepare_new_uac().
 * @see prepare_new_uac().
 * @returns branch id (>=0) or error (<0)
 */
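/* Hypothetical call (cf. the real one in t_forward_nonack() below):
 *   branch=add_uac(t, msg, GET_RURI(msg), GET_NEXT_HOP(msg),
 *                  &msg->path_vec, proxy, msg->force_send_socket,
 *                  msg->fwd_send_flags, proto, 0,
 *                  &msg->instance, &msg->ruid, &msg->location_ua);
 */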
int add_uac( struct cell *t, struct sip_msg *request, str *uri,
					str* next_hop, str* path, struct proxy_l *proxy,
					struct socket_info* fsocket, snd_flags_t snd_flags,
					int proto, int flags, str *instance, str *ruid,
					str *location_ua)
{
	int ret;
	unsigned short branch;

	branch=t->nr_of_outgoings;
	if (branch==sr_dst_max_branches) {
		LM_ERR("maximum number of branches exceeded\n");
		ret=ser_error=E_TOO_MANY_BRANCHES;
		goto error;
	}

	/* check existing buffer -- rewriting should never occur */
	if (t->uac[branch].request.buffer) {
		LM_CRIT("buffer rewrite attempt\n");
		ret=ser_error=E_BUG;
		goto error;
	}

	/* check DNS resolution */
	if (proxy){
		/* dst filled from the proxy */
		init_dest_info(&t->uac[branch].request.dst);
		t->uac[branch].request.dst.proto=get_proto(proto, proxy->proto);
		proxy2su(&t->uac[branch].request.dst.to, proxy);
		/* fill dst send_sock */
		t->uac[branch].request.dst.send_sock =
			get_send_socket( request, &t->uac[branch].request.dst.to,
								t->uac[branch].request.dst.proto);
		if (likely(request))
			t->uac[branch].request.dst.send_flags=request->fwd_send_flags;
		else
			SND_FLAGS_INIT(&t->uac[branch].request.dst.send_flags);
		next_hop=0;
	}else{
		next_hop= next_hop?next_hop:uri;
	}

	/* now message printing starts ... */
	if (unlikely( (ret=prepare_new_uac(t, request, branch, uri, path,
							next_hop, fsocket, snd_flags,
							proto, flags, instance, ruid,
							location_ua)) < 0)){
		ser_error=ret;
		goto error;
	}
	getbflagsval(0, &t->uac[branch].branch_flags);
	membar_write(); /* to allow lockless ops (e.g. prepare_to_cancel()) we
					 * want to be sure everything above is fully written
					 * before updating branches no. */
	t->nr_of_outgoings=(branch+1);

	/* update stats */
	if (proxy){
		proxy_mark(proxy, 1);
	}
	return branch;

error:
	return ret;
}
#ifdef USE_DNS_FAILOVER
/* Similar to add_uac(), but this function uses the outgoing message buffer
 * of the failed branch to construct the new message in case of DNS failover.
 */
static int add_uac_from_buf( struct cell *t, struct sip_msg *request,
					str *uri, str *path,
					struct socket_info* fsocket,
					snd_flags_t send_flags,
					int proto,
					char *buf, short buf_len,
					str *instance, str *ruid,
					str *location_ua)
{
	int ret;
	unsigned short branch;
	char *shbuf;
	unsigned int len;

	branch=t->nr_of_outgoings;
	if (branch==sr_dst_max_branches) {
		LM_ERR("maximum number of branches exceeded\n");
		ret=ser_error=E_TOO_MANY_BRANCHES;
		goto error;
	}

	/* check existing buffer -- rewriting should never occur */
	if (t->uac[branch].request.buffer) {
		LM_CRIT("buffer rewrite attempt\n");
		ret=ser_error=E_BUG;
		goto error;
	}

	if (uri2dst2(&t->uac[branch].dns_h, &t->uac[branch].request.dst,
					fsocket, send_flags, uri, proto) == 0)
	{
		ret=ser_error=E_BAD_ADDRESS;
		goto error;
	}

	/* check if send_sock is ok */
	if (t->uac[branch].request.dst.send_sock==0) {
		LM_ERR("can't fwd to af %d, proto %d"
				" (no corresponding listening socket)\n",
				t->uac[branch].request.dst.to.s.sa_family,
				t->uac[branch].request.dst.proto );
		ret=ser_error=E_NO_SOCKET;
		goto error;
	}

	/* now message printing starts ... */
	shbuf=print_uac_request_from_buf( t, request, branch, uri,
							&len, &t->uac[branch].request.dst,
							buf, buf_len);
	if (!shbuf) {
		ret=ser_error=E_OUT_OF_MEM;
		goto error;
	}

	/* things went well, move ahead and install new buffer! */
	t->uac[branch].request.buffer=shbuf;
	t->uac[branch].request.buffer_len=len;
	t->uac[branch].uri.s=t->uac[branch].request.buffer+
							request->first_line.u.request.method.len+1;
	t->uac[branch].uri.len=uri->len;
	/* copy the path */
	if (unlikely(path && path->s)){
		t->uac[branch].path.s=shm_malloc(path->len+1);
		if (unlikely(t->uac[branch].path.s==0)) {
			shm_free(shbuf);
			t->uac[branch].request.buffer=0;
			t->uac[branch].request.buffer_len=0;
			t->uac[branch].uri.s=0;
			t->uac[branch].uri.len=0;
			ret=ser_error=E_OUT_OF_MEM;
			goto error;
		}
		t->uac[branch].path.len=path->len;
		t->uac[branch].path.s[path->len]=0;
		memcpy( t->uac[branch].path.s, path->s, path->len);
	}
	/* copy the instance */
	if (unlikely(instance && instance->s)){
		t->uac[branch].instance.s=shm_malloc(instance->len+1);
		if (unlikely(t->uac[branch].instance.s==0)) {
			shm_free(shbuf);
			t->uac[branch].request.buffer=0;
			t->uac[branch].request.buffer_len=0;
			t->uac[branch].uri.s=0;
			t->uac[branch].uri.len=0;
			ret=ser_error=E_OUT_OF_MEM;
			goto error;
		}
		t->uac[branch].instance.len=instance->len;
		t->uac[branch].instance.s[instance->len]=0;
		memcpy( t->uac[branch].instance.s, instance->s, instance->len);
	}
	/* copy the ruid */
	if (unlikely(ruid && ruid->s)){
		t->uac[branch].ruid.s=shm_malloc(ruid->len+1);
		if (unlikely(t->uac[branch].ruid.s==0)) {
			shm_free(shbuf);
			t->uac[branch].request.buffer=0;
			t->uac[branch].request.buffer_len=0;
			t->uac[branch].uri.s=0;
			t->uac[branch].uri.len=0;
			ret=ser_error=E_OUT_OF_MEM;
			goto error;
		}
		t->uac[branch].ruid.len=ruid->len;
		t->uac[branch].ruid.s[ruid->len]=0;
		memcpy( t->uac[branch].ruid.s, ruid->s, ruid->len);
	}
	/* copy the location_ua */
	if (unlikely(location_ua && location_ua->s)){
		t->uac[branch].location_ua.s=shm_malloc(location_ua->len+1);
		if (unlikely(t->uac[branch].location_ua.s==0)) {
			shm_free(shbuf);
			t->uac[branch].request.buffer=0;
			t->uac[branch].request.buffer_len=0;
			t->uac[branch].uri.s=0;
			t->uac[branch].uri.len=0;
			ret=ser_error=E_OUT_OF_MEM;
			goto error;
		}
		t->uac[branch].location_ua.len=location_ua->len;
		t->uac[branch].location_ua.s[location_ua->len]=0;
		memcpy( t->uac[branch].location_ua.s, location_ua->s,
				location_ua->len);
	}

	t->uac[branch].on_reply = t->on_reply;
	t->uac[branch].on_failure = t->on_failure;
	t->uac[branch].on_branch_failure = t->on_branch_failure;

	membar_write(); /* to allow lockless ops (e.g. prepare_to_cancel()) we
					 * want to be sure everything above is fully written
					 * before updating branches no. */
	t->nr_of_outgoings=(branch+1);

	return branch;

error:
	return ret;
}
/* introduce a new uac to the transaction, based on old_uac and a possible
 * new ip address (if the dns name resolves to more ips). If no more
 * ips are found => returns -1.
 * returns its branch id (>=0)
 * or error (<0) and sets ser_error if needed; it doesn't send a message
 * yet -- a reply to it
 * might interfere with the process of adding multiple branches.
 * if lock_replies is 1, replies will be locked for t until the new branch
 * is added (to prevent add-branch races). Use 0 if the reply lock is
 * already held, e.g. in failure route/handlers (WARNING: using 1 from a
 * failure route will cause a deadlock).
 */
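/* Illustration of the lock_replies convention (hypothetical calls,
 * cf. t_send_branch() below):
 *   - normal/onsend context:            add_uac_dns_fallback(t, msg, uac, 1);
 *   - failure route (reply lock held):  add_uac_dns_fallback(t, msg, uac, 0);
 */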
int add_uac_dns_fallback(struct cell *t, struct sip_msg* msg,
							struct ua_client* old_uac,
							int lock_replies)
{
	int ret;

	ret=-1;
	if (cfg_get(core, core_cfg, use_dns_failover) &&
			!((t->flags & (T_DONT_FORK|T_DISABLE_FAILOVER)) ||
				uac_dont_fork(old_uac)) &&
			dns_srv_handle_next(&old_uac->dns_h, 0)){
		if (lock_replies){
			/* use the reply lock to guarantee that nobody is adding a
			 * branch at the same time */
			LOCK_REPLIES(t);
			/* check again that we can fork */
			if ((t->flags & T_DONT_FORK) || uac_dont_fork(old_uac)){
				UNLOCK_REPLIES(t);
				LM_DBG("no forking on => no new branches\n");
				return ret;
			}
		}
		if (t->nr_of_outgoings >= sr_dst_max_branches){
			LM_ERR("maximum number of branches exceeded\n");
			if (lock_replies)
				UNLOCK_REPLIES(t);
			ret=ser_error=E_TOO_MANY_BRANCHES;
			return ret;
		}
		/* copy the dns handle into the new uac */
		dns_srv_handle_cpy(&t->uac[t->nr_of_outgoings].dns_h,
							&old_uac->dns_h);
		/* copy the onreply and onfailure routes */
		t->uac[t->nr_of_outgoings].on_failure = old_uac->on_failure;
		t->uac[t->nr_of_outgoings].on_reply = old_uac->on_reply;
		t->uac[t->nr_of_outgoings].on_branch_failure =
				old_uac->on_branch_failure;
		/* copy branch flags */
		t->uac[t->nr_of_outgoings].branch_flags = old_uac->branch_flags;

		if (cfg_get(tm, tm_cfg, reparse_on_dns_failover)){
			/* Reuse the old buffer and only replace the via header.
			 * The drawback is that the send_socket is not corrected
			 * in the rest of the message, only in the VIA HF (Miklos) */
			ret=add_uac_from_buf(t, msg, &old_uac->uri,
						&old_uac->path,
						(old_uac->request.dst.send_flags.f
								& SND_F_FORCE_SOCKET)?
							old_uac->request.dst.send_sock:
							((tm_dns_reuse_rcv_socket)?
								msg->rcv.bind_address:0),
						old_uac->request.dst.send_flags,
						old_uac->request.dst.proto,
						old_uac->request.buffer,
						old_uac->request.buffer_len,
						&old_uac->instance, &old_uac->ruid,
						&old_uac->location_ua);
		} else {
			/* add_uac will use dns_h => next_hop will be ignored.
			 * Unfortunately we can't reuse the old buffer, the branch id
			 * must be changed and the send_socket might be different =>
			 * re-create the whole uac */
			ret=add_uac(t, msg, &old_uac->uri, 0, &old_uac->path, 0,
						(old_uac->request.dst.send_flags.f
								& SND_F_FORCE_SOCKET)?
							old_uac->request.dst.send_sock:
							((tm_dns_reuse_rcv_socket)?
								msg->rcv.bind_address:0),
						old_uac->request.dst.send_flags,
						old_uac->request.dst.proto, UAC_DNS_FAILOVER_F,
						&old_uac->instance, &old_uac->ruid,
						&old_uac->location_ua);
		}
		if (ret<0){
			/* failed, delete the copied dns_h */
			dns_srv_handle_put(&t->uac[t->nr_of_outgoings].dns_h);
		}
		if (lock_replies){
			UNLOCK_REPLIES(t);
		}
	}
	return ret;
}
#endif /* USE_DNS_FAILOVER */
int e2e_cancel_branch( struct sip_msg *cancel_msg, struct cell *t_cancel,
	struct cell *t_invite, int branch )
{
	int ret;
	char *shbuf;
	unsigned int len;
	snd_flags_t snd_flags;
#ifdef CANCEL_REASON_SUPPORT
	struct cancel_reason* reason;
#endif /* CANCEL_REASON_SUPPORT */

	ret=-1;
	if (t_cancel->uac[branch].request.buffer) {
		LM_CRIT("buffer rewrite attempt\n");
		ret=ser_error=E_BUG;
		goto error;
	}
	if (t_invite->uac[branch].request.buffer==0){
		/* inactive / deleted branch */
		goto error;
	}
	t_invite->uac[branch].request.flags|=F_RB_CANCELED;

	/* note -- there is a gap in proxy stats -- we don't update
	 * proxy stats with CANCEL (proxy->ok, proxy->tx, etc.)
	 */

	/* set same dst as the invite */
	t_cancel->uac[branch].request.dst=t_invite->uac[branch].request.dst;
#ifdef CANCEL_REASON_SUPPORT
	reason = t_invite->uas.cancel_reas;
#endif /* CANCEL_REASON_SUPPORT */

	if (cfg_get(tm, tm_cfg, reparse_invite)) {
		/* the buffer is built locally from the INVITE which was sent out */
		/* lumps can be set outside of the lock, make sure that we read
		 * the up-to-date values */
		membar_depends();
		if (cancel_msg->add_rm || cancel_msg->body_lumps) {
			LM_WARN("CANCEL is built locally,"
					" thus lumps are not applied to the message!\n");
		}
		shbuf=build_local_reparse( t_invite, branch, &len, CANCEL,
									CANCEL_LEN, &t_invite->to
#ifdef CANCEL_REASON_SUPPORT
									, reason
#endif /* CANCEL_REASON_SUPPORT */
									);
		if (unlikely(!shbuf)) {
			LM_ERR("printing e2e cancel failed\n");
			ret=ser_error=E_OUT_OF_MEM;
			goto error;
		}
		/* install buffer */
		t_cancel->uac[branch].request.buffer=shbuf;
		t_cancel->uac[branch].request.buffer_len=len;
		t_cancel->uac[branch].uri.s=t_cancel->uac[branch].request.buffer+
			cancel_msg->first_line.u.request.method.len+1;
		t_cancel->uac[branch].uri.len=t_invite->uac[branch].uri.len;
	} else {
		SND_FLAGS_INIT(&snd_flags);
		/* the buffer is constructed from the received CANCEL with lumps
		 * applied */
		/* t_cancel...request.dst is already filled (see above) */
		if (unlikely((ret=prepare_new_uac( t_cancel, cancel_msg, branch,
									&t_invite->uac[branch].uri,
									&t_invite->uac[branch].path,
									0, 0, snd_flags, PROTO_NONE, 0,
									NULL, NULL, NULL)) <0)){
			ser_error=ret;
			goto error;
		}
	}
	ret=0;
error:
	return ret;
}
#ifdef CANCEL_REASON_SUPPORT
/** create a cancel reason structure packed into a single shm. block.
 * From a cause and a pointer to a str or cancel_msg, build a
 * packed cancel reason structure (CANCEL_REAS_PACKED_HDRS), using a
 * single memory allocation (so that it can be freed by a simple shm_free()).
 * @param cause - cancel cause, @see cancel_reason for more details.
 * @param data - depends on the cancel cause.
 * @param t - transaction.
 * @return pointer to shm. packed cancel reason struct. on success,
 *         0 on error or when there is nothing to pack.
 */
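/* Illustration: for cause=487 and a text of "ORIGINATOR_CANCEL", the
 * packed headers would read (values hypothetical):
 *   Reason: SIP;cause=487;text="ORIGINATOR_CANCEL"\r\n
 */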
static struct cancel_reason* cancel_reason_pack(short cause, void* data,
													struct cell* t)
{
	char* d;
	struct cancel_reason* cr;
	int reason_len;
	int code_len;
	struct hdr_field *reas1, *reas_last, *hdr;
	str* txt;
	struct sip_msg* e2e_cancel;

	if (likely(cause != CANCEL_REAS_UNKNOWN)){
		reason_len = 0;
		txt = 0;
		e2e_cancel = 0;
		reas1 = 0;
		reas_last = 0;
		if (likely(cause == CANCEL_REAS_RCVD_CANCEL &&
					data && !(t->flags & T_NO_E2E_CANCEL_REASON))) {
			/* parse the entire cancel, to get all the Reason headers */
			e2e_cancel = data;
			if(parse_headers(e2e_cancel, HDR_EOH_F, 0)==-1) {
				LM_ERR("failed to parse headers\n");
				goto error;
			}
			for(hdr=get_hdr(e2e_cancel, HDR_REASON_T), reas1=hdr;
					hdr; hdr=next_sibling_hdr(hdr)) {
				/* hdr->len includes the trailing CRLF */
				reason_len += hdr->len;
				reas_last=hdr;
			}
		} else if (likely(cause > 0 &&
					cfg_get(tm, tm_cfg, local_cancel_reason))){
			txt = (str*) data;
			/* Reason: SIP;cause=<reason->cause>[;text="<reason->u.text.s>"] */
			reason_len = REASON_PREFIX_LEN + USHORT2SBUF_MAX_LEN +
				((txt && txt->s)?REASON_TEXT_LEN + 1 + txt->len + 1 : 0) +
				CRLF_LEN;
		} else if (cause == CANCEL_REAS_PACKED_HDRS &&
					!(t->flags & T_NO_E2E_CANCEL_REASON) && data) {
			txt = (str*) data;
			reason_len = txt?txt->len:0;
		} else if (unlikely(cause < CANCEL_REAS_MIN)) {
			LM_CRIT("unhandled reason cause %d\n", cause);
			goto error;
		}

		if (unlikely(reason_len == 0))
			return 0; /* nothing to do, no reason */
		cr = shm_malloc(sizeof(struct cancel_reason) + reason_len);
		if (unlikely(cr == 0))
			goto error;
		d = (char*)cr + sizeof(*cr);
		cr->cause = CANCEL_REAS_PACKED_HDRS;
		cr->u.packed_hdrs.s = d;
		cr->u.packed_hdrs.len = reason_len;

		if (cause == CANCEL_REAS_RCVD_CANCEL) {
			for(hdr=reas1; hdr; hdr=next_sibling_hdr(hdr)) {
				/* hdr->len includes the trailing CRLF */
				append_str(d, hdr->name.s, hdr->len);
				if (likely(hdr==reas_last))
					break;
			}
		} else if (likely(cause > 0)) {
			append_str(d, REASON_PREFIX, REASON_PREFIX_LEN);
			code_len=ushort2sbuf(cause, d, reason_len
									- (int)(d - (char*)cr - sizeof(*cr)));
			if (unlikely(code_len==0)) {
				shm_free(cr);
				cr=0;
				LM_CRIT("not enough space to write the reason code\n");
				goto error;
			}
			d+=code_len;
			if (txt && txt->s){
				append_str(d, REASON_TEXT, REASON_TEXT_LEN);
				*d='"'; d++;
				append_str(d, txt->s, txt->len);
				*d='"'; d++;
			}
			append_str(d, CRLF, CRLF_LEN);
		} else if (cause == CANCEL_REAS_PACKED_HDRS) {
			append_str(d, txt->s, txt->len);
		}
		return cr;
	}
error:
	return 0;
}
#endif /* CANCEL_REASON_SUPPORT */
void e2e_cancel( struct sip_msg *cancel_msg,
					struct cell *t_cancel, struct cell *t_invite )
{
	branch_bm_t cancel_bm;
#ifndef E2E_CANCEL_HOP_BY_HOP
	branch_bm_t tmp_bm;
#elif defined (CANCEL_REASON_SUPPORT)
	struct cancel_reason* reason;
	int free_reason;
#endif /* E2E_CANCEL_HOP_BY_HOP */
	int i;
	int lowest_error;
	int ret;
	struct tmcb_params tmcb;

	lowest_error=0;

	if (unlikely(has_tran_tmcbs(t_invite, TMCB_E2ECANCEL_IN))){
		INIT_TMCB_PARAMS(tmcb, cancel_msg, 0, cancel_msg->REQ_METHOD);
		run_trans_callbacks_internal(&t_invite->tmcb_hl, TMCB_E2ECANCEL_IN,
										t_invite, &tmcb);
	}
	/* mark the transaction as canceled, so that no new messages are
	 * forwarded on it and t_is_canceled() returns true.
	 * WARNING: it's safe to do it without locks, at least for now (in a race
	 * event even if a flag is unwillingly reset nothing bad will happen),
	 * however this should be rechecked for any future new flags use.
	 */
	t_invite->flags|=T_CANCELED;
	/* first check if there are any branches */
	if (t_invite->nr_of_outgoings==0){
		/* no branches yet => force a reply to the invite */
		t_reply( t_invite, t_invite->uas.request, 487, CANCELED );
		LM_DBG("e2e cancel -- no more pending branches\n");
		t_reply( t_cancel, cancel_msg, 200, CANCEL_DONE );
		return;
	}

	/* determine which branches to cancel ... */
	prepare_to_cancel(t_invite, &cancel_bm, 0);

	/* no branches to cancel (e.g., a suspended transaction with blind uac) */
	if (unlikely(cancel_bm==0)){
		/* no active branches => force a reply to the invite */
		t_reply( t_invite, t_invite->uas.request, 487, CANCELED );
		LM_DBG("e2e cancel -- no active branches\n");
		t_reply( t_cancel, cancel_msg, 200, CANCEL_DONE );
		return;
	}

#ifdef E2E_CANCEL_HOP_BY_HOP
	/* we don't need to set t_cancel label to be the same as t_invite if
	 * we do hop by hop cancel. The cancel transaction will have a different
	 * label, but this is not a problem since this transaction is only used
	 * to send a reply back. The cancels sent upstream will be part of the
	 * invite transaction (local_cancel retr. bufs) and they will be
	 * generated with the same via as the invite.
	 * Note however that setting t_cancel label the same as t_invite will
	 * work too (the upstream cancel replies will properly match the
	 * t_invite transaction and will not match the t_cancel because t_cancel
	 * will always have 0 branches and we check for the branch number in
	 * t_reply_matching() ).
	 */
#ifdef CANCEL_REASON_SUPPORT
	free_reason = 0;
	reason = 0;
	if (likely(t_invite->uas.cancel_reas == 0)){
		reason = cancel_reason_pack(CANCEL_REAS_RCVD_CANCEL, cancel_msg,
									t_invite);
		/* set if not already set */
		if (unlikely(reason &&
					atomic_cmpxchg_long((void*)&t_invite->uas.cancel_reas,
										0, (long)reason) != 0)) {
			/* already set, failed to re-set it */
			free_reason = 1;
		}
	}
#endif /* CANCEL_REASON_SUPPORT */
	for (i=0; i<t_invite->nr_of_outgoings; i++) {
		if (cancel_bm & (1<<i)) {
			/* it's safe to get the reply lock since e2e_cancel is
			 * called with the cancel as the "current" transaction so
			 * at most t_cancel REPLY_LOCK is held in this process =>
			 * no deadlock possibility */
			ret=cancel_branch(
				t_invite, i,
#ifdef CANCEL_REASON_SUPPORT
				reason,
#endif /* CANCEL_REASON_SUPPORT */
				cfg_get(tm,tm_cfg, cancel_b_flags)
					| ((t_invite->uac[i].request.buffer==NULL)?
						F_CANCEL_B_FAKE_REPLY:0) /* blind UAC? */
				);
			if (ret<0) cancel_bm &= ~(1<<i);
			if (ret<lowest_error) lowest_error=ret;
		}
	}
#ifdef CANCEL_REASON_SUPPORT
	if (unlikely(free_reason)) {
		/* reason was not set as the global reason => free it */
		shm_free(reason);
	}
#endif /* CANCEL_REASON_SUPPORT */
#else /* ! E2E_CANCEL_HOP_BY_HOP */
	/* fix label -- it must be the same for reply matching (the label is part
	 * of the generated via branch for the cancels sent upstream and if it
	 * were different from the one in the INVITE the transactions would not
	 * match) */
	t_cancel->label=t_invite->label;
	t_cancel->nr_of_outgoings=t_invite->nr_of_outgoings;
	/* ... and install CANCEL UACs */
	for (i=0; i<t_invite->nr_of_outgoings; i++)
		if ((cancel_bm & (1<<i)) && (t_invite->uac[i].last_received>=100)) {
			ret=e2e_cancel_branch(cancel_msg, t_cancel, t_invite, i);
			if (ret<0) cancel_bm &= ~(1<<i);
			if (ret<lowest_error) lowest_error=ret;
		}

	/* send them out */
	for (i = 0; i < t_cancel->nr_of_outgoings; i++) {
		if (cancel_bm & (1 << i)) {
			if (t_invite->uac[i].last_received>=100){
				/* Provisional reply received on this branch, send CANCEL */
				/* we do need to stop the retr. timers if the request is not
				 * an invite and since the stop_rb_retr() cost is lower than
				 * the invite check we do it always --andrei */
				stop_rb_retr(&t_invite->uac[i].request);
				if (SEND_BUFFER(&t_cancel->uac[i].request) == -1) {
					LM_ERR("e2e cancel - send failed\n");
				}
				else{
					if (unlikely(has_tran_tmcbs(t_cancel,
									TMCB_REQUEST_SENT)))
						run_trans_callbacks_with_buf(TMCB_REQUEST_SENT,
								&t_cancel->uac[i].request,
								cancel_msg, 0, TMCB_LOCAL_F);
				}
				if (start_retr( &t_cancel->uac[i].request )!=0)
					LM_CRIT("BUG: failed to start retr."
							" for %p\n", &t_cancel->uac[i].request);
			} else {
				/* No provisional response received, stop
				 * retransmission timers */
				if (!(cfg_get(tm, tm_cfg, cancel_b_flags)
							& F_CANCEL_B_FORCE_RETR))
					stop_rb_retr(&t_invite->uac[i].request);
				/* no need to stop fr, it will be stopped by relay_reply
				 * put_on_wait -- andrei */
				/* Generate the faked reply */
				if (cfg_get(tm, tm_cfg, cancel_b_flags) &
						F_CANCEL_B_FAKE_REPLY){
					LOCK_REPLIES(t_invite);
					if (relay_reply(t_invite, FAKED_REPLY, i,
									487, &tmp_bm, 1) == RPS_ERROR) {
						lowest_error = -1;
					}
				}
			}
		}
	}
#endif /* E2E_CANCEL_HOP_BY_HOP */
	/* if an error occurred, let upstream know (the final reply
	 * will also move the transaction to the wait state)
	 */
	if (lowest_error<0) {
		LM_ERR("cancel error\n");
		/* if called from failure_route, make sure that the unsafe version
		 * is called (we are already holding the reply mutex for the cancel
		 * transaction).
		 */
		if ((is_route_type(FAILURE_ROUTE)) && (t_cancel==get_t()))
			t_reply_unsafe( t_cancel, cancel_msg, 500, "cancel error");
		else
			t_reply( t_cancel, cancel_msg, 500, "cancel error");
	} else if (cancel_bm) {
		/* if there are pending branches, let upstream know we
		 * are working on it
		 */
		LM_DBG("e2e cancel proceeding\n");
		/* if called from failure_route, make sure that the unsafe version
		 * is called (we are already holding the reply mutex for the cancel
		 * transaction).
		 */
		if ((is_route_type(FAILURE_ROUTE)) && (t_cancel==get_t()))
			t_reply_unsafe( t_cancel, cancel_msg, 200, CANCELING );
		else
			t_reply( t_cancel, cancel_msg, 200, CANCELING );
	} else {
		/* if the transaction exists, but there are no more pending
		 * branches, tell upstream we're done
		 */
		LM_DBG("e2e cancel -- no more pending branches\n");
		/* if called from failure_route, make sure that the unsafe version
		 * is called (we are already holding the reply mutex for the cancel
		 * transaction).
		 */
		if ((is_route_type(FAILURE_ROUTE)) && (t_cancel==get_t()))
			t_reply_unsafe( t_cancel, cancel_msg, 200, CANCEL_DONE );
		else
			t_reply( t_cancel, cancel_msg, 200, CANCEL_DONE );
	}
}
/* sends one uac/branch buffer and falls back to other ips if
 * the destination resolves to several addresses.
 * Takes care of starting timers a.s.o. (on send success)
 * returns: -2 on error, -1 on drop, current branch id on success,
 *   new branch id on send error/blacklist, when failover is possible
 *   (ret>=0 && ret!=branch)
 * if lock_replies is 1, the replies for t will be locked when adding
 * new branches (to prevent races). Use 0 from failure routes or other
 * places where the reply lock is already held, to avoid deadlocks. */
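/* Caller-side sketch (cf. the send loop in t_forward_nonack() below):
 *   ret=t_send_branch(t, i, p_msg, proxy, lock_replies);
 *   ret==i            => sent out on this branch
 *   ret>=0 && ret!=i  => dns failover: branch ret was added instead
 *   ret<0             => branch is dead (-1 dropped, -2 error)
 */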
int t_send_branch( struct cell *t, int branch, struct sip_msg* p_msg ,
					struct proxy_l * proxy, int lock_replies)
{
	int ret;
	struct ip_addr ip; /* debugging */
	struct ua_client* uac;

	uac=&t->uac[branch];
	ret=branch;
	if (run_onsend(p_msg, &uac->request.dst, uac->request.buffer,
					uac->request.buffer_len)==0){
		/* disable the current branch: set a "fake" timeout
		 * reply code but don't set uac->reply, to avoid overriding
		 * a highly unlikely, perfectly timed fake reply (to a message
		 * we never sent).
		 * (code=final reply && reply==0 => t_pick_branch won't ever
		 * pick it) */
		uac->last_received=408;
		su2ip_addr(&ip, &uac->request.dst.to);
		LM_DBG("onsend_route dropped msg. to %s:%d (%d)\n",
				ip_addr2a(&ip), su_getport(&uac->request.dst.to),
				uac->request.dst.proto);
#ifdef USE_DNS_FAILOVER
		/* if the destination resolves to more ips, add another
		 * branch/uac */
		if (cfg_get(core, core_cfg, use_dns_failover)){
			ret=add_uac_dns_fallback(t, p_msg, uac, lock_replies);
			if (ret>=0){
				su2ip_addr(&ip, &uac->request.dst.to);
				LM_DBG("send on branch %d failed "
						"(onsend_route), trying another ip %s:%d (%d)\n",
						branch, ip_addr2a(&ip),
						su_getport(&uac->request.dst.to),
						uac->request.dst.proto);
				/* success, return new branch */
				return ret;
			}
		}
#endif /* USE_DNS_FAILOVER */
		return -1; /* drop, try next branch */
	}
#ifdef USE_DST_BLACKLIST
	if (cfg_get(core, core_cfg, use_dst_blacklist)
			&& p_msg
			&& (p_msg->REQ_METHOD
				& cfg_get(tm, tm_cfg, tm_blst_methods_lookup))
	){
		if (dst_is_blacklisted(&uac->request.dst, p_msg)){
			su2ip_addr(&ip, &uac->request.dst.to);
			LM_DBG("blacklisted destination: %s:%d (%d)\n",
					ip_addr2a(&ip), su_getport(&uac->request.dst.to),
					uac->request.dst.proto);
			/* disable the current branch: set a "fake" timeout
			 * reply code but don't set uac->reply, to avoid overriding
			 * a highly unlikely, perfectly timed fake reply (to a message
			 * we never sent). (code=final reply && reply==0 =>
			 * t_pick_branch won't ever pick it) */
			uac->last_received=408;
#ifdef USE_DNS_FAILOVER
			/* if the destination resolves to more ips, add another
			 * branch/uac */
			if (cfg_get(core, core_cfg, use_dns_failover)){
				ret=add_uac_dns_fallback(t, p_msg, uac, lock_replies);
				if (ret>=0){
					su2ip_addr(&ip, &uac->request.dst.to);
					LM_DBG("send on branch %d failed (blacklist),"
							" trying another ip %s:%d (%d)\n", branch,
							ip_addr2a(&ip),
							su_getport(&uac->request.dst.to),
							uac->request.dst.proto);
					/* success, return new branch */
					return ret;
				}
			}
#endif /* USE_DNS_FAILOVER */
			return -1; /* don't send */
		}
	}
#endif /* USE_DST_BLACKLIST */
	if (SEND_BUFFER( &uac->request)==-1) {
		/* disable the current branch: set a "fake" timeout
		 * reply code but don't set uac->reply, to avoid overriding
		 * a highly unlikely, perfectly timed fake reply (to a message
		 * we never sent).
		 * (code=final reply && reply==0 => t_pick_branch won't ever
		 * pick it) */
		uac->last_received=408;
		su2ip_addr(&ip, &uac->request.dst.to);
		LM_DBG("send to %s:%d (%d) failed\n",
				ip_addr2a(&ip), su_getport(&uac->request.dst.to),
				uac->request.dst.proto);
#ifdef USE_DST_BLACKLIST
		dst_blacklist_add(BLST_ERR_SEND, &uac->request.dst, p_msg);
#endif /* USE_DST_BLACKLIST */
#ifdef USE_DNS_FAILOVER
		/* if the destination resolves to more ips, add another
		 * branch/uac */
		if (cfg_get(core, core_cfg, use_dns_failover)){
			ret=add_uac_dns_fallback(t, p_msg, uac, lock_replies);
			if (ret>=0){
				/* success, return new branch */
				LM_DBG("send on branch %d failed, adding another"
						" branch with another ip\n", branch);
				return ret;
			}
		}
#endif /* USE_DNS_FAILOVER */
		uac->icode = 908; /* internal code set to delivery failure */
		LM_WARN("sending request on branch %d failed\n", branch);
		if (proxy) { proxy->errors++; proxy->ok=0; }
		if(tm_failure_exec_mode==1) {
			LM_DBG("putting branch %d on hold\n", branch);
			/* put it on the retransmission timer,
			 * but set the proto to NONE, so it does not actually try to
			 * resend */
			uac->request.dst.proto = PROTO_NONE;
			/* reset last_received, the 408 reply is faked by the timer */
			uac->last_received=0;
			/* add to the retransmission timer */
			if (start_retr( &uac->request )!=0){
				LM_CRIT("BUG: retransmission already started for %p\n",
						&uac->request);
				return -2;
			}
			return branch;
		}
		return -2;
	} else {
		if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_SENT)))
			run_trans_callbacks_with_buf(TMCB_REQUEST_SENT, &uac->request,
											p_msg, 0, 0);
		/* start retr. only if the send succeeded */
		if (start_retr( &uac->request )!=0){
			LM_CRIT("BUG: retransmission already started for: %p\n",
					&uac->request);
			return -2;
		}
	}
	return ret;
}
/* function returns:
 *       1 - forward successful
 *      -1 - error during forward
 */
int t_forward_nonack( struct cell *t, struct sip_msg* p_msg,
						struct proxy_l * proxy, int proto)
{
	int branch_ret, lowest_ret;
	str current_uri;
	branch_bm_t added_branches;
	int first_branch;
	int i, q;
	struct cell *t_invite;
	int success_branch;
	int try_new;
	int lock_replies;
	str dst_uri, path, instance, ruid, location_ua;
	struct socket_info* si;
	flag_t backup_bflags = 0;
	flag_t bflags = 0;

	/* make -Wall happy */
	current_uri.s=0;

	getbflagsval(0, &backup_bflags);

	if (t->flags & T_CANCELED) goto canceled;

	if (p_msg->REQ_METHOD==METHOD_CANCEL) {
		t_invite=t_lookupOriginalT( p_msg );
		if (t_invite!=T_NULL_CELL) {
			e2e_cancel( p_msg, t, t_invite );
			UNREF(t_invite);
			/* it should be set to REQ_RPLD by e2e_cancel, which should
			 * send a final reply */
			return 1;
		}
	}

	/* if no more specific error code is known, use this */
	lowest_ret=E_UNSPEC;
	/* branches added */
	added_branches=0;
	/* branch to begin with */
	first_branch=t->nr_of_outgoings;

	if (t->on_branch) {
		/* tell add_uac that it should run branch route actions */
		branch_route = t->on_branch;
		/* save the branch route so that it
		 * can be used for adding branches later
		 */
		t->on_branch_delayed = t->on_branch;
		/* reset the flag before running the actions (so that it
		 * could be set again in branch_route if needed)
		 */
		t_on_branch(0);
	} else {
		branch_route = t->on_branch_delayed;
	}

	/* on first-time forwarding, update the lumps */
	if (first_branch==0) {
		/* update the shmem-ized msg with the lumps */
		if ((is_route_type(REQUEST_ROUTE)) &&
				save_msg_lumps(t->uas.request, p_msg)) {
			LM_ERR("failed to save the message lumps\n");
			return -1;
		}
	}

	/* if ruri is not already consumed (by another invocation), use current
	 * uri too. Else add only additional branches (which may be continuously
	 * refilled).
	 */
	try_new=0;
	if (ruri_get_forking_state()) {
		try_new++;
		branch_ret=add_uac( t, p_msg, GET_RURI(p_msg), GET_NEXT_HOP(p_msg),
							&p_msg->path_vec, proxy,
							p_msg->force_send_socket,
							p_msg->fwd_send_flags, proto,
							(p_msg->dst_uri.len)?0:UAC_SKIP_BR_DST_F,
							&p_msg->instance, &p_msg->ruid,
							&p_msg->location_ua);
		/* test if a cancel was received meanwhile */
		if (t->flags & T_CANCELED) goto canceled;
		if (branch_ret>=0)
			added_branches |= 1<<branch_ret;
		else
			lowest_ret=MIN_int(lowest_ret, branch_ret);
	}

	init_branch_iterator();
	while((current_uri.s=next_branch( &current_uri.len, &q, &dst_uri, &path,
										&bflags, &si, &ruid, &instance,
										&location_ua))) {
		try_new++;
		setbflagsval(0, bflags);

		branch_ret=add_uac( t, p_msg, &current_uri,
							(dst_uri.len) ? (&dst_uri) : &current_uri,
							&path, proxy, si, p_msg->fwd_send_flags,
							proto, (dst_uri.len)?0:UAC_SKIP_BR_DST_F,
							&instance, &ruid, &location_ua);
		/* test if a cancel was received meanwhile */
		if (t->flags & T_CANCELED) goto canceled;
		/* pick some of the errors in case things go wrong;
		 * note that picking the lowest error is just as good as
		 * any other algorithm which picks any other negative
		 * branch result */
		if (branch_ret>=0)
			added_branches |= 1<<branch_ret;
		else
			lowest_ret=MIN_int(lowest_ret, branch_ret);
	}
	/* consume processed branches */
	clear_branches();

	setbflagsval(0, backup_bflags);

	/* update message flags, if changed in the branch route */
	t->uas.request->flags = p_msg->flags;
	/* don't forget to clear all branches processed so far */

	/* things went wrong ... no new branch has been fwd-ed at all */
	if (added_branches==0) {
		if (try_new==0) {
			LM_ERR("no branches for forwarding\n");
			/* either failed to add branches, or there were no more
			 * branches */
			ser_error=MIN_int(lowest_ret, E_CFG);
			return -1;
		}
		if(lowest_ret!=E_CFG)
			LM_ERR("failure to add branches\n");
		ser_error=lowest_ret;
		return lowest_ret;
	}

	/* mark the first branch in this fwd step */
	t->uac[first_branch].flags |= TM_UAC_FLAG_FB;

	ser_error=0; /* clear branch adding errors */
	/* send them out now */
	success_branch=0;
	lock_replies= ! ((is_route_type(FAILURE_ROUTE)) && (t==get_t()));
	for (i=first_branch; i<t->nr_of_outgoings; i++) {
		if (added_branches & (1<<i)) {

			branch_ret=t_send_branch(t, i, p_msg , proxy, lock_replies);
			if (branch_ret>=0){ /* some kind of success */
				if (branch_ret==i) { /* success */
					success_branch++;
					if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_OUT)))
						run_trans_callbacks_with_buf( TMCB_REQUEST_OUT,
								&t->uac[i].request,
								p_msg, 0, -p_msg->REQ_METHOD);
				}
				else /* new branch added */
					added_branches |= 1<<branch_ret;
			}
		}
	}
	if (success_branch<=0) {
		/* return always E_SEND for now
		 * (the real reason could be: denied by onsend routes, blacklisted,
		 * send failed or any of the errors listed before + dns failed
		 * when attempting dns failover) */
		ser_error=E_SEND;
		/* else return the last error (?) */
		/* the caller should take care and delete the transaction */
		return -1;
	}

	ser_error=0; /* clear branch send errors, we have overall success */
	return 1;

canceled:
	LM_DBG("no forwarding on a canceled transaction\n");
	/* reset processed branches */
	clear_branches();
	/* restore backup flags from initial env */
	setbflagsval(0, backup_bflags);
	/* update message flags, if changed in the branch route */
	t->uas.request->flags = p_msg->flags;
	ser_error=E_CANCELED;
	return -1;
}
/* cancel handling/forwarding function.
 * CANCELs with no matching transaction are handled depending on
 * the unmatched_cancel config var: they are either forwarded statefully,
 * statelessly or dropped.
 * returns:
 *     1 - forward successful
 *     0 - error, but do not reply
 *    <0 - error during forward
 * it also sets *tran if a transaction was created
 */
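/* Config sketch (illustrative kamailio.cfg snippet; the value meanings
 * follow the tm module docs: 0 - statefull forward, 1 - stateless forward,
 * 2 - drop):
 *   modparam("tm", "unmatched_cancel", 0)
 */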
int t_forward_cancel(struct sip_msg* p_msg , struct proxy_l * proxy,
						int proto, struct cell** tran)
{
	struct cell* t_invite;
	struct cell* t;
	int new_tran;
	int ret;
	struct dest_info dst;
	str host;
	unsigned short port;
	short comp;

	t=0;
	ret=0;
	/* handle cancels for which no transaction was created yet */
	if (cfg_get(tm, tm_cfg, unmatched_cancel)==UM_CANCEL_STATEFULL){
		/* create cancel transaction */
		new_tran=t_newtran(p_msg);
		if (new_tran<=0 && new_tran!=E_SCRIPT){
			if (new_tran==0)
				/* retransmission => do nothing */
				ret=1;
			else
				/* some error => return it or DROP */
				ret=(ser_error==E_BAD_VIA && reply_to_via) ? 0: new_tran;
			goto end;
		}
		t=get_t();
		ret=t_forward_nonack(t, p_msg, proxy, proto);
		goto end;
	}

	t_invite=t_lookupOriginalT( p_msg );
	if (t_invite!=T_NULL_CELL) {
		/* create cancel transaction */
		new_tran=t_newtran(p_msg);
		if (new_tran<=0 && new_tran!=E_SCRIPT){
			if (new_tran==0)
				/* retransmission => do nothing */
				ret=1;
			else
				/* some error => return it or DROP */
				ret=(ser_error==E_BAD_VIA && reply_to_via) ? 0: new_tran;
			goto end;
		}
		t=get_t();
		e2e_cancel( p_msg, t, t_invite );
		UNREF(t_invite);
		ret=0;
		goto end;
	}else /* no corresponding INVITE transaction */
		if (cfg_get(tm, tm_cfg, unmatched_cancel)==UM_CANCEL_DROP){
			LM_DBG("non matching cancel dropped\n");
			ret=1; /* do nothing -> drop */
			goto end;
		}else{
			/* UM_CANCEL_STATELESS -> stateless forward */
			LM_DBG("forwarding CANCEL statelessly\n");
			if (proxy==0) {
				init_dest_info(&dst);
				dst.proto=proto;
				if (get_uri_send_info(GET_NEXT_HOP(p_msg), &host,
							&port, &dst.proto, &comp)!=0){
					ret=E_BAD_ADDRESS;
					goto end;
				}
#ifdef USE_COMP
				dst.comp=comp;
#endif
				/* dst->send_sock not set, but forward_request
				 * will take care of it */
				ret=forward_request(p_msg, &host, port, &dst);
				goto end;
			}else{
				init_dest_info(&dst);
				dst.proto=get_proto(proto, proxy->proto);
				proxy2su(&dst.to, proxy);
				/* dst->send_sock not set, but forward_request
				 * will take care of it */
				ret=forward_request( p_msg , 0, 0, &dst) ;
				goto end;
			}
		}
end:
	if (tran)
		*tran = t;
	return ret;
}
/* Relays a CANCEL request if a corresponding INVITE transaction
 * can be found. The function is supposed to be used at the very
 * beginning of the script with the reparse_invite=1 module parameter.
 *
 * Return value:
 *     0: the CANCEL was successfully relayed
 *        (or an error occurred but the reply cannot be sent) => DROP
 *     1: no corresponding INVITE transaction exists
 *    <0: a corresponding INVITE transaction exists but an error occurred
 */
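/* Typical script usage (illustrative, cf. the tm module README):
 *   modparam("tm", "reparse_invite", 1)
 *   ...
 *   if (is_method("CANCEL")) {
 *       if (!t_relay_cancel()) {
 *           exit;  # CANCEL relayed (or reply sent) => stop here
 *       }
 *       # no matching INVITE transaction => continue routing
 *   }
 */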
int t_relay_cancel(struct sip_msg* p_msg)
{
	struct cell* t_invite;
	struct cell* t;
	int new_tran;
	int ret;

	t_invite=t_lookupOriginalT( p_msg );
	if (t_invite!=T_NULL_CELL) {
		/* create cancel transaction */
		new_tran=t_newtran(p_msg);
		if (new_tran<=0 && new_tran!=E_SCRIPT){
			if (new_tran==0)
				/* retransmission => DROP, t_newtran() takes care of it */
				ret=0;
			else
				/* some error => return it or DROP */
				ret=(ser_error==E_BAD_VIA && reply_to_via) ? 0: new_tran;
			return ret;
		}
		t=get_t();
		e2e_cancel( p_msg, t, t_invite );
		UNREF(t_invite);
		/* return 0 to stop the script processing */
		return 0;
	}
	/* no corresponding INVITE transaction found */
	return 1;
}
/* WARNING: doesn't work from a failure route (deadlock, uses t_relay_to()
 * which is failure route unsafe) */
int t_replicate(struct sip_msg *p_msg, struct proxy_l *proxy, int proto )
{
	/* this is a quite tricky hack -- we just take the message
	 * as is, including Route-s, Record-route-s, and Vias,
	 * forward it downstream and prevent replies received
	 * from relaying by setting the replication/local_trans bit;
	 *
	 * nevertheless, it should be good enough for the primary
	 * customer of this function, REGISTER replication.
	 * if we later want to do this thoroughly, we need to
	 * introduce delete lumps for all the header fields above
	 */
	return t_relay_to(p_msg, proxy, proto, 1 /* replicate */);
}
/* fixup function for the reparse_on_dns_failover modparam */
int reparse_on_dns_failover_fixup(void *handle, str *gname, str *name,
		void **val)
{
#ifdef USE_DNS_FAILOVER
	if ((int)(long)(*val) && mhomed) {
		LM_WARN("reparse_on_dns_failover is enabled on"
				" a multihomed host -- check the readme of tm module!\n");