2 * net/tipc/link.c: TIPC link code
4 * Copyright (c) 1996-2007, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
40 #include "name_distr.h"
46 * Out-of-range value for link session numbers
49 #define INVALID_SESSION 0x10000
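/* Session numbers occupy a 16-bit header field (note the 0xffff masking
 * in tipc_link_create() and tipc_link_reset()), so 0x10000 can never
 * match a real session.
 */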
55 #define STARTING_EVT 856384768 /* link processing trigger */
56 #define TRAFFIC_MSG_EVT 560815u /* link traffic msg received */
57 #define TIMEOUT_EVT 560817u /* link timer expired */
60 * The following two 'message types' are really just implementation
61 * data conveniently stored in the message header.
62 * They must not be considered part of the protocol
68 * State value stored in 'exp_msg_count'
71 #define START_CHANGEOVER 100000u
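/* Initial 'exp_msg_count' value: changeover has begun but the number of
 * tunnelled messages to expect is not yet known; it is replaced by the
 * real count when the first tunnelled ORIGINAL_MSG arrives (see
 * link_recv_changeover_msg()).
 */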
74 * struct link_name - deconstructed link name
75 * @addr_local: network address of node at this end
76 * @if_local: name of interface at this end
77 * @addr_peer: network address of node at far end
78 * @if_peer: name of interface at far end
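 *
 * A full link name has the form "Z.C.N:if-Z.C.N:if", e.g.
 * "1.1.1:eth0-1.1.2:eth0" (example values; see link_name_validate() and
 * the name built in tipc_link_create()).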
83 char if_local[TIPC_MAX_IF_NAME];
85 char if_peer[TIPC_MAX_IF_NAME];
88 static void link_handle_out_of_seq_msg(struct link *l_ptr,
90 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
91 static int link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
92 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
93 static int link_send_sections_long(struct tipc_port *sender,
94 struct iovec const *msg_sect,
95 u32 num_sect, u32 destnode);
96 static void link_check_defragm_bufs(struct link *l_ptr);
97 static void link_state_event(struct link *l_ptr, u32 event);
98 static void link_reset_statistics(struct link *l_ptr);
99 static void link_print(struct link *l_ptr, const char *str);
100 static void link_start(struct link *l_ptr);
101 static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
104 * Simple link routines
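 *
 * align() rounds a length up to the next multiple of 4, e.g.
 * align(61) == 64 and align(64) == 64.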
107 static unsigned int align(unsigned int i)
109 return (i + 3) & ~3u;
112 static void link_init_max_pkt(struct link *l_ptr)
116 max_pkt = (l_ptr->b_ptr->mtu & ~3);
117 if (max_pkt > MAX_MSG_SIZE)
118 max_pkt = MAX_MSG_SIZE;
120 l_ptr->max_pkt_target = max_pkt;
121 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
122 l_ptr->max_pkt = l_ptr->max_pkt_target;
124 l_ptr->max_pkt = MAX_PKT_DEFAULT;
126 l_ptr->max_pkt_probes = 0;
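/* max_pkt starts no larger than MAX_PKT_DEFAULT and is raised toward
 * max_pkt_target only as MTU probes succeed (see the probe handling in
 * tipc_link_send_proto_msg() and link_recv_proto_msg()).
 */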
129 static u32 link_next_sent(struct link *l_ptr)
132 return msg_seqno(buf_msg(l_ptr->next_out));
133 return mod(l_ptr->next_out_no);
136 static u32 link_last_sent(struct link *l_ptr)
138 return mod(link_next_sent(l_ptr) - 1);
142 * Simple non-static link routines (i.e. referenced outside this file)
145 int tipc_link_is_up(struct link *l_ptr)
149 return link_working_working(l_ptr) || link_working_unknown(l_ptr);
152 int tipc_link_is_active(struct link *l_ptr)
154 return (l_ptr->owner->active_links[0] == l_ptr) ||
155 (l_ptr->owner->active_links[1] == l_ptr);
159 * link_name_validate - validate & (optionally) deconstruct link name
160 * @name: ptr to link name string
161 * @name_parts: ptr to area for link name components (or NULL if not needed)
163 * Returns 1 if link name is valid, otherwise 0.
166 static int link_name_validate(const char *name, struct link_name *name_parts)
168 char name_copy[TIPC_MAX_LINK_NAME];
174 u32 z_local, c_local, n_local;
175 u32 z_peer, c_peer, n_peer;
179 /* copy link name & ensure length is OK */
181 name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
182 /* need above in case non-Posix strncpy() doesn't pad with nulls */
183 strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
184 if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
187 /* ensure all component parts of link name are present */
189 addr_local = name_copy;
190 if_local = strchr(addr_local, ':');
191 if (if_local == NULL)
194 addr_peer = strchr(if_local, '-');
195 if (addr_peer == NULL)
198 if_local_len = addr_peer - if_local;
199 if_peer = strchr(addr_peer, ':');
203 if_peer_len = strlen(if_peer) + 1;
205 /* validate component parts of link name */
207 if ((sscanf(addr_local, "%u.%u.%u%c",
208 &z_local, &c_local, &n_local, &dummy) != 3) ||
209 (sscanf(addr_peer, "%u.%u.%u%c",
210 &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
211 (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
212 (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) ||
213 (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
214 (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME) ||
215 (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
216 (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
219 /* return link name components, if necessary */
222 name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
223 strcpy(name_parts->if_local, if_local);
224 name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
225 strcpy(name_parts->if_peer, if_peer);
231 * link_timeout - handle expiration of link timer
232 * @l_ptr: pointer to link
234 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
235 * with tipc_link_delete(). (There is no risk that the node will be deleted by
236 * another thread because tipc_link_delete() always cancels the link timer before
237 * tipc_node_delete() is called.)
240 static void link_timeout(struct link *l_ptr)
242 tipc_node_lock(l_ptr->owner);
244 /* update counters used in statistical profiling of send traffic */
246 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
247 l_ptr->stats.queue_sz_counts++;
249 if (l_ptr->first_out) {
250 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
251 u32 length = msg_size(msg);
253 if ((msg_user(msg) == MSG_FRAGMENTER) &&
254 (msg_type(msg) == FIRST_FRAGMENT)) {
255 length = msg_size(msg_get_wrapped(msg));
258 l_ptr->stats.msg_lengths_total += length;
259 l_ptr->stats.msg_length_counts++;
261 l_ptr->stats.msg_length_profile[0]++;
262 else if (length <= 256)
263 l_ptr->stats.msg_length_profile[1]++;
264 else if (length <= 1024)
265 l_ptr->stats.msg_length_profile[2]++;
266 else if (length <= 4096)
267 l_ptr->stats.msg_length_profile[3]++;
268 else if (length <= 16384)
269 l_ptr->stats.msg_length_profile[4]++;
270 else if (length <= 32768)
271 l_ptr->stats.msg_length_profile[5]++;
273 l_ptr->stats.msg_length_profile[6]++;
277 /* do all other link processing performed on a periodic basis */
279 link_check_defragm_bufs(l_ptr);
281 link_state_event(l_ptr, TIMEOUT_EVT);
284 tipc_link_push_queue(l_ptr);
286 tipc_node_unlock(l_ptr->owner);
289 static void link_set_timer(struct link *l_ptr, u32 time)
291 k_start_timer(&l_ptr->timer, time);
295 * tipc_link_create - create a new link
296 * @n_ptr: pointer to associated node
297 * @b_ptr: pointer to associated bearer
298 * @media_addr: media address to use when sending messages over link
300 * Returns pointer to link.
303 struct link *tipc_link_create(struct tipc_node *n_ptr,
304 struct tipc_bearer *b_ptr,
305 const struct tipc_media_addr *media_addr)
308 struct tipc_msg *msg;
310 char addr_string[16];
311 u32 peer = n_ptr->addr;
313 if (n_ptr->link_cnt >= 2) {
314 tipc_addr_string_fill(addr_string, n_ptr->addr);
315 err("Attempt to establish third link to %s\n", addr_string);
319 if (n_ptr->links[b_ptr->identity]) {
320 tipc_addr_string_fill(addr_string, n_ptr->addr);
321 err("Attempt to establish second link on <%s> to %s\n",
322 b_ptr->name, addr_string);
326 l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
328 warn("Link creation failed, no memory\n");
333 if_name = strchr(b_ptr->name, ':') + 1;
334 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
335 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
336 tipc_node(tipc_own_addr),
338 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
339 /* note: peer i/f name is appended to link name by reset/activate */
340 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
341 l_ptr->owner = n_ptr;
342 l_ptr->checkpoint = 1;
343 l_ptr->b_ptr = b_ptr;
344 link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
345 l_ptr->state = RESET_UNKNOWN;
347 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
349 tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
350 msg_set_size(msg, sizeof(l_ptr->proto_msg));
351 msg_set_session(msg, (tipc_random & 0xffff));
352 msg_set_bearer_id(msg, b_ptr->identity);
353 strcpy((char *)msg_data(msg), if_name);
355 l_ptr->priority = b_ptr->priority;
356 tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);
358 link_init_max_pkt(l_ptr);
360 l_ptr->next_out_no = 1;
361 INIT_LIST_HEAD(&l_ptr->waiting_ports);
363 link_reset_statistics(l_ptr);
365 tipc_node_attach_link(n_ptr, l_ptr);
367 k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
368 list_add_tail(&l_ptr->link_list, &b_ptr->links);
369 tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);
375 * tipc_link_delete - delete a link
376 * @l_ptr: pointer to link
378 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
379 * This routine must not grab the node lock until after link timer cancellation
380 * to avoid a potential deadlock situation.
383 void tipc_link_delete(struct link *l_ptr)
386 err("Attempt to delete non-existent link\n");
390 k_cancel_timer(&l_ptr->timer);
392 tipc_node_lock(l_ptr->owner);
393 tipc_link_reset(l_ptr);
394 tipc_node_detach_link(l_ptr->owner, l_ptr);
395 tipc_link_stop(l_ptr);
396 list_del_init(&l_ptr->link_list);
397 tipc_node_unlock(l_ptr->owner);
398 k_term_timer(&l_ptr->timer);
402 static void link_start(struct link *l_ptr)
404 tipc_node_lock(l_ptr->owner);
405 link_state_event(l_ptr, STARTING_EVT);
406 tipc_node_unlock(l_ptr->owner);
410 * link_schedule_port - schedule port for deferred sending
411 * @l_ptr: pointer to link
412 * @origport: reference to sending port
413 * @sz: amount of data to be sent
415 * Schedules port for renewed sending of messages after link congestion has abated
419 static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
421 struct tipc_port *p_ptr;
423 spin_lock_bh(&tipc_port_list_lock);
424 p_ptr = tipc_port_lock(origport);
428 if (!list_empty(&p_ptr->wait_list))
430 p_ptr->congested = 1;
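/* waiting_pkts = ceil(sz / max_pkt): e.g. a 3000 byte message over a
 * link with a 1500 byte max_pkt (example values) waits for 2 packets'
 * worth of send capacity.
 */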
431 p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
432 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
433 l_ptr->stats.link_congs++;
435 tipc_port_unlock(p_ptr);
437 spin_unlock_bh(&tipc_port_list_lock);
441 void tipc_link_wakeup_ports(struct link *l_ptr, int all)
443 struct tipc_port *p_ptr;
444 struct tipc_port *temp_p_ptr;
445 int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
451 if (!spin_trylock_bh(&tipc_port_list_lock))
453 if (link_congested(l_ptr))
455 list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
459 list_del_init(&p_ptr->wait_list);
460 spin_lock_bh(p_ptr->lock);
461 p_ptr->congested = 0;
462 p_ptr->wakeup(p_ptr);
463 win -= p_ptr->waiting_pkts;
464 spin_unlock_bh(p_ptr->lock);
468 spin_unlock_bh(&tipc_port_list_lock);
472 * link_release_outqueue - purge link's outbound message queue
473 * @l_ptr: pointer to link
476 static void link_release_outqueue(struct link *l_ptr)
478 struct sk_buff *buf = l_ptr->first_out;
479 struct sk_buff *next;
486 l_ptr->first_out = NULL;
487 l_ptr->out_queue_size = 0;
491 * tipc_link_reset_fragments - purge link's inbound message fragments queue
492 * @l_ptr: pointer to link
495 void tipc_link_reset_fragments(struct link *l_ptr)
497 struct sk_buff *buf = l_ptr->defragm_buf;
498 struct sk_buff *next;
505 l_ptr->defragm_buf = NULL;
509 * tipc_link_stop - purge all inbound and outbound messages associated with link
510 * @l_ptr: pointer to link
513 void tipc_link_stop(struct link *l_ptr)
516 struct sk_buff *next;
518 buf = l_ptr->oldest_deferred_in;
525 buf = l_ptr->first_out;
532 tipc_link_reset_fragments(l_ptr);
534 buf_discard(l_ptr->proto_msg_queue);
535 l_ptr->proto_msg_queue = NULL;
538 /* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
539 #define link_send_event(fcn, l_ptr, up) do { } while (0)
541 void tipc_link_reset(struct link *l_ptr)
544 u32 prev_state = l_ptr->state;
545 u32 checkpoint = l_ptr->next_in_no;
546 int was_active_link = tipc_link_is_active(l_ptr);
548 msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
550 /* Link is down, accept any session */
551 l_ptr->peer_session = INVALID_SESSION;
553 /* Prepare for max packet size negotiation */
554 link_init_max_pkt(l_ptr);
556 l_ptr->state = RESET_UNKNOWN;
558 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
561 tipc_node_link_down(l_ptr->owner, l_ptr);
562 tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
564 if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
565 l_ptr->owner->permit_changeover) {
566 l_ptr->reset_checkpoint = checkpoint;
567 l_ptr->exp_msg_count = START_CHANGEOVER;
570 /* Clean up all queues: */
572 link_release_outqueue(l_ptr);
573 buf_discard(l_ptr->proto_msg_queue);
574 l_ptr->proto_msg_queue = NULL;
575 buf = l_ptr->oldest_deferred_in;
577 struct sk_buff *next = buf->next;
581 if (!list_empty(&l_ptr->waiting_ports))
582 tipc_link_wakeup_ports(l_ptr, 1);
584 l_ptr->retransm_queue_head = 0;
585 l_ptr->retransm_queue_size = 0;
586 l_ptr->last_out = NULL;
587 l_ptr->first_out = NULL;
588 l_ptr->next_out = NULL;
589 l_ptr->unacked_window = 0;
590 l_ptr->checkpoint = 1;
591 l_ptr->next_out_no = 1;
592 l_ptr->deferred_inqueue_sz = 0;
593 l_ptr->oldest_deferred_in = NULL;
594 l_ptr->newest_deferred_in = NULL;
595 l_ptr->fsm_msg_cnt = 0;
596 l_ptr->stale_count = 0;
597 link_reset_statistics(l_ptr);
599 link_send_event(tipc_cfg_link_event, l_ptr, 0);
600 if (!in_own_cluster(l_ptr->addr))
601 link_send_event(tipc_disc_link_event, l_ptr, 0);
605 static void link_activate(struct link *l_ptr)
607 l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
608 tipc_node_link_up(l_ptr->owner, l_ptr);
609 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
610 link_send_event(tipc_cfg_link_event, l_ptr, 1);
611 if (!in_own_cluster(l_ptr->addr))
612 link_send_event(tipc_disc_link_event, l_ptr, 1);
616 * link_state_event - link finite state machine
617 * @l_ptr: pointer to link
618 * @event: state machine event to process
621 static void link_state_event(struct link *l_ptr, unsigned event)
624 u32 cont_intv = l_ptr->continuity_interval;
626 if (!l_ptr->started && (event != STARTING_EVT))
627 return; /* Not yet. */
629 if (link_blocked(l_ptr)) {
630 if (event == TIMEOUT_EVT)
631 link_set_timer(l_ptr, cont_intv);
632 return; /* Changeover going on */
635 switch (l_ptr->state) {
636 case WORKING_WORKING:
638 case TRAFFIC_MSG_EVT:
642 if (l_ptr->next_in_no != l_ptr->checkpoint) {
643 l_ptr->checkpoint = l_ptr->next_in_no;
644 if (tipc_bclink_acks_missing(l_ptr->owner)) {
645 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
647 l_ptr->fsm_msg_cnt++;
648 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
649 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
651 l_ptr->fsm_msg_cnt++;
653 link_set_timer(l_ptr, cont_intv);
656 l_ptr->state = WORKING_UNKNOWN;
657 l_ptr->fsm_msg_cnt = 0;
658 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
659 l_ptr->fsm_msg_cnt++;
660 link_set_timer(l_ptr, cont_intv / 4);
663 info("Resetting link <%s>, requested by peer\n",
665 tipc_link_reset(l_ptr);
666 l_ptr->state = RESET_RESET;
667 l_ptr->fsm_msg_cnt = 0;
668 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
669 l_ptr->fsm_msg_cnt++;
670 link_set_timer(l_ptr, cont_intv);
673 err("Unknown link event %u in WW state\n", event);
676 case WORKING_UNKNOWN:
678 case TRAFFIC_MSG_EVT:
680 l_ptr->state = WORKING_WORKING;
681 l_ptr->fsm_msg_cnt = 0;
682 link_set_timer(l_ptr, cont_intv);
685 info("Resetting link <%s>, requested by peer "
686 "while probing\n", l_ptr->name);
687 tipc_link_reset(l_ptr);
688 l_ptr->state = RESET_RESET;
689 l_ptr->fsm_msg_cnt = 0;
690 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
691 l_ptr->fsm_msg_cnt++;
692 link_set_timer(l_ptr, cont_intv);
695 if (l_ptr->next_in_no != l_ptr->checkpoint) {
696 l_ptr->state = WORKING_WORKING;
697 l_ptr->fsm_msg_cnt = 0;
698 l_ptr->checkpoint = l_ptr->next_in_no;
699 if (tipc_bclink_acks_missing(l_ptr->owner)) {
700 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
702 l_ptr->fsm_msg_cnt++;
704 link_set_timer(l_ptr, cont_intv);
705 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
706 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
708 l_ptr->fsm_msg_cnt++;
709 link_set_timer(l_ptr, cont_intv / 4);
710 } else { /* Link has failed */
711 warn("Resetting link <%s>, peer not responding\n",
713 tipc_link_reset(l_ptr);
714 l_ptr->state = RESET_UNKNOWN;
715 l_ptr->fsm_msg_cnt = 0;
716 tipc_link_send_proto_msg(l_ptr, RESET_MSG,
718 l_ptr->fsm_msg_cnt++;
719 link_set_timer(l_ptr, cont_intv);
723 err("Unknown link event %u in WU state\n", event);
728 case TRAFFIC_MSG_EVT:
731 other = l_ptr->owner->active_links[0];
732 if (other && link_working_unknown(other))
734 l_ptr->state = WORKING_WORKING;
735 l_ptr->fsm_msg_cnt = 0;
736 link_activate(l_ptr);
737 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
738 l_ptr->fsm_msg_cnt++;
739 link_set_timer(l_ptr, cont_intv);
742 l_ptr->state = RESET_RESET;
743 l_ptr->fsm_msg_cnt = 0;
744 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
745 l_ptr->fsm_msg_cnt++;
746 link_set_timer(l_ptr, cont_intv);
752 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
753 l_ptr->fsm_msg_cnt++;
754 link_set_timer(l_ptr, cont_intv);
757 err("Unknown link event %u in RU state\n", event);
762 case TRAFFIC_MSG_EVT:
764 other = l_ptr->owner->active_links[0];
765 if (other && link_working_unknown(other))
767 l_ptr->state = WORKING_WORKING;
768 l_ptr->fsm_msg_cnt = 0;
769 link_activate(l_ptr);
770 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
771 l_ptr->fsm_msg_cnt++;
772 link_set_timer(l_ptr, cont_intv);
777 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
778 l_ptr->fsm_msg_cnt++;
779 link_set_timer(l_ptr, cont_intv);
782 err("Unknown link event %u in RR state\n", event);
786 err("Unknown link state %u/%u\n", l_ptr->state, event);
791 * link_bundle_buf(): Append contents of a buffer to
792 * the tail of an existing one.
795 static int link_bundle_buf(struct link *l_ptr,
796 struct sk_buff *bundler,
799 struct tipc_msg *bundler_msg = buf_msg(bundler);
800 struct tipc_msg *msg = buf_msg(buf);
801 u32 size = msg_size(msg);
802 u32 bundle_size = msg_size(bundler_msg);
803 u32 to_pos = align(bundle_size);
804 u32 pad = to_pos - bundle_size;
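/* Bundled messages are 4-byte aligned, so e.g. appending to a 61 byte
 * bundle gives to_pos == 64 and pad == 3.
 */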
806 if (msg_user(bundler_msg) != MSG_BUNDLER)
808 if (msg_type(bundler_msg) != OPEN_MSG)
810 if (skb_tailroom(bundler) < (pad + size))
812 if (l_ptr->max_pkt < (to_pos + size))
815 skb_put(bundler, pad + size);
816 skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
817 msg_set_size(bundler_msg, to_pos + size);
818 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
820 l_ptr->stats.sent_bundled++;
824 static void link_add_to_outqueue(struct link *l_ptr,
826 struct tipc_msg *msg)
828 u32 ack = mod(l_ptr->next_in_no - 1);
829 u32 seqno = mod(l_ptr->next_out_no++);
831 msg_set_word(msg, 2, ((ack << 16) | seqno));
832 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
834 if (l_ptr->first_out) {
835 l_ptr->last_out->next = buf;
836 l_ptr->last_out = buf;
838 l_ptr->first_out = l_ptr->last_out = buf;
840 l_ptr->out_queue_size++;
841 if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
842 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
846 * tipc_link_send_buf() is the 'full path' for messages, called from
847 * inside TIPC when the 'fast path' in tipc_send_buf
848 * has failed, and from link_send()
851 int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
853 struct tipc_msg *msg = buf_msg(buf);
854 u32 size = msg_size(msg);
855 u32 dsz = msg_data_sz(msg);
856 u32 queue_size = l_ptr->out_queue_size;
857 u32 imp = tipc_msg_tot_importance(msg);
858 u32 queue_limit = l_ptr->queue_limit[imp];
859 u32 max_packet = l_ptr->max_pkt;
861 msg_set_prevnode(msg, tipc_own_addr); /* If routed message */
863 /* Match msg importance against queue limits: */
865 if (unlikely(queue_size >= queue_limit)) {
866 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
867 return link_schedule_port(l_ptr, msg_origport(msg),
871 if (imp > CONN_MANAGER) {
872 warn("Resetting link <%s>, send queue full\n", l_ptr->name);
873 tipc_link_reset(l_ptr);
878 /* Fragmentation needed ? */
880 if (size > max_packet)
881 return link_send_long_buf(l_ptr, buf);
883 /* Packet can be queued or sent: */
885 if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
886 !link_congested(l_ptr))) {
887 link_add_to_outqueue(l_ptr, buf, msg);
889 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
890 l_ptr->unacked_window = 0;
892 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
893 l_ptr->stats.bearer_congs++;
894 l_ptr->next_out = buf;
898 /* Congestion: can message be bundled ?: */
900 if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
901 (msg_user(msg) != MSG_FRAGMENTER)) {
903 /* Try adding message to an existing bundle */
905 if (l_ptr->next_out &&
906 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
907 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
911 /* Try creating a new bundle */
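/* (Presumably only worthwhile when the message leaves at least a third
 * of the packet free for further messages to be bundled with it.)
 */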
913 if (size <= max_packet * 2 / 3) {
914 struct sk_buff *bundler = tipc_buf_acquire(max_packet);
915 struct tipc_msg bundler_hdr;
918 tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
919 INT_H_SIZE, l_ptr->addr);
920 skb_copy_to_linear_data(bundler, &bundler_hdr,
922 skb_trim(bundler, INT_H_SIZE);
923 link_bundle_buf(l_ptr, bundler, buf);
926 l_ptr->stats.sent_bundles++;
930 if (!l_ptr->next_out)
931 l_ptr->next_out = buf;
932 link_add_to_outqueue(l_ptr, buf, msg);
933 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
938 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
939 * not been selected yet, and the owner node is not locked.
940 * Called by TIPC internal users, e.g. the name distributor
943 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
946 struct tipc_node *n_ptr;
947 int res = -ELINKCONG;
949 read_lock_bh(&tipc_net_lock);
950 n_ptr = tipc_node_find(dest);
952 tipc_node_lock(n_ptr);
953 l_ptr = n_ptr->active_links[selector & 1];
955 res = tipc_link_send_buf(l_ptr, buf);
958 tipc_node_unlock(n_ptr);
962 read_unlock_bh(&tipc_net_lock);
967 * link_send_buf_fast: Entry for data messages where the
968 * destination link is known and the header is complete,
969 * including the total message length. Very time critical.
970 * Link is locked. Returns user data length.
973 static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
976 struct tipc_msg *msg = buf_msg(buf);
977 int res = msg_data_sz(msg);
979 if (likely(!link_congested(l_ptr))) {
980 if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
981 if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
982 link_add_to_outqueue(l_ptr, buf, msg);
983 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
984 &l_ptr->media_addr))) {
985 l_ptr->unacked_window = 0;
988 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
989 l_ptr->stats.bearer_congs++;
990 l_ptr->next_out = buf;
994 *used_max_pkt = l_ptr->max_pkt;
996 return tipc_link_send_buf(l_ptr, buf); /* All other cases */
1000 * tipc_send_buf_fast: Entry for data messages where the
1001 * destination node is known and the header is complete,
1002 * including the total message length.
1003 * Returns user data length.
1005 int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1008 struct tipc_node *n_ptr;
1010 u32 selector = msg_origport(buf_msg(buf)) & 1;
1013 if (destnode == tipc_own_addr)
1014 return tipc_port_recv_msg(buf);
1016 read_lock_bh(&tipc_net_lock);
1017 n_ptr = tipc_node_find(destnode);
1018 if (likely(n_ptr)) {
1019 tipc_node_lock(n_ptr);
1020 l_ptr = n_ptr->active_links[selector];
1021 if (likely(l_ptr)) {
1022 res = link_send_buf_fast(l_ptr, buf, &dummy);
1023 tipc_node_unlock(n_ptr);
1024 read_unlock_bh(&tipc_net_lock);
1027 tipc_node_unlock(n_ptr);
1029 read_unlock_bh(&tipc_net_lock);
1030 res = msg_data_sz(buf_msg(buf));
1031 tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1037 * tipc_link_send_sections_fast: Entry for messages where the
1038 * destination processor is known and the header is complete,
1039 * except for total message length.
1040 * Returns user data length or errno.
1042 int tipc_link_send_sections_fast(struct tipc_port *sender,
1043 struct iovec const *msg_sect,
1047 struct tipc_msg *hdr = &sender->phdr;
1049 struct sk_buff *buf;
1050 struct tipc_node *node;
1052 u32 selector = msg_origport(hdr) & 1;
1056 * Try building message using port's max_pkt hint.
1057 * (Must not hold any locks while building message.)
1060 res = tipc_msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
1061 !sender->user_port, &buf);
1063 read_lock_bh(&tipc_net_lock);
1064 node = tipc_node_find(destaddr);
1066 tipc_node_lock(node);
1067 l_ptr = node->active_links[selector];
1068 if (likely(l_ptr)) {
1070 res = link_send_buf_fast(l_ptr, buf,
1072 if (unlikely(res < 0))
1075 tipc_node_unlock(node);
1076 read_unlock_bh(&tipc_net_lock);
1080 /* Exit if build request was invalid */
1082 if (unlikely(res < 0))
1085 /* Exit if link (or bearer) is congested */
1087 if (link_congested(l_ptr) ||
1088 !list_empty(&l_ptr->b_ptr->cong_links)) {
1089 res = link_schedule_port(l_ptr,
1095 * Message size exceeds max_pkt hint; update hint,
1096 * then re-try fast path or fragment the message
1099 sender->max_pkt = l_ptr->max_pkt;
1100 tipc_node_unlock(node);
1101 read_unlock_bh(&tipc_net_lock);
1104 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
1107 return link_send_sections_long(sender, msg_sect,
1108 num_sect, destaddr);
1110 tipc_node_unlock(node);
1112 read_unlock_bh(&tipc_net_lock);
1114 /* Couldn't find a link to the destination node */
1117 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1119 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1125 * link_send_sections_long(): Entry for long messages where the
1126 * destination node is known and the header is complete,
1127 * including the total message length.
1128 * Link and bearer congestion status have been checked to be ok,
1129 * and are ignored if they change.
1131 * Note that fragments do not use the full link MTU so that they won't have
1132 * to undergo refragmentation if link changeover causes them to be sent
1133 * over another link with an additional tunnel header added as prefix.
1134 * (Refragmentation will still occur if the other link has a smaller MTU.)
1136 * Returns user data length or errno.
1138 static int link_send_sections_long(struct tipc_port *sender,
1139 struct iovec const *msg_sect,
1144 struct tipc_node *node;
1145 struct tipc_msg *hdr = &sender->phdr;
1146 u32 dsz = msg_data_sz(hdr);
1147 u32 max_pkt, fragm_sz, rest;
1148 struct tipc_msg fragm_hdr;
1149 struct sk_buff *buf, *buf_chain, *prev;
1150 u32 fragm_crs, fragm_rest, hsz, sect_rest;
1151 const unchar *sect_crs;
1157 max_pkt = sender->max_pkt - INT_H_SIZE;
1158 /* leave room for tunnel header in case of link changeover */
1159 fragm_sz = max_pkt - INT_H_SIZE;
1160 /* leave room for fragmentation header in each fragment */
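/* i.e. each fragment reserves headroom for both a tunnel header and a
 * fragment header, so the usable payload per fragment is the port's
 * max_pkt hint minus 2 * INT_H_SIZE.
 */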
1168 /* Prepare reusable fragment header: */
1170 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1171 INT_H_SIZE, msg_destnode(hdr));
1172 msg_set_size(&fragm_hdr, max_pkt);
1173 msg_set_fragm_no(&fragm_hdr, 1);
1175 /* Prepare header of first fragment: */
1177 buf_chain = buf = tipc_buf_acquire(max_pkt);
1181 skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1182 hsz = msg_hdr_sz(hdr);
1183 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
1185 /* Chop up message: */
1187 fragm_crs = INT_H_SIZE + hsz;
1188 fragm_rest = fragm_sz - hsz;
1190 do { /* For all sections */
1194 sect_rest = msg_sect[++curr_sect].iov_len;
1195 sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
1198 if (sect_rest < fragm_rest)
1203 if (likely(!sender->user_port)) {
1204 if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1206 for (; buf_chain; buf_chain = buf) {
1207 buf = buf_chain->next;
1208 buf_discard(buf_chain);
1213 skb_copy_to_linear_data_offset(buf, fragm_crs,
1221 if (!fragm_rest && rest) {
1223 /* Initiate new fragment: */
1224 if (rest <= fragm_sz) {
1226 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
1228 msg_set_type(&fragm_hdr, FRAGMENT);
1230 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1231 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1233 buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
1239 skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1240 fragm_crs = INT_H_SIZE;
1241 fragm_rest = fragm_sz;
1246 * Now we have a buffer chain. Select a link and check
1247 * that packet size is still OK
1249 node = tipc_node_find(destaddr);
1251 tipc_node_lock(node);
1252 l_ptr = node->active_links[sender->ref & 1];
1254 tipc_node_unlock(node);
1257 if (l_ptr->max_pkt < max_pkt) {
1258 sender->max_pkt = l_ptr->max_pkt;
1259 tipc_node_unlock(node);
1260 for (; buf_chain; buf_chain = buf) {
1261 buf = buf_chain->next;
1262 buf_discard(buf_chain);
1268 for (; buf_chain; buf_chain = buf) {
1269 buf = buf_chain->next;
1270 buf_discard(buf_chain);
1272 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1276 /* Append whole chain to send queue: */
1279 l_ptr->long_msg_seq_no++;
1280 if (!l_ptr->next_out)
1281 l_ptr->next_out = buf_chain;
1282 l_ptr->stats.sent_fragmented++;
1284 struct sk_buff *next = buf->next;
1285 struct tipc_msg *msg = buf_msg(buf);
1287 l_ptr->stats.sent_fragments++;
1288 msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
1289 link_add_to_outqueue(l_ptr, buf, msg);
1293 /* Send it, if possible: */
1295 tipc_link_push_queue(l_ptr);
1296 tipc_node_unlock(node);
1301 * tipc_link_push_packet: Push one unsent packet to the media
1303 u32 tipc_link_push_packet(struct link *l_ptr)
1305 struct sk_buff *buf = l_ptr->first_out;
1306 u32 r_q_size = l_ptr->retransm_queue_size;
1307 u32 r_q_head = l_ptr->retransm_queue_head;
1309 /* Step to position where retransmission failed, if any, */
1310 /* consider that buffers may have been released in the meantime */
1312 if (r_q_size && buf) {
1313 u32 last = lesser(mod(r_q_head + r_q_size),
1314 link_last_sent(l_ptr));
1315 u32 first = msg_seqno(buf_msg(buf));
1317 while (buf && less(first, r_q_head)) {
1318 first = mod(first + 1);
1321 l_ptr->retransm_queue_head = r_q_head = first;
1322 l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1325 /* Continue retransmission now, if there is anything: */
1327 if (r_q_size && buf) {
1328 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1329 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1330 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1331 l_ptr->retransm_queue_head = mod(++r_q_head);
1332 l_ptr->retransm_queue_size = --r_q_size;
1333 l_ptr->stats.retransmitted++;
1336 l_ptr->stats.bearer_congs++;
1341 /* Send deferred protocol message, if any: */
1343 buf = l_ptr->proto_msg_queue;
1345 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1346 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1347 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1348 l_ptr->unacked_window = 0;
1350 l_ptr->proto_msg_queue = NULL;
1353 l_ptr->stats.bearer_congs++;
1358 /* Send one deferred data message, if send window not full: */
1360 buf = l_ptr->next_out;
1362 struct tipc_msg *msg = buf_msg(buf);
1363 u32 next = msg_seqno(msg);
1364 u32 first = msg_seqno(buf_msg(l_ptr->first_out));
1366 if (mod(next - first) < l_ptr->queue_limit[0]) {
1367 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1368 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1369 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1370 if (msg_user(msg) == MSG_BUNDLER)
1371 msg_set_type(msg, CLOSED_MSG);
1372 l_ptr->next_out = buf->next;
1375 l_ptr->stats.bearer_congs++;
1380 return PUSH_FINISHED;
1384 * tipc_link_push_queue(): push out the unsent messages of a link where
1385 * congestion has abated. Node is locked
1387 void tipc_link_push_queue(struct link *l_ptr)
1391 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
1395 res = tipc_link_push_packet(l_ptr);
1398 if (res == PUSH_FAILED)
1399 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1402 static void link_reset_all(unsigned long addr)
1404 struct tipc_node *n_ptr;
1405 char addr_string[16];
1408 read_lock_bh(&tipc_net_lock);
1409 n_ptr = tipc_node_find((u32)addr);
1411 read_unlock_bh(&tipc_net_lock);
1412 return; /* node no longer exists */
1415 tipc_node_lock(n_ptr);
1417 warn("Resetting all links to %s\n",
1418 tipc_addr_string_fill(addr_string, n_ptr->addr));
1420 for (i = 0; i < MAX_BEARERS; i++) {
1421 if (n_ptr->links[i]) {
1422 link_print(n_ptr->links[i], "Resetting link\n");
1423 tipc_link_reset(n_ptr->links[i]);
1427 tipc_node_unlock(n_ptr);
1428 read_unlock_bh(&tipc_net_lock);
1431 static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
1433 struct tipc_msg *msg = buf_msg(buf);
1435 warn("Retransmission failure on link <%s>\n", l_ptr->name);
1439 /* Handle failure on standard link */
1441 link_print(l_ptr, "Resetting link\n");
1442 tipc_link_reset(l_ptr);
1446 /* Handle failure on broadcast link */
1448 struct tipc_node *n_ptr;
1449 char addr_string[16];
1451 info("Msg seq number: %u, ", msg_seqno(msg));
1452 info("Outstanding acks: %lu\n",
1453 (unsigned long) TIPC_SKB_CB(buf)->handle);
1455 n_ptr = tipc_bclink_retransmit_to();
1456 tipc_node_lock(n_ptr);
1458 tipc_addr_string_fill(addr_string, n_ptr->addr);
1459 info("Multicast link info for %s\n", addr_string);
1460 info("Supported: %d, ", n_ptr->bclink.supported);
1461 info("Acked: %u\n", n_ptr->bclink.acked);
1462 info("Last in: %u, ", n_ptr->bclink.last_in);
1463 info("Gap after: %u, ", n_ptr->bclink.gap_after);
1464 info("Gap to: %u\n", n_ptr->bclink.gap_to);
1465 info("Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
1467 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1469 tipc_node_unlock(n_ptr);
1471 l_ptr->stale_count = 0;
1475 void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1478 struct tipc_msg *msg;
1485 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1486 if (l_ptr->retransm_queue_size == 0) {
1487 l_ptr->retransm_queue_head = msg_seqno(msg);
1488 l_ptr->retransm_queue_size = retransmits;
1490 err("Unexpected retransmit on link %s (qsize=%d)\n",
1491 l_ptr->name, l_ptr->retransm_queue_size);
1495 /* Detect repeated retransmit failures on uncongested bearer */
1497 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1498 if (++l_ptr->stale_count > 100) {
1499 link_retransmit_failure(l_ptr, buf);
1503 l_ptr->last_retransmitted = msg_seqno(msg);
1504 l_ptr->stale_count = 1;
1508 while (retransmits && (buf != l_ptr->next_out) && buf) {
1510 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1511 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1512 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1515 l_ptr->stats.retransmitted++;
1517 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1518 l_ptr->stats.bearer_congs++;
1519 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1520 l_ptr->retransm_queue_size = retransmits;
1525 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1529 * link_insert_deferred_queue - insert deferred messages back into receive chain
1532 static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
1533 struct sk_buff *buf)
1537 if (l_ptr->oldest_deferred_in == NULL)
1540 seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1541 if (seq_no == mod(l_ptr->next_in_no)) {
1542 l_ptr->newest_deferred_in->next = buf;
1543 buf = l_ptr->oldest_deferred_in;
1544 l_ptr->oldest_deferred_in = NULL;
1545 l_ptr->deferred_inqueue_sz = 0;
1551 * link_recv_buf_validate - validate basic format of received message
1553 * This routine ensures a TIPC message has an acceptable header, and at least
1554 * as much data as the header indicates it should. The routine also ensures
1555 * that the entire message header is stored in the main fragment of the message
1556 * buffer, to simplify future access to message header fields.
1558 * Note: Having extra info present in the message header or data areas is OK.
1559 * TIPC will ignore the excess, under the assumption that it is optional info
1560 * introduced by a later release of the protocol.
1563 static int link_recv_buf_validate(struct sk_buff *buf)
1565 static u32 min_data_hdr_size[8] = {
1566 SHORT_H_SIZE, MCAST_H_SIZE, LONG_H_SIZE, DIR_MSG_H_SIZE,
1567 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1570 struct tipc_msg *msg;
1576 if (unlikely(buf->len < MIN_H_SIZE))
1579 msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1583 if (unlikely(msg_version(msg) != TIPC_VERSION))
1586 size = msg_size(msg);
1587 hdr_size = msg_hdr_sz(msg);
1588 min_hdr_size = msg_isdata(msg) ?
1589 min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1591 if (unlikely((hdr_size < min_hdr_size) ||
1592 (size < hdr_size) ||
1593 (buf->len < size) ||
1594 (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1597 return pskb_may_pull(buf, hdr_size);
1601 * tipc_recv_msg - process TIPC messages arriving from off-node
1602 * @head: pointer to message buffer chain
1603 * @b_ptr: pointer to bearer that the message arrived on
1605 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1606 * structure (i.e. cannot be NULL), but bearer can be inactive.
1609 void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1611 read_lock_bh(&tipc_net_lock);
1613 struct tipc_node *n_ptr;
1615 struct sk_buff *crs;
1616 struct sk_buff *buf = head;
1617 struct tipc_msg *msg;
1625 /* Ensure bearer is still enabled */
1627 if (unlikely(!b_ptr->active))
1630 /* Ensure message is well-formed */
1632 if (unlikely(!link_recv_buf_validate(buf)))
1635 /* Ensure message data is a single contiguous unit */
1637 if (unlikely(buf_linearize(buf)))
1640 /* Handle arrival of a non-unicast link message */
1644 if (unlikely(msg_non_seq(msg))) {
1645 if (msg_user(msg) == LINK_CONFIG)
1646 tipc_disc_recv_msg(buf, b_ptr);
1648 tipc_bclink_recv_pkt(buf);
1652 if (unlikely(!msg_short(msg) &&
1653 (msg_destnode(msg) != tipc_own_addr)))
1656 /* Discard non-routeable messages destined for another node */
1658 if (unlikely(!msg_isdata(msg) &&
1659 (msg_destnode(msg) != tipc_own_addr))) {
1660 if ((msg_user(msg) != CONN_MANAGER) &&
1661 (msg_user(msg) != MSG_FRAGMENTER))
1665 /* Locate neighboring node that sent message */
1667 n_ptr = tipc_node_find(msg_prevnode(msg));
1668 if (unlikely(!n_ptr))
1670 tipc_node_lock(n_ptr);
1672 /* Don't talk to neighbor during cleanup after last session */
1674 if (n_ptr->cleanup_required) {
1675 tipc_node_unlock(n_ptr);
1679 /* Locate unicast link endpoint that should handle message */
1681 l_ptr = n_ptr->links[b_ptr->identity];
1682 if (unlikely(!l_ptr)) {
1683 tipc_node_unlock(n_ptr);
1687 /* Validate message sequence number info */
1689 seq_no = msg_seqno(msg);
1690 ackd = msg_ack(msg);
1692 /* Release acked messages */
1694 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
1695 if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
1696 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1699 crs = l_ptr->first_out;
1700 while ((crs != l_ptr->next_out) &&
1701 less_eq(msg_seqno(buf_msg(crs)), ackd)) {
1702 struct sk_buff *next = crs->next;
1709 l_ptr->first_out = crs;
1710 l_ptr->out_queue_size -= released;
1713 /* Try sending any messages link endpoint has pending */
1715 if (unlikely(l_ptr->next_out))
1716 tipc_link_push_queue(l_ptr);
1717 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1718 tipc_link_wakeup_ports(l_ptr, 0);
1719 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1720 l_ptr->stats.sent_acks++;
1721 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1724 /* Now (finally!) process the incoming message */
1727 if (likely(link_working_working(l_ptr))) {
1728 if (likely(seq_no == mod(l_ptr->next_in_no))) {
1729 l_ptr->next_in_no++;
1730 if (unlikely(l_ptr->oldest_deferred_in))
1731 head = link_insert_deferred_queue(l_ptr,
1733 if (likely(msg_is_dest(msg, tipc_own_addr))) {
1735 if (likely(msg_isdata(msg))) {
1736 tipc_node_unlock(n_ptr);
1737 tipc_port_recv_msg(buf);
1740 switch (msg_user(msg)) {
1742 l_ptr->stats.recv_bundles++;
1743 l_ptr->stats.recv_bundled +=
1745 tipc_node_unlock(n_ptr);
1746 tipc_link_recv_bundle(buf);
1748 case NAME_DISTRIBUTOR:
1749 tipc_node_unlock(n_ptr);
1750 tipc_named_recv(buf);
1753 tipc_node_unlock(n_ptr);
1754 tipc_port_recv_proto_msg(buf);
1756 case MSG_FRAGMENTER:
1757 l_ptr->stats.recv_fragments++;
1758 if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
1760 l_ptr->stats.recv_fragmented++;
1764 case CHANGEOVER_PROTOCOL:
1765 type = msg_type(msg);
1766 if (link_recv_changeover_msg(&l_ptr, &buf)) {
1768 seq_no = msg_seqno(msg);
1769 if (type == ORIGINAL_MSG)
1771 goto protocol_check;
1780 tipc_node_unlock(n_ptr);
1781 tipc_net_route_msg(buf);
1784 link_handle_out_of_seq_msg(l_ptr, buf);
1785 head = link_insert_deferred_queue(l_ptr, head);
1786 tipc_node_unlock(n_ptr);
1790 if (msg_user(msg) == LINK_PROTOCOL) {
1791 link_recv_proto_msg(l_ptr, buf);
1792 head = link_insert_deferred_queue(l_ptr, head);
1793 tipc_node_unlock(n_ptr);
1796 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1798 if (link_working_working(l_ptr)) {
1799 /* Re-insert in front of queue */
1802 tipc_node_unlock(n_ptr);
1805 tipc_node_unlock(n_ptr);
1809 read_unlock_bh(&tipc_net_lock);
1813 * tipc_link_defer_pkt(): Sort a received out-of-sequence packet
1814 * into the deferred reception queue.
1815 * Returns the increase of the queue length, i.e. 0 or 1
1818 u32 tipc_link_defer_pkt(struct sk_buff **head,
1819 struct sk_buff **tail,
1820 struct sk_buff *buf)
1822 struct sk_buff *prev = NULL;
1823 struct sk_buff *crs = *head;
1824 u32 seq_no = msg_seqno(buf_msg(buf));
1829 if (*head == NULL) {
1830 *head = *tail = buf;
1835 if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
1836 (*tail)->next = buf;
1841 /* Scan through queue and sort it in */
1843 struct tipc_msg *msg = buf_msg(crs);
1845 if (less(seq_no, msg_seqno(msg))) {
1853 if (seq_no == msg_seqno(msg))
1859 /* Message is a duplicate of an existing message */
1866 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1869 static void link_handle_out_of_seq_msg(struct link *l_ptr,
1870 struct sk_buff *buf)
1872 u32 seq_no = msg_seqno(buf_msg(buf));
1874 if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1875 link_recv_proto_msg(l_ptr, buf);
1879 /* Record OOS packet arrival (force mismatch on next timeout) */
1881 l_ptr->checkpoint--;
1884 * Discard packet if a duplicate; otherwise add it to deferred queue
1885 * and notify peer of gap as per protocol specification
1888 if (less(seq_no, mod(l_ptr->next_in_no))) {
1889 l_ptr->stats.duplicates++;
1894 if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1895 &l_ptr->newest_deferred_in, buf)) {
1896 l_ptr->deferred_inqueue_sz++;
1897 l_ptr->stats.deferred_recv++;
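/* Request retransmission via a state message for the first deferred
 * packet and then after every 16 further deferred packets, presumably
 * to bound the extra protocol traffic generated.
 */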
1898 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1899 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1901 l_ptr->stats.duplicates++;
1905 * Send protocol message to the other endpoint.
1907 void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
1908 u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
1910 struct sk_buff *buf = NULL;
1911 struct tipc_msg *msg = l_ptr->pmsg;
1912 u32 msg_size = sizeof(l_ptr->proto_msg);
1915 if (link_blocked(l_ptr))
1917 msg_set_type(msg, msg_typ);
1918 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1919 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
1920 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1922 if (msg_typ == STATE_MSG) {
1923 u32 next_sent = mod(l_ptr->next_out_no);
1925 if (!tipc_link_is_up(l_ptr))
1927 if (l_ptr->next_out)
1928 next_sent = msg_seqno(buf_msg(l_ptr->next_out));
1929 msg_set_next_sent(msg, next_sent);
1930 if (l_ptr->oldest_deferred_in) {
1931 u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1932 gap = mod(rec - mod(l_ptr->next_in_no));
1934 msg_set_seq_gap(msg, gap);
1936 l_ptr->stats.sent_nacks++;
1937 msg_set_link_tolerance(msg, tolerance);
1938 msg_set_linkprio(msg, priority);
1939 msg_set_max_pkt(msg, ack_mtu);
1940 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1941 msg_set_probe(msg, probe_msg != 0);
1943 u32 mtu = l_ptr->max_pkt;
1945 if ((mtu < l_ptr->max_pkt_target) &&
1946 link_working_working(l_ptr) &&
1947 l_ptr->fsm_msg_cnt) {
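/* Probe size is roughly halfway between the current and the target max
 * packet size, rounded to a multiple of 4; after 10 probes without the
 * peer confirming a larger size, the target is lowered to just below
 * the probe size and probing restarts.
 */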
1948 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1949 if (l_ptr->max_pkt_probes == 10) {
1950 l_ptr->max_pkt_target = (msg_size - 4);
1951 l_ptr->max_pkt_probes = 0;
1952 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1954 l_ptr->max_pkt_probes++;
1957 l_ptr->stats.sent_probes++;
1959 l_ptr->stats.sent_states++;
1960 } else { /* RESET_MSG or ACTIVATE_MSG */
1961 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
1962 msg_set_seq_gap(msg, 0);
1963 msg_set_next_sent(msg, 1);
1964 msg_set_probe(msg, 0);
1965 msg_set_link_tolerance(msg, l_ptr->tolerance);
1966 msg_set_linkprio(msg, l_ptr->priority);
1967 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
1970 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
1971 msg_set_redundant_link(msg, r_flag);
1972 msg_set_linkprio(msg, l_ptr->priority);
1974 /* Ensure sequence number will not be mistaken for an in-sequence packet: */
1976 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
1980 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1981 if (!l_ptr->proto_msg_queue) {
1982 l_ptr->proto_msg_queue =
1983 tipc_buf_acquire(sizeof(l_ptr->proto_msg));
1985 buf = l_ptr->proto_msg_queue;
1988 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1992 /* Message can be sent */
1994 buf = tipc_buf_acquire(msg_size);
1998 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1999 msg_set_size(buf_msg(buf), msg_size);
2001 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
2002 l_ptr->unacked_window = 0;
2007 /* New congestion */
2008 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
2009 l_ptr->proto_msg_queue = buf;
2010 l_ptr->stats.bearer_congs++;
2014 * Receive protocol message.
2015 * Note that network plane id propagates through the network, and may
2016 * change at any time. The node with the lowest address rules.
2019 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2025 struct tipc_msg *msg = buf_msg(buf);
2027 if (link_blocked(l_ptr))
2030 /* record unnumbered packet arrival (force mismatch on next timeout) */
2032 l_ptr->checkpoint--;
2034 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
2035 if (tipc_own_addr > msg_prevnode(msg))
2036 l_ptr->b_ptr->net_plane = msg_net_plane(msg);
2038 l_ptr->owner->permit_changeover = msg_redundant_link(msg);
2040 switch (msg_type(msg)) {
2043 if (!link_working_unknown(l_ptr) &&
2044 (l_ptr->peer_session != INVALID_SESSION)) {
2045 if (msg_session(msg) == l_ptr->peer_session)
2046 break; /* duplicate: ignore */
2050 /* Update link settings according to the other endpoint's values */
2052 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
2054 msg_tol = msg_link_tolerance(msg);
2055 if (msg_tol > l_ptr->tolerance)
2056 link_set_supervision_props(l_ptr, msg_tol);
2058 if (msg_linkprio(msg) > l_ptr->priority)
2059 l_ptr->priority = msg_linkprio(msg);
2061 max_pkt_info = msg_max_pkt(msg);
2063 if (max_pkt_info < l_ptr->max_pkt_target)
2064 l_ptr->max_pkt_target = max_pkt_info;
2065 if (l_ptr->max_pkt > l_ptr->max_pkt_target)
2066 l_ptr->max_pkt = l_ptr->max_pkt_target;
2068 l_ptr->max_pkt = l_ptr->max_pkt_target;
2070 l_ptr->owner->bclink.supported = (max_pkt_info != 0);
2072 link_state_event(l_ptr, msg_type(msg));
2074 l_ptr->peer_session = msg_session(msg);
2075 l_ptr->peer_bearer_id = msg_bearer_id(msg);
2077 /* Synchronize broadcast sequence numbers */
2078 if (!tipc_node_redundant_links(l_ptr->owner))
2079 l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
2083 msg_tol = msg_link_tolerance(msg);
2085 link_set_supervision_props(l_ptr, msg_tol);
2087 if (msg_linkprio(msg) &&
2088 (msg_linkprio(msg) != l_ptr->priority)) {
2089 warn("Resetting link <%s>, priority change %u->%u\n",
2090 l_ptr->name, l_ptr->priority, msg_linkprio(msg));
2091 l_ptr->priority = msg_linkprio(msg);
2092 tipc_link_reset(l_ptr); /* Enforce change to take effect */
2095 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
2096 l_ptr->stats.recv_states++;
2097 if (link_reset_unknown(l_ptr))
2100 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
2101 rec_gap = mod(msg_next_sent(msg) -
2102 mod(l_ptr->next_in_no));
2105 max_pkt_ack = msg_max_pkt(msg);
2106 if (max_pkt_ack > l_ptr->max_pkt) {
2107 l_ptr->max_pkt = max_pkt_ack;
2108 l_ptr->max_pkt_probes = 0;
2112 if (msg_probe(msg)) {
2113 l_ptr->stats.recv_probes++;
2114 if (msg_size(msg) > sizeof(l_ptr->proto_msg))
2115 max_pkt_ack = msg_size(msg);
2118 /* Protocol message before retransmits, reduce loss risk */
2120 tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
2122 if (rec_gap || (msg_probe(msg))) {
2123 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2124 0, rec_gap, 0, 0, max_pkt_ack);
2126 if (msg_seq_gap(msg)) {
2127 l_ptr->stats.recv_nacks++;
2128 tipc_link_retransmit(l_ptr, l_ptr->first_out,
2139 * tipc_link_tunnel(): Send one message via a link belonging to
2140 * another bearer. Owner node is locked.
2142 static void tipc_link_tunnel(struct link *l_ptr,
2143 struct tipc_msg *tunnel_hdr,
2144 struct tipc_msg *msg,
2147 struct link *tunnel;
2148 struct sk_buff *buf;
2149 u32 length = msg_size(msg);
2151 tunnel = l_ptr->owner->active_links[selector & 1];
2152 if (!tipc_link_is_up(tunnel)) {
2153 warn("Link changeover error, "
2154 "tunnel link no longer available\n");
2157 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2158 buf = tipc_buf_acquire(length + INT_H_SIZE);
2160 warn("Link changeover error, "
2161 "unable to send tunnel msg\n");
2164 skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
2165 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
2166 tipc_link_send_buf(tunnel, buf);
2172 * tipc_link_changeover(): Send whole message queue via the remaining link.
2173 * Owner node is locked.
2176 void tipc_link_changeover(struct link *l_ptr)
2178 u32 msgcount = l_ptr->out_queue_size;
2179 struct sk_buff *crs = l_ptr->first_out;
2180 struct link *tunnel = l_ptr->owner->active_links[0];
2181 struct tipc_msg tunnel_hdr;
2187 if (!l_ptr->owner->permit_changeover) {
2188 warn("Link changeover error, "
2189 "peer did not permit changeover\n");
2193 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2194 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
2195 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2196 msg_set_msgcnt(&tunnel_hdr, msgcount);
2198 if (!l_ptr->first_out) {
2199 struct sk_buff *buf;
2201 buf = tipc_buf_acquire(INT_H_SIZE);
2203 skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
2204 msg_set_size(&tunnel_hdr, INT_H_SIZE);
2205 tipc_link_send_buf(tunnel, buf);
2207 warn("Link changeover error, "
2208 "unable to send changeover msg\n");
2213 split_bundles = (l_ptr->owner->active_links[0] !=
2214 l_ptr->owner->active_links[1]);
2217 struct tipc_msg *msg = buf_msg(crs);
2219 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
2220 struct tipc_msg *m = msg_get_wrapped(msg);
2221 unchar *pos = (unchar *)m;
2223 msgcount = msg_msgcnt(msg);
2224 while (msgcount--) {
2225 msg_set_seqno(m, msg_seqno(msg));
2226 tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
2227 msg_link_selector(m));
2228 pos += align(msg_size(m));
2229 m = (struct tipc_msg *)pos;
2232 tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
2233 msg_link_selector(msg));
2239 void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2241 struct sk_buff *iter;
2242 struct tipc_msg tunnel_hdr;
2244 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2245 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
2246 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2247 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2248 iter = l_ptr->first_out;
2250 struct sk_buff *outbuf;
2251 struct tipc_msg *msg = buf_msg(iter);
2252 u32 length = msg_size(msg);
2254 if (msg_user(msg) == MSG_BUNDLER)
2255 msg_set_type(msg, CLOSED_MSG);
2256 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
2257 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
2258 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2259 outbuf = tipc_buf_acquire(length + INT_H_SIZE);
2260 if (outbuf == NULL) {
2261 warn("Link changeover error, "
2262 "unable to send duplicate msg\n");
2265 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
2266 skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
2268 tipc_link_send_buf(tunnel, outbuf);
2269 if (!tipc_link_is_up(l_ptr))
2278 * buf_extract - extracts embedded TIPC message from another message
2279 * @skb: encapsulating message buffer
2280 * @from_pos: offset to extract from
2282 * Returns a new message buffer containing an embedded message. The
2283 * encapsulating message itself is left unchanged.
2286 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2288 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2289 u32 size = msg_size(msg);
2292 eb = tipc_buf_acquire(size);
2294 skb_copy_to_linear_data(eb, msg, size);
2299 * link_recv_changeover_msg(): Receive tunneled packet sent
2300 * via other link. Node is locked. Return extracted buffer.
2303 static int link_recv_changeover_msg(struct link **l_ptr,
2304 struct sk_buff **buf)
2306 struct sk_buff *tunnel_buf = *buf;
2307 struct link *dest_link;
2308 struct tipc_msg *msg;
2309 struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
2310 u32 msg_typ = msg_type(tunnel_msg);
2311 u32 msg_count = msg_msgcnt(tunnel_msg);
2313 dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
2316 if (dest_link == *l_ptr) {
2317 err("Unexpected changeover message on link <%s>\n",
2322 msg = msg_get_wrapped(tunnel_msg);
2324 if (msg_typ == DUPLICATE_MSG) {
2325 if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
2327 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2329 warn("Link changeover error, duplicate msg dropped\n");
2332 buf_discard(tunnel_buf);
2336 /* First original message ?: */
2338 if (tipc_link_is_up(dest_link)) {
2339 info("Resetting link <%s>, changeover initiated by peer\n",
2341 tipc_link_reset(dest_link);
2342 dest_link->exp_msg_count = msg_count;
2345 } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2346 dest_link->exp_msg_count = msg_count;
2351 /* Receive original message */
2353 if (dest_link->exp_msg_count == 0) {
2354 warn("Link changeover error, "
2355 "got too many tunnelled messages\n");
2358 dest_link->exp_msg_count--;
2359 if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
2362 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2364 buf_discard(tunnel_buf);
2367 warn("Link changeover error, original msg dropped\n");
2372 buf_discard(tunnel_buf);
2377 * Bundler functionality:
2379 void tipc_link_recv_bundle(struct sk_buff *buf)
2381 u32 msgcount = msg_msgcnt(buf_msg(buf));
2382 u32 pos = INT_H_SIZE;
2383 struct sk_buff *obuf;
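/* Bundled messages follow the bundle header back to back, each padded
 * to a 4-byte boundary, so 'pos' steps from one embedded message header
 * to the next.
 */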
2385 while (msgcount--) {
2386 obuf = buf_extract(buf, pos);
2388 warn("Link unable to unbundle message(s)\n");
2391 pos += align(msg_size(buf_msg(obuf)));
2392 tipc_net_route_msg(obuf);
2398 * Fragmentation/defragmentation:
2403 * link_send_long_buf: Entry for buffers needing fragmentation.
2404 * The buffer is complete, including the total message length.
2405 * Returns user data length.
2407 static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2409 struct sk_buff *buf_chain = NULL;
2410 struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
2411 struct tipc_msg *inmsg = buf_msg(buf);
2412 struct tipc_msg fragm_hdr;
2413 u32 insize = msg_size(inmsg);
2414 u32 dsz = msg_data_sz(inmsg);
2415 unchar *crs = buf->data;
2417 u32 pack_sz = l_ptr->max_pkt;
2418 u32 fragm_sz = pack_sz - INT_H_SIZE;
2422 if (msg_short(inmsg))
2423 destaddr = l_ptr->addr;
2425 destaddr = msg_destnode(inmsg);
2427 /* Prepare reusable fragment header: */
2429 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2430 INT_H_SIZE, destaddr);
2432 /* Chop up message: */
2435 struct sk_buff *fragm;
2437 if (rest <= fragm_sz) {
2439 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2441 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
2442 if (fragm == NULL) {
2446 buf_chain = buf_chain->next;
2451 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2453 msg_set_fragm_no(&fragm_hdr, fragm_no);
2454 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
2455 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
2457 buf_chain_tail->next = fragm;
2458 buf_chain_tail = fragm;
2462 msg_set_type(&fragm_hdr, FRAGMENT);
2466 /* Append chain of fragments to send queue & send them */
2468 l_ptr->long_msg_seq_no++;
2469 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2470 l_ptr->stats.sent_fragments += fragm_no;
2471 l_ptr->stats.sent_fragmented++;
2472 tipc_link_push_queue(l_ptr);
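/*
 * Editor's illustrative sketch, not part of link.c: link_send_long_buf()
 * above reuses one fragment header, bumping the fragment number for each
 * chunk and retyping the final chunk as LAST_FRAGMENT.  The hypothetical
 * demo_fragment() below shows that pattern for a plain byte buffer, with a
 * caller-supplied send callback standing in for buffer allocation and
 * tipc_link_send_buf().
 */
#include <stddef.h>
#include <stdint.h>

/* Hypothetical fragment header, standing in for the INT_H_SIZE TIPC header. */
struct demo_fragm_hdr {
	uint32_t long_msg_no;	/* identifies the original message */
	uint32_t fragm_no;	/* 1-based fragment number */
	uint8_t  last;		/* non-zero for the final fragment */
};

/* Chop 'len' bytes into fragments of at most 'fragm_sz' payload bytes each,
 * handing every (header, payload) pair to 'send'.  Returns the fragment count.
 */
static unsigned int demo_fragment(const uint8_t *data, size_t len, size_t fragm_sz,
				  uint32_t long_msg_no,
				  int (*send)(const struct demo_fragm_hdr *hdr,
					      const uint8_t *payload, size_t payload_len))
{
	struct demo_fragm_hdr hdr = { .long_msg_no = long_msg_no, .fragm_no = 0, .last = 0 };
	size_t rest = len;

	while (rest) {
		size_t chunk = rest < fragm_sz ? rest : fragm_sz;

		hdr.fragm_no++;
		hdr.last = (chunk == rest);	/* final chunk, like LAST_FRAGMENT */
		if (send(&hdr, data, chunk))
			return hdr.fragm_no;	/* give up on allocation/send failure */
		data += chunk;
		rest -= chunk;
	}
	return hdr.fragm_no;
}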
2478 * A pending message being re-assembled must store certain values
2479 * to handle subsequent fragments correctly. The following functions
2480 * help store these values in unused, available fields in the
2481 * pending message. This makes dynamic memory allocation unnecessary.
2484 static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2486 msg_set_seqno(buf_msg(buf), seqno);
2489 static u32 get_fragm_size(struct sk_buff *buf)
2491 return msg_ack(buf_msg(buf));
2494 static void set_fragm_size(struct sk_buff *buf, u32 sz)
2496 msg_set_ack(buf_msg(buf), sz);
2499 static u32 get_expected_frags(struct sk_buff *buf)
2501 return msg_bcast_ack(buf_msg(buf));
2504 static void set_expected_frags(struct sk_buff *buf, u32 exp)
2506 msg_set_bcast_ack(buf_msg(buf), exp);
2509 static u32 get_timer_cnt(struct sk_buff *buf)
2511 return msg_reroute_cnt(buf_msg(buf));
2514 static void incr_timer_cnt(struct sk_buff *buf)
2516 msg_incr_reroute_cnt(buf_msg(buf));
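/*
 * Editor's illustrative sketch, not part of link.c: the helpers above park
 * reassembly bookkeeping (long message number, fragment size, fragments
 * still expected, timer ticks) in header fields that carry no meaning while
 * the message is still incomplete, so no extra allocation is needed.  The
 * hypothetical demo_pending structure below restates that idea in plain C.
 */
#include <stdint.h>

struct demo_pending {
	uint32_t seqno;		/* reused: long message sequence number */
	uint32_t ack;		/* reused: fragment size */
	uint32_t bcast_ack;	/* reused: fragments still expected */
	uint32_t reroute_cnt;	/* reused: timer ticks seen while incomplete */
};

static void demo_note_first_fragment(struct demo_pending *p, uint32_t long_msg_no,
				     uint32_t fragm_sz, uint32_t total_fragms)
{
	p->seqno = long_msg_no;
	p->ack = fragm_sz;
	p->bcast_ack = total_fragms - 1;	/* first fragment already copied in */
	p->reroute_cnt = 0;
}

/* Returns non-zero once the final fragment has been accounted for. */
static int demo_note_next_fragment(struct demo_pending *p)
{
	return --p->bcast_ack == 0;
}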
2520 * tipc_link_recv_fragment(): Called with node lock on. Returns
2521 * the reassembled buffer if message is complete.
2523 int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2524 struct tipc_msg **m)
2526 struct sk_buff *prev = NULL;
2527 struct sk_buff *fbuf = *fb;
2528 struct tipc_msg *fragm = buf_msg(fbuf);
2529 struct sk_buff *pbuf = *pending;
2530 u32 long_msg_seq_no = msg_long_msgno(fragm);
2534 /* Is there an incomplete message waiting for this fragment? */
2536 while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) ||
2537 (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2542 if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
2543 struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
2544 u32 msg_sz = msg_size(imsg);
2545 u32 fragm_sz = msg_data_sz(fragm);
2546 u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
2547 u32 max = TIPC_MAX_USER_MSG_SIZE + LONG_H_SIZE;
2548 if (msg_type(imsg) == TIPC_MCAST_MSG)
2549 max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2550 if (msg_size(imsg) > max) {
2554 pbuf = tipc_buf_acquire(msg_size(imsg));
2556 pbuf->next = *pending;
2558 skb_copy_to_linear_data(pbuf, imsg,
2559 msg_data_sz(fragm));
2560 /* Prepare buffer for subsequent fragments. */
2562 set_long_msg_seqno(pbuf, long_msg_seq_no);
2563 set_fragm_size(pbuf, fragm_sz);
2564 set_expected_frags(pbuf, exp_fragm_cnt - 1);
2566 warn("Link unable to reassemble fragmented message\n");
2570 } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2571 u32 dsz = msg_data_sz(fragm);
2572 u32 fsz = get_fragm_size(pbuf);
2573 u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
2574 u32 exp_frags = get_expected_frags(pbuf) - 1;
2575 skb_copy_to_linear_data_offset(pbuf, crs,
2576 msg_data(fragm), dsz);
2579 /* Is message complete? */
2581 if (exp_frags == 0) {
2583 prev->next = pbuf->next;
2585 *pending = pbuf->next;
2586 msg_reset_reroute_cnt(buf_msg(pbuf));
2591 set_expected_frags(pbuf, exp_frags);
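/*
 * Editor's illustrative sketch, not part of link.c: the expected fragment
 * count and per-fragment offset used by tipc_link_recv_fragment() above are
 * simple arithmetic; the hypothetical helpers below repeat the formulas with
 * worked values (the assert() calls only document the expected results).
 */
#include <assert.h>
#include <stdint.h>

/* Ceiling of msg_sz / fragm_sz, written as in the function above. */
static uint32_t demo_exp_fragm_cnt(uint32_t msg_sz, uint32_t fragm_sz)
{
	return msg_sz / fragm_sz + !!(msg_sz % fragm_sz);
}

/* Payload of fragment N (1-based) lands at offset (N - 1) * fragm_sz of the
 * reassembly buffer, matching the 'crs' computation above.
 */
static uint32_t demo_fragm_offset(uint32_t fragm_no, uint32_t fragm_sz)
{
	return (fragm_no - 1) * fragm_sz;
}

static void demo_reassembly_check(void)
{
	assert(demo_exp_fragm_cnt(4000, 1460) == 3);	/* 2 full + 1 partial */
	assert(demo_exp_fragm_cnt(2920, 1460) == 2);	/* exact multiple */
	assert(demo_fragm_offset(3, 1460) == 2920);
}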
2599 * link_check_defragm_bufs - flush stale incoming message fragments
2600 * @l_ptr: pointer to link
2603 static void link_check_defragm_bufs(struct link *l_ptr)
2605 struct sk_buff *prev = NULL;
2606 struct sk_buff *next = NULL;
2607 struct sk_buff *buf = l_ptr->defragm_buf;
2611 if (!link_working_working(l_ptr))
2614 u32 cnt = get_timer_cnt(buf);
2618 incr_timer_cnt(buf);
2622 prev->next = buf->next;
2624 l_ptr->defragm_buf = buf->next;
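/*
 * Editor's illustrative sketch, not part of link.c: link_check_defragm_bufs()
 * ages each pending reassembly buffer once per timer tick and unlinks the
 * stale ones from a singly linked list using a trailing 'prev' pointer.  The
 * hypothetical demo_expire() below shows the same unlink bookkeeping; the
 * exact ageing threshold used by the real code is not reproduced here.
 */
#include <stddef.h>

struct demo_node {
	struct demo_node *next;
	unsigned int timer_cnt;
};

static void demo_expire(struct demo_node **head, unsigned int limit,
			void (*discard)(struct demo_node *stale))
{
	struct demo_node *prev = NULL;
	struct demo_node *buf = *head;

	while (buf) {
		struct demo_node *next = buf->next;

		if (++buf->timer_cnt >= limit) {
			if (prev)
				prev->next = next;	/* unlink mid-list entry */
			else
				*head = next;		/* unlink the list head */
			discard(buf);
		} else {
			prev = buf;			/* entry survives this tick */
		}
		buf = next;
	}
}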
2633 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
2635 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
2638 l_ptr->tolerance = tolerance;
2639 l_ptr->continuity_interval =
2640 ((tolerance / 4) > 500) ? 500 : tolerance / 4;
2641 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
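/*
 * Editor's illustrative sketch, not part of link.c: link_set_supervision_props()
 * derives the continuity interval (tolerance / 4, clamped to 500 ms) and the
 * abort limit from the configured tolerance.  The hypothetical helper below
 * repeats the arithmetic with worked values.
 */
#include <assert.h>

struct demo_supervision {
	unsigned int continuity_interval;	/* ms */
	unsigned int abort_limit;		/* continuity-timer expirations */
};

static struct demo_supervision demo_supervision_props(unsigned int tolerance)
{
	struct demo_supervision s;

	s.continuity_interval = (tolerance / 4 > 500) ? 500 : tolerance / 4;
	s.abort_limit = tolerance / (s.continuity_interval / 4);
	return s;
}

static void demo_supervision_check(void)
{
	/* 1500 ms tolerance: 375 ms continuity interval, abort limit 16 */
	assert(demo_supervision_props(1500).continuity_interval == 375);
	assert(demo_supervision_props(1500).abort_limit == 16);
	/* large tolerances are clamped to a 500 ms continuity interval */
	assert(demo_supervision_props(3000).continuity_interval == 500);
	assert(demo_supervision_props(3000).abort_limit == 24);
}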
2645 void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
2647 /* Data messages from this node, including FIRST_FRAGM */
2648 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
2649 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
2650 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
2651 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
2652 /* Transiting data messages, including FIRST_FRAGM */
2653 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
2654 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
2655 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
2656 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
2657 l_ptr->queue_limit[CONN_MANAGER] = 1200;
2658 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2659 l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2660 /* FRAGMENT and LAST_FRAGMENT packets */
2661 l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
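/*
 * Editor's illustrative sketch, not part of link.c: the outbound queue limits
 * above scale with the congestion window, giving each higher importance level
 * roughly one extra third of the base window.  The hypothetical helper below
 * repeats the calculation for the four importance levels.
 */
#include <assert.h>

static void demo_queue_limits(unsigned int window, unsigned int limit[4])
{
	limit[0] = window;		/* TIPC_LOW_IMPORTANCE */
	limit[1] = (window / 3) * 4;	/* TIPC_MEDIUM_IMPORTANCE */
	limit[2] = (window / 3) * 5;	/* TIPC_HIGH_IMPORTANCE */
	limit[3] = (window / 3) * 6;	/* TIPC_CRITICAL_IMPORTANCE */
}

static void demo_queue_limits_check(void)
{
	unsigned int limit[4];

	demo_queue_limits(50, limit);	/* e.g. a 50-packet window */
	assert(limit[0] == 50);
	assert(limit[1] == 64 && limit[2] == 80 && limit[3] == 96);
}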
2665 * link_find_link - locate link by name
2666 * @name: ptr to link name string
2667 * @node: ptr to area to be filled with ptr to associated node
2669 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2670 * this also prevents link deletion.
2672 * Returns pointer to link (or NULL if invalid link name).
2675 static struct link *link_find_link(const char *name, struct tipc_node **node)
2677 struct link_name link_name_parts;
2678 struct tipc_bearer *b_ptr;
2681 if (!link_name_validate(name, &link_name_parts))
2684 b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
2688 *node = tipc_node_find(link_name_parts.addr_peer);
2692 l_ptr = (*node)->links[b_ptr->identity];
2693 if (!l_ptr || strcmp(l_ptr->name, name))
2699 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2702 struct tipc_link_config *args;
2705 struct tipc_node *node;
2708 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2709 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2711 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2712 new_value = ntohl(args->value);
2714 if (!strcmp(args->name, tipc_bclink_name)) {
2715 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2716 (tipc_bclink_set_queue_limits(new_value) == 0))
2717 return tipc_cfg_reply_none();
2718 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2719 " (cannot change setting on broadcast link)");
2722 read_lock_bh(&tipc_net_lock);
2723 l_ptr = link_find_link(args->name, &node);
2725 read_unlock_bh(&tipc_net_lock);
2726 return tipc_cfg_reply_error_string("link not found");
2729 tipc_node_lock(node);
2732 case TIPC_CMD_SET_LINK_TOL:
2733 if ((new_value >= TIPC_MIN_LINK_TOL) &&
2734 (new_value <= TIPC_MAX_LINK_TOL)) {
2735 link_set_supervision_props(l_ptr, new_value);
2736 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2737 0, 0, new_value, 0, 0);
2741 case TIPC_CMD_SET_LINK_PRI:
2742 if ((new_value >= TIPC_MIN_LINK_PRI) &&
2743 (new_value <= TIPC_MAX_LINK_PRI)) {
2744 l_ptr->priority = new_value;
2745 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2746 0, 0, 0, new_value, 0);
2750 case TIPC_CMD_SET_LINK_WINDOW:
2751 if ((new_value >= TIPC_MIN_LINK_WIN) &&
2752 (new_value <= TIPC_MAX_LINK_WIN)) {
2753 tipc_link_set_queue_limits(l_ptr, new_value);
2758 tipc_node_unlock(node);
2760 read_unlock_bh(&tipc_net_lock);
2762 return tipc_cfg_reply_error_string("cannot change link setting");
2764 return tipc_cfg_reply_none();
2768 * link_reset_statistics - reset link statistics
2769 * @l_ptr: pointer to link
2772 static void link_reset_statistics(struct link *l_ptr)
2774 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
2775 l_ptr->stats.sent_info = l_ptr->next_out_no;
2776 l_ptr->stats.recv_info = l_ptr->next_in_no;
2779 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2783 struct tipc_node *node;
2785 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2786 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2788 link_name = (char *)TLV_DATA(req_tlv_area);
2789 if (!strcmp(link_name, tipc_bclink_name)) {
2790 if (tipc_bclink_reset_stats())
2791 return tipc_cfg_reply_error_string("link not found");
2792 return tipc_cfg_reply_none();
2795 read_lock_bh(&tipc_net_lock);
2796 l_ptr = link_find_link(link_name, &node);
2798 read_unlock_bh(&tipc_net_lock);
2799 return tipc_cfg_reply_error_string("link not found");
2802 tipc_node_lock(node);
2803 link_reset_statistics(l_ptr);
2804 tipc_node_unlock(node);
2805 read_unlock_bh(&tipc_net_lock);
2806 return tipc_cfg_reply_none();
2810 * percent - convert count to a percentage of total (rounded to the nearest whole percent)
2813 static u32 percent(u32 count, u32 total)
2815 return (count * 100 + (total / 2)) / total;
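/*
 * Editor's illustrative sketch, not part of link.c: percent() rounds to the
 * nearest whole percent by adding half of 'total' before dividing.  The
 * hypothetical copy below documents the rounding with a few worked values.
 */
#include <assert.h>

static unsigned int demo_percent(unsigned int count, unsigned int total)
{
	return (count * 100 + total / 2) / total;
}

static void demo_percent_check(void)
{
	assert(demo_percent(1, 3) == 33);	/* 33.3% rounds down */
	assert(demo_percent(2, 3) == 67);	/* 66.7% rounds up */
	assert(demo_percent(0, 7) == 0);
}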
2819 * tipc_link_stats - print link statistics
2820 * @name: name of link to print statistics for
2821 * @buf: print buffer area
2822 * @buf_size: size of print buffer area
2824 * Returns length of print buffer data string (or 0 if error)
2827 static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2829 struct print_buf pb;
2831 struct tipc_node *node;
2833 u32 profile_total = 0;
2835 if (!strcmp(name, tipc_bclink_name))
2836 return tipc_bclink_stats(buf, buf_size);
2838 tipc_printbuf_init(&pb, buf, buf_size);
2840 read_lock_bh(&tipc_net_lock);
2841 l_ptr = link_find_link(name, &node);
2843 read_unlock_bh(&tipc_net_lock);
2846 tipc_node_lock(node);
2848 if (tipc_link_is_active(l_ptr))
2850 else if (tipc_link_is_up(l_ptr))
2854 tipc_printf(&pb, "Link <%s>\n"
2855 " %s MTU:%u Priority:%u Tolerance:%u ms"
2856 " Window:%u packets\n",
2857 l_ptr->name, status, l_ptr->max_pkt,
2858 l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
2859 tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
2860 l_ptr->next_in_no - l_ptr->stats.recv_info,
2861 l_ptr->stats.recv_fragments,
2862 l_ptr->stats.recv_fragmented,
2863 l_ptr->stats.recv_bundles,
2864 l_ptr->stats.recv_bundled);
2865 tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
2866 l_ptr->next_out_no - l_ptr->stats.sent_info,
2867 l_ptr->stats.sent_fragments,
2868 l_ptr->stats.sent_fragmented,
2869 l_ptr->stats.sent_bundles,
2870 l_ptr->stats.sent_bundled);
2871 profile_total = l_ptr->stats.msg_length_counts;
2872 if (!profile_total)
2873 profile_total = 1;
2874 tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n"
2875 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
2876 "-16354:%u%% -32768:%u%% -66000:%u%%\n",
2877 l_ptr->stats.msg_length_counts,
2878 l_ptr->stats.msg_lengths_total / profile_total,
2879 percent(l_ptr->stats.msg_length_profile[0], profile_total),
2880 percent(l_ptr->stats.msg_length_profile[1], profile_total),
2881 percent(l_ptr->stats.msg_length_profile[2], profile_total),
2882 percent(l_ptr->stats.msg_length_profile[3], profile_total),
2883 percent(l_ptr->stats.msg_length_profile[4], profile_total),
2884 percent(l_ptr->stats.msg_length_profile[5], profile_total),
2885 percent(l_ptr->stats.msg_length_profile[6], profile_total));
2886 tipc_printf(&pb, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
2887 l_ptr->stats.recv_states,
2888 l_ptr->stats.recv_probes,
2889 l_ptr->stats.recv_nacks,
2890 l_ptr->stats.deferred_recv,
2891 l_ptr->stats.duplicates);
2892 tipc_printf(&pb, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
2893 l_ptr->stats.sent_states,
2894 l_ptr->stats.sent_probes,
2895 l_ptr->stats.sent_nacks,
2896 l_ptr->stats.sent_acks,
2897 l_ptr->stats.retransmitted);
2898 tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
2899 l_ptr->stats.bearer_congs,
2900 l_ptr->stats.link_congs,
2901 l_ptr->stats.max_queue_sz,
2902 l_ptr->stats.queue_sz_counts
2903 ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
2906 tipc_node_unlock(node);
2907 read_unlock_bh(&tipc_net_lock);
2908 return tipc_printbuf_validate(&pb);
2911 #define MAX_LINK_STATS_INFO 2000
2913 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2915 struct sk_buff *buf;
2916 struct tlv_desc *rep_tlv;
2919 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2920 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2922 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
2926 rep_tlv = (struct tlv_desc *)buf->data;
2928 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
2929 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
2932 return tipc_cfg_reply_error_string("link not found");
2935 skb_put(buf, TLV_SPACE(str_len));
2936 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2942 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
2943 * @dest: network address of destination node
2944 * @selector: used to select from set of active links
2946 * If no active link can be found, uses default maximum packet size.
2949 u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
2951 struct tipc_node *n_ptr;
2953 u32 res = MAX_PKT_DEFAULT;
2955 if (dest == tipc_own_addr)
2956 return MAX_MSG_SIZE;
2958 read_lock_bh(&tipc_net_lock);
2959 n_ptr = tipc_node_find(dest);
2961 tipc_node_lock(n_ptr);
2962 l_ptr = n_ptr->active_links[selector & 1];
2964 res = l_ptr->max_pkt;
2965 tipc_node_unlock(n_ptr);
2967 read_unlock_bh(&tipc_net_lock);
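/*
 * Editor's illustrative sketch, not part of link.c: tipc_link_get_max_pkt()
 * uses 'selector & 1' to pick one of at most two active links and falls back
 * to a default packet size when none is found.  The hypothetical helper below
 * shows that selection on a plain two-entry array, without the node lookup
 * and locking done above.
 */
struct demo_link {
	unsigned int max_pkt;
};

static unsigned int demo_max_pkt(struct demo_link *active_links[2],
				 unsigned int selector, unsigned int dflt)
{
	struct demo_link *l = active_links[selector & 1];

	return l ? l->max_pkt : dflt;
}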
2971 static void link_print(struct link *l_ptr, const char *str)
2973 char print_area[256];
2974 struct print_buf pb;
2975 struct print_buf *buf = &pb;
2977 tipc_printbuf_init(buf, print_area, sizeof(print_area));
2979 tipc_printf(buf, "%s", str);
2980 tipc_printf(buf, "Link %x<%s>:",
2981 l_ptr->addr, l_ptr->b_ptr->name);
2983 #ifdef CONFIG_TIPC_DEBUG
2984 if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
2987 tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
2988 tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
2989 tipc_printf(buf, "SQUE");
2990 if (l_ptr->first_out) {
2991 tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
2992 if (l_ptr->next_out)
2993 tipc_printf(buf, "%u..",
2994 msg_seqno(buf_msg(l_ptr->next_out)));
2995 tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out)));
2996 if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
2997 msg_seqno(buf_msg(l_ptr->first_out)))
2998 != (l_ptr->out_queue_size - 1)) ||
2999 (l_ptr->last_out->next != NULL)) {
3000 tipc_printf(buf, "\nSend queue inconsistency\n");
3001 tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
3002 tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
3003 tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
3006 tipc_printf(buf, "[]");
3007 tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
3008 if (l_ptr->oldest_deferred_in) {
3009 u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
3010 u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
3011 tipc_printf(buf, ":RQUE[%u..%u]", o, n);
3012 if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
3013 tipc_printf(buf, ":RQSIZ(%u)",
3014 l_ptr->deferred_inqueue_sz);
3020 if (link_working_unknown(l_ptr))
3021 tipc_printf(buf, ":WU");
3022 else if (link_reset_reset(l_ptr))
3023 tipc_printf(buf, ":RR");
3024 else if (link_reset_unknown(l_ptr))
3025 tipc_printf(buf, ":RU");
3026 else if (link_working_working(l_ptr))
3027 tipc_printf(buf, ":WW");
3028 tipc_printf(buf, "\n");
3030 tipc_printbuf_validate(buf);
3031 info("%s", print_area);