2 * net/tipc/link.c: TIPC link code
4 * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
40 #include "name_distr.h"
44 #include <linux/pkt_sched.h>
47 * Error message prefixes
49 static const char *link_co_err = "Link changeover error, ";
50 static const char *link_rst_msg = "Resetting link ";
51 static const char *link_unk_evt = "Unknown link event ";
54 * Out-of-range value for link session numbers
56 #define INVALID_SESSION 0x10000
61 #define STARTING_EVT 856384768 /* link processing trigger */
62 #define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
63 #define TIMEOUT_EVT 560817u /* link timer expired */
66 * The following two 'message types' is really just implementation
67 * data conveniently stored in the message header.
68 * They must not be considered part of the protocol
74 * State value stored in 'exp_msg_count'
76 #define START_CHANGEOVER 100000u
78 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
80 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
81 static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
82 struct sk_buff **buf);
83 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
84 static int link_send_sections_long(struct tipc_port *sender,
85 struct iovec const *msg_sect,
86 unsigned int len, u32 destnode);
87 static void link_state_event(struct tipc_link *l_ptr, u32 event);
88 static void link_reset_statistics(struct tipc_link *l_ptr);
89 static void link_print(struct tipc_link *l_ptr, const char *str);
90 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
91 static void tipc_link_send_sync(struct tipc_link *l);
92 static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf);
95 * Simple link routines
97 static unsigned int align(unsigned int i)
102 static void link_init_max_pkt(struct tipc_link *l_ptr)
106 max_pkt = (l_ptr->b_ptr->mtu & ~3);
107 if (max_pkt > MAX_MSG_SIZE)
108 max_pkt = MAX_MSG_SIZE;
110 l_ptr->max_pkt_target = max_pkt;
111 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
112 l_ptr->max_pkt = l_ptr->max_pkt_target;
114 l_ptr->max_pkt = MAX_PKT_DEFAULT;
116 l_ptr->max_pkt_probes = 0;
119 static u32 link_next_sent(struct tipc_link *l_ptr)
122 return buf_seqno(l_ptr->next_out);
123 return mod(l_ptr->next_out_no);
126 static u32 link_last_sent(struct tipc_link *l_ptr)
128 return mod(link_next_sent(l_ptr) - 1);
132 * Simple non-static link routines (i.e. referenced outside this file)
134 int tipc_link_is_up(struct tipc_link *l_ptr)
138 return link_working_working(l_ptr) || link_working_unknown(l_ptr);
141 int tipc_link_is_active(struct tipc_link *l_ptr)
143 return (l_ptr->owner->active_links[0] == l_ptr) ||
144 (l_ptr->owner->active_links[1] == l_ptr);
148 * link_timeout - handle expiration of link timer
149 * @l_ptr: pointer to link
151 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
152 * with tipc_link_delete(). (There is no risk that the node will be deleted by
153 * another thread because tipc_link_delete() always cancels the link timer before
154 * tipc_node_delete() is called.)
156 static void link_timeout(struct tipc_link *l_ptr)
158 tipc_node_lock(l_ptr->owner);
160 /* update counters used in statistical profiling of send traffic */
161 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
162 l_ptr->stats.queue_sz_counts++;
164 if (l_ptr->first_out) {
165 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
166 u32 length = msg_size(msg);
168 if ((msg_user(msg) == MSG_FRAGMENTER) &&
169 (msg_type(msg) == FIRST_FRAGMENT)) {
170 length = msg_size(msg_get_wrapped(msg));
173 l_ptr->stats.msg_lengths_total += length;
174 l_ptr->stats.msg_length_counts++;
176 l_ptr->stats.msg_length_profile[0]++;
177 else if (length <= 256)
178 l_ptr->stats.msg_length_profile[1]++;
179 else if (length <= 1024)
180 l_ptr->stats.msg_length_profile[2]++;
181 else if (length <= 4096)
182 l_ptr->stats.msg_length_profile[3]++;
183 else if (length <= 16384)
184 l_ptr->stats.msg_length_profile[4]++;
185 else if (length <= 32768)
186 l_ptr->stats.msg_length_profile[5]++;
188 l_ptr->stats.msg_length_profile[6]++;
192 /* do all other link processing performed on a periodic basis */
194 link_state_event(l_ptr, TIMEOUT_EVT);
197 tipc_link_push_queue(l_ptr);
199 tipc_node_unlock(l_ptr->owner);
202 static void link_set_timer(struct tipc_link *l_ptr, u32 time)
204 k_start_timer(&l_ptr->timer, time);
208 * tipc_link_create - create a new link
209 * @n_ptr: pointer to associated node
210 * @b_ptr: pointer to associated bearer
211 * @media_addr: media address to use when sending messages over link
213 * Returns pointer to link.
215 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
216 struct tipc_bearer *b_ptr,
217 const struct tipc_media_addr *media_addr)
219 struct tipc_link *l_ptr;
220 struct tipc_msg *msg;
222 char addr_string[16];
223 u32 peer = n_ptr->addr;
225 if (n_ptr->link_cnt >= 2) {
226 tipc_addr_string_fill(addr_string, n_ptr->addr);
227 pr_err("Attempt to establish third link to %s\n", addr_string);
231 if (n_ptr->links[b_ptr->identity]) {
232 tipc_addr_string_fill(addr_string, n_ptr->addr);
233 pr_err("Attempt to establish second link on <%s> to %s\n",
234 b_ptr->name, addr_string);
238 l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
240 pr_warn("Link creation failed, no memory\n");
245 if_name = strchr(b_ptr->name, ':') + 1;
246 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
247 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
248 tipc_node(tipc_own_addr),
250 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
251 /* note: peer i/f name is updated by reset/activate message */
252 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
253 l_ptr->owner = n_ptr;
254 l_ptr->checkpoint = 1;
255 l_ptr->peer_session = INVALID_SESSION;
256 l_ptr->b_ptr = b_ptr;
257 link_set_supervision_props(l_ptr, b_ptr->tolerance);
258 l_ptr->state = RESET_UNKNOWN;
260 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
262 tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
263 msg_set_size(msg, sizeof(l_ptr->proto_msg));
264 msg_set_session(msg, (tipc_random & 0xffff));
265 msg_set_bearer_id(msg, b_ptr->identity);
266 strcpy((char *)msg_data(msg), if_name);
268 l_ptr->priority = b_ptr->priority;
269 tipc_link_set_queue_limits(l_ptr, b_ptr->window);
271 link_init_max_pkt(l_ptr);
273 l_ptr->next_out_no = 1;
274 INIT_LIST_HEAD(&l_ptr->waiting_ports);
276 link_reset_statistics(l_ptr);
278 tipc_node_attach_link(n_ptr, l_ptr);
280 k_init_timer(&l_ptr->timer, (Handler)link_timeout,
281 (unsigned long)l_ptr);
282 list_add_tail(&l_ptr->link_list, &b_ptr->links);
284 link_state_event(l_ptr, STARTING_EVT);
290 * tipc_link_delete - delete a link
291 * @l_ptr: pointer to link
293 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
294 * This routine must not grab the node lock until after link timer cancellation
295 * to avoid a potential deadlock situation.
297 void tipc_link_delete(struct tipc_link *l_ptr)
300 pr_err("Attempt to delete non-existent link\n");
304 k_cancel_timer(&l_ptr->timer);
306 tipc_node_lock(l_ptr->owner);
307 tipc_link_reset(l_ptr);
308 tipc_node_detach_link(l_ptr->owner, l_ptr);
309 tipc_link_purge_queues(l_ptr);
310 list_del_init(&l_ptr->link_list);
311 tipc_node_unlock(l_ptr->owner);
312 k_term_timer(&l_ptr->timer);
318 * link_schedule_port - schedule port for deferred sending
319 * @l_ptr: pointer to link
320 * @origport: reference to sending port
321 * @sz: amount of data to be sent
323 * Schedules port for renewed sending of messages after link congestion
326 static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
328 struct tipc_port *p_ptr;
330 spin_lock_bh(&tipc_port_list_lock);
331 p_ptr = tipc_port_lock(origport);
335 if (!list_empty(&p_ptr->wait_list))
337 p_ptr->congested = 1;
338 p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
339 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
340 l_ptr->stats.link_congs++;
342 tipc_port_unlock(p_ptr);
344 spin_unlock_bh(&tipc_port_list_lock);
348 void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
350 struct tipc_port *p_ptr;
351 struct tipc_port *temp_p_ptr;
352 int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
358 if (!spin_trylock_bh(&tipc_port_list_lock))
360 if (link_congested(l_ptr))
362 list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
366 list_del_init(&p_ptr->wait_list);
367 spin_lock_bh(p_ptr->lock);
368 p_ptr->congested = 0;
369 p_ptr->wakeup(p_ptr);
370 win -= p_ptr->waiting_pkts;
371 spin_unlock_bh(p_ptr->lock);
375 spin_unlock_bh(&tipc_port_list_lock);
379 * link_release_outqueue - purge link's outbound message queue
380 * @l_ptr: pointer to link
382 static void link_release_outqueue(struct tipc_link *l_ptr)
384 kfree_skb_list(l_ptr->first_out);
385 l_ptr->first_out = NULL;
386 l_ptr->out_queue_size = 0;
390 * tipc_link_reset_fragments - purge link's inbound message fragments queue
391 * @l_ptr: pointer to link
393 void tipc_link_reset_fragments(struct tipc_link *l_ptr)
395 kfree_skb(l_ptr->reasm_head);
396 l_ptr->reasm_head = NULL;
397 l_ptr->reasm_tail = NULL;
401 * tipc_link_purge_queues - purge all pkt queues associated with link
402 * @l_ptr: pointer to link
404 void tipc_link_purge_queues(struct tipc_link *l_ptr)
406 kfree_skb_list(l_ptr->oldest_deferred_in);
407 kfree_skb_list(l_ptr->first_out);
408 tipc_link_reset_fragments(l_ptr);
409 kfree_skb(l_ptr->proto_msg_queue);
410 l_ptr->proto_msg_queue = NULL;
413 void tipc_link_reset(struct tipc_link *l_ptr)
415 u32 prev_state = l_ptr->state;
416 u32 checkpoint = l_ptr->next_in_no;
417 int was_active_link = tipc_link_is_active(l_ptr);
419 msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
421 /* Link is down, accept any session */
422 l_ptr->peer_session = INVALID_SESSION;
424 /* Prepare for max packet size negotiation */
425 link_init_max_pkt(l_ptr);
427 l_ptr->state = RESET_UNKNOWN;
429 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
432 tipc_node_link_down(l_ptr->owner, l_ptr);
433 tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
435 if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
436 l_ptr->reset_checkpoint = checkpoint;
437 l_ptr->exp_msg_count = START_CHANGEOVER;
440 /* Clean up all queues: */
441 link_release_outqueue(l_ptr);
442 kfree_skb(l_ptr->proto_msg_queue);
443 l_ptr->proto_msg_queue = NULL;
444 kfree_skb_list(l_ptr->oldest_deferred_in);
445 if (!list_empty(&l_ptr->waiting_ports))
446 tipc_link_wakeup_ports(l_ptr, 1);
448 l_ptr->retransm_queue_head = 0;
449 l_ptr->retransm_queue_size = 0;
450 l_ptr->last_out = NULL;
451 l_ptr->first_out = NULL;
452 l_ptr->next_out = NULL;
453 l_ptr->unacked_window = 0;
454 l_ptr->checkpoint = 1;
455 l_ptr->next_out_no = 1;
456 l_ptr->deferred_inqueue_sz = 0;
457 l_ptr->oldest_deferred_in = NULL;
458 l_ptr->newest_deferred_in = NULL;
459 l_ptr->fsm_msg_cnt = 0;
460 l_ptr->stale_count = 0;
461 link_reset_statistics(l_ptr);
465 static void link_activate(struct tipc_link *l_ptr)
467 l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
468 tipc_node_link_up(l_ptr->owner, l_ptr);
469 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
473 * link_state_event - link finite state machine
474 * @l_ptr: pointer to link
475 * @event: state machine event to process
477 static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
479 struct tipc_link *other;
480 u32 cont_intv = l_ptr->continuity_interval;
482 if (!l_ptr->started && (event != STARTING_EVT))
483 return; /* Not yet. */
485 /* Check whether changeover is going on */
486 if (l_ptr->exp_msg_count) {
487 if (event == TIMEOUT_EVT)
488 link_set_timer(l_ptr, cont_intv);
492 switch (l_ptr->state) {
493 case WORKING_WORKING:
495 case TRAFFIC_MSG_EVT:
499 if (l_ptr->next_in_no != l_ptr->checkpoint) {
500 l_ptr->checkpoint = l_ptr->next_in_no;
501 if (tipc_bclink_acks_missing(l_ptr->owner)) {
502 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
504 l_ptr->fsm_msg_cnt++;
505 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
506 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
508 l_ptr->fsm_msg_cnt++;
510 link_set_timer(l_ptr, cont_intv);
513 l_ptr->state = WORKING_UNKNOWN;
514 l_ptr->fsm_msg_cnt = 0;
515 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
516 l_ptr->fsm_msg_cnt++;
517 link_set_timer(l_ptr, cont_intv / 4);
520 pr_info("%s<%s>, requested by peer\n", link_rst_msg,
522 tipc_link_reset(l_ptr);
523 l_ptr->state = RESET_RESET;
524 l_ptr->fsm_msg_cnt = 0;
525 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
526 l_ptr->fsm_msg_cnt++;
527 link_set_timer(l_ptr, cont_intv);
530 pr_err("%s%u in WW state\n", link_unk_evt, event);
533 case WORKING_UNKNOWN:
535 case TRAFFIC_MSG_EVT:
537 l_ptr->state = WORKING_WORKING;
538 l_ptr->fsm_msg_cnt = 0;
539 link_set_timer(l_ptr, cont_intv);
542 pr_info("%s<%s>, requested by peer while probing\n",
543 link_rst_msg, l_ptr->name);
544 tipc_link_reset(l_ptr);
545 l_ptr->state = RESET_RESET;
546 l_ptr->fsm_msg_cnt = 0;
547 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
548 l_ptr->fsm_msg_cnt++;
549 link_set_timer(l_ptr, cont_intv);
552 if (l_ptr->next_in_no != l_ptr->checkpoint) {
553 l_ptr->state = WORKING_WORKING;
554 l_ptr->fsm_msg_cnt = 0;
555 l_ptr->checkpoint = l_ptr->next_in_no;
556 if (tipc_bclink_acks_missing(l_ptr->owner)) {
557 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
559 l_ptr->fsm_msg_cnt++;
561 link_set_timer(l_ptr, cont_intv);
562 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
563 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
565 l_ptr->fsm_msg_cnt++;
566 link_set_timer(l_ptr, cont_intv / 4);
567 } else { /* Link has failed */
568 pr_warn("%s<%s>, peer not responding\n",
569 link_rst_msg, l_ptr->name);
570 tipc_link_reset(l_ptr);
571 l_ptr->state = RESET_UNKNOWN;
572 l_ptr->fsm_msg_cnt = 0;
573 tipc_link_send_proto_msg(l_ptr, RESET_MSG,
575 l_ptr->fsm_msg_cnt++;
576 link_set_timer(l_ptr, cont_intv);
580 pr_err("%s%u in WU state\n", link_unk_evt, event);
585 case TRAFFIC_MSG_EVT:
588 other = l_ptr->owner->active_links[0];
589 if (other && link_working_unknown(other))
591 l_ptr->state = WORKING_WORKING;
592 l_ptr->fsm_msg_cnt = 0;
593 link_activate(l_ptr);
594 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
595 l_ptr->fsm_msg_cnt++;
596 if (l_ptr->owner->working_links == 1)
597 tipc_link_send_sync(l_ptr);
598 link_set_timer(l_ptr, cont_intv);
601 l_ptr->state = RESET_RESET;
602 l_ptr->fsm_msg_cnt = 0;
603 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
604 l_ptr->fsm_msg_cnt++;
605 link_set_timer(l_ptr, cont_intv);
611 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
612 l_ptr->fsm_msg_cnt++;
613 link_set_timer(l_ptr, cont_intv);
616 pr_err("%s%u in RU state\n", link_unk_evt, event);
621 case TRAFFIC_MSG_EVT:
623 other = l_ptr->owner->active_links[0];
624 if (other && link_working_unknown(other))
626 l_ptr->state = WORKING_WORKING;
627 l_ptr->fsm_msg_cnt = 0;
628 link_activate(l_ptr);
629 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
630 l_ptr->fsm_msg_cnt++;
631 if (l_ptr->owner->working_links == 1)
632 tipc_link_send_sync(l_ptr);
633 link_set_timer(l_ptr, cont_intv);
638 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
639 l_ptr->fsm_msg_cnt++;
640 link_set_timer(l_ptr, cont_intv);
643 pr_err("%s%u in RR state\n", link_unk_evt, event);
647 pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
652 * link_bundle_buf(): Append contents of a buffer to
653 * the tail of an existing one.
655 static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
658 struct tipc_msg *bundler_msg = buf_msg(bundler);
659 struct tipc_msg *msg = buf_msg(buf);
660 u32 size = msg_size(msg);
661 u32 bundle_size = msg_size(bundler_msg);
662 u32 to_pos = align(bundle_size);
663 u32 pad = to_pos - bundle_size;
665 if (msg_user(bundler_msg) != MSG_BUNDLER)
667 if (msg_type(bundler_msg) != OPEN_MSG)
669 if (skb_tailroom(bundler) < (pad + size))
671 if (l_ptr->max_pkt < (to_pos + size))
674 skb_put(bundler, pad + size);
675 skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
676 msg_set_size(bundler_msg, to_pos + size);
677 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
679 l_ptr->stats.sent_bundled++;
683 static void link_add_to_outqueue(struct tipc_link *l_ptr,
685 struct tipc_msg *msg)
687 u32 ack = mod(l_ptr->next_in_no - 1);
688 u32 seqno = mod(l_ptr->next_out_no++);
690 msg_set_word(msg, 2, ((ack << 16) | seqno));
691 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
693 if (l_ptr->first_out) {
694 l_ptr->last_out->next = buf;
695 l_ptr->last_out = buf;
697 l_ptr->first_out = l_ptr->last_out = buf;
699 l_ptr->out_queue_size++;
700 if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
701 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
704 static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
705 struct sk_buff *buf_chain,
709 struct tipc_msg *msg;
711 if (!l_ptr->next_out)
712 l_ptr->next_out = buf_chain;
715 buf_chain = buf_chain->next;
718 msg_set_long_msgno(msg, long_msgno);
719 link_add_to_outqueue(l_ptr, buf, msg);
724 * tipc_link_send_buf() is the 'full path' for messages, called from
725 * inside TIPC when the 'fast path' in tipc_send_buf
726 * has failed, and from link_send()
728 int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
730 struct tipc_msg *msg = buf_msg(buf);
731 u32 size = msg_size(msg);
732 u32 dsz = msg_data_sz(msg);
733 u32 queue_size = l_ptr->out_queue_size;
734 u32 imp = tipc_msg_tot_importance(msg);
735 u32 queue_limit = l_ptr->queue_limit[imp];
736 u32 max_packet = l_ptr->max_pkt;
738 /* Match msg importance against queue limits: */
739 if (unlikely(queue_size >= queue_limit)) {
740 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
741 link_schedule_port(l_ptr, msg_origport(msg), size);
746 if (imp > CONN_MANAGER) {
747 pr_warn("%s<%s>, send queue full", link_rst_msg,
749 tipc_link_reset(l_ptr);
754 /* Fragmentation needed ? */
755 if (size > max_packet)
756 return link_send_long_buf(l_ptr, buf);
758 /* Packet can be queued or sent. */
759 if (likely(!link_congested(l_ptr))) {
760 link_add_to_outqueue(l_ptr, buf, msg);
762 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
763 l_ptr->unacked_window = 0;
766 /* Congestion: can message be bundled ? */
767 if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
768 (msg_user(msg) != MSG_FRAGMENTER)) {
770 /* Try adding message to an existing bundle */
771 if (l_ptr->next_out &&
772 link_bundle_buf(l_ptr, l_ptr->last_out, buf))
775 /* Try creating a new bundle */
776 if (size <= max_packet * 2 / 3) {
777 struct sk_buff *bundler = tipc_buf_acquire(max_packet);
778 struct tipc_msg bundler_hdr;
781 tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
782 INT_H_SIZE, l_ptr->addr);
783 skb_copy_to_linear_data(bundler, &bundler_hdr,
785 skb_trim(bundler, INT_H_SIZE);
786 link_bundle_buf(l_ptr, bundler, buf);
789 l_ptr->stats.sent_bundles++;
793 if (!l_ptr->next_out)
794 l_ptr->next_out = buf;
795 link_add_to_outqueue(l_ptr, buf, msg);
800 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
801 * not been selected yet, and the the owner node is not locked
802 * Called by TIPC internal users, e.g. the name distributor
804 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
806 struct tipc_link *l_ptr;
807 struct tipc_node *n_ptr;
808 int res = -ELINKCONG;
810 read_lock_bh(&tipc_net_lock);
811 n_ptr = tipc_node_find(dest);
813 tipc_node_lock(n_ptr);
814 l_ptr = n_ptr->active_links[selector & 1];
816 res = tipc_link_send_buf(l_ptr, buf);
819 tipc_node_unlock(n_ptr);
823 read_unlock_bh(&tipc_net_lock);
828 * tipc_link_send_sync - synchronize broadcast link endpoints.
830 * Give a newly added peer node the sequence number where it should
831 * start receiving and acking broadcast packets.
833 * Called with node locked
835 static void tipc_link_send_sync(struct tipc_link *l)
838 struct tipc_msg *msg;
840 buf = tipc_buf_acquire(INT_H_SIZE);
845 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
846 msg_set_last_bcast(msg, l->owner->bclink.acked);
847 link_add_chain_to_outqueue(l, buf, 0);
848 tipc_link_push_queue(l);
852 * tipc_link_recv_sync - synchronize broadcast link endpoints.
853 * Receive the sequence number where we should start receiving and
854 * acking broadcast packets from a newly added peer node, and open
855 * up for reception of such packets.
857 * Called with node locked
859 static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
861 struct tipc_msg *msg = buf_msg(buf);
863 n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
864 n->bclink.recv_permitted = true;
869 * tipc_link_send_names - send name table entries to new neighbor
871 * Send routine for bulk delivery of name table messages when contact
872 * with a new neighbor occurs. No link congestion checking is performed
873 * because name table messages *must* be delivered. The messages must be
874 * small enough not to require fragmentation.
875 * Called without any locks held.
877 void tipc_link_send_names(struct list_head *message_list, u32 dest)
879 struct tipc_node *n_ptr;
880 struct tipc_link *l_ptr;
882 struct sk_buff *temp_buf;
884 if (list_empty(message_list))
887 read_lock_bh(&tipc_net_lock);
888 n_ptr = tipc_node_find(dest);
890 tipc_node_lock(n_ptr);
891 l_ptr = n_ptr->active_links[0];
893 /* convert circular list to linear list */
894 ((struct sk_buff *)message_list->prev)->next = NULL;
895 link_add_chain_to_outqueue(l_ptr,
896 (struct sk_buff *)message_list->next, 0);
897 tipc_link_push_queue(l_ptr);
898 INIT_LIST_HEAD(message_list);
900 tipc_node_unlock(n_ptr);
902 read_unlock_bh(&tipc_net_lock);
904 /* discard the messages if they couldn't be sent */
905 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
906 list_del((struct list_head *)buf);
912 * link_send_buf_fast: Entry for data messages where the
913 * destination link is known and the header is complete,
914 * inclusive total message length. Very time critical.
915 * Link is locked. Returns user data length.
917 static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
920 struct tipc_msg *msg = buf_msg(buf);
921 int res = msg_data_sz(msg);
923 if (likely(!link_congested(l_ptr))) {
924 if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
925 link_add_to_outqueue(l_ptr, buf, msg);
926 tipc_bearer_send(l_ptr->b_ptr, buf,
928 l_ptr->unacked_window = 0;
932 *used_max_pkt = l_ptr->max_pkt;
934 return tipc_link_send_buf(l_ptr, buf); /* All other cases */
938 * tipc_link_send_sections_fast: Entry for messages where the
939 * destination processor is known and the header is complete,
940 * except for total message length.
941 * Returns user data length or errno.
943 int tipc_link_send_sections_fast(struct tipc_port *sender,
944 struct iovec const *msg_sect,
945 unsigned int len, u32 destaddr)
947 struct tipc_msg *hdr = &sender->phdr;
948 struct tipc_link *l_ptr;
950 struct tipc_node *node;
952 u32 selector = msg_origport(hdr) & 1;
956 * Try building message using port's max_pkt hint.
957 * (Must not hold any locks while building message.)
959 res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
960 /* Exit if build request was invalid */
961 if (unlikely(res < 0))
964 read_lock_bh(&tipc_net_lock);
965 node = tipc_node_find(destaddr);
967 tipc_node_lock(node);
968 l_ptr = node->active_links[selector];
971 res = link_send_buf_fast(l_ptr, buf,
974 tipc_node_unlock(node);
975 read_unlock_bh(&tipc_net_lock);
979 /* Exit if link (or bearer) is congested */
980 if (link_congested(l_ptr)) {
981 res = link_schedule_port(l_ptr,
987 * Message size exceeds max_pkt hint; update hint,
988 * then re-try fast path or fragment the message
990 sender->max_pkt = l_ptr->max_pkt;
991 tipc_node_unlock(node);
992 read_unlock_bh(&tipc_net_lock);
995 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
998 return link_send_sections_long(sender, msg_sect, len,
1001 tipc_node_unlock(node);
1003 read_unlock_bh(&tipc_net_lock);
1005 /* Couldn't find a link to the destination node */
1007 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1009 return tipc_port_reject_sections(sender, hdr, msg_sect,
1010 len, TIPC_ERR_NO_NODE);
1015 * link_send_sections_long(): Entry for long messages where the
1016 * destination node is known and the header is complete,
1017 * inclusive total message length.
1018 * Link and bearer congestion status have been checked to be ok,
1019 * and are ignored if they change.
1021 * Note that fragments do not use the full link MTU so that they won't have
1022 * to undergo refragmentation if link changeover causes them to be sent
1023 * over another link with an additional tunnel header added as prefix.
1024 * (Refragmentation will still occur if the other link has a smaller MTU.)
1026 * Returns user data length or errno.
1028 static int link_send_sections_long(struct tipc_port *sender,
1029 struct iovec const *msg_sect,
1030 unsigned int len, u32 destaddr)
1032 struct tipc_link *l_ptr;
1033 struct tipc_node *node;
1034 struct tipc_msg *hdr = &sender->phdr;
1036 u32 max_pkt, fragm_sz, rest;
1037 struct tipc_msg fragm_hdr;
1038 struct sk_buff *buf, *buf_chain, *prev;
1039 u32 fragm_crs, fragm_rest, hsz, sect_rest;
1040 const unchar __user *sect_crs;
1047 max_pkt = sender->max_pkt - INT_H_SIZE;
1048 /* leave room for tunnel header in case of link changeover */
1049 fragm_sz = max_pkt - INT_H_SIZE;
1050 /* leave room for fragmentation header in each fragment */
1058 /* Prepare reusable fragment header */
1059 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1060 INT_H_SIZE, msg_destnode(hdr));
1061 msg_set_size(&fragm_hdr, max_pkt);
1062 msg_set_fragm_no(&fragm_hdr, 1);
1064 /* Prepare header of first fragment */
1065 buf_chain = buf = tipc_buf_acquire(max_pkt);
1069 skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1070 hsz = msg_hdr_sz(hdr);
1071 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
1073 /* Chop up message */
1074 fragm_crs = INT_H_SIZE + hsz;
1075 fragm_rest = fragm_sz - hsz;
1077 do { /* For all sections */
1081 sect_rest = msg_sect[++curr_sect].iov_len;
1082 sect_crs = msg_sect[curr_sect].iov_base;
1085 if (sect_rest < fragm_rest)
1090 if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1093 kfree_skb_list(buf_chain);
1102 if (!fragm_rest && rest) {
1104 /* Initiate new fragment: */
1105 if (rest <= fragm_sz) {
1107 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
1109 msg_set_type(&fragm_hdr, FRAGMENT);
1111 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1112 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1114 buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
1122 skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1123 fragm_crs = INT_H_SIZE;
1124 fragm_rest = fragm_sz;
1129 * Now we have a buffer chain. Select a link and check
1130 * that packet size is still OK
1132 node = tipc_node_find(destaddr);
1134 tipc_node_lock(node);
1135 l_ptr = node->active_links[sender->ref & 1];
1137 tipc_node_unlock(node);
1140 if (l_ptr->max_pkt < max_pkt) {
1141 sender->max_pkt = l_ptr->max_pkt;
1142 tipc_node_unlock(node);
1143 kfree_skb_list(buf_chain);
1148 kfree_skb_list(buf_chain);
1149 return tipc_port_reject_sections(sender, hdr, msg_sect,
1150 len, TIPC_ERR_NO_NODE);
1153 /* Append chain of fragments to send queue & send them */
1154 l_ptr->long_msg_seq_no++;
1155 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
1156 l_ptr->stats.sent_fragments += fragm_no;
1157 l_ptr->stats.sent_fragmented++;
1158 tipc_link_push_queue(l_ptr);
1159 tipc_node_unlock(node);
1164 * tipc_link_push_packet: Push one unsent packet to the media
1166 static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1168 struct sk_buff *buf = l_ptr->first_out;
1169 u32 r_q_size = l_ptr->retransm_queue_size;
1170 u32 r_q_head = l_ptr->retransm_queue_head;
1172 /* Step to position where retransmission failed, if any, */
1173 /* consider that buffers may have been released in meantime */
1174 if (r_q_size && buf) {
1175 u32 last = lesser(mod(r_q_head + r_q_size),
1176 link_last_sent(l_ptr));
1177 u32 first = buf_seqno(buf);
1179 while (buf && less(first, r_q_head)) {
1180 first = mod(first + 1);
1183 l_ptr->retransm_queue_head = r_q_head = first;
1184 l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1187 /* Continue retransmission now, if there is anything: */
1188 if (r_q_size && buf) {
1189 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1190 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1191 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1192 l_ptr->retransm_queue_head = mod(++r_q_head);
1193 l_ptr->retransm_queue_size = --r_q_size;
1194 l_ptr->stats.retransmitted++;
1198 /* Send deferred protocol message, if any: */
1199 buf = l_ptr->proto_msg_queue;
1201 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1202 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1203 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1204 l_ptr->unacked_window = 0;
1206 l_ptr->proto_msg_queue = NULL;
1210 /* Send one deferred data message, if send window not full: */
1211 buf = l_ptr->next_out;
1213 struct tipc_msg *msg = buf_msg(buf);
1214 u32 next = msg_seqno(msg);
1215 u32 first = buf_seqno(l_ptr->first_out);
1217 if (mod(next - first) < l_ptr->queue_limit[0]) {
1218 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1219 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1220 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1221 if (msg_user(msg) == MSG_BUNDLER)
1222 msg_set_type(msg, CLOSED_MSG);
1223 l_ptr->next_out = buf->next;
1231 * push_queue(): push out the unsent messages of a link where
1232 * congestion has abated. Node is locked
1234 void tipc_link_push_queue(struct tipc_link *l_ptr)
1239 res = tipc_link_push_packet(l_ptr);
1243 static void link_reset_all(unsigned long addr)
1245 struct tipc_node *n_ptr;
1246 char addr_string[16];
1249 read_lock_bh(&tipc_net_lock);
1250 n_ptr = tipc_node_find((u32)addr);
1252 read_unlock_bh(&tipc_net_lock);
1253 return; /* node no longer exists */
1256 tipc_node_lock(n_ptr);
1258 pr_warn("Resetting all links to %s\n",
1259 tipc_addr_string_fill(addr_string, n_ptr->addr));
1261 for (i = 0; i < MAX_BEARERS; i++) {
1262 if (n_ptr->links[i]) {
1263 link_print(n_ptr->links[i], "Resetting link\n");
1264 tipc_link_reset(n_ptr->links[i]);
1268 tipc_node_unlock(n_ptr);
1269 read_unlock_bh(&tipc_net_lock);
1272 static void link_retransmit_failure(struct tipc_link *l_ptr,
1273 struct sk_buff *buf)
1275 struct tipc_msg *msg = buf_msg(buf);
1277 pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
1280 /* Handle failure on standard link */
1281 link_print(l_ptr, "Resetting link\n");
1282 tipc_link_reset(l_ptr);
1285 /* Handle failure on broadcast link */
1286 struct tipc_node *n_ptr;
1287 char addr_string[16];
1289 pr_info("Msg seq number: %u, ", msg_seqno(msg));
1290 pr_cont("Outstanding acks: %lu\n",
1291 (unsigned long) TIPC_SKB_CB(buf)->handle);
1293 n_ptr = tipc_bclink_retransmit_to();
1294 tipc_node_lock(n_ptr);
1296 tipc_addr_string_fill(addr_string, n_ptr->addr);
1297 pr_info("Broadcast link info for %s\n", addr_string);
1298 pr_info("Reception permitted: %d, Acked: %u\n",
1299 n_ptr->bclink.recv_permitted,
1300 n_ptr->bclink.acked);
1301 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
1302 n_ptr->bclink.last_in,
1303 n_ptr->bclink.oos_state,
1304 n_ptr->bclink.last_sent);
1306 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1308 tipc_node_unlock(n_ptr);
1310 l_ptr->stale_count = 0;
1314 void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1317 struct tipc_msg *msg;
1324 /* Detect repeated retransmit failures */
1325 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1326 if (++l_ptr->stale_count > 100) {
1327 link_retransmit_failure(l_ptr, buf);
1331 l_ptr->last_retransmitted = msg_seqno(msg);
1332 l_ptr->stale_count = 1;
1335 while (retransmits && (buf != l_ptr->next_out) && buf) {
1337 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1338 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1339 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1342 l_ptr->stats.retransmitted++;
1345 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1349 * link_insert_deferred_queue - insert deferred messages back into receive chain
1351 static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
1352 struct sk_buff *buf)
1356 if (l_ptr->oldest_deferred_in == NULL)
1359 seq_no = buf_seqno(l_ptr->oldest_deferred_in);
1360 if (seq_no == mod(l_ptr->next_in_no)) {
1361 l_ptr->newest_deferred_in->next = buf;
1362 buf = l_ptr->oldest_deferred_in;
1363 l_ptr->oldest_deferred_in = NULL;
1364 l_ptr->deferred_inqueue_sz = 0;
1370 * link_recv_buf_validate - validate basic format of received message
1372 * This routine ensures a TIPC message has an acceptable header, and at least
1373 * as much data as the header indicates it should. The routine also ensures
1374 * that the entire message header is stored in the main fragment of the message
1375 * buffer, to simplify future access to message header fields.
1377 * Note: Having extra info present in the message header or data areas is OK.
1378 * TIPC will ignore the excess, under the assumption that it is optional info
1379 * introduced by a later release of the protocol.
1381 static int link_recv_buf_validate(struct sk_buff *buf)
1383 static u32 min_data_hdr_size[8] = {
1384 SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
1385 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1388 struct tipc_msg *msg;
1394 if (unlikely(buf->len < MIN_H_SIZE))
1397 msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1401 if (unlikely(msg_version(msg) != TIPC_VERSION))
1404 size = msg_size(msg);
1405 hdr_size = msg_hdr_sz(msg);
1406 min_hdr_size = msg_isdata(msg) ?
1407 min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1409 if (unlikely((hdr_size < min_hdr_size) ||
1410 (size < hdr_size) ||
1411 (buf->len < size) ||
1412 (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1415 return pskb_may_pull(buf, hdr_size);
1419 * tipc_rcv - process TIPC packets/messages arriving from off-node
1420 * @head: pointer to message buffer chain
1421 * @tb_ptr: pointer to bearer message arrived on
1423 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1424 * structure (i.e. cannot be NULL), but bearer can be inactive.
1426 void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1428 read_lock_bh(&tipc_net_lock);
1430 struct tipc_node *n_ptr;
1431 struct tipc_link *l_ptr;
1432 struct sk_buff *crs;
1433 struct sk_buff *buf = head;
1434 struct tipc_msg *msg;
1442 /* Ensure bearer is still enabled */
1443 if (unlikely(!b_ptr->active))
1446 /* Ensure message is well-formed */
1447 if (unlikely(!link_recv_buf_validate(buf)))
1450 /* Ensure message data is a single contiguous unit */
1451 if (unlikely(skb_linearize(buf)))
1454 /* Handle arrival of a non-unicast link message */
1457 if (unlikely(msg_non_seq(msg))) {
1458 if (msg_user(msg) == LINK_CONFIG)
1459 tipc_disc_recv_msg(buf, b_ptr);
1461 tipc_bclink_recv_pkt(buf);
1465 /* Discard unicast link messages destined for another node */
1466 if (unlikely(!msg_short(msg) &&
1467 (msg_destnode(msg) != tipc_own_addr)))
1470 /* Locate neighboring node that sent message */
1471 n_ptr = tipc_node_find(msg_prevnode(msg));
1472 if (unlikely(!n_ptr))
1474 tipc_node_lock(n_ptr);
1476 /* Locate unicast link endpoint that should handle message */
1477 l_ptr = n_ptr->links[b_ptr->identity];
1478 if (unlikely(!l_ptr))
1479 goto unlock_discard;
1481 /* Verify that communication with node is currently allowed */
1482 if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
1483 msg_user(msg) == LINK_PROTOCOL &&
1484 (msg_type(msg) == RESET_MSG ||
1485 msg_type(msg) == ACTIVATE_MSG) &&
1486 !msg_redundant_link(msg))
1487 n_ptr->block_setup &= ~WAIT_PEER_DOWN;
1489 if (n_ptr->block_setup)
1490 goto unlock_discard;
1492 /* Validate message sequence number info */
1493 seq_no = msg_seqno(msg);
1494 ackd = msg_ack(msg);
1496 /* Release acked messages */
1497 if (n_ptr->bclink.recv_permitted)
1498 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1500 crs = l_ptr->first_out;
1501 while ((crs != l_ptr->next_out) &&
1502 less_eq(buf_seqno(crs), ackd)) {
1503 struct sk_buff *next = crs->next;
1510 l_ptr->first_out = crs;
1511 l_ptr->out_queue_size -= released;
1514 /* Try sending any messages link endpoint has pending */
1515 if (unlikely(l_ptr->next_out))
1516 tipc_link_push_queue(l_ptr);
1517 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1518 tipc_link_wakeup_ports(l_ptr, 0);
1519 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1520 l_ptr->stats.sent_acks++;
1521 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1524 /* Now (finally!) process the incoming message */
1526 if (unlikely(!link_working_working(l_ptr))) {
1527 if (msg_user(msg) == LINK_PROTOCOL) {
1528 link_recv_proto_msg(l_ptr, buf);
1529 head = link_insert_deferred_queue(l_ptr, head);
1530 tipc_node_unlock(n_ptr);
1534 /* Traffic message. Conditionally activate link */
1535 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1537 if (link_working_working(l_ptr)) {
1538 /* Re-insert buffer in front of queue */
1541 tipc_node_unlock(n_ptr);
1544 goto unlock_discard;
1547 /* Link is now in state WORKING_WORKING */
1548 if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
1549 link_handle_out_of_seq_msg(l_ptr, buf);
1550 head = link_insert_deferred_queue(l_ptr, head);
1551 tipc_node_unlock(n_ptr);
1554 l_ptr->next_in_no++;
1555 if (unlikely(l_ptr->oldest_deferred_in))
1556 head = link_insert_deferred_queue(l_ptr, head);
1558 if (likely(msg_isdata(msg))) {
1559 tipc_node_unlock(n_ptr);
1560 tipc_port_recv_msg(buf);
1563 switch (msg_user(msg)) {
1566 l_ptr->stats.recv_bundles++;
1567 l_ptr->stats.recv_bundled += msg_msgcnt(msg);
1568 tipc_node_unlock(n_ptr);
1569 tipc_link_recv_bundle(buf);
1571 case NAME_DISTRIBUTOR:
1572 n_ptr->bclink.recv_permitted = true;
1573 tipc_node_unlock(n_ptr);
1574 tipc_named_recv(buf);
1576 case BCAST_PROTOCOL:
1577 tipc_link_recv_sync(n_ptr, buf);
1578 tipc_node_unlock(n_ptr);
1581 tipc_node_unlock(n_ptr);
1582 tipc_port_recv_proto_msg(buf);
1584 case MSG_FRAGMENTER:
1585 l_ptr->stats.recv_fragments++;
1586 ret = tipc_link_recv_fragment(&l_ptr->reasm_head,
1589 if (ret == LINK_REASM_COMPLETE) {
1590 l_ptr->stats.recv_fragmented++;
1594 if (ret == LINK_REASM_ERROR)
1595 tipc_link_reset(l_ptr);
1596 tipc_node_unlock(n_ptr);
1598 case CHANGEOVER_PROTOCOL:
1599 type = msg_type(msg);
1600 if (tipc_link_tunnel_rcv(&l_ptr, &buf)) {
1602 seq_no = msg_seqno(msg);
1603 if (type == ORIGINAL_MSG)
1605 goto protocol_check;
1613 tipc_node_unlock(n_ptr);
1614 tipc_net_route_msg(buf);
1618 tipc_node_unlock(n_ptr);
1622 read_unlock_bh(&tipc_net_lock);
1626 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1628 * Returns increase in queue length (i.e. 0 or 1)
1630 u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
1631 struct sk_buff *buf)
1633 struct sk_buff *queue_buf;
1634 struct sk_buff **prev;
1635 u32 seq_no = buf_seqno(buf);
1640 if (*head == NULL) {
1641 *head = *tail = buf;
1646 if (less(buf_seqno(*tail), seq_no)) {
1647 (*tail)->next = buf;
1652 /* Locate insertion point in queue, then insert; discard if duplicate */
1656 u32 curr_seqno = buf_seqno(queue_buf);
1658 if (seq_no == curr_seqno) {
1663 if (less(seq_no, curr_seqno))
1666 prev = &queue_buf->next;
1667 queue_buf = queue_buf->next;
1670 buf->next = queue_buf;
1676 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1678 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1679 struct sk_buff *buf)
1681 u32 seq_no = buf_seqno(buf);
1683 if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1684 link_recv_proto_msg(l_ptr, buf);
1688 /* Record OOS packet arrival (force mismatch on next timeout) */
1689 l_ptr->checkpoint--;
1692 * Discard packet if a duplicate; otherwise add it to deferred queue
1693 * and notify peer of gap as per protocol specification
1695 if (less(seq_no, mod(l_ptr->next_in_no))) {
1696 l_ptr->stats.duplicates++;
1701 if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1702 &l_ptr->newest_deferred_in, buf)) {
1703 l_ptr->deferred_inqueue_sz++;
1704 l_ptr->stats.deferred_recv++;
1705 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1706 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1708 l_ptr->stats.duplicates++;
1712 * Send protocol message to the other endpoint.
1714 void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
1715 int probe_msg, u32 gap, u32 tolerance,
1716 u32 priority, u32 ack_mtu)
1718 struct sk_buff *buf = NULL;
1719 struct tipc_msg *msg = l_ptr->pmsg;
1720 u32 msg_size = sizeof(l_ptr->proto_msg);
1723 /* Discard any previous message that was deferred due to congestion */
1724 if (l_ptr->proto_msg_queue) {
1725 kfree_skb(l_ptr->proto_msg_queue);
1726 l_ptr->proto_msg_queue = NULL;
1729 /* Don't send protocol message during link changeover */
1730 if (l_ptr->exp_msg_count)
1733 /* Abort non-RESET send if communication with node is prohibited */
1734 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
1737 /* Create protocol message with "out-of-sequence" sequence number */
1738 msg_set_type(msg, msg_typ);
1739 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1740 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1741 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1743 if (msg_typ == STATE_MSG) {
1744 u32 next_sent = mod(l_ptr->next_out_no);
1746 if (!tipc_link_is_up(l_ptr))
1748 if (l_ptr->next_out)
1749 next_sent = buf_seqno(l_ptr->next_out);
1750 msg_set_next_sent(msg, next_sent);
1751 if (l_ptr->oldest_deferred_in) {
1752 u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
1753 gap = mod(rec - mod(l_ptr->next_in_no));
1755 msg_set_seq_gap(msg, gap);
1757 l_ptr->stats.sent_nacks++;
1758 msg_set_link_tolerance(msg, tolerance);
1759 msg_set_linkprio(msg, priority);
1760 msg_set_max_pkt(msg, ack_mtu);
1761 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1762 msg_set_probe(msg, probe_msg != 0);
1764 u32 mtu = l_ptr->max_pkt;
1766 if ((mtu < l_ptr->max_pkt_target) &&
1767 link_working_working(l_ptr) &&
1768 l_ptr->fsm_msg_cnt) {
1769 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1770 if (l_ptr->max_pkt_probes == 10) {
1771 l_ptr->max_pkt_target = (msg_size - 4);
1772 l_ptr->max_pkt_probes = 0;
1773 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1775 l_ptr->max_pkt_probes++;
1778 l_ptr->stats.sent_probes++;
1780 l_ptr->stats.sent_states++;
1781 } else { /* RESET_MSG or ACTIVATE_MSG */
1782 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
1783 msg_set_seq_gap(msg, 0);
1784 msg_set_next_sent(msg, 1);
1785 msg_set_probe(msg, 0);
1786 msg_set_link_tolerance(msg, l_ptr->tolerance);
1787 msg_set_linkprio(msg, l_ptr->priority);
1788 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
1791 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
1792 msg_set_redundant_link(msg, r_flag);
1793 msg_set_linkprio(msg, l_ptr->priority);
1794 msg_set_size(msg, msg_size);
1796 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
1798 buf = tipc_buf_acquire(msg_size);
1802 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1803 buf->priority = TC_PRIO_CONTROL;
1805 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1806 l_ptr->unacked_window = 0;
1811 * Receive protocol message :
1812 * Note that network plane id propagates through the network, and may
1813 * change at any time. The node with lowest address rules
1815 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
1821 struct tipc_msg *msg = buf_msg(buf);
1823 /* Discard protocol message during link changeover */
1824 if (l_ptr->exp_msg_count)
1827 /* record unnumbered packet arrival (force mismatch on next timeout) */
1828 l_ptr->checkpoint--;
1830 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
1831 if (tipc_own_addr > msg_prevnode(msg))
1832 l_ptr->b_ptr->net_plane = msg_net_plane(msg);
1834 switch (msg_type(msg)) {
1837 if (!link_working_unknown(l_ptr) &&
1838 (l_ptr->peer_session != INVALID_SESSION)) {
1839 if (less_eq(msg_session(msg), l_ptr->peer_session))
1840 break; /* duplicate or old reset: ignore */
1843 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
1844 link_working_unknown(l_ptr))) {
1846 * peer has lost contact -- don't allow peer's links
1847 * to reactivate before we recognize loss & clean up
1849 l_ptr->owner->block_setup = WAIT_NODE_DOWN;
1852 link_state_event(l_ptr, RESET_MSG);
1856 /* Update link settings according other endpoint's values */
1857 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
1859 msg_tol = msg_link_tolerance(msg);
1860 if (msg_tol > l_ptr->tolerance)
1861 link_set_supervision_props(l_ptr, msg_tol);
1863 if (msg_linkprio(msg) > l_ptr->priority)
1864 l_ptr->priority = msg_linkprio(msg);
1866 max_pkt_info = msg_max_pkt(msg);
1868 if (max_pkt_info < l_ptr->max_pkt_target)
1869 l_ptr->max_pkt_target = max_pkt_info;
1870 if (l_ptr->max_pkt > l_ptr->max_pkt_target)
1871 l_ptr->max_pkt = l_ptr->max_pkt_target;
1873 l_ptr->max_pkt = l_ptr->max_pkt_target;
1876 /* Synchronize broadcast link info, if not done previously */
1877 if (!tipc_node_is_up(l_ptr->owner)) {
1878 l_ptr->owner->bclink.last_sent =
1879 l_ptr->owner->bclink.last_in =
1880 msg_last_bcast(msg);
1881 l_ptr->owner->bclink.oos_state = 0;
1884 l_ptr->peer_session = msg_session(msg);
1885 l_ptr->peer_bearer_id = msg_bearer_id(msg);
1887 if (msg_type(msg) == ACTIVATE_MSG)
1888 link_state_event(l_ptr, ACTIVATE_MSG);
1892 msg_tol = msg_link_tolerance(msg);
1894 link_set_supervision_props(l_ptr, msg_tol);
1896 if (msg_linkprio(msg) &&
1897 (msg_linkprio(msg) != l_ptr->priority)) {
1898 pr_warn("%s<%s>, priority change %u->%u\n",
1899 link_rst_msg, l_ptr->name, l_ptr->priority,
1901 l_ptr->priority = msg_linkprio(msg);
1902 tipc_link_reset(l_ptr); /* Enforce change to take effect */
1905 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1906 l_ptr->stats.recv_states++;
1907 if (link_reset_unknown(l_ptr))
1910 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
1911 rec_gap = mod(msg_next_sent(msg) -
1912 mod(l_ptr->next_in_no));
1915 max_pkt_ack = msg_max_pkt(msg);
1916 if (max_pkt_ack > l_ptr->max_pkt) {
1917 l_ptr->max_pkt = max_pkt_ack;
1918 l_ptr->max_pkt_probes = 0;
1922 if (msg_probe(msg)) {
1923 l_ptr->stats.recv_probes++;
1924 if (msg_size(msg) > sizeof(l_ptr->proto_msg))
1925 max_pkt_ack = msg_size(msg);
1928 /* Protocol message before retransmits, reduce loss risk */
1929 if (l_ptr->owner->bclink.recv_permitted)
1930 tipc_bclink_update_link_state(l_ptr->owner,
1931 msg_last_bcast(msg));
1933 if (rec_gap || (msg_probe(msg))) {
1934 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
1935 0, rec_gap, 0, 0, max_pkt_ack);
1937 if (msg_seq_gap(msg)) {
1938 l_ptr->stats.recv_nacks++;
1939 tipc_link_retransmit(l_ptr, l_ptr->first_out,
1949 /* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
1950 * a different bearer. Owner node is locked.
1952 static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
1953 struct tipc_msg *tunnel_hdr,
1954 struct tipc_msg *msg,
1957 struct tipc_link *tunnel;
1958 struct sk_buff *buf;
1959 u32 length = msg_size(msg);
1961 tunnel = l_ptr->owner->active_links[selector & 1];
1962 if (!tipc_link_is_up(tunnel)) {
1963 pr_warn("%stunnel link no longer available\n", link_co_err);
1966 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
1967 buf = tipc_buf_acquire(length + INT_H_SIZE);
1969 pr_warn("%sunable to send tunnel msg\n", link_co_err);
1972 skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
1973 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
1974 tipc_link_send_buf(tunnel, buf);
1978 /* tipc_link_failover_send_queue(): A link has gone down, but a second
1979 * link is still active. We can do failover. Tunnel the failing link's
1980 * whole send queue via the remaining link. This way, we don't lose
1981 * any packets, and sequence order is preserved for subsequent traffic
1982 * sent over the remaining link. Owner node is locked.
1984 void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1986 u32 msgcount = l_ptr->out_queue_size;
1987 struct sk_buff *crs = l_ptr->first_out;
1988 struct tipc_link *tunnel = l_ptr->owner->active_links[0];
1989 struct tipc_msg tunnel_hdr;
1995 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
1996 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
1997 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
1998 msg_set_msgcnt(&tunnel_hdr, msgcount);
2000 if (!l_ptr->first_out) {
2001 struct sk_buff *buf;
2003 buf = tipc_buf_acquire(INT_H_SIZE);
2005 skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
2006 msg_set_size(&tunnel_hdr, INT_H_SIZE);
2007 tipc_link_send_buf(tunnel, buf);
2009 pr_warn("%sunable to send changeover msg\n",
2015 split_bundles = (l_ptr->owner->active_links[0] !=
2016 l_ptr->owner->active_links[1]);
2019 struct tipc_msg *msg = buf_msg(crs);
2021 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
2022 struct tipc_msg *m = msg_get_wrapped(msg);
2023 unchar *pos = (unchar *)m;
2025 msgcount = msg_msgcnt(msg);
2026 while (msgcount--) {
2027 msg_set_seqno(m, msg_seqno(msg));
2028 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
2029 msg_link_selector(m));
2030 pos += align(msg_size(m));
2031 m = (struct tipc_msg *)pos;
2034 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
2035 msg_link_selector(msg));
2041 /* tipc_link_dup_send_queue(): A second link has become active. Tunnel a
2042 * duplicate of the first link's send queue via the new link. This way, we
2043 * are guaranteed that currently queued packets from a socket are delivered
2044 * before future traffic from the same socket, even if this is using the
2045 * new link. The last arriving copy of each duplicate packet is dropped at
2046 * the receiving end by the regular protocol check, so packet cardinality
2047 * and sequence order is preserved per sender/receiver socket pair.
2048 * Owner node is locked.
2050 void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
2051 struct tipc_link *tunnel)
2053 struct sk_buff *iter;
2054 struct tipc_msg tunnel_hdr;
2056 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2057 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
2058 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2059 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2060 iter = l_ptr->first_out;
2062 struct sk_buff *outbuf;
2063 struct tipc_msg *msg = buf_msg(iter);
2064 u32 length = msg_size(msg);
2066 if (msg_user(msg) == MSG_BUNDLER)
2067 msg_set_type(msg, CLOSED_MSG);
2068 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
2069 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
2070 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2071 outbuf = tipc_buf_acquire(length + INT_H_SIZE);
2072 if (outbuf == NULL) {
2073 pr_warn("%sunable to send duplicate msg\n",
2077 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
2078 skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
2080 tipc_link_send_buf(tunnel, outbuf);
2081 if (!tipc_link_is_up(l_ptr))
2088 * buf_extract - extracts embedded TIPC message from another message
2089 * @skb: encapsulating message buffer
2090 * @from_pos: offset to extract from
2092 * Returns a new message buffer containing an embedded message. The
2093 * encapsulating message itself is left unchanged.
2095 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2097 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2098 u32 size = msg_size(msg);
2101 eb = tipc_buf_acquire(size);
2103 skb_copy_to_linear_data(eb, msg, size);
2107 /* tipc_link_tunnel_rcv(): Receive a tunneled packet, sent
2108 * via other link as result of a failover (ORIGINAL_MSG) or
2109 * a new active link (DUPLICATE_MSG). Failover packets are
2110 * returned to the active link for delivery upwards.
2111 * Owner node is locked.
2113 static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
2114 struct sk_buff **buf)
2116 struct sk_buff *tunnel_buf = *buf;
2117 struct tipc_link *dest_link;
2118 struct tipc_msg *msg;
2119 struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
2120 u32 msg_typ = msg_type(tunnel_msg);
2121 u32 msg_count = msg_msgcnt(tunnel_msg);
2122 u32 bearer_id = msg_bearer_id(tunnel_msg);
2124 if (bearer_id >= MAX_BEARERS)
2126 dest_link = (*l_ptr)->owner->links[bearer_id];
2129 if (dest_link == *l_ptr) {
2130 pr_err("Unexpected changeover message on link <%s>\n",
2135 msg = msg_get_wrapped(tunnel_msg);
2137 if (msg_typ == DUPLICATE_MSG) {
2138 if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
2140 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2142 pr_warn("%sduplicate msg dropped\n", link_co_err);
2145 kfree_skb(tunnel_buf);
2149 /* First original message ?: */
2150 if (tipc_link_is_up(dest_link)) {
2151 pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
2153 tipc_link_reset(dest_link);
2154 dest_link->exp_msg_count = msg_count;
2157 } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2158 dest_link->exp_msg_count = msg_count;
2163 /* Receive original message */
2164 if (dest_link->exp_msg_count == 0) {
2165 pr_warn("%sgot too many tunnelled messages\n", link_co_err);
2168 dest_link->exp_msg_count--;
2169 if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
2172 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2174 kfree_skb(tunnel_buf);
2177 pr_warn("%soriginal msg dropped\n", link_co_err);
2182 kfree_skb(tunnel_buf);
2187 * Bundler functionality:
2189 void tipc_link_recv_bundle(struct sk_buff *buf)
2191 u32 msgcount = msg_msgcnt(buf_msg(buf));
2192 u32 pos = INT_H_SIZE;
2193 struct sk_buff *obuf;
2195 while (msgcount--) {
2196 obuf = buf_extract(buf, pos);
2198 pr_warn("Link unable to unbundle message(s)\n");
2201 pos += align(msg_size(buf_msg(obuf)));
2202 tipc_net_route_msg(obuf);
2208 * Fragmentation/defragmentation:
2212 * link_send_long_buf: Entry for buffers needing fragmentation.
2213 * The buffer is complete, inclusive total message length.
2214 * Returns user data length.
static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	struct sk_buff *buf_chain = NULL;
	struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
	struct tipc_msg *inmsg = buf_msg(buf);
	struct tipc_msg fragm_hdr;
	u32 insize = msg_size(inmsg);
	u32 dsz = msg_data_sz(inmsg);
	unchar *crs = buf->data;
	u32 rest = insize;
	u32 pack_sz = l_ptr->max_pkt;
	u32 fragm_sz = pack_sz - INT_H_SIZE;
	u32 fragm_no = 0;
	u32 destaddr;

	if (msg_short(inmsg))
		destaddr = l_ptr->addr;
	else
		destaddr = msg_destnode(inmsg);

	/* Prepare reusable fragment header: */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, destaddr);

	/* Chop up message: */
	while (rest > 0) {
		struct sk_buff *fragm;

		if (rest <= fragm_sz) {
			fragm_sz = rest;
			msg_set_type(&fragm_hdr, LAST_FRAGMENT);
		}
		fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
		if (fragm == NULL) {
			kfree_skb(buf);
			kfree_skb_list(buf_chain);
			return -ENOMEM;
		}
		msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
		fragm_no++;
		msg_set_fragm_no(&fragm_hdr, fragm_no);
		skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
					       fragm_sz);
		buf_chain_tail->next = fragm;
		buf_chain_tail = fragm;

		rest -= fragm_sz;
		crs += fragm_sz;
		msg_set_type(&fragm_hdr, FRAGMENT);
	}
	kfree_skb(buf);

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);

	return dsz;
}
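
/* Worked example (not part of the original file): each fragment carries
 * fragm_sz = max_pkt - INT_H_SIZE bytes of the original buffer, so with
 * max_pkt = 1500 and INT_H_SIZE = 40 a 4000-byte buffer yields three
 * packets: FIRST_FRAGMENT (1460), FRAGMENT (1460), LAST_FRAGMENT (1080).
 * The hypothetical helper below makes the count explicit.
 */
static u32 __maybe_unused fragm_count_sketch(u32 insize, u32 max_pkt)
{
	return DIV_ROUND_UP(insize, max_pkt - INT_H_SIZE);
}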
/* tipc_link_recv_fragment(): Called with the node lock held. Returns
 * the reassembled buffer if the message is complete.
 */
int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
			    struct sk_buff **fbuf)
{
	struct sk_buff *frag = *fbuf;
	struct tipc_msg *msg = buf_msg(frag);
	u32 fragid = msg_type(msg);
	bool headstolen;
	int delta;

	skb_pull(frag, msg_hdr_sz(msg));
	if (fragid == FIRST_FRAGMENT) {
		if (*head || skb_unclone(frag, GFP_ATOMIC))
			goto out_free;
		*head = frag;
		skb_frag_list_init(*head);
		return 0;
	} else if (*head &&
		   skb_try_coalesce(*head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		if (!*head)
			goto out_free;
		if (!skb_has_frag_list(*head))
			skb_shinfo(*head)->frag_list = frag;
		else
			(*tail)->next = frag;
		*tail = frag;
		(*head)->truesize += frag->truesize;
	}
	if (fragid == LAST_FRAGMENT) {
		*fbuf = *head;
		*tail = *head = NULL;
		return LINK_REASM_COMPLETE;
	}
	return 0;
out_free:
	pr_warn_ratelimited("Link unable to reassemble fragmented message\n");
	kfree_skb(*fbuf);
	return LINK_REASM_ERROR;
}
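
/* Illustrative sketch (not part of the original file): driving the
 * reassembly state machine from a receive loop. The per-link pointers
 * and the helper name are hypothetical; on LINK_REASM_ERROR the buffers
 * have already been freed by tipc_link_recv_fragment() itself.
 */
static void __maybe_unused reasm_rx_sketch(struct sk_buff **reasm_head,
					   struct sk_buff **reasm_tail,
					   struct sk_buff *frag)
{
	int res = tipc_link_recv_fragment(reasm_head, reasm_tail, &frag);

	if (res == LINK_REASM_COMPLETE)
		tipc_net_route_msg(frag);	/* 'frag' is now the full message */
	/* res == 0: fragment absorbed; wait for more */
}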
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
{
	if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
		return;

	l_ptr->tolerance = tolerance;
	l_ptr->continuity_interval =
		((tolerance / 4) > 500) ? 500 : tolerance / 4;
	l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
}
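
/* Worked example (not part of the original file): with the default
 * tolerance of 1500 ms, continuity_interval = min(1500 / 4, 500) = 375 ms
 * and abort_limit = 1500 / (375 / 4) = 1500 / 93 = 16, i.e. the link is
 * given roughly 16 unanswered probe intervals before it is aborted.
 */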
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
{
	/* Data messages from this node, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
	/* Transiting data messages, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
	l_ptr->queue_limit[CONN_MANAGER] = 1200;
	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
	/* FRAGMENT and LAST_FRAGMENT packets */
	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}
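
/* Worked example (not part of the original file): with the default
 * window of 50 packets, locally originated traffic may queue 50 / 64 /
 * 80 / 96 packets at low / medium / high / critical importance
 * ((50 / 3) * 4 = 64, * 5 = 80, * 6 = 96), while the transit and
 * protocol limits above are fixed constants.
 */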
/**
 * link_find_link - locate link by name
 * @name: ptr to link name string
 * @node: ptr to area to be filled with ptr to associated node
 *
 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
 * this also prevents link deletion.
 *
 * Returns pointer to link (or NULL if no matching link is found).
 */
static struct tipc_link *link_find_link(const char *name,
					struct tipc_node **node)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	int i;

	list_for_each_entry(n_ptr, &tipc_node_list, list) {
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i];
			if (l_ptr && !strcmp(l_ptr->name, name))
				goto found;
		}
	}
	l_ptr = NULL;
	n_ptr = NULL;
found:
	*node = n_ptr;
	return l_ptr;
}
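
/* Illustrative usage (not part of the original file): TIPC link names
 * have the form <own addr>:<own if>-<peer addr>:<peer if>, e.g.
 * "1.1.1:eth0-1.1.2:eth0", so a lookup under tipc_net_lock reads:
 *
 *	read_lock_bh(&tipc_net_lock);
 *	l_ptr = link_find_link("1.1.1:eth0-1.1.2:eth0", &node);
 *	if (l_ptr) {
 *		tipc_node_lock(node);
 *		...
 *		tipc_node_unlock(node);
 *	}
 *	read_unlock_bh(&tipc_net_lock);
 */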
/**
 * link_value_is_valid - validate proposed link tolerance/priority/window
 * @cmd: value type (TIPC_CMD_SET_LINK_*)
 * @new_value: the new value
 *
 * Returns 1 if value is within range, 0 if not.
 */
static int link_value_is_valid(u16 cmd, u32 new_value)
{
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		return (new_value >= TIPC_MIN_LINK_TOL) &&
			(new_value <= TIPC_MAX_LINK_TOL);
	case TIPC_CMD_SET_LINK_PRI:
		return (new_value <= TIPC_MAX_LINK_PRI);
	case TIPC_CMD_SET_LINK_WINDOW:
		return (new_value >= TIPC_MIN_LINK_WIN) &&
			(new_value <= TIPC_MAX_LINK_WIN);
	}
	return 0;
}
/**
 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
 * @name: ptr to link, bearer, or media name
 * @new_value: new value of link, bearer, or media setting
 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
 *
 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
 *
 * Returns 0 if value updated and negative value on error.
 */
static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
{
	struct tipc_node *node;
	struct tipc_link *l_ptr;
	struct tipc_bearer *b_ptr;
	struct tipc_media *m_ptr;
	int res = 0;

	l_ptr = link_find_link(name, &node);
	if (l_ptr) {
		/*
		 * acquire node lock for tipc_link_send_proto_msg().
		 * see "TIPC locking policy" in net.c.
		 */
		tipc_node_lock(node);
		switch (cmd) {
		case TIPC_CMD_SET_LINK_TOL:
			link_set_supervision_props(l_ptr, new_value);
			tipc_link_send_proto_msg(l_ptr,
				STATE_MSG, 0, 0, new_value, 0, 0);
			break;
		case TIPC_CMD_SET_LINK_PRI:
			l_ptr->priority = new_value;
			tipc_link_send_proto_msg(l_ptr,
				STATE_MSG, 0, 0, 0, new_value, 0);
			break;
		case TIPC_CMD_SET_LINK_WINDOW:
			tipc_link_set_queue_limits(l_ptr, new_value);
			break;
		default:
			res = -EINVAL;
			break;
		}
		tipc_node_unlock(node);
		return res;
	}

	b_ptr = tipc_bearer_find(name);
	if (b_ptr) {
		switch (cmd) {
		case TIPC_CMD_SET_LINK_TOL:
			b_ptr->tolerance = new_value;
			break;
		case TIPC_CMD_SET_LINK_PRI:
			b_ptr->priority = new_value;
			break;
		case TIPC_CMD_SET_LINK_WINDOW:
			b_ptr->window = new_value;
			break;
		default:
			res = -EINVAL;
			break;
		}
		return res;
	}

	m_ptr = tipc_media_find(name);
	if (!m_ptr)
		return -ENODEV;
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		m_ptr->tolerance = new_value;
		break;
	case TIPC_CMD_SET_LINK_PRI:
		m_ptr->priority = new_value;
		break;
	case TIPC_CMD_SET_LINK_WINDOW:
		m_ptr->window = new_value;
		break;
	default:
		res = -EINVAL;
		break;
	}
	return res;
}
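
/* Illustrative usage (not part of the original file): the same entry
 * point resolves all three name spaces in turn, so (values hypothetical)
 *
 *	link_cmd_set_value("1.1.1:eth0-1.1.2:eth0", 1500, TIPC_CMD_SET_LINK_TOL)
 *
 * updates one link, while "eth:eth0" would match the bearer lookup
 * instead, and "eth" the media lookup.
 */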
struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
				     u16 cmd)
{
	struct tipc_link_config *args;
	u32 new_value;
	int res;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
	new_value = ntohl(args->value);

	if (!link_value_is_valid(cmd, new_value))
		return tipc_cfg_reply_error_string(
			"cannot change, value invalid");

	if (!strcmp(args->name, tipc_bclink_name)) {
		if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
		    (tipc_bclink_set_queue_limits(new_value) == 0))
			return tipc_cfg_reply_none();
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (cannot change setting on broadcast link)");
	}

	read_lock_bh(&tipc_net_lock);
	res = link_cmd_set_value(args->name, new_value, cmd);
	read_unlock_bh(&tipc_net_lock);
	if (res)
		return tipc_cfg_reply_error_string("cannot change link setting");

	return tipc_cfg_reply_none();
}
/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}
struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
{
	char *link_name;
	struct tipc_link *l_ptr;
	struct tipc_node *node;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	link_name = (char *)TLV_DATA(req_tlv_area);
	if (!strcmp(link_name, tipc_bclink_name)) {
		if (tipc_bclink_reset_stats())
			return tipc_cfg_reply_error_string("link not found");
		return tipc_cfg_reply_none();
	}

	read_lock_bh(&tipc_net_lock);
	l_ptr = link_find_link(link_name, &node);
	if (!l_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string("link not found");
	}

	tipc_node_lock(node);
	link_reset_statistics(l_ptr);
	tipc_node_unlock(node);
	read_unlock_bh(&tipc_net_lock);
	return tipc_cfg_reply_none();
}
/**
 * percent - convert count to a percentage of total (rounding up or down)
 */
static u32 percent(u32 count, u32 total)
{
	return (count * 100 + (total / 2)) / total;
}
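
/* Worked example (not part of the original file): adding total / 2
 * before dividing rounds to nearest instead of truncating:
 * percent(1, 3) = (100 + 1) / 3 = 33 and percent(2, 3) = (200 + 1) / 3 = 67,
 * so the two shares round in opposite directions and still sum to 100.
 */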
/**
 * tipc_link_stats - print link statistics
 * @name: link name
 * @buf: print buffer area
 * @buf_size: size of print buffer area
 *
 * Returns length of print buffer data string (or 0 if error)
 */
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
	struct tipc_link *l;
	struct tipc_stats *s;
	struct tipc_node *node;
	char *status;
	u32 profile_total = 0;
	int ret;

	if (!strcmp(name, tipc_bclink_name))
		return tipc_bclink_stats(buf, buf_size);

	read_lock_bh(&tipc_net_lock);
	l = link_find_link(name, &node);
	if (!l) {
		read_unlock_bh(&tipc_net_lock);
		return 0;
	}
	tipc_node_lock(node);
	s = &l->stats;

	if (tipc_link_is_active(l))
		status = "ACTIVE";
	else if (tipc_link_is_up(l))
		status = "STANDBY";
	else
		status = "DEFUNCT";

	ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
			    " %s MTU:%u Priority:%u Tolerance:%u ms"
			    " Window:%u packets\n",
			    l->name, status, l->max_pkt, l->priority,
			    l->tolerance, l->queue_limit[0]);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_in_no - s->recv_info, s->recv_fragments,
			     s->recv_fragmented, s->recv_bundles,
			     s->recv_bundled);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_out_no - s->sent_info, s->sent_fragments,
			     s->sent_fragmented, s->sent_bundles,
			     s->sent_bundled);

	profile_total = s->msg_length_counts;
	if (!profile_total)
		profile_total = 1;

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX profile sample:%u packets average:%u octets\n"
			     " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
			     "-16384:%u%% -32768:%u%% -66000:%u%%\n",
			     s->msg_length_counts,
			     s->msg_lengths_total / profile_total,
			     percent(s->msg_length_profile[0], profile_total),
			     percent(s->msg_length_profile[1], profile_total),
			     percent(s->msg_length_profile[2], profile_total),
			     percent(s->msg_length_profile[3], profile_total),
			     percent(s->msg_length_profile[4], profile_total),
			     percent(s->msg_length_profile[5], profile_total),
			     percent(s->msg_length_profile[6], profile_total));

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " RX states:%u probes:%u naks:%u defs:%u"
			     " dups:%u\n", s->recv_states, s->recv_probes,
			     s->recv_nacks, s->deferred_recv, s->duplicates);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX states:%u probes:%u naks:%u acks:%u"
			     " dups:%u\n", s->sent_states, s->sent_probes,
			     s->sent_nacks, s->sent_acks, s->retransmitted);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " Congestion link:%u Send queue"
			     " max:%u avg:%u\n", s->link_congs,
			     s->max_queue_sz, s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

	tipc_node_unlock(node);
	read_unlock_bh(&tipc_net_lock);
	return ret;
}
struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
{
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	int str_len;
	int pb_len;
	char *pb;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
	if (!buf)
		return NULL;

	rep_tlv = (struct tlv_desc *)buf->data;
	pb = TLV_DATA(rep_tlv);
	pb_len = ULTRA_STRING_MAX_LEN;
	str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
				  pb, pb_len);
	if (!str_len) {
		kfree_skb(buf);
		return tipc_cfg_reply_error_string("link not found");
	}
	str_len += 1;	/* for "\0" */
	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}
/**
 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
 * @dest: network address of destination node
 * @selector: used to select from set of active links
 *
 * If no active link can be found, uses default maximum packet size.
 */
u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	u32 res = MAX_PKT_DEFAULT;

	if (dest == tipc_own_addr)
		return MAX_MSG_SIZE;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = l_ptr->max_pkt;
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);
	return res;
}
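
/* Illustrative sketch (not part of the original file): a caller can use
 * the reported MTU to decide whether a payload will travel as a single
 * packet or be chopped up by link_send_long_buf(). The helper name and
 * the use of INT_H_SIZE as the worst-case header are assumptions here.
 */
static bool __maybe_unused fits_one_packet(u32 dest, u32 selector, u32 payload)
{
	return payload + INT_H_SIZE <= tipc_link_get_max_pkt(dest, selector);
}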
static void link_print(struct tipc_link *l_ptr, const char *str)
{
	pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name);

	if (link_working_unknown(l_ptr))
		pr_cont(":WU\n");
	else if (link_reset_reset(l_ptr))
		pr_cont(":RR\n");
	else if (link_reset_unknown(l_ptr))
		pr_cont(":RU\n");
	else if (link_working_working(l_ptr))