1 #include <linux/ceph/ceph_debug.h>
3 #include <linux/crc32c.h>
4 #include <linux/ctype.h>
5 #include <linux/highmem.h>
6 #include <linux/inet.h>
7 #include <linux/kthread.h>
9 #include <linux/slab.h>
10 #include <linux/socket.h>
11 #include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
14 #endif /* CONFIG_BLOCK */
15 #include <linux/dns_resolver.h>
18 #include <linux/ceph/libceph.h>
19 #include <linux/ceph/messenger.h>
20 #include <linux/ceph/decode.h>
21 #include <linux/ceph/pagelist.h>
22 #include <linux/export.h>
24 #define list_entry_next(pos, member) \
25 list_entry(pos->member.next, typeof(*pos), member)
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */
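/*
 * Illustrative sketch only: how a messenger client (mon/osd/mds) typically
 * drives a connection with the API exported from this file.  The names
 * "client", "my_ops", "osd_num" and "addr" are hypothetical placeholders,
 * and error handling is omitted.
 *
 *	ceph_con_init(&con, client, &my_ops, msgr);
 *	ceph_con_open(&con, CEPH_ENTITY_TYPE_OSD, osd_num, &addr);
 *	ceph_con_send(&con, msg);	queue msg; the worker writes it out
 *	...
 *	ceph_con_close(&con);		mark the peer down, drop the socket
 */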
/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 * Socket states and transitions (each transition is performed by the
 * helper named next to it):
 *
 *	NEW         transient initial state
 *	  -> CLOSED		con_sock_state_init()
 *	CLOSED      initialized, but no socket (and no TCP connection)
 *	  -> CONNECTING		con_sock_state_connecting()
 *	CONNECTING  socket created, TCP connect initiated
 *	  -> CONNECTED		con_sock_state_connected()
 *	CONNECTING/CONNECTED
 *	  -> CLOSING		con_sock_state_closing()   socket event; await close
 *	any of the above
 *	  -> CLOSED		con_sock_state_closed()
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */
78 #define CON_SOCK_STATE_NEW 0 /* -> CLOSED */
79 #define CON_SOCK_STATE_CLOSED 1 /* -> CONNECTING */
80 #define CON_SOCK_STATE_CONNECTING 2 /* -> CONNECTED or -> CLOSING */
81 #define CON_SOCK_STATE_CONNECTED 3 /* -> CLOSING or -> CLOSED */
82 #define CON_SOCK_STATE_CLOSING 4 /* -> CLOSED */
/*
 * connection states
 */
#define CON_STATE_CLOSED 1 /* -> PREOPEN */
88 #define CON_STATE_PREOPEN 2 /* -> CONNECTING, CLOSED */
89 #define CON_STATE_CONNECTING 3 /* -> NEGOTIATING, CLOSED */
90 #define CON_STATE_NEGOTIATING 4 /* -> OPEN, CLOSED */
91 #define CON_STATE_OPEN 5 /* -> STANDBY, CLOSED */
92 #define CON_STATE_STANDBY 6 /* -> PREOPEN, CLOSED */
/*
 * ceph_connection flag bits
 */
97 #define CON_FLAG_LOSSYTX 0 /* we can close channel or drop
98 * messages on errors */
99 #define CON_FLAG_KEEPALIVE_PENDING 1 /* we need to send a keepalive */
100 #define CON_FLAG_WRITE_PENDING 2 /* we have data ready to send */
101 #define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */
102 #define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */
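/*
 * These bits are only manipulated through the con_flag_*() helpers below,
 * which BUG on an unknown bit number.  For example, try_write() later in
 * this file drains a pending keepalive like this:
 *
 *	if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING))
 *		prepare_write_keepalive(con);
 */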
static bool con_flag_valid(unsigned long con_flag)
{
	switch (con_flag) {
	case CON_FLAG_LOSSYTX:
	case CON_FLAG_KEEPALIVE_PENDING:
	case CON_FLAG_WRITE_PENDING:
	case CON_FLAG_SOCK_CLOSED:
	case CON_FLAG_BACKOFF:
		return true;
	}
	return false;
}
static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));
	clear_bit(con_flag, &con->flags);
}

static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));
	set_bit(con_flag, &con->flags);
}

static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));
	return test_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_clear(struct ceph_connection *con,
					unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));
	return test_and_clear_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_set(struct ceph_connection *con,
					unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));
	return test_and_set_bit(con_flag, &con->flags);
}
155 /* Slab caches for frequently-allocated structures */
157 static struct kmem_cache *ceph_msg_cache;
158 static struct kmem_cache *ceph_msg_data_cache;
160 /* static tag bytes (protocol control messages) */
161 static char tag_msg = CEPH_MSGR_TAG_MSG;
162 static char tag_ack = CEPH_MSGR_TAG_ACK;
163 static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
165 #ifdef CONFIG_LOCKDEP
166 static struct lock_class_key socket_class;
170 * When skipping (ignoring) a block of input we read it into a "skip
171 * buffer," which is this many bytes in size.
173 #define SKIP_BUF_SIZE 1024
175 static void queue_con(struct ceph_connection *con);
176 static void con_work(struct work_struct *);
177 static void con_fault(struct ceph_connection *con);
180 * Nicely render a sockaddr as a string. An array of formatted
181 * strings is used, to approximate reentrancy.
183 #define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */
184 #define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG)
185 #define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1)
186 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
188 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
189 static atomic_t addr_str_seq = ATOMIC_INIT(0);
191 static struct page *zero_page; /* used in certain error cases */
193 const char *ceph_pr_addr(const struct sockaddr_storage *ss)
197 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
198 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
200 i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
203 switch (ss->ss_family) {
205 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
206 ntohs(in4->sin_port));
210 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
211 ntohs(in6->sin6_port));
215 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
221 EXPORT_SYMBOL(ceph_pr_addr);
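/*
 * Example use (as elsewhere in this file): the returned string lives in a
 * small rotating array, so it is only approximately reentrant and should
 * be consumed right away, e.g.
 *
 *	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
 */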
223 static void encode_my_addr(struct ceph_messenger *msgr)
225 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
226 ceph_encode_addr(&msgr->my_enc_addr);
230 * work queue for all reading and writing to/from the socket.
232 static struct workqueue_struct *ceph_msgr_wq;
234 static int ceph_msgr_slab_init(void)
236 BUG_ON(ceph_msg_cache);
237 ceph_msg_cache = kmem_cache_create("ceph_msg",
238 sizeof (struct ceph_msg),
239 __alignof__(struct ceph_msg), 0, NULL);
244 BUG_ON(ceph_msg_data_cache);
245 ceph_msg_data_cache = kmem_cache_create("ceph_msg_data",
246 sizeof (struct ceph_msg_data),
247 __alignof__(struct ceph_msg_data),
249 if (ceph_msg_data_cache)
252 kmem_cache_destroy(ceph_msg_cache);
253 ceph_msg_cache = NULL;
258 static void ceph_msgr_slab_exit(void)
260 BUG_ON(!ceph_msg_data_cache);
261 kmem_cache_destroy(ceph_msg_data_cache);
262 ceph_msg_data_cache = NULL;
264 BUG_ON(!ceph_msg_cache);
265 kmem_cache_destroy(ceph_msg_cache);
266 ceph_msg_cache = NULL;
269 static void _ceph_msgr_exit(void)
272 destroy_workqueue(ceph_msgr_wq);
276 ceph_msgr_slab_exit();
278 BUG_ON(zero_page == NULL);
280 page_cache_release(zero_page);
284 int ceph_msgr_init(void)
286 BUG_ON(zero_page != NULL);
287 zero_page = ZERO_PAGE(0);
288 page_cache_get(zero_page);
290 if (ceph_msgr_slab_init())
293 ceph_msgr_wq = alloc_workqueue("ceph-msgr", 0, 0);
297 pr_err("msgr_init failed to create workqueue\n");
302 EXPORT_SYMBOL(ceph_msgr_init);
304 void ceph_msgr_exit(void)
306 BUG_ON(ceph_msgr_wq == NULL);
310 EXPORT_SYMBOL(ceph_msgr_exit);
312 void ceph_msgr_flush(void)
314 flush_workqueue(ceph_msgr_wq);
316 EXPORT_SYMBOL(ceph_msgr_flush);
318 /* Connection socket state transition functions */
320 static void con_sock_state_init(struct ceph_connection *con)
324 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
325 if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
326 printk("%s: unexpected old state %d\n", __func__, old_state);
327 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
328 CON_SOCK_STATE_CLOSED);
331 static void con_sock_state_connecting(struct ceph_connection *con)
335 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
336 if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
337 printk("%s: unexpected old state %d\n", __func__, old_state);
338 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
339 CON_SOCK_STATE_CONNECTING);
342 static void con_sock_state_connected(struct ceph_connection *con)
346 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
347 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
348 printk("%s: unexpected old state %d\n", __func__, old_state);
349 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
350 CON_SOCK_STATE_CONNECTED);
353 static void con_sock_state_closing(struct ceph_connection *con)
357 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
358 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
359 old_state != CON_SOCK_STATE_CONNECTED &&
360 old_state != CON_SOCK_STATE_CLOSING))
361 printk("%s: unexpected old state %d\n", __func__, old_state);
362 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
363 CON_SOCK_STATE_CLOSING);
366 static void con_sock_state_closed(struct ceph_connection *con)
370 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
371 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
372 old_state != CON_SOCK_STATE_CLOSING &&
373 old_state != CON_SOCK_STATE_CONNECTING &&
374 old_state != CON_SOCK_STATE_CLOSED))
375 printk("%s: unexpected old state %d\n", __func__, old_state);
376 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
377 CON_SOCK_STATE_CLOSED);
381 * socket callback functions
384 /* data available on socket, or listen socket received a connect */
385 static void ceph_sock_data_ready(struct sock *sk, int count_unused)
387 struct ceph_connection *con = sk->sk_user_data;
388 if (atomic_read(&con->msgr->stopping)) {
392 if (sk->sk_state != TCP_CLOSE_WAIT) {
393 dout("%s on %p state = %lu, queueing work\n", __func__,
399 /* socket has buffer space for writing */
400 static void ceph_sock_write_space(struct sock *sk)
402 struct ceph_connection *con = sk->sk_user_data;
404 /* only queue to workqueue if there is data we want to write,
405 * and there is sufficient space in the socket buffer to accept
406 * more data. clear SOCK_NOSPACE so that ceph_sock_write_space()
407 * doesn't get called again until try_write() fills the socket
408 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
409 * and net/core/stream.c:sk_stream_write_space().
411 if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
412 if (sk_stream_is_writeable(sk)) {
413 dout("%s %p queueing write work\n", __func__, con);
414 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
418 dout("%s %p nothing to write\n", __func__, con);
422 /* socket's state has changed */
423 static void ceph_sock_state_change(struct sock *sk)
425 struct ceph_connection *con = sk->sk_user_data;
427 dout("%s %p state = %lu sk_state = %u\n", __func__,
428 con, con->state, sk->sk_state);
430 switch (sk->sk_state) {
432 dout("%s TCP_CLOSE\n", __func__);
434 dout("%s TCP_CLOSE_WAIT\n", __func__);
435 con_sock_state_closing(con);
436 con_flag_set(con, CON_FLAG_SOCK_CLOSED);
439 case TCP_ESTABLISHED:
440 dout("%s TCP_ESTABLISHED\n", __func__);
441 con_sock_state_connected(con);
444 default: /* Everything else is uninteresting */
450 * set up socket callbacks
452 static void set_sock_callbacks(struct socket *sock,
453 struct ceph_connection *con)
455 struct sock *sk = sock->sk;
456 sk->sk_user_data = con;
457 sk->sk_data_ready = ceph_sock_data_ready;
458 sk->sk_write_space = ceph_sock_write_space;
459 sk->sk_state_change = ceph_sock_state_change;
468 * initiate connection to a remote socket.
470 static int ceph_tcp_connect(struct ceph_connection *con)
472 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
477 ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
481 sock->sk->sk_allocation = GFP_NOFS;
483 #ifdef CONFIG_LOCKDEP
484 lockdep_set_class(&sock->sk->sk_lock, &socket_class);
487 set_sock_callbacks(sock, con);
489 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
491 con_sock_state_connecting(con);
492 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
494 if (ret == -EINPROGRESS) {
495 dout("connect %s EINPROGRESS sk_state = %u\n",
496 ceph_pr_addr(&con->peer_addr.in_addr),
498 } else if (ret < 0) {
499 pr_err("connect %s error %d\n",
500 ceph_pr_addr(&con->peer_addr.in_addr), ret);
502 con->error_msg = "connect error";
510 static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
512 struct kvec iov = {buf, len};
513 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
516 r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
522 static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
523 int page_offset, size_t length)
528 BUG_ON(page_offset + length > PAGE_SIZE);
532 ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length);
/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
542 static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
543 size_t kvlen, size_t len, int more)
545 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
549 msg.msg_flags |= MSG_MORE;
551 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
553 r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
559 static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
560 int offset, size_t size, bool more)
562 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
565 ret = kernel_sendpage(sock, page, offset, size, flags);
574 * Shutdown/close the socket for the given connection.
576 static int con_close_socket(struct ceph_connection *con)
580 dout("con_close_socket on %p sock %p\n", con, con->sock);
582 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
583 sock_release(con->sock);
588 * Forcibly clear the SOCK_CLOSED flag. It gets set
589 * independent of the connection mutex, and we could have
590 * received a socket close event before we had the chance to
591 * shut the socket down.
593 con_flag_clear(con, CON_FLAG_SOCK_CLOSED);
595 con_sock_state_closed(con);
600 * Reset a connection. Discard all incoming and outgoing messages
601 * and clear *_seq state.
603 static void ceph_msg_remove(struct ceph_msg *msg)
605 list_del_init(&msg->list_head);
606 BUG_ON(msg->con == NULL);
607 msg->con->ops->put(msg->con);
612 static void ceph_msg_remove_list(struct list_head *head)
614 while (!list_empty(head)) {
615 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
617 ceph_msg_remove(msg);
621 static void reset_connection(struct ceph_connection *con)
623 /* reset connection, out_queue, msg_ and connect_seq */
624 /* discard existing out_queue and msg_seq */
625 dout("reset_connection %p\n", con);
626 ceph_msg_remove_list(&con->out_queue);
627 ceph_msg_remove_list(&con->out_sent);
630 BUG_ON(con->in_msg->con != con);
631 con->in_msg->con = NULL;
632 ceph_msg_put(con->in_msg);
637 con->connect_seq = 0;
640 ceph_msg_put(con->out_msg);
644 con->in_seq_acked = 0;
648 * mark a peer down. drop any open connections.
650 void ceph_con_close(struct ceph_connection *con)
652 mutex_lock(&con->mutex);
653 dout("con_close %p peer %s\n", con,
654 ceph_pr_addr(&con->peer_addr.in_addr));
655 con->state = CON_STATE_CLOSED;
657 con_flag_clear(con, CON_FLAG_LOSSYTX); /* so we retry next connect */
658 con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
659 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
660 con_flag_clear(con, CON_FLAG_BACKOFF);
662 reset_connection(con);
663 con->peer_global_seq = 0;
664 cancel_delayed_work(&con->work);
665 con_close_socket(con);
666 mutex_unlock(&con->mutex);
668 EXPORT_SYMBOL(ceph_con_close);
671 * Reopen a closed connection, with a new peer address.
673 void ceph_con_open(struct ceph_connection *con,
674 __u8 entity_type, __u64 entity_num,
675 struct ceph_entity_addr *addr)
677 mutex_lock(&con->mutex);
678 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
680 WARN_ON(con->state != CON_STATE_CLOSED);
681 con->state = CON_STATE_PREOPEN;
683 con->peer_name.type = (__u8) entity_type;
684 con->peer_name.num = cpu_to_le64(entity_num);
686 memcpy(&con->peer_addr, addr, sizeof(*addr));
687 con->delay = 0; /* reset backoff memory */
688 mutex_unlock(&con->mutex);
691 EXPORT_SYMBOL(ceph_con_open);
694 * return true if this connection ever successfully opened
696 bool ceph_con_opened(struct ceph_connection *con)
698 return con->connect_seq > 0;
702 * initialize a new connection.
704 void ceph_con_init(struct ceph_connection *con, void *private,
705 const struct ceph_connection_operations *ops,
706 struct ceph_messenger *msgr)
708 dout("con_init %p\n", con);
709 memset(con, 0, sizeof(*con));
710 con->private = private;
714 con_sock_state_init(con);
716 mutex_init(&con->mutex);
717 INIT_LIST_HEAD(&con->out_queue);
718 INIT_LIST_HEAD(&con->out_sent);
719 INIT_DELAYED_WORK(&con->work, con_work);
721 con->state = CON_STATE_CLOSED;
723 EXPORT_SYMBOL(ceph_con_init);
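/*
 * A minimal ceph_connection_operations sketch, for orientation only.  The
 * helper names are hypothetical; get/put take and drop a reference on the
 * owning structure, dispatch handles a received ceph_msg, and alloc_msg
 * supplies a message for an incoming header.  Real users (mon/osd/mds
 * clients) fill in more callbacks (peer_reset, get_authorizer, ...).
 *
 *	static const struct ceph_connection_operations my_ops = {
 *		.get		= my_con_get,
 *		.put		= my_con_put,
 *		.dispatch	= my_dispatch,
 *		.alloc_msg	= my_alloc_msg,
 *	};
 *
 *	ceph_con_init(&con, owner, &my_ops, msgr);
 */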
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}
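/*
 * For example, process_connect() below bumps the messenger-wide counter
 * past whatever the peer reported when it answers RETRY_GLOBAL:
 *
 *	get_global_seq(con->msgr, le32_to_cpu(con->in_reply.global_seq));
 */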
742 static void con_out_kvec_reset(struct ceph_connection *con)
744 con->out_kvec_left = 0;
745 con->out_kvec_bytes = 0;
746 con->out_kvec_cur = &con->out_kvec[0];
749 static void con_out_kvec_add(struct ceph_connection *con,
750 size_t size, void *data)
754 index = con->out_kvec_left;
755 BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
757 con->out_kvec[index].iov_len = size;
758 con->out_kvec[index].iov_base = data;
759 con->out_kvec_left++;
760 con->out_kvec_bytes += size;
/*
 * For a bio data item, a piece is whatever remains of the next
 * entry in the current bio iovec, or the first entry in the next
 * bio in the list.
 */
770 static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
773 struct ceph_msg_data *data = cursor->data;
776 BUG_ON(data->type != CEPH_MSG_DATA_BIO);
781 cursor->resid = min(length, data->bio_length);
783 cursor->bvec_iter = bio->bi_iter;
785 cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
788 static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
792 struct ceph_msg_data *data = cursor->data;
794 struct bio_vec bio_vec;
796 BUG_ON(data->type != CEPH_MSG_DATA_BIO);
801 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
803 *page_offset = (size_t) bio_vec.bv_offset;
804 BUG_ON(*page_offset >= PAGE_SIZE);
805 if (cursor->last_piece) /* pagelist offset is always 0 */
806 *length = cursor->resid;
808 *length = (size_t) bio_vec.bv_len;
809 BUG_ON(*length > cursor->resid);
810 BUG_ON(*page_offset + *length > PAGE_SIZE);
812 return bio_vec.bv_page;
815 static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
819 struct bio_vec bio_vec;
821 BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);
826 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
828 /* Advance the cursor offset */
830 BUG_ON(cursor->resid < bytes);
831 cursor->resid -= bytes;
833 bio_advance_iter(bio, &cursor->bvec_iter, bytes);
835 if (bytes < bio_vec.bv_len)
836 return false; /* more bytes to process in this segment */
838 /* Move on to the next segment, and possibly the next bio */
840 if (!cursor->bvec_iter.bi_size) {
842 cursor->bvec_iter = bio->bi_iter;
846 if (!cursor->last_piece) {
847 BUG_ON(!cursor->resid);
849 /* A short read is OK, so use <= rather than == */
850 if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
851 cursor->last_piece = true;
856 #endif /* CONFIG_BLOCK */
859 * For a page array, a piece comes from the first page in the array
860 * that has not already been fully consumed.
862 static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
865 struct ceph_msg_data *data = cursor->data;
868 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
870 BUG_ON(!data->pages);
871 BUG_ON(!data->length);
873 cursor->resid = min(length, data->length);
874 page_count = calc_pages_for(data->alignment, (u64)data->length);
875 cursor->page_offset = data->alignment & ~PAGE_MASK;
876 cursor->page_index = 0;
877 BUG_ON(page_count > (int)USHRT_MAX);
878 cursor->page_count = (unsigned short)page_count;
879 BUG_ON(length > SIZE_MAX - cursor->page_offset);
880 cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE;
884 ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
885 size_t *page_offset, size_t *length)
887 struct ceph_msg_data *data = cursor->data;
889 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
891 BUG_ON(cursor->page_index >= cursor->page_count);
892 BUG_ON(cursor->page_offset >= PAGE_SIZE);
894 *page_offset = cursor->page_offset;
895 if (cursor->last_piece)
896 *length = cursor->resid;
898 *length = PAGE_SIZE - *page_offset;
900 return data->pages[cursor->page_index];
903 static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
906 BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);
908 BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
910 /* Advance the cursor page offset */
912 cursor->resid -= bytes;
913 cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
914 if (!bytes || cursor->page_offset)
915 return false; /* more bytes to process in the current page */
917 /* Move on to the next page; offset is already at 0 */
919 BUG_ON(cursor->page_index >= cursor->page_count);
920 cursor->page_index++;
921 cursor->last_piece = cursor->resid <= PAGE_SIZE;
927 * For a pagelist, a piece is whatever remains to be consumed in the
928 * first page in the list, or the front of the next page.
931 ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
934 struct ceph_msg_data *data = cursor->data;
935 struct ceph_pagelist *pagelist;
938 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
940 pagelist = data->pagelist;
944 return; /* pagelist can be assigned but empty */
946 BUG_ON(list_empty(&pagelist->head));
947 page = list_first_entry(&pagelist->head, struct page, lru);
949 cursor->resid = min(length, pagelist->length);
952 cursor->last_piece = cursor->resid <= PAGE_SIZE;
956 ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
957 size_t *page_offset, size_t *length)
959 struct ceph_msg_data *data = cursor->data;
960 struct ceph_pagelist *pagelist;
962 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
964 pagelist = data->pagelist;
967 BUG_ON(!cursor->page);
968 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
970 /* offset of first page in pagelist is always 0 */
971 *page_offset = cursor->offset & ~PAGE_MASK;
972 if (cursor->last_piece)
973 *length = cursor->resid;
975 *length = PAGE_SIZE - *page_offset;
980 static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
983 struct ceph_msg_data *data = cursor->data;
984 struct ceph_pagelist *pagelist;
986 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
988 pagelist = data->pagelist;
991 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
992 BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);
994 /* Advance the cursor offset */
996 cursor->resid -= bytes;
997 cursor->offset += bytes;
998 /* offset of first page in pagelist is always 0 */
999 if (!bytes || cursor->offset & ~PAGE_MASK)
1000 return false; /* more bytes to process in the current page */
1002 /* Move on to the next page */
1004 BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
1005 cursor->page = list_entry_next(cursor->page, lru);
1006 cursor->last_piece = cursor->resid <= PAGE_SIZE;
/*
 * Message data is handled (sent or received) in pieces, where each
 * piece resides on a single page.  The network layer might not
 * consume an entire piece at once.  A data item's cursor keeps
 * track of which piece is next to process and how much remains to
 * be processed in that piece.  It also tracks whether the current
 * piece is the last one in the data item.
 */
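/*
 * A condensed sketch of how the write path walks a cursor; see
 * write_partial_message_data() below for the real loop, which also
 * handles CRCs and short writes:
 *
 *	ceph_msg_data_cursor_init(msg, msg->data_length);
 *	while (cursor->resid) {
 *		page = ceph_msg_data_next(cursor, &off, &len, &last_piece);
 *		ret = ceph_tcp_sendpage(con->sock, page, off, len, last_piece);
 *		ceph_msg_data_advance(cursor, (size_t)ret);
 *	}
 */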
1019 static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
1021 size_t length = cursor->total_resid;
1023 switch (cursor->data->type) {
1024 case CEPH_MSG_DATA_PAGELIST:
1025 ceph_msg_data_pagelist_cursor_init(cursor, length);
1027 case CEPH_MSG_DATA_PAGES:
1028 ceph_msg_data_pages_cursor_init(cursor, length);
1031 case CEPH_MSG_DATA_BIO:
1032 ceph_msg_data_bio_cursor_init(cursor, length);
1034 #endif /* CONFIG_BLOCK */
1035 case CEPH_MSG_DATA_NONE:
1040 cursor->need_crc = true;
1043 static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
1045 struct ceph_msg_data_cursor *cursor = &msg->cursor;
1046 struct ceph_msg_data *data;
1049 BUG_ON(length > msg->data_length);
1050 BUG_ON(list_empty(&msg->data));
1052 cursor->data_head = &msg->data;
1053 cursor->total_resid = length;
1054 data = list_first_entry(&msg->data, struct ceph_msg_data, links);
1055 cursor->data = data;
1057 __ceph_msg_data_cursor_init(cursor);
1061 * Return the page containing the next piece to process for a given
1062 * data item, and supply the page offset and length of that piece.
1063 * Indicate whether this is the last piece in this data item.
1065 static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
1066 size_t *page_offset, size_t *length,
1071 switch (cursor->data->type) {
1072 case CEPH_MSG_DATA_PAGELIST:
1073 page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
1075 case CEPH_MSG_DATA_PAGES:
1076 page = ceph_msg_data_pages_next(cursor, page_offset, length);
1079 case CEPH_MSG_DATA_BIO:
1080 page = ceph_msg_data_bio_next(cursor, page_offset, length);
1082 #endif /* CONFIG_BLOCK */
1083 case CEPH_MSG_DATA_NONE:
1089 BUG_ON(*page_offset + *length > PAGE_SIZE);
1092 *last_piece = cursor->last_piece;
/*
 * Returns true if the result moves the cursor on to the next piece
 * of the data item.
 */
1101 static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
1106 BUG_ON(bytes > cursor->resid);
1107 switch (cursor->data->type) {
1108 case CEPH_MSG_DATA_PAGELIST:
1109 new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
1111 case CEPH_MSG_DATA_PAGES:
1112 new_piece = ceph_msg_data_pages_advance(cursor, bytes);
1115 case CEPH_MSG_DATA_BIO:
1116 new_piece = ceph_msg_data_bio_advance(cursor, bytes);
1118 #endif /* CONFIG_BLOCK */
1119 case CEPH_MSG_DATA_NONE:
1124 cursor->total_resid -= bytes;
1126 if (!cursor->resid && cursor->total_resid) {
1127 WARN_ON(!cursor->last_piece);
1128 BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
1129 cursor->data = list_entry_next(cursor->data, links);
1130 __ceph_msg_data_cursor_init(cursor);
1133 cursor->need_crc = new_piece;
1138 static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
1143 /* Initialize data cursor */
1145 ceph_msg_data_cursor_init(msg, (size_t)data_len);
/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid; we just add on to the end.
 */
1152 static void prepare_write_message_footer(struct ceph_connection *con)
1154 struct ceph_msg *m = con->out_msg;
1155 int v = con->out_kvec_left;
1157 m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
1159 dout("prepare_write_message_footer %p\n", con);
1160 con->out_kvec_is_msg = true;
1161 con->out_kvec[v].iov_base = &m->footer;
1162 con->out_kvec[v].iov_len = sizeof(m->footer);
1163 con->out_kvec_bytes += sizeof(m->footer);
1164 con->out_kvec_left++;
1165 con->out_more = m->more_to_follow;
1166 con->out_msg_done = true;
1170 * Prepare headers for the next outgoing message.
1172 static void prepare_write_message(struct ceph_connection *con)
1177 con_out_kvec_reset(con);
1178 con->out_kvec_is_msg = true;
1179 con->out_msg_done = false;
1181 /* Sneak an ack in there first? If we can get it into the same
1182 * TCP packet that's a good thing. */
1183 if (con->in_seq > con->in_seq_acked) {
1184 con->in_seq_acked = con->in_seq;
1185 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
1186 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1187 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1188 &con->out_temp_ack);
1191 BUG_ON(list_empty(&con->out_queue));
1192 m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
1194 BUG_ON(m->con != con);
1196 /* put message on sent list */
1198 list_move_tail(&m->list_head, &con->out_sent);
1201 * only assign outgoing seq # if we haven't sent this message
 * yet.  if it is requeued, resend with its original seq.
1204 if (m->needs_out_seq) {
1205 m->hdr.seq = cpu_to_le64(++con->out_seq);
1206 m->needs_out_seq = false;
1208 WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));
1210 dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
1211 m, con->out_seq, le16_to_cpu(m->hdr.type),
1212 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
1214 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);
1216 /* tag + hdr + front + middle */
1217 con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
1218 con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
1219 con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
1222 con_out_kvec_add(con, m->middle->vec.iov_len,
1223 m->middle->vec.iov_base);
1225 /* fill in crc (except data pages), footer */
1226 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
1227 con->out_msg->hdr.crc = cpu_to_le32(crc);
1228 con->out_msg->footer.flags = 0;
1230 crc = crc32c(0, m->front.iov_base, m->front.iov_len);
1231 con->out_msg->footer.front_crc = cpu_to_le32(crc);
1233 crc = crc32c(0, m->middle->vec.iov_base,
1234 m->middle->vec.iov_len);
1235 con->out_msg->footer.middle_crc = cpu_to_le32(crc);
1237 con->out_msg->footer.middle_crc = 0;
1238 dout("%s front_crc %u middle_crc %u\n", __func__,
1239 le32_to_cpu(con->out_msg->footer.front_crc),
1240 le32_to_cpu(con->out_msg->footer.middle_crc));
1242 /* is there a data payload? */
1243 con->out_msg->footer.data_crc = 0;
1244 if (m->data_length) {
1245 prepare_message_data(con->out_msg, m->data_length);
1246 con->out_more = 1; /* data + footer will follow */
1248 /* no, queue up footer too and be done */
1249 prepare_write_message_footer(con);
1252 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1258 static void prepare_write_ack(struct ceph_connection *con)
1260 dout("prepare_write_ack %p %llu -> %llu\n", con,
1261 con->in_seq_acked, con->in_seq);
1262 con->in_seq_acked = con->in_seq;
1264 con_out_kvec_reset(con);
1266 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
1268 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1269 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1270 &con->out_temp_ack);
1272 con->out_more = 1; /* more will follow.. eventually.. */
1273 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1277 * Prepare to share the seq during handshake
1279 static void prepare_write_seq(struct ceph_connection *con)
1281 dout("prepare_write_seq %p %llu -> %llu\n", con,
1282 con->in_seq_acked, con->in_seq);
1283 con->in_seq_acked = con->in_seq;
1285 con_out_kvec_reset(con);
1287 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1288 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1289 &con->out_temp_ack);
1291 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1295 * Prepare to write keepalive byte.
1297 static void prepare_write_keepalive(struct ceph_connection *con)
1299 dout("prepare_write_keepalive %p\n", con);
1300 con_out_kvec_reset(con);
1301 con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
1302 con_flag_set(con, CON_FLAG_WRITE_PENDING);
/*
 * Connection negotiation.
 */
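/*
 * From the client's side the exchange built below looks roughly like this
 * (a sketch; see prepare_write_banner()/prepare_write_connect() and
 * read_partial_banner()/read_partial_connect() for the exact layout):
 *
 *	send: CEPH_BANNER, my_enc_addr, ceph_msg_connect [+ authorizer]
 *	recv: CEPH_BANNER, actual_peer_addr, peer_addr_for_me,
 *	      ceph_msg_connect_reply [+ authorizer reply]
 */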
1309 static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
1312 struct ceph_auth_handshake *auth;
1314 if (!con->ops->get_authorizer) {
1315 con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
1316 con->out_connect.authorizer_len = 0;
1320 /* Can't hold the mutex while getting authorizer */
1321 mutex_unlock(&con->mutex);
1322 auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
1323 mutex_lock(&con->mutex);
1327 if (con->state != CON_STATE_NEGOTIATING)
1328 return ERR_PTR(-EAGAIN);
1330 con->auth_reply_buf = auth->authorizer_reply_buf;
1331 con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
1336 * We connected to a peer and are saying hello.
1338 static void prepare_write_banner(struct ceph_connection *con)
1340 con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
1341 con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
1342 &con->msgr->my_enc_addr);
1345 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1348 static int prepare_write_connect(struct ceph_connection *con)
1350 unsigned int global_seq = get_global_seq(con->msgr, 0);
1353 struct ceph_auth_handshake *auth;
1355 switch (con->peer_name.type) {
1356 case CEPH_ENTITY_TYPE_MON:
1357 proto = CEPH_MONC_PROTOCOL;
1359 case CEPH_ENTITY_TYPE_OSD:
1360 proto = CEPH_OSDC_PROTOCOL;
1362 case CEPH_ENTITY_TYPE_MDS:
1363 proto = CEPH_MDSC_PROTOCOL;
1369 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
1370 con->connect_seq, global_seq, proto);
1372 con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
1373 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
1374 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
1375 con->out_connect.global_seq = cpu_to_le32(global_seq);
1376 con->out_connect.protocol_version = cpu_to_le32(proto);
1377 con->out_connect.flags = 0;
1379 auth_proto = CEPH_AUTH_UNKNOWN;
1380 auth = get_connect_authorizer(con, &auth_proto);
1382 return PTR_ERR(auth);
1384 con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
1385 con->out_connect.authorizer_len = auth ?
1386 cpu_to_le32(auth->authorizer_buf_len) : 0;
1388 con_out_kvec_add(con, sizeof (con->out_connect),
1390 if (auth && auth->authorizer_buf_len)
1391 con_out_kvec_add(con, auth->authorizer_buf_len,
1392 auth->authorizer_buf);
1395 con_flag_set(con, CON_FLAG_WRITE_PENDING);
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
1406 static int write_partial_kvec(struct ceph_connection *con)
1410 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
1411 while (con->out_kvec_bytes > 0) {
1412 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
1413 con->out_kvec_left, con->out_kvec_bytes,
1417 con->out_kvec_bytes -= ret;
1418 if (con->out_kvec_bytes == 0)
1421 /* account for full iov entries consumed */
1422 while (ret >= con->out_kvec_cur->iov_len) {
1423 BUG_ON(!con->out_kvec_left);
1424 ret -= con->out_kvec_cur->iov_len;
1425 con->out_kvec_cur++;
1426 con->out_kvec_left--;
1428 /* and for a partially-consumed entry */
1430 con->out_kvec_cur->iov_len -= ret;
1431 con->out_kvec_cur->iov_base += ret;
1434 con->out_kvec_left = 0;
1435 con->out_kvec_is_msg = false;
1438 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
1439 con->out_kvec_bytes, con->out_kvec_left, ret);
1440 return ret; /* done! */
static u32 ceph_crc32c_page(u32 crc, struct page *page,
				unsigned int page_offset,
				unsigned int length)
{
	char *kaddr = kmap(page);

	BUG_ON(kaddr == NULL);
	crc = crc32c(crc, kaddr + page_offset, length);
	kunmap(page);
	return crc;
}
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
1463 static int write_partial_message_data(struct ceph_connection *con)
1465 struct ceph_msg *msg = con->out_msg;
1466 struct ceph_msg_data_cursor *cursor = &msg->cursor;
1467 bool do_datacrc = !con->msgr->nocrc;
1470 dout("%s %p msg %p\n", __func__, con, msg);
1472 if (list_empty(&msg->data))
1476 * Iterate through each page that contains data to be
1477 * written, and send as much as possible for each.
1479 * If we are calculating the data crc (the default), we will
1480 * need to map the page. If we have no pages, they have
1481 * been revoked, so use the zero page.
1483 crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
1484 while (cursor->resid) {
1492 page = ceph_msg_data_next(&msg->cursor, &page_offset, &length,
1494 ret = ceph_tcp_sendpage(con->sock, page, page_offset,
1495 length, last_piece);
1498 msg->footer.data_crc = cpu_to_le32(crc);
1502 if (do_datacrc && cursor->need_crc)
1503 crc = ceph_crc32c_page(crc, page, page_offset, length);
1504 need_crc = ceph_msg_data_advance(&msg->cursor, (size_t)ret);
1507 dout("%s %p msg %p done\n", __func__, con, msg);
1509 /* prepare and queue up footer, too */
1511 msg->footer.data_crc = cpu_to_le32(crc);
1513 msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
1514 con_out_kvec_reset(con);
1515 prepare_write_message_footer(con);
1517 return 1; /* must return > 0 to indicate success */
1523 static int write_partial_skip(struct ceph_connection *con)
1527 while (con->out_skip > 0) {
1528 size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
1530 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
1533 con->out_skip -= ret;
1541 * Prepare to read connection handshake, or an ack.
1543 static void prepare_read_banner(struct ceph_connection *con)
1545 dout("prepare_read_banner %p\n", con);
1546 con->in_base_pos = 0;
1549 static void prepare_read_connect(struct ceph_connection *con)
1551 dout("prepare_read_connect %p\n", con);
1552 con->in_base_pos = 0;
1555 static void prepare_read_ack(struct ceph_connection *con)
1557 dout("prepare_read_ack %p\n", con);
1558 con->in_base_pos = 0;
1561 static void prepare_read_seq(struct ceph_connection *con)
1563 dout("prepare_read_seq %p\n", con);
1564 con->in_base_pos = 0;
1565 con->in_tag = CEPH_MSGR_TAG_SEQ;
1568 static void prepare_read_tag(struct ceph_connection *con)
1570 dout("prepare_read_tag %p\n", con);
1571 con->in_base_pos = 0;
1572 con->in_tag = CEPH_MSGR_TAG_READY;
1576 * Prepare to read a message.
1578 static int prepare_read_message(struct ceph_connection *con)
1580 dout("prepare_read_message %p\n", con);
1581 BUG_ON(con->in_msg != NULL);
1582 con->in_base_pos = 0;
1583 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
static int read_partial(struct ceph_connection *con,
			int end, int size, void *object)
{
	while (con->in_base_pos < end) {
		int left = end - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);

		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}
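/*
 * Callers keep a running "end" offset across the fields they expect, so a
 * field that arrives in pieces can be resumed on the next pass.  A sketch
 * of the pattern used by read_partial_banner() and friends:
 *
 *	size = strlen(CEPH_BANNER);
 *	end = size;
 *	ret = read_partial(con, end, size, con->in_banner);
 *	if (ret <= 0)
 *		goto out;
 *
 *	size = sizeof (con->actual_peer_addr);
 *	end += size;
 *	ret = read_partial(con, end, size, &con->actual_peer_addr);
 */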
1604 * Read all or part of the connect-side handshake on a new connection
1606 static int read_partial_banner(struct ceph_connection *con)
1612 dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
1615 size = strlen(CEPH_BANNER);
1617 ret = read_partial(con, end, size, con->in_banner);
1621 size = sizeof (con->actual_peer_addr);
1623 ret = read_partial(con, end, size, &con->actual_peer_addr);
1627 size = sizeof (con->peer_addr_for_me);
1629 ret = read_partial(con, end, size, &con->peer_addr_for_me);
1637 static int read_partial_connect(struct ceph_connection *con)
1643 dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
1645 size = sizeof (con->in_reply);
1647 ret = read_partial(con, end, size, &con->in_reply);
1651 size = le32_to_cpu(con->in_reply.authorizer_len);
1653 ret = read_partial(con, end, size, con->auth_reply_buf);
1657 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1658 con, (int)con->in_reply.tag,
1659 le32_to_cpu(con->in_reply.connect_seq),
1660 le32_to_cpu(con->in_reply.global_seq));
1667 * Verify the hello banner looks okay.
1669 static int verify_hello(struct ceph_connection *con)
1671 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
1672 pr_err("connect to %s got bad banner\n",
1673 ceph_pr_addr(&con->peer_addr.in_addr));
1674 con->error_msg = "protocol error, bad banner";
1680 static bool addr_is_blank(struct sockaddr_storage *ss)
1682 switch (ss->ss_family) {
1684 return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
1687 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
1688 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
1689 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
1690 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
1695 static int addr_port(struct sockaddr_storage *ss)
1697 switch (ss->ss_family) {
1699 return ntohs(((struct sockaddr_in *)ss)->sin_port);
1701 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
1706 static void addr_set_port(struct sockaddr_storage *ss, int p)
1708 switch (ss->ss_family) {
1710 ((struct sockaddr_in *)ss)->sin_port = htons(p);
1713 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
1719 * Unlike other *_pton function semantics, zero indicates success.
1721 static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
1722 char delim, const char **ipend)
1724 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
1725 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
1727 memset(ss, 0, sizeof(*ss));
1729 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
1730 ss->ss_family = AF_INET;
1734 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
1735 ss->ss_family = AF_INET6;
1743 * Extract hostname string and resolve using kernel DNS facility.
1745 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1746 static int ceph_dns_resolve_name(const char *name, size_t namelen,
1747 struct sockaddr_storage *ss, char delim, const char **ipend)
1749 const char *end, *delim_p;
1750 char *colon_p, *ip_addr = NULL;
1754 * The end of the hostname occurs immediately preceding the delimiter or
1755 * the port marker (':') where the delimiter takes precedence.
1757 delim_p = memchr(name, delim, namelen);
1758 colon_p = memchr(name, ':', namelen);
1760 if (delim_p && colon_p)
1761 end = delim_p < colon_p ? delim_p : colon_p;
1762 else if (!delim_p && colon_p)
1766 if (!end) /* case: hostname:/ */
1767 end = name + namelen;
1773 /* do dns_resolve upcall */
1774 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
1776 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
1784 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1785 ret, ret ? "failed" : ceph_pr_addr(ss));
1790 static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1791 struct sockaddr_storage *ss, char delim, const char **ipend)
1798 * Parse a server name (IP or hostname). If a valid IP address is not found
1799 * then try to extract a hostname to resolve using userspace DNS upcall.
1801 static int ceph_parse_server_name(const char *name, size_t namelen,
1802 struct sockaddr_storage *ss, char delim, const char **ipend)
1806 ret = ceph_pton(name, namelen, ss, delim, ipend);
1808 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);
1814 * Parse an ip[:port] list into an addr array. Use the default
1815 * monitor port if a port isn't specified.
1817 int ceph_parse_ips(const char *c, const char *end,
1818 struct ceph_entity_addr *addr,
1819 int max_count, int *count)
1821 int i, ret = -EINVAL;
1824 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1825 for (i = 0; i < max_count; i++) {
1827 struct sockaddr_storage *ss = &addr[i].in_addr;
1836 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
1845 dout("missing matching ']'\n");
1852 if (p < end && *p == ':') {
1855 while (p < end && *p >= '0' && *p <= '9') {
1856 port = (port * 10) + (*p - '0');
1859 if (port > 65535 || port == 0)
1862 port = CEPH_MON_PORT;
1865 addr_set_port(ss, port);
1867 dout("parse_ips got %s\n", ceph_pr_addr(ss));
1884 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
1887 EXPORT_SYMBOL(ceph_parse_ips);
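/*
 * For illustration, ceph_parse_ips() accepts comma-separated entries like
 * the following (addresses are hypothetical; the port defaults to
 * CEPH_MON_PORT when omitted, and hostnames are only resolved when
 * CONFIG_CEPH_LIB_USE_DNS_RESOLVER is enabled):
 *
 *	"192.168.0.1"
 *	"192.168.0.1:6789,192.168.0.2:6789"
 *	"[2001:db8::1]:6789"
 *	"mon-a.example.com:6789"
 */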
1889 static int process_banner(struct ceph_connection *con)
1891 dout("process_banner on %p\n", con);
1893 if (verify_hello(con) < 0)
1896 ceph_decode_addr(&con->actual_peer_addr);
1897 ceph_decode_addr(&con->peer_addr_for_me);
1900 * Make sure the other end is who we wanted. note that the other
1901 * end may not yet know their ip address, so if it's 0.0.0.0, give
1902 * them the benefit of the doubt.
1904 if (memcmp(&con->peer_addr, &con->actual_peer_addr,
1905 sizeof(con->peer_addr)) != 0 &&
1906 !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
1907 con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
1908 pr_warning("wrong peer, want %s/%d, got %s/%d\n",
1909 ceph_pr_addr(&con->peer_addr.in_addr),
1910 (int)le32_to_cpu(con->peer_addr.nonce),
1911 ceph_pr_addr(&con->actual_peer_addr.in_addr),
1912 (int)le32_to_cpu(con->actual_peer_addr.nonce));
1913 con->error_msg = "wrong peer at address";
1918 * did we learn our address?
1920 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
1921 int port = addr_port(&con->msgr->inst.addr.in_addr);
1923 memcpy(&con->msgr->inst.addr.in_addr,
1924 &con->peer_addr_for_me.in_addr,
1925 sizeof(con->peer_addr_for_me.in_addr));
1926 addr_set_port(&con->msgr->inst.addr.in_addr, port);
1927 encode_my_addr(con->msgr);
1928 dout("process_banner learned my addr is %s\n",
1929 ceph_pr_addr(&con->msgr->inst.addr.in_addr));
1935 static int process_connect(struct ceph_connection *con)
1937 u64 sup_feat = con->msgr->supported_features;
1938 u64 req_feat = con->msgr->required_features;
1939 u64 server_feat = le64_to_cpu(con->in_reply.features);
1942 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
1944 switch (con->in_reply.tag) {
1945 case CEPH_MSGR_TAG_FEATURES:
1946 pr_err("%s%lld %s feature set mismatch,"
1947 " my %llx < server's %llx, missing %llx\n",
1948 ENTITY_NAME(con->peer_name),
1949 ceph_pr_addr(&con->peer_addr.in_addr),
1950 sup_feat, server_feat, server_feat & ~sup_feat);
1951 con->error_msg = "missing required protocol features";
1952 reset_connection(con);
1955 case CEPH_MSGR_TAG_BADPROTOVER:
1956 pr_err("%s%lld %s protocol version mismatch,"
1957 " my %d != server's %d\n",
1958 ENTITY_NAME(con->peer_name),
1959 ceph_pr_addr(&con->peer_addr.in_addr),
1960 le32_to_cpu(con->out_connect.protocol_version),
1961 le32_to_cpu(con->in_reply.protocol_version));
1962 con->error_msg = "protocol version mismatch";
1963 reset_connection(con);
1966 case CEPH_MSGR_TAG_BADAUTHORIZER:
1968 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
1970 if (con->auth_retry == 2) {
1971 con->error_msg = "connect authorization failure";
1974 con_out_kvec_reset(con);
1975 ret = prepare_write_connect(con);
1978 prepare_read_connect(con);
1981 case CEPH_MSGR_TAG_RESETSESSION:
1983 * If we connected with a large connect_seq but the peer
1984 * has no record of a session with us (no connection, or
 * connect_seq == 0), they will send RESETSESSION to indicate
 * that they must have reset their session, and may have
 * dropped messages.
 */
1989 dout("process_connect got RESET peer seq %u\n",
1990 le32_to_cpu(con->in_reply.connect_seq));
1991 pr_err("%s%lld %s connection reset\n",
1992 ENTITY_NAME(con->peer_name),
1993 ceph_pr_addr(&con->peer_addr.in_addr));
1994 reset_connection(con);
1995 con_out_kvec_reset(con);
1996 ret = prepare_write_connect(con);
1999 prepare_read_connect(con);
2001 /* Tell ceph about it. */
2002 mutex_unlock(&con->mutex);
2003 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
2004 if (con->ops->peer_reset)
2005 con->ops->peer_reset(con);
2006 mutex_lock(&con->mutex);
2007 if (con->state != CON_STATE_NEGOTIATING)
2011 case CEPH_MSGR_TAG_RETRY_SESSION:
2013 * If we sent a smaller connect_seq than the peer has, try
2014 * again with a larger value.
2016 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
2017 le32_to_cpu(con->out_connect.connect_seq),
2018 le32_to_cpu(con->in_reply.connect_seq));
2019 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
2020 con_out_kvec_reset(con);
2021 ret = prepare_write_connect(con);
2024 prepare_read_connect(con);
2027 case CEPH_MSGR_TAG_RETRY_GLOBAL:
2029 * If we sent a smaller global_seq than the peer has, try
2030 * again with a larger value.
2032 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
2033 con->peer_global_seq,
2034 le32_to_cpu(con->in_reply.global_seq));
2035 get_global_seq(con->msgr,
2036 le32_to_cpu(con->in_reply.global_seq));
2037 con_out_kvec_reset(con);
2038 ret = prepare_write_connect(con);
2041 prepare_read_connect(con);
2044 case CEPH_MSGR_TAG_SEQ:
2045 case CEPH_MSGR_TAG_READY:
2046 if (req_feat & ~server_feat) {
2047 pr_err("%s%lld %s protocol feature mismatch,"
2048 " my required %llx > server's %llx, need %llx\n",
2049 ENTITY_NAME(con->peer_name),
2050 ceph_pr_addr(&con->peer_addr.in_addr),
2051 req_feat, server_feat, req_feat & ~server_feat);
2052 con->error_msg = "missing required protocol features";
2053 reset_connection(con);
2057 WARN_ON(con->state != CON_STATE_NEGOTIATING);
2058 con->state = CON_STATE_OPEN;
2059 con->auth_retry = 0; /* we authenticated; clear flag */
2060 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
2062 con->peer_features = server_feat;
2063 dout("process_connect got READY gseq %d cseq %d (%d)\n",
2064 con->peer_global_seq,
2065 le32_to_cpu(con->in_reply.connect_seq),
2067 WARN_ON(con->connect_seq !=
2068 le32_to_cpu(con->in_reply.connect_seq));
2070 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
2071 con_flag_set(con, CON_FLAG_LOSSYTX);
2073 con->delay = 0; /* reset backoff memory */
2075 if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) {
2076 prepare_write_seq(con);
2077 prepare_read_seq(con);
2079 prepare_read_tag(con);
2083 case CEPH_MSGR_TAG_WAIT:
2085 * If there is a connection race (we are opening
2086 * connections to each other), one of us may just have
 * to WAIT.  This shouldn't happen if we are the
 * client.
 */
2090 pr_err("process_connect got WAIT as client\n");
2091 con->error_msg = "protocol error, got WAIT as client";
2095 pr_err("connect protocol error, will retry\n");
2096 con->error_msg = "protocol error, garbage tag during connect";
2104 * read (part of) an ack
2106 static int read_partial_ack(struct ceph_connection *con)
2108 int size = sizeof (con->in_temp_ack);
2111 return read_partial(con, end, size, &con->in_temp_ack);
2115 * We can finally discard anything that's been acked.
2117 static void process_ack(struct ceph_connection *con)
2120 u64 ack = le64_to_cpu(con->in_temp_ack);
2123 while (!list_empty(&con->out_sent)) {
2124 m = list_first_entry(&con->out_sent, struct ceph_msg,
2126 seq = le64_to_cpu(m->hdr.seq);
2129 dout("got ack for seq %llu type %d at %p\n", seq,
2130 le16_to_cpu(m->hdr.type), m);
2131 m->ack_stamp = jiffies;
2134 prepare_read_tag(con);
2138 static int read_partial_message_section(struct ceph_connection *con,
2139 struct kvec *section,
2140 unsigned int sec_len, u32 *crc)
2146 while (section->iov_len < sec_len) {
2147 BUG_ON(section->iov_base == NULL);
2148 left = sec_len - section->iov_len;
2149 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
2150 section->iov_len, left);
2153 section->iov_len += ret;
2155 if (section->iov_len == sec_len)
2156 *crc = crc32c(0, section->iov_base, section->iov_len);
2161 static int read_partial_msg_data(struct ceph_connection *con)
2163 struct ceph_msg *msg = con->in_msg;
2164 struct ceph_msg_data_cursor *cursor = &msg->cursor;
2165 const bool do_datacrc = !con->msgr->nocrc;
2173 if (list_empty(&msg->data))
2177 crc = con->in_data_crc;
2178 while (cursor->resid) {
2179 page = ceph_msg_data_next(&msg->cursor, &page_offset, &length,
2181 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
2184 con->in_data_crc = crc;
2190 crc = ceph_crc32c_page(crc, page, page_offset, ret);
2191 (void) ceph_msg_data_advance(&msg->cursor, (size_t)ret);
2194 con->in_data_crc = crc;
2196 return 1; /* must return > 0 to indicate success */
2200 * read (part of) a message.
2202 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);
2204 static int read_partial_message(struct ceph_connection *con)
2206 struct ceph_msg *m = con->in_msg;
2210 unsigned int front_len, middle_len, data_len;
2211 bool do_datacrc = !con->msgr->nocrc;
2215 dout("read_partial_message con %p msg %p\n", con, m);
2218 size = sizeof (con->in_hdr);
2220 ret = read_partial(con, end, size, &con->in_hdr);
2224 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
2225 if (cpu_to_le32(crc) != con->in_hdr.crc) {
2226 pr_err("read_partial_message bad hdr "
2227 " crc %u != expected %u\n",
2228 crc, con->in_hdr.crc);
2232 front_len = le32_to_cpu(con->in_hdr.front_len);
2233 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
2235 middle_len = le32_to_cpu(con->in_hdr.middle_len);
2236 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN)
2238 data_len = le32_to_cpu(con->in_hdr.data_len);
2239 if (data_len > CEPH_MSG_MAX_DATA_LEN)
2243 seq = le64_to_cpu(con->in_hdr.seq);
2244 if ((s64)seq - (s64)con->in_seq < 1) {
2245 pr_info("skipping %s%lld %s seq %lld expected %lld\n",
2246 ENTITY_NAME(con->peer_name),
2247 ceph_pr_addr(&con->peer_addr.in_addr),
2248 seq, con->in_seq + 1);
2249 con->in_base_pos = -front_len - middle_len - data_len -
2251 con->in_tag = CEPH_MSGR_TAG_READY;
2253 } else if ((s64)seq - (s64)con->in_seq > 1) {
2254 pr_err("read_partial_message bad seq %lld expected %lld\n",
2255 seq, con->in_seq + 1);
2256 con->error_msg = "bad message sequence # for incoming message";
2260 /* allocate message? */
2264 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
2265 front_len, data_len);
2266 ret = ceph_con_in_msg_alloc(con, &skip);
2270 BUG_ON(!con->in_msg ^ skip);
2271 if (con->in_msg && data_len > con->in_msg->data_length) {
2272 pr_warning("%s skipping long message (%u > %zd)\n",
2273 __func__, data_len, con->in_msg->data_length);
2274 ceph_msg_put(con->in_msg);
2279 /* skip this message */
2280 dout("alloc_msg said skip message\n");
2281 con->in_base_pos = -front_len - middle_len - data_len -
2283 con->in_tag = CEPH_MSGR_TAG_READY;
2288 BUG_ON(!con->in_msg);
2289 BUG_ON(con->in_msg->con != con);
2291 m->front.iov_len = 0; /* haven't read it yet */
2293 m->middle->vec.iov_len = 0;
2295 /* prepare for data payload, if any */
2298 prepare_message_data(con->in_msg, data_len);
2302 ret = read_partial_message_section(con, &m->front, front_len,
2303 &con->in_front_crc);
2309 ret = read_partial_message_section(con, &m->middle->vec,
2311 &con->in_middle_crc);
2318 ret = read_partial_msg_data(con);
2324 size = sizeof (m->footer);
2326 ret = read_partial(con, end, size, &m->footer);
2330 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
2331 m, front_len, m->footer.front_crc, middle_len,
2332 m->footer.middle_crc, data_len, m->footer.data_crc);
2335 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
2336 pr_err("read_partial_message %p front crc %u != exp. %u\n",
2337 m, con->in_front_crc, m->footer.front_crc);
2340 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
2341 pr_err("read_partial_message %p middle crc %u != exp %u\n",
2342 m, con->in_middle_crc, m->footer.middle_crc);
2346 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
2347 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
2348 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
2349 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
2353 return 1; /* done! */
/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
2361 static void process_message(struct ceph_connection *con)
2363 struct ceph_msg *msg;
2365 BUG_ON(con->in_msg->con != con);
2366 con->in_msg->con = NULL;
2371 /* if first message, set peer_name */
2372 if (con->peer_name.type == 0)
2373 con->peer_name = msg->hdr.src;
2376 mutex_unlock(&con->mutex);
2378 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
2379 msg, le64_to_cpu(msg->hdr.seq),
2380 ENTITY_NAME(msg->hdr.src),
2381 le16_to_cpu(msg->hdr.type),
2382 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2383 le32_to_cpu(msg->hdr.front_len),
2384 le32_to_cpu(msg->hdr.data_len),
2385 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
2386 con->ops->dispatch(con, msg);
2388 mutex_lock(&con->mutex);
2393 * Write something to the socket. Called in a worker thread when the
2394 * socket appears to be writeable and we have something ready to send.
2396 static int try_write(struct ceph_connection *con)
2400 dout("try_write start %p state %lu\n", con, con->state);
2403 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
2405 /* open the socket first? */
2406 if (con->state == CON_STATE_PREOPEN) {
2408 con->state = CON_STATE_CONNECTING;
2410 con_out_kvec_reset(con);
2411 prepare_write_banner(con);
2412 prepare_read_banner(con);
2414 BUG_ON(con->in_msg);
2415 con->in_tag = CEPH_MSGR_TAG_READY;
2416 dout("try_write initiating connect on %p new state %lu\n",
2418 ret = ceph_tcp_connect(con);
2420 con->error_msg = "connect error";
2426 /* kvec data queued? */
2427 if (con->out_skip) {
2428 ret = write_partial_skip(con);
2432 if (con->out_kvec_left) {
2433 ret = write_partial_kvec(con);
2440 if (con->out_msg_done) {
2441 ceph_msg_put(con->out_msg);
2442 con->out_msg = NULL; /* we're done with this one */
2446 ret = write_partial_message_data(con);
2448 goto more_kvec; /* we need to send the footer, too! */
2452 dout("try_write write_partial_message_data err %d\n",
2459 if (con->state == CON_STATE_OPEN) {
2460 /* is anything else pending? */
2461 if (!list_empty(&con->out_queue)) {
2462 prepare_write_message(con);
2465 if (con->in_seq > con->in_seq_acked) {
2466 prepare_write_ack(con);
2469 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
2470 prepare_write_keepalive(con);
2475 /* Nothing to do! */
2476 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
2477 dout("try_write nothing else to write.\n");
2480 dout("try_write done on %p ret %d\n", con, ret);
2486 /*
2487 * Read what we can from the socket.
2488 */
2489 static int try_read(struct ceph_connection *con)
2494 dout("try_read start on %p state %lu\n", con, con->state);
2495 if (con->state != CON_STATE_CONNECTING &&
2496 con->state != CON_STATE_NEGOTIATING &&
2497 con->state != CON_STATE_OPEN)
2502 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
2503 con->in_base_pos);
2505 if (con->state == CON_STATE_CONNECTING) {
2506 dout("try_read connecting\n");
2507 ret = read_partial_banner(con);
2510 ret = process_banner(con);
2514 con->state = CON_STATE_NEGOTIATING;
2516 /*
2517 * Received banner is good, exchange connection info.
2518 * Do not reset out_kvec, as sending our banner raced
2519 * with receiving peer banner after connect completed.
2520 */
2521 ret = prepare_write_connect(con);
2524 prepare_read_connect(con);
2526 /* Send connection info before awaiting response */
2530 if (con->state == CON_STATE_NEGOTIATING) {
2531 dout("try_read negotiating\n");
2532 ret = read_partial_connect(con);
2535 ret = process_connect(con);
2541 WARN_ON(con->state != CON_STATE_OPEN);
2543 if (con->in_base_pos < 0) {
2544 /*
2545 * skipping + discarding content.
2546 *
2547 * FIXME: there must be a better way to do this!
2548 */
2549 static char buf[SKIP_BUF_SIZE];
2550 int skip = min((int) sizeof (buf), -con->in_base_pos);
2552 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
2553 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
2556 con->in_base_pos += ret;
2557 if (con->in_base_pos)
2560 if (con->in_tag == CEPH_MSGR_TAG_READY) {
2564 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
2567 dout("try_read got tag %d\n", (int)con->in_tag);
2568 switch (con->in_tag) {
2569 case CEPH_MSGR_TAG_MSG:
2570 prepare_read_message(con);
2572 case CEPH_MSGR_TAG_ACK:
2573 prepare_read_ack(con);
2575 case CEPH_MSGR_TAG_CLOSE:
2576 con_close_socket(con);
2577 con->state = CON_STATE_CLOSED;
2583 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
2584 ret = read_partial_message(con);
2588 con->error_msg = "bad crc";
2592 con->error_msg = "io error";
2597 if (con->in_tag == CEPH_MSGR_TAG_READY)
2599 process_message(con);
2600 if (con->state == CON_STATE_OPEN)
2601 prepare_read_tag(con);
2604 if (con->in_tag == CEPH_MSGR_TAG_ACK ||
2605 con->in_tag == CEPH_MSGR_TAG_SEQ) {
2606 /*
2607 * the final handshake seq exchange is semantically
2608 * equivalent to an ACK
2609 */
2610 ret = read_partial_ack(con);
2618 dout("try_read done on %p ret %d\n", con, ret);
2622 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
2623 con->error_msg = "protocol error, garbage tag";
2629 /*
2630 * Atomically queue work on a connection after the specified delay.
2631 * Bump @con reference to avoid races with connection teardown.
2632 * Returns 0 if work was queued, or an error code otherwise.
2633 */
2634 static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
2636 if (!con->ops->get(con)) {
2637 dout("%s %p ref count 0\n", __func__, con);
2638 return -ENOENT;
2639 }
2642 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
2643 dout("%s %p - already queued\n", __func__, con);
2644 con->ops->put(con);
2645 return -EBUSY;
2646 }
2649 dout("%s %p %lu\n", __func__, con, delay);
2650 return 0;
2651 }
2654 static void queue_con(struct ceph_connection *con)
2656 (void) queue_con_delay(con, 0);
2659 static bool con_sock_closed(struct ceph_connection *con)
2661 if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
2664 #define CASE(x) \
2665 case CON_STATE_ ## x: \
2666 con->error_msg = "socket closed (con state " #x ")"; \
2667 break;
2669 switch (con->state) {
2677 pr_warning("%s con %p unrecognized state %lu\n",
2678 __func__, con, con->state);
2679 con->error_msg = "unrecognized con state";
2688 static bool con_backoff(struct ceph_connection *con)
2692 if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
2693 return false;
2695 ret = queue_con_delay(con, round_jiffies_relative(con->delay));
2696 if (ret) {
2697 dout("%s: con %p FAILED to back off %lu\n", __func__,
2698 con, con->delay);
2699 BUG_ON(ret == -ENOENT);
2700 con_flag_set(con, CON_FLAG_BACKOFF);
2701 }
2703 return true;
2704 }
2706 /* Finish fault handling; con->mutex must *not* be held here */
2708 static void con_fault_finish(struct ceph_connection *con)
2710 /*
2711 * in case we faulted due to authentication, invalidate our
2712 * current tickets so that we can get new ones.
2713 */
2714 if (con->auth_retry && con->ops->invalidate_authorizer) {
2715 dout("calling invalidate_authorizer()\n");
2716 con->ops->invalidate_authorizer(con);
2719 if (con->ops->fault)
2720 con->ops->fault(con);
2723 /*
2724 * Do some work on a connection. Drop a connection ref when we're done.
2725 */
2726 static void con_work(struct work_struct *work)
2728 struct ceph_connection *con = container_of(work, struct ceph_connection,
2732 mutex_lock(&con->mutex);
2736 if ((fault = con_sock_closed(con))) {
2737 dout("%s: con %p SOCK_CLOSED\n", __func__, con);
2740 if (con_backoff(con)) {
2741 dout("%s: con %p BACKOFF\n", __func__, con);
2744 if (con->state == CON_STATE_STANDBY) {
2745 dout("%s: con %p STANDBY\n", __func__, con);
2748 if (con->state == CON_STATE_CLOSED) {
2749 dout("%s: con %p CLOSED\n", __func__, con);
2753 if (con->state == CON_STATE_PREOPEN) {
2754 dout("%s: con %p PREOPEN\n", __func__, con);
2758 ret = try_read(con);
2762 con->error_msg = "socket error on read";
2767 ret = try_write(con);
2771 con->error_msg = "socket error on write";
2775 break; /* If we make it to here, we're done */
2779 mutex_unlock(&con->mutex);
2782 con_fault_finish(con);
2787 /*
2788 * Generic error/fault handler. A retry mechanism is used with
2789 * exponential backoff
2790 */
2791 static void con_fault(struct ceph_connection *con)
2793 pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2794 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
2795 dout("fault %p state %lu to peer %s\n",
2796 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
2798 WARN_ON(con->state != CON_STATE_CONNECTING &&
2799 con->state != CON_STATE_NEGOTIATING &&
2800 con->state != CON_STATE_OPEN);
2802 con_close_socket(con);
2804 if (con_flag_test(con, CON_FLAG_LOSSYTX)) {
2805 dout("fault on LOSSYTX channel, marking CLOSED\n");
2806 con->state = CON_STATE_CLOSED;
2807 return;
2808 }
2810 if (con->in_msg) {
2811 BUG_ON(con->in_msg->con != con);
2812 con->in_msg->con = NULL;
2813 ceph_msg_put(con->in_msg);
2814 con->in_msg = NULL;
2815 con->ops->put(con);
2816 }
2818 /* Requeue anything that hasn't been acked */
2819 list_splice_init(&con->out_sent, &con->out_queue);
2821 /* If there are no messages queued or keepalive pending, place
2822 * the connection in a STANDBY state */
2823 if (list_empty(&con->out_queue) &&
2824 !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) {
2825 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
2826 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
2827 con->state = CON_STATE_STANDBY;
2829 /* retry after a delay. */
2830 con->state = CON_STATE_PREOPEN;
2831 if (con->delay == 0)
2832 con->delay = BASE_DELAY_INTERVAL;
2833 else if (con->delay < MAX_DELAY_INTERVAL)
2834 con->delay *= 2;
2835 con_flag_set(con, CON_FLAG_BACKOFF);
2842 /*
2843 * initialize a new messenger instance
2844 */
2845 void ceph_messenger_init(struct ceph_messenger *msgr,
2846 struct ceph_entity_addr *myaddr,
2847 u32 supported_features,
2848 u32 required_features,
2849 bool nocrc)
2850 {
2851 msgr->supported_features = supported_features;
2852 msgr->required_features = required_features;
2854 spin_lock_init(&msgr->global_seq_lock);
2856 if (myaddr)
2857 msgr->inst.addr = *myaddr;
2859 /* select a random nonce */
2860 msgr->inst.addr.type = 0;
2861 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
2862 encode_my_addr(msgr);
2863 msgr->nocrc = nocrc;
2865 atomic_set(&msgr->stopping, 0);
2867 dout("%s %p\n", __func__, msgr);
2869 EXPORT_SYMBOL(ceph_messenger_init);
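
/*
 * Editorial example (assumption, not part of messenger.c): a minimal sketch
 * of caller-side setup.  The messenger is normally embedded in a larger
 * client structure; passing a NULL myaddr lets the address be filled in later
 * from the socket.  The feature masks come from <linux/ceph/ceph_features.h>;
 * the identifiers prefixed with "example_" are hypothetical.
 */
static struct ceph_messenger example_msgr;	/* hypothetical */

static void example_messenger_setup(struct ceph_entity_addr *myaddr)
{
	ceph_messenger_init(&example_msgr, myaddr,
			    CEPH_FEATURES_SUPPORTED_DEFAULT,
			    CEPH_FEATURES_REQUIRED_DEFAULT,
			    false /* nocrc: keep CRC checking enabled */);
}
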
2871 static void clear_standby(struct ceph_connection *con)
2873 /* come back from STANDBY? */
2874 if (con->state == CON_STATE_STANDBY) {
2875 dout("clear_standby %p and ++connect_seq\n", con);
2876 con->state = CON_STATE_PREOPEN;
2877 con->connect_seq++;
2878 WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING));
2879 WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING));
2883 /*
2884 * Queue up an outgoing message on the given connection.
2885 */
2886 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
2889 msg->hdr.src = con->msgr->inst.name;
2890 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
2891 msg->needs_out_seq = true;
2893 mutex_lock(&con->mutex);
2895 if (con->state == CON_STATE_CLOSED) {
2896 dout("con_send %p closed, dropping %p\n", con, msg);
2897 ceph_msg_put(msg);
2898 mutex_unlock(&con->mutex);
2899 return;
2900 }
2902 BUG_ON(msg->con != NULL);
2903 msg->con = con->ops->get(con);
2904 BUG_ON(msg->con == NULL);
2906 BUG_ON(!list_empty(&msg->list_head));
2907 list_add_tail(&msg->list_head, &con->out_queue);
2908 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
2909 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
2910 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2911 le32_to_cpu(msg->hdr.front_len),
2912 le32_to_cpu(msg->hdr.middle_len),
2913 le32_to_cpu(msg->hdr.data_len));
2916 mutex_unlock(&con->mutex);
2918 /* if there wasn't anything waiting to send before, queue
2919 * new work */
2920 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
2921 queue_con(con);
2922 }
2923 EXPORT_SYMBOL(ceph_con_send);
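
/*
 * Editorial example (assumption, not part of messenger.c): the usual send
 * pattern.  A message is allocated with a fixed-size front, the payload is
 * encoded into the front, and ceph_con_send() takes over the caller's
 * reference.  The message type and payload layout here are purely
 * illustrative; the trailing bool is ceph_msg_new()'s can_fail flag.
 */
static int example_send_u32(struct ceph_connection *con, u16 type, u32 value)
{
	struct ceph_msg *msg;
	void *p;

	msg = ceph_msg_new(type, sizeof(u32), GFP_NOFS, false);
	if (!msg)
		return -ENOMEM;

	p = msg->front.iov_base;
	ceph_encode_32(&p, value);	/* hdr.front_len was already set by ceph_msg_new() */

	ceph_con_send(con, msg);	/* queues the message and kicks the worker */
	return 0;
}
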
2925 /*
2926 * Revoke a message that was previously queued for send
2927 */
2928 void ceph_msg_revoke(struct ceph_msg *msg)
2930 struct ceph_connection *con = msg->con;
2932 if (!con)
2933 return; /* Message not in our possession */
2935 mutex_lock(&con->mutex);
2936 if (!list_empty(&msg->list_head)) {
2937 dout("%s %p msg %p - was on queue\n", __func__, con, msg);
2938 list_del_init(&msg->list_head);
2939 BUG_ON(msg->con == NULL);
2940 msg->con->ops->put(msg->con);
2946 if (con->out_msg == msg) {
2947 dout("%s %p msg %p - was sending\n", __func__, con, msg);
2948 con->out_msg = NULL;
2949 if (con->out_kvec_is_msg) {
2950 con->out_skip = con->out_kvec_bytes;
2951 con->out_kvec_is_msg = false;
2957 mutex_unlock(&con->mutex);
2960 /*
2961 * Revoke a message that we may be reading data into
2962 */
2963 void ceph_msg_revoke_incoming(struct ceph_msg *msg)
2965 struct ceph_connection *con;
2967 BUG_ON(msg == NULL);
2968 if (!msg->con) {
2969 dout("%s msg %p null con\n", __func__, msg);
2971 return; /* Message not in our possession */
2972 }
2974 con = msg->con;
2975 mutex_lock(&con->mutex);
2976 if (con->in_msg == msg) {
2977 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
2978 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
2979 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
2981 /* skip rest of message */
2982 dout("%s %p msg %p revoked\n", __func__, con, msg);
2983 con->in_base_pos = con->in_base_pos -
2984 sizeof(struct ceph_msg_header) -
2985 front_len -
2986 middle_len -
2987 data_len -
2988 sizeof(struct ceph_msg_footer);
2989 ceph_msg_put(con->in_msg);
2990 con->in_msg = NULL;
2991 con->in_tag = CEPH_MSGR_TAG_READY;
2994 dout("%s %p in_msg %p msg %p no-op\n",
2995 __func__, con, con->in_msg, msg);
2997 mutex_unlock(&con->mutex);
3000 /*
3001 * Queue a keepalive byte to ensure the tcp connection is alive.
3002 */
3003 void ceph_con_keepalive(struct ceph_connection *con)
3005 dout("con_keepalive %p\n", con);
3006 mutex_lock(&con->mutex);
3007 clear_standby(con);
3008 mutex_unlock(&con->mutex);
3009 if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
3010 con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
3011 queue_con(con);
3012 }
3013 EXPORT_SYMBOL(ceph_con_keepalive);
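
/*
 * Editorial example (assumption, not part of messenger.c): keepalives are
 * driven by the caller's own timer, typically a delayed work item; the
 * messenger only records that a keepalive byte is pending.  The structure,
 * names and interval below are illustrative.
 */
struct example_peer {				/* hypothetical caller state */
	struct ceph_connection con;
	struct delayed_work tick;
};

static void example_keepalive_tick(struct work_struct *work)
{
	struct example_peer *peer =
		container_of(work, struct example_peer, tick.work);

	ceph_con_keepalive(&peer->con);
	schedule_delayed_work(&peer->tick, round_jiffies_relative(10 * HZ));
}
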
3015 static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
3017 struct ceph_msg_data *data;
3019 if (WARN_ON(!ceph_msg_data_type_valid(type)))
3020 return NULL;
3022 data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS);
3023 if (data) {
3024 data->type = type;
3025 INIT_LIST_HEAD(&data->links);
3026 }
3028 return data;
3029 }
3030 static void ceph_msg_data_destroy(struct ceph_msg_data *data)
3035 WARN_ON(!list_empty(&data->links));
3036 if (data->type == CEPH_MSG_DATA_PAGELIST) {
3037 ceph_pagelist_release(data->pagelist);
3038 kfree(data->pagelist);
3040 kmem_cache_free(ceph_msg_data_cache, data);
3043 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
3044 size_t length, size_t alignment)
3046 struct ceph_msg_data *data;
3051 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES);
3053 data->pages = pages;
3054 data->length = length;
3055 data->alignment = alignment & ~PAGE_MASK;
3057 list_add_tail(&data->links, &msg->data);
3058 msg->data_length += length;
3060 EXPORT_SYMBOL(ceph_msg_data_add_pages);
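
/*
 * Editorial example (assumption, not part of messenger.c): attaching a page
 * vector as the data payload.  The pages are not copied; the caller keeps
 * ownership and must not free them before the message is released.  The
 * message type, front size and helper name are illustrative.
 */
static struct ceph_msg *example_msg_with_pages(size_t length)
{
	struct ceph_msg *msg;
	struct page **pages;
	int num_pages = calc_pages_for(0, length);

	msg = ceph_msg_new(CEPH_MSG_OSD_OP, 512, GFP_NOFS, false);
	if (!msg)
		return NULL;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages)) {
		ceph_msg_put(msg);
		return NULL;
	}

	ceph_msg_data_add_pages(msg, pages, length, 0 /* payload starts on a page boundary */);
	msg->hdr.data_len = cpu_to_le32(length);
	return msg;
}
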
3062 void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
3063 struct ceph_pagelist *pagelist)
3065 struct ceph_msg_data *data;
3068 BUG_ON(!pagelist->length);
3070 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST);
3072 data->pagelist = pagelist;
3074 list_add_tail(&data->links, &msg->data);
3075 msg->data_length += pagelist->length;
3077 EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
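
/*
 * Editorial example (assumption, not part of messenger.c): a pagelist is
 * handy when the payload is built incrementally.  Note the BUG_ON above: the
 * pagelist must be non-empty when it is attached.  Once added, the data item
 * owns the pagelist and releases it when the message is destroyed.  The
 * helper name is illustrative.
 */
static int example_add_blob(struct ceph_msg *msg, const void *blob, size_t len)
{
	struct ceph_pagelist *pl;
	int ret;

	pl = kmalloc(sizeof(*pl), GFP_NOFS);
	if (!pl)
		return -ENOMEM;
	ceph_pagelist_init(pl);

	ret = ceph_pagelist_append(pl, blob, len);	/* copies blob into pagelist pages */
	if (ret) {
		ceph_pagelist_release(pl);
		kfree(pl);
		return ret;
	}

	ceph_msg_data_add_pagelist(msg, pl);
	return 0;
}
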
3079 #ifdef CONFIG_BLOCK
3080 void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
3081 size_t length)
3082 {
3083 struct ceph_msg_data *data;
3087 data = ceph_msg_data_create(CEPH_MSG_DATA_BIO);
3089 data->bio = bio;
3090 data->bio_length = length;
3092 list_add_tail(&data->links, &msg->data);
3093 msg->data_length += length;
3095 EXPORT_SYMBOL(ceph_msg_data_add_bio);
3096 #endif /* CONFIG_BLOCK */
3098 /*
3099 * construct a new message with given type, size
3100 * the new msg has a ref count of 1.
3101 */
3102 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
3103 bool can_fail)
3104 {
3105 struct ceph_msg *m;
3107 m = kmem_cache_zalloc(ceph_msg_cache, flags);
3111 m->hdr.type = cpu_to_le16(type);
3112 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
3113 m->hdr.front_len = cpu_to_le32(front_len);
3115 INIT_LIST_HEAD(&m->list_head);
3116 kref_init(&m->kref);
3117 INIT_LIST_HEAD(&m->data);
3120 m->front_max = front_len;
3121 if (front_len) {
3122 if (front_len > PAGE_CACHE_SIZE) {
3123 m->front.iov_base = __vmalloc(front_len, flags,
3124 PAGE_KERNEL);
3125 m->front_is_vmalloc = true;
3126 } else {
3127 m->front.iov_base = kmalloc(front_len, flags);
3128 }
3129 if (m->front.iov_base == NULL) {
3130 dout("ceph_msg_new can't allocate %d bytes\n",
3131 front_len);
3132 goto out2;
3133 }
3134 } else {
3135 m->front.iov_base = NULL;
3136 }
3137 m->front.iov_len = front_len;
3139 dout("ceph_msg_new %p front %d\n", m, front_len);
3146 pr_err("msg_new can't create type %d front %d\n", type,
3147 front_len);
3150 dout("msg_new can't create type %d front %d\n", type,
3151 front_len);
3155 EXPORT_SYMBOL(ceph_msg_new);
3157 /*
3158 * Allocate "middle" portion of a message, if it is needed and wasn't
3159 * allocated by alloc_msg. This allows us to read a small fixed-size
3160 * per-type header in the front and then gracefully fail (i.e.,
3161 * propagate the error to the caller based on info in the front) when
3162 * the middle is too large.
3163 */
3164 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
3166 int type = le16_to_cpu(msg->hdr.type);
3167 int middle_len = le32_to_cpu(msg->hdr.middle_len);
3169 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
3170 ceph_msg_type_name(type), middle_len);
3171 BUG_ON(!middle_len);
3172 BUG_ON(msg->middle);
3174 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
3175 if (!msg->middle)
3176 return -ENOMEM;
3177 return 0;
3178 }
3180 /*
3181 * Allocate a message for receiving an incoming message on a
3182 * connection, and save the result in con->in_msg. Uses the
3183 * connection's private alloc_msg op if available.
3185 * Returns 0 on success, or a negative error code.
3187 * On success, if we set *skip = 1:
3188 * - the next message should be skipped and ignored.
3189 * - con->in_msg == NULL
3190 * or if we set *skip = 0:
3191 * - con->in_msg is non-null.
3192 * On error (ENOMEM, EAGAIN, ...),
3193 * - con->in_msg == NULL
3194 */
3195 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
3197 struct ceph_msg_header *hdr = &con->in_hdr;
3198 int middle_len = le32_to_cpu(hdr->middle_len);
3199 struct ceph_msg *msg;
3200 int ret = 0;
3202 BUG_ON(con->in_msg != NULL);
3203 BUG_ON(!con->ops->alloc_msg);
3205 mutex_unlock(&con->mutex);
3206 msg = con->ops->alloc_msg(con, hdr, skip);
3207 mutex_lock(&con->mutex);
3208 if (con->state != CON_STATE_OPEN) {
3209 if (msg)
3210 ceph_msg_put(msg);
3211 return -EAGAIN;
3212 }
3213 if (msg) {
3214 BUG_ON(*skip);
3215 con->in_msg = msg;
3216 con->in_msg->con = con->ops->get(con);
3217 BUG_ON(con->in_msg->con == NULL);
3218 } else {
3219 /*
3220 * Null message pointer means either we should skip
3221 * this message or we couldn't allocate memory. The
3222 * former is not an error.
3223 */
3224 if (*skip)
3225 return 0;
3226 con->error_msg = "error allocating memory for incoming message";
3228 return -ENOMEM;
3229 }
3230 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
3232 if (middle_len && !con->in_msg->middle) {
3233 ret = ceph_alloc_middle(con, con->in_msg);
3234 if (ret < 0) {
3235 ceph_msg_put(con->in_msg);
3236 con->in_msg = NULL;
3237 }
3238 }
3240 return ret;
3241 }
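
/*
 * Editorial example (assumption, not part of messenger.c): the shape of a
 * connection's alloc_msg op, matching the contract documented above.  It
 * either returns a message sized from the header, or returns NULL with *skip
 * set so the messenger reads and discards the body.  The type filter is
 * hypothetical.
 */
static struct ceph_msg *example_alloc_msg(struct ceph_connection *con,
					  struct ceph_msg_header *hdr, int *skip)
{
	int front_len = le32_to_cpu(hdr->front_len);
	int type = le16_to_cpu(hdr->type);

	*skip = 0;
	if (type != CEPH_MSG_PING) {	/* hypothetical: only expecting pings */
		*skip = 1;		/* not an error: messenger discards the message */
		return NULL;
	}

	/* NULL with *skip == 0 is treated as an allocation failure */
	return ceph_msg_new(type, front_len, GFP_NOFS, false);
}
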
3244 /*
3245 * Free a generically kmalloc'd message.
3246 */
3247 void ceph_msg_kfree(struct ceph_msg *m)
3249 dout("msg_kfree %p\n", m);
3250 if (m->front_is_vmalloc)
3251 vfree(m->front.iov_base);
3253 kfree(m->front.iov_base);
3254 kmem_cache_free(ceph_msg_cache, m);
3257 /*
3258 * Drop a msg ref. Destroy as needed.
3259 */
3260 void ceph_msg_last_put(struct kref *kref)
3262 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
3263 LIST_HEAD(data);
3264 struct list_head *links;
3265 struct list_head *next;
3267 dout("ceph_msg_put last one on %p\n", m);
3268 WARN_ON(!list_empty(&m->list_head));
3270 /* drop middle, data, if any */
3271 if (m->middle) {
3272 ceph_buffer_put(m->middle);
3273 m->middle = NULL;
3274 }
3276 list_splice_init(&m->data, &data);
3277 list_for_each_safe(links, next, &data) {
3278 struct ceph_msg_data *data;
3280 data = list_entry(links, struct ceph_msg_data, links);
3281 list_del_init(links);
3282 ceph_msg_data_destroy(data);
3286 if (m->pool)
3287 ceph_msgpool_put(m->pool, m);
3288 else
3289 ceph_msg_kfree(m);
3290 }
3291 EXPORT_SYMBOL(ceph_msg_last_put);
3293 void ceph_msg_dump(struct ceph_msg *msg)
3295 pr_debug("msg_dump %p (front_max %d length %zd)\n", msg,
3296 msg->front_max, msg->data_length);
3297 print_hex_dump(KERN_DEBUG, "header: ",
3298 DUMP_PREFIX_OFFSET, 16, 1,
3299 &msg->hdr, sizeof(msg->hdr), true);
3300 print_hex_dump(KERN_DEBUG, " front: ",
3301 DUMP_PREFIX_OFFSET, 16, 1,
3302 msg->front.iov_base, msg->front.iov_len, true);
3303 if (msg->middle)
3304 print_hex_dump(KERN_DEBUG, "middle: ",
3305 DUMP_PREFIX_OFFSET, 16, 1,
3306 msg->middle->vec.iov_base,
3307 msg->middle->vec.iov_len, true);
3308 print_hex_dump(KERN_DEBUG, "footer: ",
3309 DUMP_PREFIX_OFFSET, 16, 1,
3310 &msg->footer, sizeof(msg->footer), true);
3312 EXPORT_SYMBOL(ceph_msg_dump);