#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OP_FRONT_LEN	4096
#define OSD_OPREPLY_FRONT_LEN	512

static const struct ceph_connection_operations osd_con_ops;

static void __send_queued(struct ceph_osd_client *osdc);
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req);
static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req);
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req);

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

	return 0;
}

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}
EXPORT_SYMBOL(ceph_osd_data_pages_init);

void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}
EXPORT_SYMBOL(ceph_osd_data_pagelist_init);

#ifdef CONFIG_BLOCK
void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
			struct bio *bio, size_t bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio = bio;
	osd_data->bio_length = bio_length;
}
EXPORT_SYMBOL(ceph_osd_data_bio_init);
#endif /* CONFIG_BLOCK */
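
/*
 * Usage sketch (assembled from this file's helpers; the buffer names
 * are hypothetical): a reader attaches a page vector to the incoming
 * data field, a writer to the outgoing one:
 *
 *	ceph_osd_data_pages_init(&req->r_data_in, pages, length,
 *				 page_align, false, false);
 *
 * ceph_osd_data_length() and ceph_osdc_msg_data_set() (below) consume
 * these fields when the reply and request messages are set up.
 */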

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type != CEPH_OSD_DATA_TYPE_PAGES)
		return;

	if (osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
}

void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req;

	req = container_of(kref, struct ceph_osd_request, r_kref);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply) {
		ceph_msg_revoke_incoming(req->r_reply);
		ceph_msg_put(req->r_reply);
	}

	ceph_osd_data_release(&req->r_data_in);
	ceph_osd_data_release(&req->r_data_out);

	ceph_put_snap_context(req->r_snapc);
	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else
		kfree(req);
}
EXPORT_SYMBOL(ceph_osdc_release_request);

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;
	struct ceph_msg *msg;
	size_t msg_size;

	BUILD_BUG_ON(CEPH_OSD_MAX_OP > U16_MAX);
	BUG_ON(num_ops > CEPH_OSD_MAX_OP);

	msg_size = 4 + 4 + 8 + 8 + 4+8;
	msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
	msg_size += 1 + 8 + 4 + 4; /* pg_t */
	msg_size += 4 + MAX_OBJ_NAME_SIZE;
	msg_size += 2 + num_ops*sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 8 * (snapc ? snapc->num_snaps : 0); /* snaps */
	msg_size += 4; /* retry_attempt */

	if (use_mempool) {
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
		memset(req, 0, sizeof(*req));
	} else {
		req = kzalloc(sizeof(*req), gfp_flags);
	}
	if (req == NULL)
		return NULL;

	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);
	INIT_LIST_HEAD(&req->r_linger_item);
	INIT_LIST_HEAD(&req->r_linger_osd);
	INIT_LIST_HEAD(&req->r_req_lru_item);
	INIT_LIST_HEAD(&req->r_osd_item);

	/* create reply message */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
				   OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}
	req->r_reply = msg;

	ceph_osd_data_init(&req->r_data_in);
	ceph_osd_data_init(&req->r_data_out);

	/* create request message; allow space for oid */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	memset(msg->front.iov_base, 0, msg->front.iov_len);

	req->r_request = msg;

	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
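
/*
 * Typical request lifecycle, for orientation. This is a sketch built
 * from the exported entry points in this file, not a verbatim caller;
 * oid, flags and op setup are handled by ceph_osdc_new_request() in
 * real callers:
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *	osd_req_op_extent_init(&req->r_ops[0], CEPH_OSD_OP_READ,
 *			       objoff, objlen, truncate_size, truncate_seq);
 *	osd_req_op_extent_osd_data(&req->r_ops[0], &req->r_data_in);
 *	ceph_osdc_build_request(req, off, NULL, CEPH_NOSNAP, NULL);
 *	ret = ceph_osdc_start_request(osdc, req, false);
 *	if (!ret)
 *		ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */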

static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_STAT:
	case CEPH_OSD_OP_MAPEXT:
	case CEPH_OSD_OP_MASKTRUNC:
	case CEPH_OSD_OP_SPARSE_READ:
	case CEPH_OSD_OP_NOTIFY:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_ASSERT_VER:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_APPEND:
	case CEPH_OSD_OP_STARTSYNC:
	case CEPH_OSD_OP_SETTRUNC:
	case CEPH_OSD_OP_TRIMTRUNC:
	case CEPH_OSD_OP_TMAPUP:
	case CEPH_OSD_OP_TMAPPUT:
	case CEPH_OSD_OP_TMAPGET:
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_ROLLBACK:
	case CEPH_OSD_OP_WATCH:
	case CEPH_OSD_OP_OMAPGETKEYS:
	case CEPH_OSD_OP_OMAPGETVALS:
	case CEPH_OSD_OP_OMAPGETHEADER:
	case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
	case CEPH_OSD_OP_OMAPSETVALS:
	case CEPH_OSD_OP_OMAPSETHEADER:
	case CEPH_OSD_OP_OMAPCLEAR:
	case CEPH_OSD_OP_OMAPRMKEYS:
	case CEPH_OSD_OP_OMAP_CMP:
	case CEPH_OSD_OP_CLONERANGE:
	case CEPH_OSD_OP_ASSERT_SRC_VERSION:
	case CEPH_OSD_OP_SRC_CMPXATTR:
	case CEPH_OSD_OP_GETXATTR:
	case CEPH_OSD_OP_GETXATTRS:
	case CEPH_OSD_OP_CMPXATTR:
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_SETXATTRS:
	case CEPH_OSD_OP_RESETXATTRS:
	case CEPH_OSD_OP_RMXATTR:
	case CEPH_OSD_OP_PULL:
	case CEPH_OSD_OP_PUSH:
	case CEPH_OSD_OP_BALANCEREADS:
	case CEPH_OSD_OP_UNBALANCEREADS:
	case CEPH_OSD_OP_SCRUB:
	case CEPH_OSD_OP_SCRUB_RESERVE:
	case CEPH_OSD_OP_SCRUB_UNRESERVE:
	case CEPH_OSD_OP_SCRUB_STOP:
	case CEPH_OSD_OP_SCRUB_MAP:
	case CEPH_OSD_OP_WRLOCK:
	case CEPH_OSD_OP_WRUNLOCK:
	case CEPH_OSD_OP_RDLOCK:
	case CEPH_OSD_OP_RDUNLOCK:
	case CEPH_OSD_OP_UPLOCK:
	case CEPH_OSD_OP_DNLOCK:
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_PGLS:
	case CEPH_OSD_OP_PGLS_FILTER:
		return true;
	default:
		return false;
	}
}

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
void osd_req_op_init(struct ceph_osd_req_op *op, u16 opcode)
{
	BUG_ON(!osd_req_opcode_valid(opcode));

	memset(op, 0, sizeof (*op));
	op->op = opcode;
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_req_op *op, u16 opcode,
				u64 offset, u64 length,
				u64 truncate_size, u32 truncate_seq)
{
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);

	osd_req_op_init(op, opcode);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE)
		payload_len += length;

	op->payload_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_req_op *op, u64 length)
{
	u64 previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	op->payload_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_osd_data(struct ceph_osd_req_op *op,
				struct ceph_osd_data *osd_data)
{
	op->extent.osd_data = osd_data;
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);
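
/*
 * For example: a 4096-byte CEPH_OSD_OP_WRITE gets payload_len = 4096,
 * since the data travels with the request, while a CEPH_OSD_OP_READ of
 * any size keeps payload_len = 0 because the data arrives in the reply.
 */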

void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode,
			const char *class, const char *method,
			const void *request_data, size_t request_data_size)
{
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	osd_req_op_init(op, opcode);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	payload_len += size;

	op->cls.request_data = request_data;
	BUG_ON(request_data_size > (size_t) U32_MAX);
	op->cls.request_data_len = (u32) request_data_size;
	payload_len += request_data_size;

	op->cls.argc = 0;	/* currently unused */

	op->payload_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
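
/*
 * Sketch (the class/method names here are examples, as used by rbd): a
 * method call on an object class is set up as
 *
 *	osd_req_op_cls_init(&req->r_ops[0], CEPH_OSD_OP_CALL,
 *			    "rbd", "get_id", NULL, 0);
 *
 * osd_req_encode_op() later copies the class name, method name and
 * request data into a pagelist that becomes the outgoing message data.
 */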

void osd_req_op_cls_response_data(struct ceph_osd_req_op *op,
				struct ceph_osd_data *response_data)
{
	op->cls.response_data = response_data;
}
EXPORT_SYMBOL(osd_req_op_cls_response_data);

void osd_req_op_watch_init(struct ceph_osd_req_op *op, u16 opcode,
				u64 cookie, u64 version, int flag)
{
	BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH);

	osd_req_op_init(op, opcode);

	op->watch.cookie = cookie;
	/* op->watch.ver = version; */	/* XXX 3847 */
	op->watch.ver = cpu_to_le64(version);
	if (opcode == CEPH_OSD_OP_WATCH && flag)
		op->watch.flag = (u8) 1;
}
EXPORT_SYMBOL(osd_req_op_watch_init);

static u64 osd_req_encode_op(struct ceph_osd_request *req,
			      struct ceph_osd_op *dst, unsigned int which)
{
	struct ceph_osd_req_op *src;
	u64 request_data_len = 0;
	struct ceph_pagelist *pagelist;

	BUG_ON(which >= req->r_num_ops);
	src = &req->r_ops[which];
	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		if (src->op == CEPH_OSD_OP_WRITE)
			request_data_len = src->extent.length;
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		if (src->op == CEPH_OSD_OP_WRITE)
			WARN_ON(src->extent.osd_data != &req->r_data_out);
		else
			WARN_ON(src->extent.osd_data != &req->r_data_in);
		break;
	case CEPH_OSD_OP_CALL:
		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
		BUG_ON(!pagelist);
		ceph_pagelist_init(pagelist);

		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.request_data_len);
		ceph_pagelist_append(pagelist, src->cls.class_name,
				     src->cls.class_len);
		ceph_pagelist_append(pagelist, src->cls.method_name,
				     src->cls.method_len);
		ceph_pagelist_append(pagelist, src->cls.request_data,
				     src->cls.request_data_len);
		ceph_osd_data_pagelist_init(&req->r_data_out, pagelist);

		WARN_ON(src->cls.response_data != &req->r_data_in);
		request_data_len = pagelist->length;
		break;
	case CEPH_OSD_OP_STARTSYNC:
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(src->watch.ver);
		dst->watch.flag = src->watch.flag;
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}
	dst->op = cpu_to_le16(src->op);
	dst->payload_len = cpu_to_le32(src->payload_len);

	return request_data_len;
}

/*
 * build new request AND message
 */
void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
				struct ceph_snap_context *snapc, u64 snap_id,
				struct timespec *mtime)
{
	struct ceph_msg *msg = req->r_request;
	void *p;
	size_t msg_size;
	int flags = req->r_flags;
	u64 data_len;
	unsigned int i;

	req->r_snapid = snap_id;
	req->r_snapc = ceph_get_snap_context(snapc);

	/* encode request */
	msg->hdr.version = cpu_to_le16(4);

	p = msg->front.iov_base;
	ceph_encode_32(&p, 1);   /* client_inc is always 1 */
	req->r_request_osdmap_epoch = p;
	p += 4;
	req->r_request_flags = p;
	p += 4;
	if (req->r_flags & CEPH_OSD_FLAG_WRITE)
		ceph_encode_timespec(p, mtime);
	p += sizeof(struct ceph_timespec);
	req->r_request_reassert_version = p;
	p += sizeof(struct ceph_eversion); /* will get filled in */

	/* oloc */
	ceph_encode_8(&p, 4);
	ceph_encode_8(&p, 4);
	ceph_encode_32(&p, 8 + 4 + 4);
	req->r_request_pool = p;
	p += 8;
	ceph_encode_32(&p, -1);  /* preferred */
	ceph_encode_32(&p, 0);   /* key len */

	ceph_encode_8(&p, 1);
	req->r_request_pgid = p;
	p += 8 + 4;
	ceph_encode_32(&p, -1);  /* preferred */

	/* oid */
	ceph_encode_32(&p, req->r_oid_len);
	memcpy(p, req->r_oid, req->r_oid_len);
	dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len);
	p += req->r_oid_len;

	/* ops--can imply data */
	ceph_encode_16(&p, (u16)req->r_num_ops);
	data_len = 0;
	for (i = 0; i < req->r_num_ops; i++) {
		data_len += osd_req_encode_op(req, p, i);
		p += sizeof(struct ceph_osd_op);
	}

	/* snaps */
	ceph_encode_64(&p, req->r_snapid);
	ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
	ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
	if (req->r_snapc) {
		for (i = 0; i < snapc->num_snaps; i++) {
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
		}
	}

	req->r_request_attempts = p;
	p += 4;

	/* data */
	if (flags & CEPH_OSD_FLAG_WRITE) {
		u16 data_off;

		/*
		 * The header "data_off" is a hint to the receiver
		 * allowing it to align received data into its
		 * buffers such that there's no need to re-copy
		 * it before writing it to disk (direct I/O).
		 */
		data_off = (u16) (off & 0xffff);
		req->r_request->hdr.data_off = cpu_to_le16(data_off);
	}
	req->r_request->hdr.data_len = cpu_to_le32(data_len);

	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
	msg_size = p - msg->front.iov_base;
	msg->front.iov_len = msg_size;
	msg->hdr.front_len = cpu_to_le32(msg_size);

	dout("build_request msg_size was %d\n", (int)msg_size);
}
EXPORT_SYMBOL(ceph_osdc_build_request);
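
/*
 * For reference, the front section encoded above (and sized in
 * ceph_osdc_alloc_request()) is, in order: client_inc, osdmap epoch,
 * flags, mtime, reassert_version, the object locator (pool, preferred,
 * key length), the raw pg_t, the object name, the op vector, snapid,
 * snap_seq, the snap list and the attempt count.  The epoch, flags,
 * pool, pgid and attempt slots are only reserved here; __send_request()
 * fills them in before each (re)send through the r_request_* pointers.
 */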

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if the request has a second op (num_ops > 1), a 'startsync' command
 * is included so that the osd will flush data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	struct ceph_osd_data *osd_data;
	struct ceph_osd_req_op *op;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	u32 object_size;
	u64 object_base;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
					GFP_NOFS);
	if (!req)
		return ERR_PTR(-ENOMEM);
	osd_data = opcode == CEPH_OSD_OP_WRITE ? &req->r_data_out
					       : &req->r_data_in;

	req->r_flags = flags;

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r < 0) {
		ceph_osdc_put_request(req);
		return ERR_PTR(r);
	}

	object_size = le32_to_cpu(layout->fl_object_size);
	object_base = off - objoff;
	if (truncate_size <= object_base) {
		truncate_size = 0;
	} else {
		truncate_size -= object_base;
		if (truncate_size > object_size)
			truncate_size = object_size;
	}

	op = &req->r_ops[0];
	osd_req_op_extent_init(op, opcode, objoff, objlen,
				truncate_size, truncate_seq);
	osd_req_op_extent_osd_data(op, osd_data);

	/*
	 * A second op in the ops array means the caller wants to
	 * also include a 'startsync' command so that the osd will
	 * flush data quickly.
	 */
	if (num_ops > 1)
		osd_req_op_init(++op, CEPH_OSD_OP_STARTSYNC);

	req->r_file_layout = *layout;	/* keep a copy */

	snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx",
		vino.ino, objnum);
	req->r_oid_len = strlen(req->r_oid);

	return req;
}
EXPORT_SYMBOL(ceph_osdc_new_request);

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
static void __insert_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *new)
{
	struct rb_node **p = &osdc->requests.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_osd_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &osdc->requests);
}

static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
						 u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
		    u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid) {
			if (!n->rb_left)
				return req;
			n = n->rb_left;
		} else if (tid > req->r_tid) {
			n = n->rb_right;
		} else {
			return req;
		}
	}
	return NULL;
}

/*
 * Resubmit requests pending on the given osd.
 */
static void __kick_osd_requests(struct ceph_osd_client *osdc,
				struct ceph_osd *osd)
{
	struct ceph_osd_request *req, *nreq;
	LIST_HEAD(resend);
	int err;

	dout("__kick_osd_requests osd%d\n", osd->o_osd);
	err = __reset_osd(osdc, osd);
	if (err)
		return;
	/*
	 * Build up a list of requests to resend by traversing the
	 * osd's list of requests.  Requests for a given object are
	 * sent in tid order, and that is also the order they're
	 * kept on this list.  Therefore all requests that are in
	 * flight will be found first, followed by all requests that
	 * have not yet been sent.  And to resend requests while
	 * preserving this order we will want to put any sent
	 * requests back on the front of the osd client's unsent
	 * list.
	 *
	 * So we build a separate ordered list of already-sent
	 * requests for the affected osd and splice it onto the
	 * front of the osd client's unsent list.  Once we've seen a
	 * request that has not yet been sent we're done.  Those
	 * requests are already sitting right where they belong.
	 */
	list_for_each_entry(req, &osd->o_requests, r_osd_item) {
		if (!req->r_sent)
			break;
		list_move_tail(&req->r_req_lru_item, &resend);
		dout("requeueing %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
		if (!req->r_linger)
			req->r_flags |= CEPH_OSD_FLAG_RETRY;
	}
	list_splice(&resend, &osdc->req_unsent);

	/*
	 * Linger requests are re-registered before sending, which
	 * sets up a new tid for each.  We add them to the unsent
	 * list at the end to keep things in tid order.
	 */
	list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
				 r_linger_osd) {
		/*
		 * reregister request prior to unregistering linger so
		 * that r_osd is preserved.
		 */
		BUG_ON(!list_empty(&req->r_req_lru_item));
		__register_request(osdc, req);
		list_add_tail(&req->r_req_lru_item, &osdc->req_unsent);
		list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
		__unregister_linger_request(osdc, req);
		dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
	}
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_reset(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;

	if (!osd)
		return;
	dout("osd_reset osd%d\n", osd->o_osd);
	osdc = osd->o_osdc;
	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	__kick_osd_requests(osdc, osd);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	osd = kzalloc(sizeof(*osd), GFP_NOFS);
	if (!osd)
		return NULL;

	atomic_set(&osd->o_ref, 1);
	osd->o_osdc = osdc;
	osd->o_osd = onum;
	RB_CLEAR_NODE(&osd->o_node);
	INIT_LIST_HEAD(&osd->o_requests);
	INIT_LIST_HEAD(&osd->o_linger_requests);
	INIT_LIST_HEAD(&osd->o_osd_lru);
	osd->o_incarnation = 1;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	INIT_LIST_HEAD(&osd->o_keepalive_item);
	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (atomic_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
		     atomic_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
	     atomic_read(&osd->o_ref) - 1);
	if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
		struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;

		ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
		kfree(osd);
	}
}

/*
 * remove an osd from our map
 */
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	dout("__remove_osd %p\n", osd);
	BUG_ON(!list_empty(&osd->o_requests));
	rb_erase(&osd->o_node, &osdc->osds);
	list_del_init(&osd->o_osd_lru);
	ceph_con_close(&osd->o_con);
	put_osd(osd);
}

static void remove_all_osds(struct ceph_osd_client *osdc)
{
	dout("%s %p\n", __func__, osdc);
	mutex_lock(&osdc->request_mutex);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

static void __move_osd_to_lru(struct ceph_osd_client *osdc,
			      struct ceph_osd *osd)
{
	dout("__move_osd_to_lru %p\n", osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	dout("__remove_osd_from_lru %p\n", osd);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
}

static void remove_old_osds(struct ceph_osd_client *osdc)
{
	struct ceph_osd *osd, *nosd;

	dout("__remove_old_osds %p\n", osdc);
	mutex_lock(&osdc->request_mutex);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (time_before(jiffies, osd->lru_ttl))
			break;
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

/*
 * reset osd connect
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests) &&
	    list_empty(&osd->o_linger_requests)) {
		__remove_osd(osdc, osd);

		return -ENODEV;
	}

	peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
			!ceph_con_opened(&osd->o_con)) {
		struct ceph_osd_request *req;

		dout(" osd addr hasn't changed and connection never opened,"
		     " letting msgr retry");
		/* touch each r_stamp for handle_timeout()'s benefit */
		list_for_each_entry(req, &osd->o_requests, r_osd_item)
			req->r_stamp = jiffies;

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
	struct rb_node **p = &osdc->osds.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd *osd = NULL;

	dout("__insert_osd %p osd%d\n", new, new->o_osd);
	while (*p) {
		parent = *p;
		osd = rb_entry(parent, struct ceph_osd, o_node);
		if (new->o_osd < osd->o_osd)
			p = &(*p)->rb_left;
		else if (new->o_osd > osd->o_osd)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->o_node, parent, p);
	rb_insert_color(&new->o_node, &osdc->osds);
}

static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
	struct ceph_osd *osd;
	struct rb_node *n = osdc->osds.rb_node;

	while (n) {
		osd = rb_entry(n, struct ceph_osd, o_node);
		if (o < osd->o_osd)
			n = n->rb_left;
		else if (o > osd->o_osd)
			n = n->rb_right;
		else
			return osd;
	}
	return NULL;
}

static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
	schedule_delayed_work(&osdc->timeout_work,
			osdc->client->options->osd_keepalive_timeout * HZ);
}

static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
	cancel_delayed_work(&osdc->timeout_work);
}

/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req)
{
	req->r_tid = ++osdc->last_tid;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	__insert_request(osdc, req);
	ceph_osdc_get_request(req);
	osdc->num_requests++;
	if (osdc->num_requests == 1) {
		dout(" first request, scheduling timeout\n");
		__schedule_osd_timeout(osdc);
	}
}

/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
				 struct ceph_osd_request *req)
{
	if (RB_EMPTY_NODE(&req->r_node)) {
		dout("__unregister_request %p tid %lld not registered\n",
			req, req->r_tid);
		return;
	}

	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &osdc->requests);
	osdc->num_requests--;

	if (req->r_osd) {
		/* make sure the original request isn't in flight. */
		ceph_msg_revoke(req->r_request);

		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_linger_item))
			req->r_osd = NULL;
	}

	list_del_init(&req->r_req_lru_item);
	ceph_osdc_put_request(req);

	if (osdc->num_requests == 0) {
		dout(" no requests, canceling timeout\n");
		__cancel_osd_timeout(osdc);
	}
}

/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
	if (req->r_sent && req->r_osd) {
		ceph_msg_revoke(req->r_request);
		req->r_sent = 0;
	}
}

static void __register_linger_request(struct ceph_osd_client *osdc,
				    struct ceph_osd_request *req)
{
	dout("__register_linger_request %p\n", req);
	list_add_tail(&req->r_linger_item, &osdc->req_linger);
	if (req->r_osd)
		list_add_tail(&req->r_linger_osd,
			      &req->r_osd->o_linger_requests);
}

static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req)
{
	dout("__unregister_linger_request %p\n", req);
	list_del_init(&req->r_linger_item);
	if (req->r_osd) {
		list_del_init(&req->r_linger_osd);

		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_osd_item))
			req->r_osd = NULL;
	}
}

void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
					 struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	if (req->r_linger) {
		__unregister_linger_request(osdc, req);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
}
EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);

void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
				  struct ceph_osd_request *req)
{
	if (!req->r_linger) {
		dout("set_request_linger %p\n", req);
		req->r_linger = 1;
		/*
		 * caller is now responsible for calling
		 * unregister_linger_request
		 */
		ceph_osdc_get_request(req);
	}
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);
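
/*
 * Linger usage sketch (the ordering follows rbd's watch setup;
 * illustrative): mark the request lingering before starting it so the
 * osd client re-registers and resends it across connection resets and
 * map changes:
 *
 *	ceph_osdc_set_request_linger(osdc, req);
 *	ret = ceph_osdc_start_request(osdc, req, true);
 *	...
 *	ceph_osdc_unregister_linger_request(osdc, req);
 */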

/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.  Move the request to the appropriate list
 * (unsent, homeless) or leave on in-flight lru.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_request(struct ceph_osd_client *osdc,
			 struct ceph_osd_request *req, int force_resend)
{
	struct ceph_pg pgid;
	int acting[CEPH_PG_MAX_SIZE];
	int o = -1, num = 0;
	int err;

	dout("map_request %p tid %lld\n", req, req->r_tid);
	err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap,
				ceph_file_layout_pg_pool(req->r_file_layout));
	if (err) {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
		return err;
	}
	req->r_pgid = pgid;

	err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
	if (err > 0) {
		o = acting[0];
		num = err;
	}

	if ((!force_resend &&
	     req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation &&
	     req->r_num_pg_osds == num &&
	     memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
	     req->r_tid, pgid.pool, pgid.seed, o,
	     req->r_osd ? req->r_osd->o_osd : -1);

	/* record full pg acting set */
	memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
	req->r_num_pg_osds = num;

	if (req->r_osd) {
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		err = -ENOMEM;
		req->r_osd = create_osd(osdc, o);
		if (!req->r_osd) {
			list_move(&req->r_req_lru_item, &osdc->req_notarget);
			goto out;
		}

		dout("map_request osd %p is osd%d\n", req->r_osd, o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con,
			      CEPH_ENTITY_TYPE_OSD, o,
			      &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd) {
		__remove_osd_from_lru(req->r_osd);
		list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
		list_move_tail(&req->r_req_lru_item, &osdc->req_unsent);
	} else {
		list_move_tail(&req->r_req_lru_item, &osdc->req_notarget);
	}
	err = 1;  /* osd or pg changed */

out:
	return err;
}

/*
 * caller should hold map_sem (for read) and request_mutex
 */
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	void *p;

	dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
	     req, req->r_tid, req->r_osd->o_osd, req->r_flags,
	     (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);

	/* fill in message content that changes each time we send it */
	put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
	put_unaligned_le32(req->r_flags, req->r_request_flags);
	put_unaligned_le64(req->r_pgid.pool, req->r_request_pool);
	p = req->r_request_pgid;
	ceph_encode_64(&p, req->r_pgid.pool);
	ceph_encode_32(&p, req->r_pgid.seed);
	put_unaligned_le64(1, req->r_request_attempts);  /* FIXME */
	memcpy(req->r_request_reassert_version, &req->r_reassert_version,
	       sizeof(req->r_reassert_version));

	req->r_stamp = jiffies;
	list_move_tail(&req->r_req_lru_item, &osdc->req_lru);

	ceph_msg_get(req->r_request); /* send consumes a ref */
	ceph_con_send(&req->r_osd->o_con, req->r_request);
	req->r_sent = req->r_osd->o_incarnation;
}

/*
 * Send any requests in the queue (req_unsent).
 */
static void __send_queued(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req, *tmp;

	dout("__send_queued\n");
	list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
		__send_request(osdc, req);
}

/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests have been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req;
	struct ceph_osd *osd;
	unsigned long keepalive =
		osdc->client->options->osd_keepalive_timeout * HZ;
	struct list_head slow_osds;
	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	INIT_LIST_HEAD(&slow_osds);
	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
		if (time_before(jiffies, req->r_stamp + keepalive))
			break;

		osd = req->r_osd;
		BUG_ON(!osd);
		dout(" tid %llu is slow, will send keepalive on osd%d\n",
		     req->r_tid, osd->o_osd);
		list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}
	while (!list_empty(&slow_osds)) {
		osd = list_entry(slow_osds.next, struct ceph_osd,
				 o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	__schedule_osd_timeout(osdc);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}

static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay =
		osdc->client->options->osd_idle_ttl * HZ >> 2;

	dout("osds timeout\n");
	down_read(&osdc->map_sem);
	remove_old_osds(osdc);
	up_read(&osdc->map_sem);

	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}

static void complete_request(struct ceph_osd_request *req)
{
	if (req->r_safe_callback)
		req->r_safe_callback(req, NULL);
	complete_all(&req->r_safe_completion);  /* fsync waiter */
}

/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
			 struct ceph_connection *con)
{
	void *p, *end;
	struct ceph_osd_request *req;
	u64 tid;
	int object_len;
	unsigned int numops;
	int payload_len, flags;
	s32 result;
	s32 retry_attempt;
	struct ceph_pg pg;
	int err;
	u32 reassert_epoch;
	u64 reassert_version;
	u32 osdmap_epoch;
	int already_completed;
	u32 bytes;
	unsigned int i;

	tid = le64_to_cpu(msg->hdr.tid);
	dout("handle_reply %p tid %llu\n", msg, tid);

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 4, bad);
	object_len = ceph_decode_32(&p);
	ceph_decode_need(&p, end, object_len, bad);
	p += object_len;

	err = ceph_decode_pgid(&p, end, &pg);
	if (err)
		goto bad;

	ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
	flags = ceph_decode_64(&p);
	result = ceph_decode_32(&p);
	reassert_epoch = ceph_decode_32(&p);
	reassert_version = ceph_decode_64(&p);
	osdmap_epoch = ceph_decode_32(&p);

	/* lookup */
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (req == NULL) {
		dout("handle_reply tid %llu dne\n", tid);
		goto bad_mutex;
	}
	ceph_osdc_get_request(req);

	dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
	     req, result);

	ceph_decode_need(&p, end, 4, bad);
	numops = ceph_decode_32(&p);
	if (numops > CEPH_OSD_MAX_OP)
		goto bad_put;
	if (numops != req->r_num_ops)
		goto bad_put;
	payload_len = 0;
	ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad);
	for (i = 0; i < numops; i++) {
		struct ceph_osd_op *op = p;
		int len;

		len = le32_to_cpu(op->payload_len);
		req->r_reply_op_len[i] = len;
		dout(" op %d has %d bytes\n", i, len);
		payload_len += len;
		p += sizeof(*op);
	}
	bytes = le32_to_cpu(msg->hdr.data_len);
	if (payload_len != bytes) {
		pr_warning("sum of op payload lens %d != data_len %d",
			   payload_len, bytes);
		goto bad_put;
	}

	ceph_decode_need(&p, end, 4 + numops * 4, bad);
	retry_attempt = ceph_decode_32(&p);
	for (i = 0; i < numops; i++)
		req->r_reply_op_result[i] = ceph_decode_32(&p);

	if (!req->r_got_reply) {

		req->r_result = result;
		dout("handle_reply result %d bytes %d\n", req->r_result,
		     bytes);
		if (req->r_result == 0)
			req->r_result = bytes;

		/* in case this is a write and we need to replay, */
		req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
		req->r_reassert_version.version = cpu_to_le64(reassert_version);

		req->r_got_reply = 1;
	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
		dout("handle_reply tid %llu dup ack\n", tid);
		mutex_unlock(&osdc->request_mutex);
		goto done;
	}

	dout("handle_reply tid %llu flags %d\n", tid, flags);

	if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
		__register_linger_request(osdc, req);

	/* either this is a read, or we got the safe response */
	if (result < 0 ||
	    (flags & CEPH_OSD_FLAG_ONDISK) ||
	    ((flags & CEPH_OSD_FLAG_WRITE) == 0))
		__unregister_request(osdc, req);

	already_completed = req->r_completed;
	req->r_completed = 1;
	mutex_unlock(&osdc->request_mutex);
	if (already_completed)
		goto done;

	if (req->r_callback)
		req->r_callback(req, msg);
	else
		complete_all(&req->r_completion);

	if (flags & CEPH_OSD_FLAG_ONDISK)
		complete_request(req);

done:
	dout("req=%p req->r_linger=%d\n", req, req->r_linger);
	ceph_osdc_put_request(req);
	return;

bad_put:
	ceph_osdc_put_request(req);
bad_mutex:
	mutex_unlock(&osdc->request_mutex);
bad:
	pr_err("corrupt osd_op_reply got %d %d\n",
	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
	ceph_msg_dump(msg);
}

static void reset_changed_osds(struct ceph_osd_client *osdc)
{
	struct rb_node *p, *n;

	for (p = rb_first(&osdc->osds); p; p = n) {
		struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);

		n = rb_next(p);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap,
					 osd->o_osd),
			   sizeof(struct ceph_entity_addr)) != 0)
			__reset_osd(osdc, osd);
	}
}

/*
 * Requeue requests whose mapping to an OSD has changed.  If requests map to
 * no osd, request a new map.
 *
 * Caller should hold map_sem for read.
 */
static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
	struct ceph_osd_request *req, *nreq;
	struct rb_node *p;
	int needmap = 0;
	int err;

	dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
	mutex_lock(&osdc->request_mutex);
	for (p = rb_first(&osdc->requests); p; ) {
		req = rb_entry(p, struct ceph_osd_request, r_node);
		p = rb_next(p);

		/*
		 * For linger requests that have not yet been
		 * registered, move them to the linger list; they'll
		 * be sent to the osd in the loop below.  Unregister
		 * the request before re-registering it as a linger
		 * request to ensure the __map_request() below
		 * will decide it needs to be sent.
		 */
		if (req->r_linger && list_empty(&req->r_linger_item)) {
			dout("%p tid %llu restart on osd%d\n",
			     req, req->r_tid,
			     req->r_osd ? req->r_osd->o_osd : -1);
			__unregister_request(osdc, req);
			__register_linger_request(osdc, req);
			continue;
		}

		err = __map_request(osdc, req, force_resend);
		if (err < 0)
			continue;  /* error */
		if (req->r_osd == NULL) {
			dout("%p tid %llu maps to no osd\n", req, req->r_tid);
			needmap++;  /* request a newer map */
		} else if (err > 0) {
			if (!req->r_linger) {
				dout("%p tid %llu requeued on osd%d\n", req,
				     req->r_tid,
				     req->r_osd ? req->r_osd->o_osd : -1);
				req->r_flags |= CEPH_OSD_FLAG_RETRY;
			}
		}
	}

	list_for_each_entry_safe(req, nreq, &osdc->req_linger,
				 r_linger_item) {
		dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);

		err = __map_request(osdc, req, force_resend);
		dout("__map_request returned %d\n", err);
		if (err == 0)
			continue;  /* no change and no osd was specified */
		if (err < 0)
			continue;  /* hrm! */
		if (req->r_osd == NULL) {
			dout("tid %llu maps to no valid osd\n", req->r_tid);
			needmap++;  /* request a newer map */
			continue;
		}

		dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
		     req->r_osd ? req->r_osd->o_osd : -1);
		__register_request(osdc, req);
		__unregister_linger_request(osdc, req);
	}
	mutex_unlock(&osdc->request_mutex);

	if (needmap) {
		dout("%d requests for down osds, need new map\n", needmap);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	}
	reset_changed_osds(osdc);
}

/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p, *end, *next;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_osdmap *newmap = NULL, *oldmap;
	int err;
	struct ceph_fsid fsid;

	dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		return;

	down_write(&osdc->map_sem);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		next = p + maplen;
		if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			newmap = osdmap_apply_incremental(&p, next,
							  osdc->osdmap,
							  &osdc->client->msgr);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			if (newmap != osdc->osdmap) {
				ceph_osdmap_destroy(osdc->osdmap);
				osdc->osdmap = newmap;
			}
			kick_requests(osdc, 0);
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p = next;
		nr_maps--;
	}
	if (newmap)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			int skipped_map = 0;

			dout("taking full map %u len %d\n", epoch, maplen);
			newmap = osdmap_decode(&p, p+maplen);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			oldmap = osdc->osdmap;
			osdc->osdmap = newmap;
			if (oldmap) {
				if (oldmap->epoch + 1 < newmap->epoch)
					skipped_map = 1;
				ceph_osdmap_destroy(oldmap);
			}
			kick_requests(osdc, skipped_map);
		}
		p += maplen;
		nr_maps--;
	}

done:
	downgrade_write(&osdc->map_sem);
	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);

	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->map_sem);
}

/*
 * watch/notify callback event infrastructure
 *
 * These callbacks are used both for watch and notify operations.
 */
static void __release_event(struct kref *kref)
{
	struct ceph_osd_event *event =
		container_of(kref, struct ceph_osd_event, kref);

	dout("__release_event %p\n", event);
	kfree(event);
}

static void get_event(struct ceph_osd_event *event)
{
	kref_get(&event->kref);
}

void ceph_osdc_put_event(struct ceph_osd_event *event)
{
	kref_put(&event->kref, __release_event);
}
EXPORT_SYMBOL(ceph_osdc_put_event);

static void __insert_event(struct ceph_osd_client *osdc,
			     struct ceph_osd_event *new)
{
	struct rb_node **p = &osdc->event_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_event *event = NULL;

	while (*p) {
		parent = *p;
		event = rb_entry(parent, struct ceph_osd_event, node);
		if (new->cookie < event->cookie)
			p = &(*p)->rb_left;
		else if (new->cookie > event->cookie)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &osdc->event_tree);
}

static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
					   u64 cookie)
{
	struct rb_node **p = &osdc->event_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_event *event = NULL;

	while (*p) {
		parent = *p;
		event = rb_entry(parent, struct ceph_osd_event, node);
		if (cookie < event->cookie)
			p = &(*p)->rb_left;
		else if (cookie > event->cookie)
			p = &(*p)->rb_right;
		else
			return event;
	}
	return NULL;
}

static void __remove_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	if (!RB_EMPTY_NODE(&event->node)) {
		dout("__remove_event removed %p\n", event);
		rb_erase(&event->node, &osdc->event_tree);
		ceph_osdc_put_event(event);
	} else {
		dout("__remove_event didn't remove %p\n", event);
	}
}

int ceph_osdc_create_event(struct ceph_osd_client *osdc,
			   void (*event_cb)(u64, u64, u8, void *),
			   void *data, struct ceph_osd_event **pevent)
{
	struct ceph_osd_event *event;

	event = kmalloc(sizeof(*event), GFP_NOIO);
	if (!event)
		return -ENOMEM;

	dout("create_event %p\n", event);
	event->cb = event_cb;
	event->one_shot = 0;
	event->data = data;
	event->osdc = osdc;
	INIT_LIST_HEAD(&event->osd_node);
	RB_CLEAR_NODE(&event->node);
	kref_init(&event->kref);   /* one ref for us */
	kref_get(&event->kref);    /* one ref for the caller */

	spin_lock(&osdc->event_lock);
	event->cookie = ++osdc->event_count;
	__insert_event(osdc, event);
	spin_unlock(&osdc->event_lock);

	*pevent = event;
	return 0;
}
EXPORT_SYMBOL(ceph_osdc_create_event);
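
/*
 * Event usage sketch (my_notify_cb is a hypothetical callback): create
 * the event, hand its cookie to a CEPH_OSD_OP_WATCH op via
 * osd_req_op_watch_init(), then cancel the event when finished:
 *
 *	static void my_notify_cb(u64 ver, u64 notify_id, u8 opcode,
 *				 void *data)
 *	{
 *		...
 *	}
 *
 *	err = ceph_osdc_create_event(osdc, my_notify_cb, data, &event);
 *	...
 *	ceph_osdc_cancel_event(event);
 */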

void ceph_osdc_cancel_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	dout("cancel_event %p\n", event);
	spin_lock(&osdc->event_lock);
	__remove_event(event);
	spin_unlock(&osdc->event_lock);
	ceph_osdc_put_event(event); /* caller's */
}
EXPORT_SYMBOL(ceph_osdc_cancel_event);

static void do_event_work(struct work_struct *work)
{
	struct ceph_osd_event_work *event_work =
		container_of(work, struct ceph_osd_event_work, work);
	struct ceph_osd_event *event = event_work->event;
	u64 ver = event_work->ver;
	u64 notify_id = event_work->notify_id;
	u8 opcode = event_work->opcode;

	dout("do_event_work completing %p\n", event);
	event->cb(ver, notify_id, opcode, event->data);
	dout("do_event_work completed %p\n", event);
	ceph_osdc_put_event(event);
	kfree(event_work);
}

/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p, *end;
	u8 proto_ver;
	u64 cookie, ver, notify_id;
	u8 opcode;
	struct ceph_osd_event *event;
	struct ceph_osd_event_work *event_work;

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	ceph_decode_64_safe(&p, end, ver, bad);
	ceph_decode_64_safe(&p, end, notify_id, bad);

	spin_lock(&osdc->event_lock);
	event = __find_event(osdc, cookie);
	if (event) {
		BUG_ON(event->one_shot);
		get_event(event);
	}
	spin_unlock(&osdc->event_lock);
	dout("handle_watch_notify cookie %lld ver %lld event %p\n",
	     cookie, ver, event);
	if (event) {
		event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
		if (!event_work) {
			dout("ERROR: could not allocate event_work\n");
			goto done_err;
		}
		INIT_WORK(&event_work->work, do_event_work);
		event_work->event = event;
		event_work->ver = ver;
		event_work->notify_id = notify_id;
		event_work->opcode = opcode;
		if (!queue_work(osdc->notify_wq, &event_work->work)) {
			dout("WARNING: failed to queue notify event work\n");
			goto done_err;
		}
	}

	return;

done_err:
	ceph_osdc_put_event(event);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
}

static void ceph_osdc_msg_data_set(struct ceph_msg *msg,
				struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_set_pages(msg, osd_data->pages,
					length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_set_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_set_bio(msg, osd_data->bio, length);
#endif /* CONFIG_BLOCK */
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}

/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	int rc = 0;

	/* Set up response incoming data and request outgoing data fields */

	ceph_osdc_msg_data_set(req->r_reply, &req->r_data_in);
	ceph_osdc_msg_data_set(req->r_request, &req->r_data_out);

	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	__register_request(osdc, req);
	WARN_ON(req->r_sent);
	rc = __map_request(osdc, req, 0);
	if (rc < 0) {
		if (nofail) {
			dout("osdc_start_request failed map, "
				" will retry %lld\n", req->r_tid);
			rc = 0;
		}
		goto out_unlock;
	}
	if (req->r_osd == NULL) {
		dout("send_request %p no up osds in pg\n", req);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	} else {
		__send_queued(osdc);
	}
	rc = 0;
out_unlock:
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	int rc;

	rc = wait_for_completion_interruptible(&req->r_completion);
	if (rc < 0) {
		mutex_lock(&osdc->request_mutex);
		__cancel_request(req);
		__unregister_request(osdc, req);
		mutex_unlock(&osdc->request_mutex);
		complete_request(req);
		dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
		return rc;
	}

	dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
	return req->r_result;
}
EXPORT_SYMBOL(ceph_osdc_wait_request);

/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req;
	u64 last_tid, next_tid = 0;

	mutex_lock(&osdc->request_mutex);
	last_tid = osdc->last_tid;
	while (1) {
		req = __lookup_request_ge(osdc, next_tid);
		if (!req)
			break;
		if (req->r_tid > last_tid)
			break;

		next_tid = req->r_tid + 1;
		if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
			continue;

		ceph_osdc_get_request(req);
		mutex_unlock(&osdc->request_mutex);
		dout("sync waiting on tid %llu (last is %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		mutex_lock(&osdc->request_mutex);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
	dout("sync done (thru tid %llu)\n", last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);

/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	osdc->osdmap = NULL;
	init_rwsem(&osdc->map_sem);
	init_completion(&osdc->map_waiters);
	osdc->last_requested_map = 0;
	mutex_init(&osdc->request_mutex);
	osdc->last_tid = 0;
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	osdc->requests = RB_ROOT;
	INIT_LIST_HEAD(&osdc->req_lru);
	INIT_LIST_HEAD(&osdc->req_unsent);
	INIT_LIST_HEAD(&osdc->req_notarget);
	INIT_LIST_HEAD(&osdc->req_linger);
	osdc->num_requests = 0;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
	spin_lock_init(&osdc->event_lock);
	osdc->event_tree = RB_ROOT;
	osdc->event_count = 0;

	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));

	err = -ENOMEM;
	osdc->req_mempool = mempool_create_kmalloc_pool(10,
					sizeof(struct ceph_osd_request));
	if (!osdc->req_mempool)
		goto out;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				OSD_OP_FRONT_LEN, 10, true,
				"osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				OSD_OPREPLY_FRONT_LEN, 10, true,
				"osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (IS_ERR(osdc->notify_wq)) {
		err = PTR_ERR(osdc->notify_wq);
		osdc->notify_wq = NULL;
		goto out_msgpool;
	}
	return 0;

out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out:
	return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	flush_workqueue(osdc->notify_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);
	if (osdc->osdmap) {
		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = NULL;
	}
	remove_all_osds(osdc);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}

/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, truncate_seq, truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */

	ceph_osd_data_pages_init(&req->r_data_in, pages, *plen, page_align,
				false, false);

	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
	     off, *plen, *plen, page_align);

	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);

/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	BUG_ON(vino.snap != CEPH_NOSNAP);	/* snapshots aren't writeable */
	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1,
				    CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
				    snapc, truncate_seq, truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	ceph_osd_data_pages_init(&req->r_data_out, pages, len, page_align,
				false, false);
	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

	ceph_osdc_build_request(req, off, snapc, CEPH_NOSNAP, mtime);

	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;
	int type = le16_to_cpu(msg->hdr.type);

	if (!osd)
		goto out;
	osdc = osd->o_osdc;

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osdc, msg, con);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}

/*
 * lookup and return message for incoming reply.  set up reply message
 * pages.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m;
	struct ceph_osd_request *req;
	int front = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid;

	tid = le64_to_cpu(hdr->tid);
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (!req) {
		*skip = 1;
		m = NULL;
		dout("get_reply unknown tid %llu from osd%d\n", tid,
		     osd->o_osd);
		goto out;
	}

	if (req->r_reply->con)
		dout("%s revoking msg %p from old con %p\n", __func__,
		     req->r_reply, req->r_reply->con);
	ceph_msg_revoke_incoming(req->r_reply);

	if (front > req->r_reply->front.iov_len) {
		pr_warning("get_reply front %d > preallocated %d\n",
			   front, (int)req->r_reply->front.iov_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
		if (!m)
			goto out;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}
	m = ceph_msg_get(req->r_reply);

	if (data_len > 0) {
		struct ceph_osd_data *osd_data = &req->r_data_in;

		if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
			if (osd_data->pages &&
				unlikely(osd_data->length < data_len)) {

				pr_warning("tid %lld reply has %d bytes "
					"we had only %llu bytes ready\n",
					tid, data_len, osd_data->length);
				*skip = 1;
				ceph_msg_put(m);
				m = NULL;
				goto out;
			}
		}
	}
	*skip = 0;
	dout("get_reply tid %lld %p\n", tid, m);

out:
	mutex_unlock(&osdc->request_mutex);
	return m;
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);
	int front = le32_to_cpu(hdr->front_len);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_WATCH_NOTIFY:
		return ceph_msg_new(type, front, GFP_NOFS, false);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
			osd->o_osd);
		*skip = 1;
		return NULL;
	}
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}

/*
 * authentication
 */
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(ac, auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}

static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
	return ceph_monc_validate_auth(&osdc->client->monc);
}

static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.fault = osd_reset,
};