#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OP_FRONT_LEN	4096
#define OSD_OPREPLY_FRONT_LEN	512

static const struct ceph_connection_operations osd_con_ops;
static void __send_queued(struct ceph_osd_client *osdc);
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
static void __register_request(struct ceph_osd_client *osdc,
                               struct ceph_osd_request *req);
static void __unregister_linger_request(struct ceph_osd_client *osdc,
                                        struct ceph_osd_request *req);
static void __send_request(struct ceph_osd_client *osdc,
                           struct ceph_osd_request *req);
/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage within the cluster.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
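/*
 * Illustrative usage sketch (not part of the original file): a caller
 * typically drives a read through the exported API below roughly as
 * follows; error handling is elided, and layout/vino are assumed to
 * come from the caller's inode state.
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, truncate_seq, truncate_size,
 *				    NULL, false);
 *	rc = ceph_osdc_start_request(osdc, req, false);
 *	if (!rc)
 *		rc = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */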
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
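/*
 * Worked example (illustrative): with the default 4 MB objects and
 * stripe_count == 1, a file extent off=5 MB, *plen=4 MB maps to
 * objnum=1, objoff=1 MB and is shortened to objlen=3 MB because it
 * crosses the object boundary at 8 MB.
 */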
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
                        u64 *objnum, u64 *objoff, u64 *objlen)
{
        u64 orig_len = *plen;
        int r;

        r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
                                          objoff, objlen);
        if (r < 0)
                return r;
        if (*objlen < orig_len) {
                *plen = *objlen;
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);
        }

        dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

        return 0;
}
void ceph_osdc_release_request(struct kref *kref)
{
        int num_pages;
        struct ceph_osd_request *req = container_of(kref,
                                                    struct ceph_osd_request,
                                                    r_kref);

        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply) {
                ceph_msg_revoke_incoming(req->r_reply);
                ceph_msg_put(req->r_reply);
        }

        if (req->r_data_in.type == CEPH_OSD_DATA_TYPE_PAGES &&
                        req->r_data_in.own_pages) {
                num_pages = calc_pages_for((u64)req->r_data_in.alignment,
                                                (u64)req->r_data_in.length);
                ceph_release_page_vector(req->r_data_in.pages, num_pages);
        }
        if (req->r_data_out.type == CEPH_OSD_DATA_TYPE_PAGES &&
                        req->r_data_out.own_pages) {
                num_pages = calc_pages_for((u64)req->r_data_out.alignment,
                                                (u64)req->r_data_out.length);
                ceph_release_page_vector(req->r_data_out.pages, num_pages);
        }

        ceph_put_snap_context(req->r_snapc);
        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else
                kfree(req);
}
EXPORT_SYMBOL(ceph_osdc_release_request);
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                                               struct ceph_snap_context *snapc,
                                               unsigned int num_ops,
                                               bool use_mempool,
                                               gfp_t gfp_flags)
{
        struct ceph_osd_request *req;
        struct ceph_msg *msg;
        size_t msg_size;

        msg_size = 4 + 4 + 8 + 8 + 4+8;
        msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
        msg_size += 1 + 8 + 4 + 4;     /* pg_t */
        msg_size += 4 + MAX_OBJ_NAME_SIZE;
        msg_size += 2 + num_ops*sizeof(struct ceph_osd_op);
        msg_size += 8;  /* snapid */
        msg_size += 8;  /* snap_seq */
        msg_size += 8 * (snapc ? snapc->num_snaps : 0);  /* snaps */
        msg_size += 4;

        if (use_mempool) {
                req = mempool_alloc(osdc->req_mempool, gfp_flags);
                memset(req, 0, sizeof(*req));
        } else {
                req = kzalloc(sizeof(*req), gfp_flags);
        }
        if (req == NULL)
                return NULL;

        req->r_osdc = osdc;
        req->r_mempool = use_mempool;

        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        init_completion(&req->r_safe_completion);
        RB_CLEAR_NODE(&req->r_node);
        INIT_LIST_HEAD(&req->r_unsafe_item);
        INIT_LIST_HEAD(&req->r_linger_item);
        INIT_LIST_HEAD(&req->r_linger_osd);
        INIT_LIST_HEAD(&req->r_req_lru_item);
        INIT_LIST_HEAD(&req->r_osd_item);

        /* create reply message */
        if (use_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
                                   OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
        if (!msg) {
                ceph_osdc_put_request(req);
                return NULL;
        }
        req->r_reply = msg;

        req->r_data_in.type = CEPH_OSD_DATA_TYPE_NONE;
        req->r_data_out.type = CEPH_OSD_DATA_TYPE_NONE;

        /* create request message; allow space for oid */
        if (use_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
        if (!msg) {
                ceph_osdc_put_request(req);
                return NULL;
        }

        memset(msg->front.iov_base, 0, msg->front.iov_len);

        req->r_request = msg;

        return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
static bool osd_req_opcode_valid(u16 opcode)
{
        switch (opcode) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_STAT:
        case CEPH_OSD_OP_MAPEXT:
        case CEPH_OSD_OP_MASKTRUNC:
        case CEPH_OSD_OP_SPARSE_READ:
        case CEPH_OSD_OP_NOTIFY:
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_ASSERT_VER:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
        case CEPH_OSD_OP_TRUNCATE:
        case CEPH_OSD_OP_ZERO:
        case CEPH_OSD_OP_DELETE:
        case CEPH_OSD_OP_APPEND:
        case CEPH_OSD_OP_STARTSYNC:
        case CEPH_OSD_OP_SETTRUNC:
        case CEPH_OSD_OP_TRIMTRUNC:
        case CEPH_OSD_OP_TMAPUP:
        case CEPH_OSD_OP_TMAPPUT:
        case CEPH_OSD_OP_TMAPGET:
        case CEPH_OSD_OP_CREATE:
        case CEPH_OSD_OP_ROLLBACK:
        case CEPH_OSD_OP_WATCH:
        case CEPH_OSD_OP_OMAPGETKEYS:
        case CEPH_OSD_OP_OMAPGETVALS:
        case CEPH_OSD_OP_OMAPGETHEADER:
        case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
        case CEPH_OSD_OP_OMAPSETVALS:
        case CEPH_OSD_OP_OMAPSETHEADER:
        case CEPH_OSD_OP_OMAPCLEAR:
        case CEPH_OSD_OP_OMAPRMKEYS:
        case CEPH_OSD_OP_OMAP_CMP:
        case CEPH_OSD_OP_CLONERANGE:
        case CEPH_OSD_OP_ASSERT_SRC_VERSION:
        case CEPH_OSD_OP_SRC_CMPXATTR:
        case CEPH_OSD_OP_GETXATTR:
        case CEPH_OSD_OP_GETXATTRS:
        case CEPH_OSD_OP_CMPXATTR:
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_SETXATTRS:
        case CEPH_OSD_OP_RESETXATTRS:
        case CEPH_OSD_OP_RMXATTR:
        case CEPH_OSD_OP_PULL:
        case CEPH_OSD_OP_PUSH:
        case CEPH_OSD_OP_BALANCEREADS:
        case CEPH_OSD_OP_UNBALANCEREADS:
        case CEPH_OSD_OP_SCRUB:
        case CEPH_OSD_OP_SCRUB_RESERVE:
        case CEPH_OSD_OP_SCRUB_UNRESERVE:
        case CEPH_OSD_OP_SCRUB_STOP:
        case CEPH_OSD_OP_SCRUB_MAP:
        case CEPH_OSD_OP_WRLOCK:
        case CEPH_OSD_OP_WRUNLOCK:
        case CEPH_OSD_OP_RDLOCK:
        case CEPH_OSD_OP_RDUNLOCK:
        case CEPH_OSD_OP_UPLOCK:
        case CEPH_OSD_OP_DNLOCK:
        case CEPH_OSD_OP_CALL:
        case CEPH_OSD_OP_PGLS:
        case CEPH_OSD_OP_PGLS_FILTER:
                return true;
        default:
                return false;
        }
}
/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
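/*
 * Example (illustrative): a data-less op such as STARTSYNC needs only
 * this common initialization:
 *
 *	struct ceph_osd_req_op op;
 *
 *	osd_req_op_init(&op, CEPH_OSD_OP_STARTSYNC);
 */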
void osd_req_op_init(struct ceph_osd_req_op *op, u16 opcode)
{
        BUG_ON(!osd_req_opcode_valid(opcode));

        memset(op, 0, sizeof (*op));
        op->op = opcode;
}
EXPORT_SYMBOL(osd_req_op_init);
void osd_req_op_extent_init(struct ceph_osd_req_op *op, u16 opcode,
                            u64 offset, u64 length,
                            u64 truncate_size, u32 truncate_seq)
{
        size_t payload_len = 0;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);

        osd_req_op_init(op, opcode);

        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
        if (opcode == CEPH_OSD_OP_WRITE)
                payload_len += length;

        op->payload_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);
void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode,
                         const char *class, const char *method,
                         const void *request_data, size_t request_data_size)
{
        size_t payload_len = 0;
        size_t size;

        BUG_ON(opcode != CEPH_OSD_OP_CALL);

        osd_req_op_init(op, opcode);

        op->cls.class_name = class;
        size = strlen(class);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.class_len = size;
        payload_len += size;

        op->cls.method_name = method;
        size = strlen(method);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.method_len = size;
        payload_len += size;

        op->cls.indata = request_data;
        BUG_ON(request_data_size > (size_t) U32_MAX);
        op->cls.indata_len = (u32) request_data_size;
        payload_len += request_data_size;

        op->cls.argc = 0;       /* currently unused */

        op->payload_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
void osd_req_op_watch_init(struct ceph_osd_req_op *op, u16 opcode,
                           u64 cookie, u64 version, int flag)
{
        BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH);

        osd_req_op_init(op, opcode);

        op->watch.cookie = cookie;
        /* op->watch.ver = version; */  /* XXX 3847 */
        op->watch.ver = cpu_to_le64(version);
        if (opcode == CEPH_OSD_OP_WATCH && flag)
                op->watch.flag = (u8) 1;
}
EXPORT_SYMBOL(osd_req_op_watch_init);
static u64 osd_req_encode_op(struct ceph_osd_request *req,
                             struct ceph_osd_op *dst,
                             struct ceph_osd_req_op *src)
{
        u64 out_data_len = 0;
        struct ceph_pagelist *pagelist;

        if (WARN_ON(!osd_req_opcode_valid(src->op))) {
                pr_err("unrecognized osd opcode %d\n", src->op);

                return 0;
        }

        switch (src->op) {
        case CEPH_OSD_OP_STAT:
                break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
                if (src->op == CEPH_OSD_OP_WRITE)
                        out_data_len = src->extent.length;
                dst->extent.offset = cpu_to_le64(src->extent.offset);
                dst->extent.length = cpu_to_le64(src->extent.length);
                dst->extent.truncate_size =
                        cpu_to_le64(src->extent.truncate_size);
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                break;
        case CEPH_OSD_OP_CALL:
                pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
                BUG_ON(!pagelist);
                ceph_pagelist_init(pagelist);

                dst->cls.class_len = src->cls.class_len;
                dst->cls.method_len = src->cls.method_len;
                dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
                ceph_pagelist_append(pagelist, src->cls.class_name,
                                     src->cls.class_len);
                ceph_pagelist_append(pagelist, src->cls.method_name,
                                     src->cls.method_len);
                ceph_pagelist_append(pagelist, src->cls.indata,
                                     src->cls.indata_len);

                req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGELIST;
                req->r_data_out.pagelist = pagelist;
                out_data_len = pagelist->length;
                break;
        case CEPH_OSD_OP_STARTSYNC:
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                dst->watch.cookie = cpu_to_le64(src->watch.cookie);
                dst->watch.ver = cpu_to_le64(src->watch.ver);
                dst->watch.flag = src->watch.flag;
                break;
        default:
                pr_err("unsupported osd opcode %s\n",
                       ceph_osd_op_name(src->op));
                WARN_ON(1);

                return 0;
        }
        dst->op = cpu_to_le16(src->op);
        dst->payload_len = cpu_to_le32(src->payload_len);

        return out_data_len;
}
/*
 * build new request AND message
 *
 */
void ceph_osdc_build_request(struct ceph_osd_request *req,
                             u64 off, unsigned int num_ops,
                             struct ceph_osd_req_op *src_ops,
                             struct ceph_snap_context *snapc, u64 snap_id,
                             struct timespec *mtime)
{
        struct ceph_msg *msg = req->r_request;
        struct ceph_osd_req_op *src_op;
        void *p;
        size_t msg_size;
        int flags = req->r_flags;
        u64 data_len;
        int i;

        req->r_num_ops = num_ops;
        req->r_snapid = snap_id;
        req->r_snapc = ceph_get_snap_context(snapc);

        /* encode request */
        msg->hdr.version = cpu_to_le16(4);

        p = msg->front.iov_base;
        ceph_encode_32(&p, 1);   /* client_inc is always 1 */
        req->r_request_osdmap_epoch = p;
        p += 4;
        req->r_request_flags = p;
        p += 4;
        if (req->r_flags & CEPH_OSD_FLAG_WRITE)
                ceph_encode_timespec(p, mtime);
        p += sizeof(struct ceph_timespec);
        req->r_request_reassert_version = p;
        p += sizeof(struct ceph_eversion); /* will get filled in */

        /* oloc */
        ceph_encode_8(&p, 4);
        ceph_encode_8(&p, 4);
        ceph_encode_32(&p, 8 + 4 + 4);
        req->r_request_pool = p;
        p += 8;
        ceph_encode_32(&p, -1);  /* preferred */
        ceph_encode_32(&p, 0);   /* key len */

        ceph_encode_8(&p, 1);
        req->r_request_pgid = p;
        p += 8 + 4;
        ceph_encode_32(&p, -1);  /* preferred */

        /* oid */
        ceph_encode_32(&p, req->r_oid_len);
        memcpy(p, req->r_oid, req->r_oid_len);
        dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len);
        p += req->r_oid_len;

        /* ops--can imply data */
        ceph_encode_16(&p, num_ops);
        src_op = src_ops;
        req->r_request_ops = p;
        data_len = 0;
        for (i = 0; i < num_ops; i++, src_op++) {
                data_len += osd_req_encode_op(req, p, src_op);
                p += sizeof(struct ceph_osd_op);
        }

        /* snaps */
        ceph_encode_64(&p, req->r_snapid);
        ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
        ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
        if (req->r_snapc) {
                for (i = 0; i < snapc->num_snaps; i++) {
                        ceph_encode_64(&p, req->r_snapc->snaps[i]);
                }
        }

        req->r_request_attempts = p;
        p += 4;

        /* data */
        if (flags & CEPH_OSD_FLAG_WRITE) {
                u16 data_off;

                /*
                 * The header "data_off" is a hint to the receiver
                 * allowing it to align received data into its
                 * buffers such that there's no need to re-copy
                 * it before writing it to disk (direct I/O).
                 */
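                /*
                 * Example (illustrative): a write to file offset
                 * 0x12345 yields data_off 0x2345 below, letting the
                 * receiver land the payload suitably aligned in its
                 * buffers.
                 */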
                data_off = (u16) (off & 0xffff);
                req->r_request->hdr.data_off = cpu_to_le16(data_off);
        }
        req->r_request->hdr.data_len = cpu_to_le32(data_len);

        BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
        msg_size = p - msg->front.iov_base;
        msg->front.iov_len = msg_size;
        msg->hdr.front_len = cpu_to_le32(msg_size);

        dout("build_request msg_size was %d num_ops %d\n", (int)msg_size,
             num_ops);
}
EXPORT_SYMBOL(ceph_osdc_build_request);
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               int do_sync,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               struct timespec *mtime,
                                               bool use_mempool)
{
        struct ceph_osd_req_op ops[2];
        struct ceph_osd_request *req;
        unsigned int num_op = do_sync ? 2 : 1;
        u64 objnum = 0;
        u64 objoff = 0;
        u64 objlen = 0;
        u32 object_size;
        u64 object_base;
        int r;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);

        req = ceph_osdc_alloc_request(osdc, snapc, num_op, use_mempool,
                                      GFP_NOFS);
        if (!req)
                return ERR_PTR(-ENOMEM);
        req->r_flags = flags;

        /* calculate max write size */
        r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
        if (r < 0) {
                ceph_osdc_put_request(req);
                return ERR_PTR(r);
        }

        object_size = le32_to_cpu(layout->fl_object_size);
        object_base = off - objoff;
        if (truncate_size <= object_base) {
                truncate_size = 0;
        } else {
                truncate_size -= object_base;
                if (truncate_size > object_size)
                        truncate_size = object_size;
        }

        osd_req_op_extent_init(&ops[0], opcode, objoff, objlen,
                               truncate_size, truncate_seq);
        if (do_sync)
                osd_req_op_init(&ops[1], CEPH_OSD_OP_STARTSYNC);

        req->r_file_layout = *layout;   /* keep a copy */

        snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx",
                 vino.ino, objnum);
        req->r_oid_len = strlen(req->r_oid);

        ceph_osdc_build_request(req, off, num_op, ops,
                                snapc, vino.snap, mtime);

        return req;
}
EXPORT_SYMBOL(ceph_osdc_new_request);
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
static void __insert_request(struct ceph_osd_client *osdc,
                             struct ceph_osd_request *new)
{
        struct rb_node **p = &osdc->requests.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_osd_request *req = NULL;

        while (*p) {
                parent = *p;
                req = rb_entry(parent, struct ceph_osd_request, r_node);
                if (new->r_tid < req->r_tid)
                        p = &(*p)->rb_left;
                else if (new->r_tid > req->r_tid)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->r_node, parent, p);
        rb_insert_color(&new->r_node, &osdc->requests);
}

static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
                                                 u64 tid)
{
        struct ceph_osd_request *req;
        struct rb_node *n = osdc->requests.rb_node;

        while (n) {
                req = rb_entry(n, struct ceph_osd_request, r_node);
                if (tid < req->r_tid)
                        n = n->rb_left;
                else if (tid > req->r_tid)
                        n = n->rb_right;
                else
                        return req;
        }
        return NULL;
}

static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
                    u64 tid)
{
        struct ceph_osd_request *req;
        struct rb_node *n = osdc->requests.rb_node;

        while (n) {
                req = rb_entry(n, struct ceph_osd_request, r_node);
                if (tid < req->r_tid) {
                        if (!n->rb_left)
                                return req;
                        n = n->rb_left;
                } else if (tid > req->r_tid) {
                        n = n->rb_right;
                } else {
                        return req;
                }
        }
        return NULL;
}
/*
 * Resubmit requests pending on the given osd.
 */
static void __kick_osd_requests(struct ceph_osd_client *osdc,
                                struct ceph_osd *osd)
{
        struct ceph_osd_request *req, *nreq;
        LIST_HEAD(resend);
        int err;

        dout("__kick_osd_requests osd%d\n", osd->o_osd);
        err = __reset_osd(osdc, osd);
        if (err)
                return;

        /*
         * Build up a list of requests to resend by traversing the
         * osd's list of requests.  Requests for a given object are
         * sent in tid order, and that is also the order they're
         * kept on this list.  Therefore all requests that are in
         * flight will be found first, followed by all requests that
         * have not yet been sent.  And to resend requests while
         * preserving this order we will want to put any sent
         * requests back on the front of the osd client's unsent
         * list.
         *
         * So we build a separate ordered list of already-sent
         * requests for the affected osd and splice it onto the
         * front of the osd client's unsent list.  Once we've seen a
         * request that has not yet been sent we're done.  Those
         * requests are already sitting right where they belong.
         */
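        /*
         * Illustration (hypothetical tids): if this osd's o_requests
         * holds tids 1,2,3 (sent) and 4,5 (not yet sent), the loop
         * below collects 1,2,3 on "resend", and the splice leaves
         * req_unsent as 1,2,3,4,5 -- still in tid order.
         */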
        list_for_each_entry(req, &osd->o_requests, r_osd_item) {
                if (!req->r_sent)
                        break;
                list_move_tail(&req->r_req_lru_item, &resend);
                dout("requeueing %p tid %llu osd%d\n", req, req->r_tid,
                     osd->o_osd);
                if (!req->r_linger)
                        req->r_flags |= CEPH_OSD_FLAG_RETRY;
        }
        list_splice(&resend, &osdc->req_unsent);

        /*
         * Linger requests are re-registered before sending, which
         * sets up a new tid for each.  We add them to the unsent
         * list at the end to keep things in tid order.
         */
        list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
                                 r_linger_osd) {
                /*
                 * reregister request prior to unregistering linger so
                 * that r_osd is preserved.
                 */
                BUG_ON(!list_empty(&req->r_req_lru_item));
                __register_request(osdc, req);
                list_add_tail(&req->r_req_lru_item, &osdc->req_unsent);
                list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
                __unregister_linger_request(osdc, req);
                dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
                     osd->o_osd);
        }
}
/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_reset(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc;

        if (!osd)
                return;
        dout("osd_reset osd%d\n", osd->o_osd);
        osdc = osd->o_osdc;
        down_read(&osdc->map_sem);
        mutex_lock(&osdc->request_mutex);
        __kick_osd_requests(osdc, osd);
        __send_queued(osdc);
        mutex_unlock(&osdc->request_mutex);
        up_read(&osdc->map_sem);
}
/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
        struct ceph_osd *osd;

        osd = kzalloc(sizeof(*osd), GFP_NOFS);
        if (!osd)
                return NULL;

        atomic_set(&osd->o_ref, 1);
        osd->o_osdc = osdc;
        osd->o_osd = onum;
        RB_CLEAR_NODE(&osd->o_node);
        INIT_LIST_HEAD(&osd->o_requests);
        INIT_LIST_HEAD(&osd->o_linger_requests);
        INIT_LIST_HEAD(&osd->o_osd_lru);
        osd->o_incarnation = 1;

        ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

        INIT_LIST_HEAD(&osd->o_keepalive_item);
        return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
        if (atomic_inc_not_zero(&osd->o_ref)) {
                dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
                     atomic_read(&osd->o_ref));
                return osd;
        } else {
                dout("get_osd %p FAIL\n", osd);
                return NULL;
        }
}

static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
             atomic_read(&osd->o_ref) - 1);
        if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
                struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;

                ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
                kfree(osd);
        }
}
/*
 * remove an osd from our map
 */
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
        dout("__remove_osd %p\n", osd);
        BUG_ON(!list_empty(&osd->o_requests));
        rb_erase(&osd->o_node, &osdc->osds);
        list_del_init(&osd->o_osd_lru);
        ceph_con_close(&osd->o_con);
        put_osd(osd);
}

static void remove_all_osds(struct ceph_osd_client *osdc)
{
        dout("%s %p\n", __func__, osdc);
        mutex_lock(&osdc->request_mutex);
        while (!RB_EMPTY_ROOT(&osdc->osds)) {
                struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
                                                struct ceph_osd, o_node);
                __remove_osd(osdc, osd);
        }
        mutex_unlock(&osdc->request_mutex);
}

static void __move_osd_to_lru(struct ceph_osd_client *osdc,
                              struct ceph_osd *osd)
{
        dout("__move_osd_to_lru %p\n", osd);
        BUG_ON(!list_empty(&osd->o_osd_lru));
        list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
        osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
        dout("__remove_osd_from_lru %p\n", osd);
        if (!list_empty(&osd->o_osd_lru))
                list_del_init(&osd->o_osd_lru);
}

static void remove_old_osds(struct ceph_osd_client *osdc)
{
        struct ceph_osd *osd, *nosd;

        dout("%s %p\n", __func__, osdc);
        mutex_lock(&osdc->request_mutex);
        list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
                if (time_before(jiffies, osd->lru_ttl))
                        break;
                __remove_osd(osdc, osd);
        }
        mutex_unlock(&osdc->request_mutex);
}
/*
 * reset osd connect
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
        struct ceph_entity_addr *peer_addr;

        dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
        if (list_empty(&osd->o_requests) &&
            list_empty(&osd->o_linger_requests)) {
                __remove_osd(osdc, osd);

                return -ENODEV;
        }

        peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
        if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
                        !ceph_con_opened(&osd->o_con)) {
                struct ceph_osd_request *req;

                dout(" osd addr hasn't changed and connection never opened,"
                     " letting msgr retry\n");
                /* touch each r_stamp for handle_timeout()'s benefit */
                list_for_each_entry(req, &osd->o_requests, r_osd_item)
                        req->r_stamp = jiffies;

                return -EAGAIN;
        }

        ceph_con_close(&osd->o_con);
        ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
        osd->o_incarnation++;

        return 0;
}
static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
        struct rb_node **p = &osdc->osds.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_osd *osd = NULL;

        dout("__insert_osd %p osd%d\n", new, new->o_osd);
        while (*p) {
                parent = *p;
                osd = rb_entry(parent, struct ceph_osd, o_node);
                if (new->o_osd < osd->o_osd)
                        p = &(*p)->rb_left;
                else if (new->o_osd > osd->o_osd)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->o_node, parent, p);
        rb_insert_color(&new->o_node, &osdc->osds);
}

static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
        struct ceph_osd *osd;
        struct rb_node *n = osdc->osds.rb_node;

        while (n) {
                osd = rb_entry(n, struct ceph_osd, o_node);
                if (o < osd->o_osd)
                        n = n->rb_left;
                else if (o > osd->o_osd)
                        n = n->rb_right;
                else
                        return osd;
        }
        return NULL;
}

static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
        schedule_delayed_work(&osdc->timeout_work,
                        osdc->client->options->osd_keepalive_timeout * HZ);
}

static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
        cancel_delayed_work(&osdc->timeout_work);
}
/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void __register_request(struct ceph_osd_client *osdc,
                               struct ceph_osd_request *req)
{
        req->r_tid = ++osdc->last_tid;
        req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
        dout("__register_request %p tid %lld\n", req, req->r_tid);
        __insert_request(osdc, req);
        ceph_osdc_get_request(req);
        osdc->num_requests++;
        if (osdc->num_requests == 1) {
                dout(" first request, scheduling timeout\n");
                __schedule_osd_timeout(osdc);
        }
}
/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
                                 struct ceph_osd_request *req)
{
        if (RB_EMPTY_NODE(&req->r_node)) {
                dout("__unregister_request %p tid %lld not registered\n",
                     req, req->r_tid);
                return;
        }

        dout("__unregister_request %p tid %lld\n", req, req->r_tid);
        rb_erase(&req->r_node, &osdc->requests);
        osdc->num_requests--;

        if (req->r_osd) {
                /* make sure the original request isn't in flight. */
                ceph_msg_revoke(req->r_request);

                list_del_init(&req->r_osd_item);
                if (list_empty(&req->r_osd->o_requests) &&
                    list_empty(&req->r_osd->o_linger_requests)) {
                        dout("moving osd to %p lru\n", req->r_osd);
                        __move_osd_to_lru(osdc, req->r_osd);
                }
                if (list_empty(&req->r_linger_item))
                        req->r_osd = NULL;
        }

        list_del_init(&req->r_req_lru_item);
        ceph_osdc_put_request(req);

        if (osdc->num_requests == 0) {
                dout(" no requests, canceling timeout\n");
                __cancel_osd_timeout(osdc);
        }
}

/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
        if (req->r_sent && req->r_osd) {
                ceph_msg_revoke(req->r_request);
                req->r_sent = 0;
        }
}
static void __register_linger_request(struct ceph_osd_client *osdc,
                                      struct ceph_osd_request *req)
{
        dout("__register_linger_request %p\n", req);
        list_add_tail(&req->r_linger_item, &osdc->req_linger);
        if (req->r_osd)
                list_add_tail(&req->r_linger_osd,
                              &req->r_osd->o_linger_requests);
}

static void __unregister_linger_request(struct ceph_osd_client *osdc,
                                        struct ceph_osd_request *req)
{
        dout("__unregister_linger_request %p\n", req);
        list_del_init(&req->r_linger_item);
        if (req->r_osd) {
                list_del_init(&req->r_linger_osd);

                if (list_empty(&req->r_osd->o_requests) &&
                    list_empty(&req->r_osd->o_linger_requests)) {
                        dout("moving osd to %p lru\n", req->r_osd);
                        __move_osd_to_lru(osdc, req->r_osd);
                }
                if (list_empty(&req->r_osd_item))
                        req->r_osd = NULL;
        }
}

void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
                                         struct ceph_osd_request *req)
{
        mutex_lock(&osdc->request_mutex);
        if (req->r_linger) {
                __unregister_linger_request(osdc, req);
                ceph_osdc_put_request(req);
        }
        mutex_unlock(&osdc->request_mutex);
}
EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);

void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
                                  struct ceph_osd_request *req)
{
        if (!req->r_linger) {
                dout("set_request_linger %p\n", req);
                req->r_linger = 1;
                /*
                 * caller is now responsible for calling
                 * unregister_linger_request
                 */
                ceph_osdc_get_request(req);
        }
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);
/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.  Move the request to the appropriate list
 * (unsent, homeless) or leave it on the in-flight lru.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_request(struct ceph_osd_client *osdc,
                         struct ceph_osd_request *req, int force_resend)
{
        struct ceph_pg pgid;
        int acting[CEPH_PG_MAX_SIZE];
        int o = -1, num = 0;
        int err;

        dout("map_request %p tid %lld\n", req, req->r_tid);
        err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap,
                                ceph_file_layout_pg_pool(req->r_file_layout));
        if (err) {
                list_move(&req->r_req_lru_item, &osdc->req_notarget);
                return err;
        }
        req->r_pgid = pgid;

        err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
        if (err > 0) {
                o = acting[0];
                num = err;
        }

        if ((!force_resend &&
             req->r_osd && req->r_osd->o_osd == o &&
             req->r_sent >= req->r_osd->o_incarnation &&
             req->r_num_pg_osds == num &&
             memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
            (req->r_osd == NULL && o == -1))
                return 0;  /* no change */

        dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
             req->r_tid, pgid.pool, pgid.seed, o,
             req->r_osd ? req->r_osd->o_osd : -1);

        /* record full pg acting set */
        memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
        req->r_num_pg_osds = num;

        if (req->r_osd) {
                __cancel_request(req);
                list_del_init(&req->r_osd_item);
                req->r_osd = NULL;
        }

        req->r_osd = __lookup_osd(osdc, o);
        if (!req->r_osd && o >= 0) {
                err = -ENOMEM;
                req->r_osd = create_osd(osdc, o);
                if (!req->r_osd) {
                        list_move(&req->r_req_lru_item, &osdc->req_notarget);
                        goto out;
                }

                dout("map_request osd %p is osd%d\n", req->r_osd, o);
                __insert_osd(osdc, req->r_osd);

                ceph_con_open(&req->r_osd->o_con,
                              CEPH_ENTITY_TYPE_OSD, o,
                              &osdc->osdmap->osd_addr[o]);
        }

        if (req->r_osd) {
                __remove_osd_from_lru(req->r_osd);
                list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
                list_move_tail(&req->r_req_lru_item, &osdc->req_unsent);
        } else {
                list_move_tail(&req->r_req_lru_item, &osdc->req_notarget);
        }
        err = 1;   /* osd or pg changed */

out:
        return err;
}
/*
 * caller should hold map_sem (for read) and request_mutex
 */
static void __send_request(struct ceph_osd_client *osdc,
                           struct ceph_osd_request *req)
{
        void *p;

        dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
             req, req->r_tid, req->r_osd->o_osd, req->r_flags,
             (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);

        /* fill in message content that changes each time we send it */
        put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
        put_unaligned_le32(req->r_flags, req->r_request_flags);
        put_unaligned_le64(req->r_pgid.pool, req->r_request_pool);
        p = req->r_request_pgid;
        ceph_encode_64(&p, req->r_pgid.pool);
        ceph_encode_32(&p, req->r_pgid.seed);
        put_unaligned_le64(1, req->r_request_attempts);  /* FIXME */
        memcpy(req->r_request_reassert_version, &req->r_reassert_version,
               sizeof(req->r_reassert_version));

        req->r_stamp = jiffies;
        list_move_tail(&req->r_req_lru_item, &osdc->req_lru);

        ceph_msg_get(req->r_request); /* send consumes a ref */
        ceph_con_send(&req->r_osd->o_con, req->r_request);
        req->r_sent = req->r_osd->o_incarnation;
}
/*
 * Send any requests in the queue (req_unsent).
 */
static void __send_queued(struct ceph_osd_client *osdc)
{
        struct ceph_osd_request *req, *tmp;

        dout("__send_queued\n");
        list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
                __send_request(osdc, req);
}
/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests have been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
        struct ceph_osd_client *osdc =
                container_of(work, struct ceph_osd_client, timeout_work.work);
        struct ceph_osd_request *req;
        struct ceph_osd *osd;
        unsigned long keepalive =
                osdc->client->options->osd_keepalive_timeout * HZ;
        struct list_head slow_osds;
        dout("timeout\n");
        down_read(&osdc->map_sem);

        ceph_monc_request_next_osdmap(&osdc->client->monc);

        mutex_lock(&osdc->request_mutex);

        /*
         * ping osds that are a bit slow.  this ensures that if there
         * is a break in the TCP connection we will notice, and reopen
         * a connection with that osd (from the fault callback).
         */
        INIT_LIST_HEAD(&slow_osds);
        list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
                if (time_before(jiffies, req->r_stamp + keepalive))
                        break;

                osd = req->r_osd;
                BUG_ON(!osd);
                dout(" tid %llu is slow, will send keepalive on osd%d\n",
                     req->r_tid, osd->o_osd);
                list_move_tail(&osd->o_keepalive_item, &slow_osds);
        }
        while (!list_empty(&slow_osds)) {
                osd = list_entry(slow_osds.next, struct ceph_osd,
                                 o_keepalive_item);
                list_del_init(&osd->o_keepalive_item);
                ceph_con_keepalive(&osd->o_con);
        }

        __schedule_osd_timeout(osdc);
        __send_queued(osdc);
        mutex_unlock(&osdc->request_mutex);
        up_read(&osdc->map_sem);
}
static void handle_osds_timeout(struct work_struct *work)
{
        struct ceph_osd_client *osdc =
                container_of(work, struct ceph_osd_client,
                             osds_timeout_work.work);
        unsigned long delay =
                osdc->client->options->osd_idle_ttl * HZ >> 2;

        dout("osds timeout\n");
        down_read(&osdc->map_sem);
        remove_old_osds(osdc);
        up_read(&osdc->map_sem);

        schedule_delayed_work(&osdc->osds_timeout_work,
                              round_jiffies_relative(delay));
}

static void complete_request(struct ceph_osd_request *req)
{
        if (req->r_safe_callback)
                req->r_safe_callback(req, NULL);
        complete_all(&req->r_safe_completion);  /* fsync waiter */
}
/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
                         struct ceph_connection *con)
{
        void *p, *end;
        struct ceph_osd_request *req;
        u64 tid;
        int object_len;
        int numops, payload_len, flags;
        s32 result;
        s32 retry_attempt;
        struct ceph_pg pg;
        int err;
        u32 reassert_epoch;
        u64 reassert_version;
        u32 osdmap_epoch;
        int already_completed;
        int i;

        tid = le64_to_cpu(msg->hdr.tid);
        dout("handle_reply %p tid %llu\n", msg, tid);

        p = msg->front.iov_base;
        end = p + msg->front.iov_len;

        ceph_decode_need(&p, end, 4, bad);
        object_len = ceph_decode_32(&p);
        ceph_decode_need(&p, end, object_len, bad);
        p += object_len;

        err = ceph_decode_pgid(&p, end, &pg);
        if (err)
                goto bad;

        ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
        flags = ceph_decode_64(&p);
        result = ceph_decode_32(&p);
        reassert_epoch = ceph_decode_32(&p);
        reassert_version = ceph_decode_64(&p);
        osdmap_epoch = ceph_decode_32(&p);

        /* lookup */
        mutex_lock(&osdc->request_mutex);
        req = __lookup_request(osdc, tid);
        if (req == NULL) {
                dout("handle_reply tid %llu dne\n", tid);
                goto bad_mutex;
        }
        ceph_osdc_get_request(req);

        dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
             req, result);

        ceph_decode_need(&p, end, 4, bad);
        numops = ceph_decode_32(&p);
        if (numops > CEPH_OSD_MAX_OP)
                goto bad_put;
        if (numops != req->r_num_ops)
                goto bad_put;
        payload_len = 0;
        ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad);
        for (i = 0; i < numops; i++) {
                struct ceph_osd_op *op = p;
                int len;

                len = le32_to_cpu(op->payload_len);
                req->r_reply_op_len[i] = len;
                dout(" op %d has %d bytes\n", i, len);
                payload_len += len;
                p += sizeof(*op);
        }
        if (payload_len != le32_to_cpu(msg->hdr.data_len)) {
                pr_warning("sum of op payload lens %d != data_len %d\n",
                           payload_len, le32_to_cpu(msg->hdr.data_len));
                goto bad_put;
        }

        ceph_decode_need(&p, end, 4 + numops * 4, bad);
        retry_attempt = ceph_decode_32(&p);
        for (i = 0; i < numops; i++)
                req->r_reply_op_result[i] = ceph_decode_32(&p);

        if (!req->r_got_reply) {
                unsigned int bytes;

                req->r_result = result;
                bytes = le32_to_cpu(msg->hdr.data_len);
                dout("handle_reply result %d bytes %d\n", req->r_result,
                     bytes);
                if (req->r_result == 0)
                        req->r_result = bytes;

                /* in case this is a write and we need to replay, */
                req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
                req->r_reassert_version.version = cpu_to_le64(reassert_version);

                req->r_got_reply = 1;
        } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
                dout("handle_reply tid %llu dup ack\n", tid);
                mutex_unlock(&osdc->request_mutex);
                goto done;
        }

        dout("handle_reply tid %llu flags %d\n", tid, flags);

        if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
                __register_linger_request(osdc, req);

        /* either this is a read, or we got the safe response */
        if (result < 0 ||
            (flags & CEPH_OSD_FLAG_ONDISK) ||
            ((flags & CEPH_OSD_FLAG_WRITE) == 0))
                __unregister_request(osdc, req);

        already_completed = req->r_completed;
        req->r_completed = 1;
        mutex_unlock(&osdc->request_mutex);
        if (already_completed)
                goto done;

        if (req->r_callback)
                req->r_callback(req, msg);
        else
                complete_all(&req->r_completion);

        if (flags & CEPH_OSD_FLAG_ONDISK)
                complete_request(req);

done:
        dout("req=%p req->r_linger=%d\n", req, req->r_linger);
        ceph_osdc_put_request(req);
        return;

bad_put:
        ceph_osdc_put_request(req);
bad_mutex:
        mutex_unlock(&osdc->request_mutex);
bad:
        pr_err("corrupt osd_op_reply got %d %d\n",
               (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
        ceph_msg_dump(msg);
}
static void reset_changed_osds(struct ceph_osd_client *osdc)
{
        struct rb_node *p, *n;

        for (p = rb_first(&osdc->osds); p; p = n) {
                struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);

                n = rb_next(p);
                if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
                    memcmp(&osd->o_con.peer_addr,
                           ceph_osd_addr(osdc->osdmap, osd->o_osd),
                           sizeof(struct ceph_entity_addr)) != 0)
                        __reset_osd(osdc, osd);
        }
}

/*
 * Requeue requests whose mapping to an OSD has changed.  If requests map to
 * no osd, request a new map.
 *
 * Caller should hold map_sem for read.
 */
static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
        struct ceph_osd_request *req, *nreq;
        struct rb_node *p;
        int needmap = 0;
        int err;

        dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
        mutex_lock(&osdc->request_mutex);
        for (p = rb_first(&osdc->requests); p; ) {
                req = rb_entry(p, struct ceph_osd_request, r_node);
                p = rb_next(p);

                /*
                 * For linger requests that have not yet been
                 * registered, move them to the linger list; they'll
                 * be sent to the osd in the loop below.  Unregister
                 * the request before re-registering it as a linger
                 * request to ensure the __map_request() below
                 * will decide it needs to be sent.
                 */
                if (req->r_linger && list_empty(&req->r_linger_item)) {
                        dout("%p tid %llu restart on osd%d\n",
                             req, req->r_tid,
                             req->r_osd ? req->r_osd->o_osd : -1);
                        __unregister_request(osdc, req);
                        __register_linger_request(osdc, req);
                        continue;
                }

                err = __map_request(osdc, req, force_resend);
                if (err < 0)
                        continue;  /* error */
                if (req->r_osd == NULL) {
                        dout("%p tid %llu maps to no osd\n", req, req->r_tid);
                        needmap++;  /* request a newer map */
                } else if (err > 0) {
                        if (!req->r_linger) {
                                dout("%p tid %llu requeued on osd%d\n", req,
                                     req->r_tid,
                                     req->r_osd ? req->r_osd->o_osd : -1);
                                req->r_flags |= CEPH_OSD_FLAG_RETRY;
                        }
                }
        }

        list_for_each_entry_safe(req, nreq, &osdc->req_linger,
                                 r_linger_item) {
                dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);

                err = __map_request(osdc, req, force_resend);
                dout("__map_request returned %d\n", err);
                if (err == 0)
                        continue;  /* no change and no osd was specified */
                if (err < 0)
                        continue;  /* hrm! */
                if (req->r_osd == NULL) {
                        dout("tid %llu maps to no valid osd\n", req->r_tid);
                        needmap++;  /* request a newer map */
                        continue;
                }

                dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
                     req->r_osd ? req->r_osd->o_osd : -1);
                __register_request(osdc, req);
                __unregister_linger_request(osdc, req);
        }
        mutex_unlock(&osdc->request_mutex);

        if (needmap) {
                dout("%d requests for down osds, need new map\n", needmap);
                ceph_monc_request_next_osdmap(&osdc->client->monc);
        }
        reset_changed_osds(osdc);
}
/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
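/*
 * Sketch of the front payload decoded below (derived from the decode
 * calls in this function; illustrative only):
 *
 *	ceph_fsid (16 bytes)
 *	u32 nr incremental maps, then per map: u32 epoch, u32 len, payload
 *	u32 nr full maps, then per map: u32 epoch, u32 len, payload
 */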
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
        void *p, *end, *next;
        u32 nr_maps, maplen;
        u32 epoch;
        struct ceph_osdmap *newmap = NULL, *oldmap;
        int err;
        struct ceph_fsid fsid;

        dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
        p = msg->front.iov_base;
        end = p + msg->front.iov_len;

        /* verify fsid */
        ceph_decode_need(&p, end, sizeof(fsid), bad);
        ceph_decode_copy(&p, &fsid, sizeof(fsid));
        if (ceph_check_fsid(osdc->client, &fsid) < 0)
                return;

        down_write(&osdc->map_sem);

        /* incremental maps */
        ceph_decode_32_safe(&p, end, nr_maps, bad);
        dout(" %d inc maps\n", nr_maps);
        while (nr_maps > 0) {
                ceph_decode_need(&p, end, 2*sizeof(u32), bad);
                epoch = ceph_decode_32(&p);
                maplen = ceph_decode_32(&p);
                ceph_decode_need(&p, end, maplen, bad);
                next = p + maplen;
                if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
                        dout("applying incremental map %u len %d\n",
                             epoch, maplen);
                        newmap = osdmap_apply_incremental(&p, next,
                                                          osdc->osdmap,
                                                          &osdc->client->msgr);
                        if (IS_ERR(newmap)) {
                                err = PTR_ERR(newmap);
                                goto bad;
                        }
                        BUG_ON(!newmap);
                        if (newmap != osdc->osdmap) {
                                ceph_osdmap_destroy(osdc->osdmap);
                                osdc->osdmap = newmap;
                        }
                        kick_requests(osdc, 0);
                } else {
                        dout("ignoring incremental map %u len %d\n",
                             epoch, maplen);
                }
                p = next;
                nr_maps--;
        }
        if (newmap)
                goto done;

        /* full maps */
        ceph_decode_32_safe(&p, end, nr_maps, bad);
        dout(" %d full maps\n", nr_maps);
        while (nr_maps) {
                ceph_decode_need(&p, end, 2*sizeof(u32), bad);
                epoch = ceph_decode_32(&p);
                maplen = ceph_decode_32(&p);
                ceph_decode_need(&p, end, maplen, bad);
                if (nr_maps > 1) {
                        dout("skipping non-latest full map %u len %d\n",
                             epoch, maplen);
                } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
                        dout("skipping full map %u len %d, "
                             "older than our %u\n", epoch, maplen,
                             osdc->osdmap->epoch);
                } else {
                        int skipped_map = 0;

                        dout("taking full map %u len %d\n", epoch, maplen);
                        newmap = osdmap_decode(&p, p+maplen);
                        if (IS_ERR(newmap)) {
                                err = PTR_ERR(newmap);
                                goto bad;
                        }
                        BUG_ON(!newmap);
                        oldmap = osdc->osdmap;
                        osdc->osdmap = newmap;
                        if (oldmap) {
                                if (oldmap->epoch + 1 < newmap->epoch)
                                        skipped_map = 1;
                                ceph_osdmap_destroy(oldmap);
                        }
                        kick_requests(osdc, skipped_map);
                }
                p += maplen;
                nr_maps--;
        }

done:
        downgrade_write(&osdc->map_sem);
        ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);

        /*
         * subscribe to subsequent osdmap updates if full to ensure
         * we find out when we are no longer full and stop returning
         * ENOSPC.
         */
        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
                ceph_monc_request_next_osdmap(&osdc->client->monc);

        mutex_lock(&osdc->request_mutex);
        __send_queued(osdc);
        mutex_unlock(&osdc->request_mutex);
        up_read(&osdc->map_sem);
        wake_up_all(&osdc->client->auth_wq);
        return;

bad:
        pr_err("osdc handle_map corrupt msg\n");
        ceph_msg_dump(msg);
        up_write(&osdc->map_sem);
}
/*
 * watch/notify callback event infrastructure
 *
 * These callbacks are used both for watch and notify operations.
 */
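/*
 * Usage sketch (illustrative; error handling elided).  "my_cb" and
 * "my_data" are caller-supplied placeholders:
 *
 *	struct ceph_osd_event *event;
 *
 *	ceph_osdc_create_event(osdc, my_cb, my_data, &event);
 *	... tie event->cookie into a CEPH_OSD_OP_WATCH op ...
 *	ceph_osdc_cancel_event(event);
 */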
static void __release_event(struct kref *kref)
{
        struct ceph_osd_event *event =
                container_of(kref, struct ceph_osd_event, kref);

        dout("__release_event %p\n", event);
        kfree(event);
}

static void get_event(struct ceph_osd_event *event)
{
        kref_get(&event->kref);
}

void ceph_osdc_put_event(struct ceph_osd_event *event)
{
        kref_put(&event->kref, __release_event);
}
EXPORT_SYMBOL(ceph_osdc_put_event);

static void __insert_event(struct ceph_osd_client *osdc,
                           struct ceph_osd_event *new)
{
        struct rb_node **p = &osdc->event_tree.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_osd_event *event = NULL;

        while (*p) {
                parent = *p;
                event = rb_entry(parent, struct ceph_osd_event, node);
                if (new->cookie < event->cookie)
                        p = &(*p)->rb_left;
                else if (new->cookie > event->cookie)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, &osdc->event_tree);
}

static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
                                           u64 cookie)
{
        struct rb_node **p = &osdc->event_tree.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_osd_event *event = NULL;

        while (*p) {
                parent = *p;
                event = rb_entry(parent, struct ceph_osd_event, node);
                if (cookie < event->cookie)
                        p = &(*p)->rb_left;
                else if (cookie > event->cookie)
                        p = &(*p)->rb_right;
                else
                        return event;
        }
        return NULL;
}

static void __remove_event(struct ceph_osd_event *event)
{
        struct ceph_osd_client *osdc = event->osdc;

        if (!RB_EMPTY_NODE(&event->node)) {
                dout("__remove_event removed %p\n", event);
                rb_erase(&event->node, &osdc->event_tree);
                ceph_osdc_put_event(event);
        } else {
                dout("__remove_event didn't remove %p\n", event);
        }
}
int ceph_osdc_create_event(struct ceph_osd_client *osdc,
                           void (*event_cb)(u64, u64, u8, void *),
                           void *data, struct ceph_osd_event **pevent)
{
        struct ceph_osd_event *event;

        event = kmalloc(sizeof(*event), GFP_NOIO);
        if (!event)
                return -ENOMEM;

        dout("create_event %p\n", event);
        event->cb = event_cb;
        event->one_shot = 0;
        event->data = data;
        event->osdc = osdc;
        INIT_LIST_HEAD(&event->osd_node);
        RB_CLEAR_NODE(&event->node);
        kref_init(&event->kref);   /* one ref for us */
        kref_get(&event->kref);    /* one ref for the caller */

        spin_lock(&osdc->event_lock);
        event->cookie = ++osdc->event_count;
        __insert_event(osdc, event);
        spin_unlock(&osdc->event_lock);

        *pevent = event;
        return 0;
}
EXPORT_SYMBOL(ceph_osdc_create_event);

void ceph_osdc_cancel_event(struct ceph_osd_event *event)
{
        struct ceph_osd_client *osdc = event->osdc;

        dout("cancel_event %p\n", event);
        spin_lock(&osdc->event_lock);
        __remove_event(event);
        spin_unlock(&osdc->event_lock);
        ceph_osdc_put_event(event); /* caller's */
}
EXPORT_SYMBOL(ceph_osdc_cancel_event);

static void do_event_work(struct work_struct *work)
{
        struct ceph_osd_event_work *event_work =
                container_of(work, struct ceph_osd_event_work, work);
        struct ceph_osd_event *event = event_work->event;
        u64 ver = event_work->ver;
        u64 notify_id = event_work->notify_id;
        u8 opcode = event_work->opcode;

        dout("do_event_work completing %p\n", event);
        event->cb(ver, notify_id, opcode, event->data);
        dout("do_event_work completed %p\n", event);
        ceph_osdc_put_event(event);
        kfree(event_work);
}
/*
 * Process osd watch notifications
 */
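/*
 * The notification front payload decoded below is, in order
 * (illustrative, derived from the decode calls): u8 proto_ver,
 * u8 opcode, u64 cookie, u64 ver, u64 notify_id.
 */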
static void handle_watch_notify(struct ceph_osd_client *osdc,
                                struct ceph_msg *msg)
{
        void *p, *end;
        u8 proto_ver;
        u64 cookie, ver, notify_id;
        u8 opcode;
        struct ceph_osd_event *event;
        struct ceph_osd_event_work *event_work;

        p = msg->front.iov_base;
        end = p + msg->front.iov_len;

        ceph_decode_8_safe(&p, end, proto_ver, bad);
        ceph_decode_8_safe(&p, end, opcode, bad);
        ceph_decode_64_safe(&p, end, cookie, bad);
        ceph_decode_64_safe(&p, end, ver, bad);
        ceph_decode_64_safe(&p, end, notify_id, bad);

        spin_lock(&osdc->event_lock);
        event = __find_event(osdc, cookie);
        if (event) {
                BUG_ON(event->one_shot);
                get_event(event);
        }
        spin_unlock(&osdc->event_lock);
        dout("handle_watch_notify cookie %lld ver %lld event %p\n",
             cookie, ver, event);
        if (event) {
                event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
                if (!event_work) {
                        dout("ERROR: could not allocate event_work\n");
                        goto done_err;
                }
                INIT_WORK(&event_work->work, do_event_work);
                event_work->event = event;
                event_work->ver = ver;
                event_work->notify_id = notify_id;
                event_work->opcode = opcode;
                if (!queue_work(osdc->notify_wq, &event_work->work)) {
                        dout("WARNING: failed to queue notify event work\n");
                        goto done_err;
                }
        }

        return;

done_err:
        ceph_osdc_put_event(event);
        return;

bad:
        pr_err("osdc handle_watch_notify corrupt msg\n");
}
static void ceph_osdc_msg_data_set(struct ceph_msg *msg,
                                   struct ceph_osd_data *osd_data)
{
        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
                BUG_ON(osd_data->length > (u64) SIZE_MAX);
                if (osd_data->length)
                        ceph_msg_data_set_pages(msg, osd_data->pages,
                                osd_data->length, osd_data->alignment);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                BUG_ON(!osd_data->pagelist->length);
                ceph_msg_data_set_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
                ceph_msg_data_set_bio(msg, osd_data->bio, osd_data->bio_length);
#endif
        } else {
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
        }
}
/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
                            struct ceph_osd_request *req,
                            bool nofail)
{
        int rc = 0;

        /* Set up response incoming data and request outgoing data fields */

        ceph_osdc_msg_data_set(req->r_reply, &req->r_data_in);
        ceph_osdc_msg_data_set(req->r_request, &req->r_data_out);

        down_read(&osdc->map_sem);
        mutex_lock(&osdc->request_mutex);
        __register_request(osdc, req);
        WARN_ON(req->r_sent);
        rc = __map_request(osdc, req, 0);
        if (rc < 0) {
                if (nofail) {
                        dout("osdc_start_request failed map, "
                             " will retry %lld\n", req->r_tid);
                        rc = 0;
                }
                goto out_unlock;
        }
        if (req->r_osd == NULL) {
                dout("send_request %p no up osds in pg\n", req);
                ceph_monc_request_next_osdmap(&osdc->client->monc);
        } else {
                __send_queued(osdc);
        }
        rc = 0;
out_unlock:
        mutex_unlock(&osdc->request_mutex);
        up_read(&osdc->map_sem);
        return rc;
}
EXPORT_SYMBOL(ceph_osdc_start_request);
/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
                           struct ceph_osd_request *req)
{
        int rc;

        rc = wait_for_completion_interruptible(&req->r_completion);
        if (rc < 0) {
                mutex_lock(&osdc->request_mutex);
                __cancel_request(req);
                __unregister_request(osdc, req);
                mutex_unlock(&osdc->request_mutex);
                complete_request(req);
                dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
                return rc;
        }

        dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
        return req->r_result;
}
EXPORT_SYMBOL(ceph_osdc_wait_request);

/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
        struct ceph_osd_request *req;
        u64 last_tid, next_tid = 0;

        mutex_lock(&osdc->request_mutex);
        last_tid = osdc->last_tid;
        while (1) {
                req = __lookup_request_ge(osdc, next_tid);
                if (!req)
                        break;
                if (req->r_tid > last_tid)
                        break;

                next_tid = req->r_tid + 1;
                if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
                        continue;

                ceph_osdc_get_request(req);
                mutex_unlock(&osdc->request_mutex);
                dout("sync waiting on tid %llu (last is %llu)\n",
                     req->r_tid, last_tid);
                wait_for_completion(&req->r_safe_completion);
                mutex_lock(&osdc->request_mutex);
                ceph_osdc_put_request(req);
        }
        mutex_unlock(&osdc->request_mutex);
        dout("sync done (thru tid %llu)\n", last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
        int err;

        dout("init\n");
        osdc->client = client;
        osdc->osdmap = NULL;
        init_rwsem(&osdc->map_sem);
        init_completion(&osdc->map_waiters);
        osdc->last_requested_map = 0;
        mutex_init(&osdc->request_mutex);
        osdc->last_tid = 0;
        osdc->osds = RB_ROOT;
        INIT_LIST_HEAD(&osdc->osd_lru);
        osdc->requests = RB_ROOT;
        INIT_LIST_HEAD(&osdc->req_lru);
        INIT_LIST_HEAD(&osdc->req_unsent);
        INIT_LIST_HEAD(&osdc->req_notarget);
        INIT_LIST_HEAD(&osdc->req_linger);
        osdc->num_requests = 0;
        INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
        INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
        spin_lock_init(&osdc->event_lock);
        osdc->event_tree = RB_ROOT;
        osdc->event_count = 0;

        schedule_delayed_work(&osdc->osds_timeout_work,
            round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));

        err = -ENOMEM;
        osdc->req_mempool = mempool_create_kmalloc_pool(10,
                                        sizeof(struct ceph_osd_request));
        if (!osdc->req_mempool)
                goto out;

        err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
                                OSD_OP_FRONT_LEN, 10, true,
                                "osd_op");
        if (err < 0)
                goto out_mempool;
        err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
                                OSD_OPREPLY_FRONT_LEN, 10, true,
                                "osd_op_reply");
        if (err < 0)
                goto out_msgpool;

        osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
        if (IS_ERR(osdc->notify_wq)) {
                err = PTR_ERR(osdc->notify_wq);
                osdc->notify_wq = NULL;
                goto out_msgpool;
        }
        return 0;

out_msgpool:
        ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
        mempool_destroy(osdc->req_mempool);
out:
        return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
        flush_workqueue(osdc->notify_wq);
        destroy_workqueue(osdc->notify_wq);
        cancel_delayed_work_sync(&osdc->timeout_work);
        cancel_delayed_work_sync(&osdc->osds_timeout_work);
        if (osdc->osdmap) {
                ceph_osdmap_destroy(osdc->osdmap);
                osdc->osdmap = NULL;
        }
        remove_all_osds(osdc);
        mempool_destroy(osdc->req_mempool);
        ceph_msgpool_destroy(&osdc->msgpool_op);
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
                        struct ceph_vino vino, struct ceph_file_layout *layout,
                        u64 off, u64 *plen,
                        u32 truncate_seq, u64 truncate_size,
                        struct page **pages, int num_pages, int page_align)
{
        struct ceph_osd_request *req;
        struct ceph_osd_data *osd_data;
        int rc = 0;

        dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
             vino.snap, off, *plen);
        req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
                                    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                    NULL, 0, truncate_seq, truncate_size, NULL,
                                    false);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* it may be a short read due to an object boundary */

        osd_data = &req->r_data_in;
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
        osd_data->pages = pages;
        osd_data->length = *plen;
        osd_data->alignment = page_align;

        dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
             off, *plen, osd_data->length, page_align);

        rc = ceph_osdc_start_request(osdc, req, false);
        if (!rc)
                rc = ceph_osdc_wait_request(osdc, req);

        ceph_osdc_put_request(req);
        dout("readpages result %d\n", rc);
        return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);
/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
                         struct ceph_file_layout *layout,
                         struct ceph_snap_context *snapc,
                         u64 off, u64 len,
                         u32 truncate_seq, u64 truncate_size,
                         struct timespec *mtime,
                         struct page **pages, int num_pages)
{
        struct ceph_osd_request *req;
        struct ceph_osd_data *osd_data;
        int rc = 0;
        int page_align = off & ~PAGE_MASK;

        BUG_ON(vino.snap != CEPH_NOSNAP);       /* snapshots aren't writeable */
        req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
                                    CEPH_OSD_OP_WRITE,
                                    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
                                    snapc, 0,
                                    truncate_seq, truncate_size, mtime,
                                    true);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* it may be a short write due to an object boundary */
        osd_data = &req->r_data_out;
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
        osd_data->pages = pages;
        osd_data->length = len;
        osd_data->alignment = page_align;
        dout("writepages %llu~%llu (%llu bytes)\n", off, len, osd_data->length);

        rc = ceph_osdc_start_request(osdc, req, true);
        if (!rc)
                rc = ceph_osdc_wait_request(osdc, req);

        ceph_osdc_put_request(req);
        if (rc == 0)
                rc = len;
        dout("writepages result %d\n", rc);
        return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);
/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc;
        int type = le16_to_cpu(msg->hdr.type);

        if (!osd)
                goto out;
        osdc = osd->o_osdc;

        switch (type) {
        case CEPH_MSG_OSD_MAP:
                ceph_osdc_handle_map(osdc, msg);
                break;
        case CEPH_MSG_OSD_OPREPLY:
                handle_reply(osdc, msg, con);
                break;
        case CEPH_MSG_WATCH_NOTIFY:
                handle_watch_notify(osdc, msg);
                break;

        default:
                pr_err("received unknown message type %d %s\n", type,
                       ceph_msg_type_name(type));
        }
out:
        ceph_msg_put(msg);
}
/*
 * lookup and return message for incoming reply.  set up reply message
 * pages.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
                                  struct ceph_msg_header *hdr,
                                  int *skip)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct ceph_msg *m;
        struct ceph_osd_request *req;
        int front = le32_to_cpu(hdr->front_len);
        int data_len = le32_to_cpu(hdr->data_len);
        u64 tid;

        tid = le64_to_cpu(hdr->tid);
        mutex_lock(&osdc->request_mutex);
        req = __lookup_request(osdc, tid);
        if (!req) {
                *skip = 1;
                m = NULL;
                dout("get_reply unknown tid %llu from osd%d\n", tid,
                     osd->o_osd);
                goto out;
        }

        if (req->r_reply->con)
                dout("%s revoking msg %p from old con %p\n", __func__,
                     req->r_reply, req->r_reply->con);
        ceph_msg_revoke_incoming(req->r_reply);

        if (front > req->r_reply->front.iov_len) {
                pr_warning("get_reply front %d > preallocated %d\n",
                           front, (int)req->r_reply->front.iov_len);
                m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
                if (!m)
                        goto out;
                ceph_msg_put(req->r_reply);
                req->r_reply = m;
        }
        m = ceph_msg_get(req->r_reply);

        if (data_len > 0) {
                struct ceph_osd_data *osd_data = &req->r_data_in;

                if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
                        if (osd_data->pages &&
                                unlikely(osd_data->length < data_len)) {

                                pr_warning("tid %lld reply has %d bytes "
                                        "we had only %llu bytes ready\n",
                                        tid, data_len, osd_data->length);
                                *skip = 1;
                                ceph_msg_put(m);
                                m = NULL;
                                goto out;
                        }
                }
        }
        *skip = 0;
        dout("get_reply tid %lld %p\n", tid, m);

out:
        mutex_unlock(&osdc->request_mutex);
        return m;
}
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
                                  struct ceph_msg_header *hdr,
                                  int *skip)
{
        struct ceph_osd *osd = con->private;
        int type = le16_to_cpu(hdr->type);
        int front = le32_to_cpu(hdr->front_len);

        *skip = 0;
        switch (type) {
        case CEPH_MSG_OSD_MAP:
        case CEPH_MSG_WATCH_NOTIFY:
                return ceph_msg_new(type, front, GFP_NOFS, false);
        case CEPH_MSG_OSD_OPREPLY:
                return get_reply(con, hdr, skip);
        default:
                pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
                        osd->o_osd);
                *skip = 1;
                return NULL;
        }
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        if (get_osd(osd))
                return con;
        return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        put_osd(osd);
}
/*
 * authentication
 */
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
                                                  int *proto, int force_new)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;
        struct ceph_auth_handshake *auth = &o->o_auth;

        if (force_new && auth->authorizer) {
                ceph_auth_destroy_authorizer(ac, auth->authorizer);
                auth->authorizer = NULL;
        }
        if (!auth->authorizer) {
                int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
                                                      auth);
                if (ret)
                        return ERR_PTR(ret);
        } else {
                int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
                                                      auth);
                if (ret)
                        return ERR_PTR(ret);
        }
        *proto = ac->protocol;

        return auth;
}

static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;

        return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;

        ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
        return ceph_monc_validate_auth(&osdc->client->monc);
}

static const struct ceph_connection_operations osd_con_ops = {
        .get = get_osd_con,
        .put = put_osd_con,
        .dispatch = dispatch,
        .get_authorizer = get_authorizer,
        .verify_authorizer_reply = verify_authorizer_reply,
        .invalidate_authorizer = invalidate_authorizer,
        .alloc_msg = alloc_msg,
        .fault = osd_reset,
};