/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Internal interfaces of the LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#ifndef LOV_CL_INTERNAL_H
#define LOV_CL_INTERNAL_H

#include <linux/libcfs/libcfs.h>

#include <obd.h>
#include <cl_object.h>
#include "lov_internal.h"
/** \defgroup lov lov
 * Logical object volume layer. This layer implements data striping (raid0).
 *
 * At the lov layer each top-entity (object, page, lock, io) is connected to
 * one or more sub-entities: a top-object, representing a file, is connected
 * to a set of sub-objects, each representing a stripe; a file-level top-lock
 * is connected to a set of per-stripe sub-locks; a top-page is connected to
 * a (single) sub-page; and a top-level IO is connected to a set of
 * (potentially concurrent) sub-IOs.
 *
 * A sub-object, a sub-page, and a sub-io have a well-defined top-object,
 * top-page, and top-io, respectively, while a single sub-lock can be part
 * of multiple top-locks.
 *
 * Reference counting models are different for different types of entities:
 *
 *     - a top-object keeps a reference to its sub-objects, and destroys
 *       them when it is destroyed.
 *
 *     - a top-page keeps a reference to its sub-page, and destroys it when
 *       the top-page itself is destroyed.
 *
 *     - a sub-lock keeps a reference to its top-locks. A top-lock keeps a
 *       reference (and a hold, see cl_lock_hold()) on its sub-locks while
 *       it is actively using them (that is, in the cl_lock_state::CLS_QUEUING,
 *       cl_lock_state::CLS_ENQUEUED, and cl_lock_state::CLS_HELD states).
 *       When moving into the cl_lock_state::CLS_CACHED state, the top-lock
 *       releases its hold. From this moment the top-lock has only a 'weak'
 *       reference to its sub-locks. This reference is protected by the
 *       top-lock's cl_lock::cll_guard, and is automatically cleared by the
 *       sub-lock when the latter is destroyed. When a sub-lock is canceled,
 *       the reference to it is removed from the top-lock array, and the
 *       top-lock is moved into the CLS_NEW state. It is guaranteed that all
 *       sub-locks exist while their top-lock is in the CLS_HELD or
 *       CLS_CACHED state.
 *
 *     - IOs are not reference counted.
 *
 * To implement a connection between top and sub entities, the lov layer is
 * split into two pieces: lov ("upper half") and lovsub ("bottom half"), both
 * implementing the full set of cl-interfaces. For example, a top-object has
 * the vvp and lov layers, and its sub-objects have the lovsub and osc
 * layers. The lovsub layer is used to track the child-parent relationship.
 *
 * @{
 */
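/*
 * For example, a file striped over two OSTs produces the following slice
 * stacks (a sketch; vvp is the VFS-facing layer, osc the network-facing
 * one):
 *
 *	top-object (file):   vvp | lov
 *	                           |
 *	              +------------+------------+
 *	              |                         |
 *	 sub-object (stripe 0)       sub-object (stripe 1)
 *	      lovsub | osc                lovsub | osc
 */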
struct lovsub_device;
struct lovsub_object;
struct lovsub_lock;
enum lov_device_flags {
	LOV_DEV_INITIALIZED = 1 << 0
};
/*
 * Upper half.
 */

/**
 * Resources that are used in the memory-cleaning path, and whose allocation
 * cannot fail even when memory is tight. They are preallocated in sufficient
 * quantities in lov_device::ld_emrg[], and access to them is serialized by
 * lov_device::ld_mutex.
 */
struct lov_device_emerg {
	/**
	 * Page list used to submit IO when memory is under pressure.
	 */
	struct cl_page_list	 emrg_page_list;
	/**
	 * sub-io shared by all threads accessing this device when memory is
	 * too low to allocate sub-ios dynamically.
	 */
	struct cl_io		 emrg_subio;
	/**
	 * Environment used by the sub-io in
	 * lov_device_emerg::emrg_subio.
	 */
	struct lu_env		*emrg_env;
	/**
	 * Refcheck for lov_device_emerg::emrg_env.
	 *
	 * \see cl_env_get()
	 */
	int			 emrg_refcheck;
};
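/*
 * A sketch of how a memory-cleaning path might borrow these preallocated
 * resources (hypothetical variable names; the real consumer is the sub-io
 * setup code in lov_io.c):
 *
 * \code
 *	struct lov_device_emerg *emrg = dev->ld_emrg[stripe];
 *
 *	mutex_lock(&dev->ld_mutex);	// serializes all emergency users
 *	sub->sub_io	  = &emrg->emrg_subio;
 *	sub->sub_env	  = emrg->emrg_env;
 *	sub->sub_borrowed = 1;		// remember not to free on fini
 *	...
 *	mutex_unlock(&dev->ld_mutex);
 * \endcode
 */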
struct lov_device {
	/*
	 * XXX Locking of lov-private data is missing.
	 */
	struct cl_device	  ld_cl;
	struct lov_obd		 *ld_lov;
	/** size of lov_device::ld_target[] array */
	__u32			  ld_target_nr;
	struct lovsub_device	**ld_target;
	__u32			  ld_flags;

	/** Emergency resources used in memory-cleansing paths. */
	struct lov_device_emerg **ld_emrg;
	/**
	 * Serializes access to lov_device::ld_emrg in low-memory
	 * conditions.
	 */
	struct mutex		  ld_mutex;
};
/**
 * Layout type.
 */
enum lov_layout_type {
	LLT_EMPTY,	/** empty file without body (mknod + truncate) */
	LLT_RAID0,	/** striped file */
	LLT_RELEASED,	/** file with no objects (data in HSM) */
	LLT_NR
};
/**
 * lov-specific file state.
 *
 * A lov object has a particular layout type, determining how the top-object
 * is built on top of sub-objects. The layout type can change dynamically.
 * When this happens, the lov_object::lo_type_guard semaphore is taken in
 * exclusive mode, all state pertaining to the old layout type is destroyed,
 * and new state is constructed. All object methods take said semaphore in
 * shared mode, providing serialization against transitions between layout
 * types.
 *
 * To avoid multiple `if' or `switch' statements selecting behavior for the
 * current layout type, object methods perform double-dispatch, invoking the
 * function corresponding to the current layout type.
 */
struct lov_object {
	struct cl_object	lo_cl;
	/**
	 * Serializes object operations with transitions between layout types.
	 *
	 * This semaphore is taken in shared mode by all object methods, and
	 * is taken in exclusive mode when the object type is changed.
	 *
	 * \see lov_object::lo_type
	 */
	struct rw_semaphore	lo_type_guard;
	/**
	 * Type of an object. Protected by lov_object::lo_type_guard.
	 */
	enum lov_layout_type	lo_type;
	/**
	 * True if the layout is invalid. This bit is cleared when the
	 * layout lock is granted.
	 */
	bool			lo_layout_invalid;
	/**
	 * How many IOs are ongoing on this object. The layout can be
	 * changed only if there is no active IO.
	 */
	atomic_t		lo_active_ios;
	/**
	 * Waitq used to wait until no one else is using lo_lsm.
	 */
	wait_queue_head_t	lo_waitq;
	/**
	 * Layout metadata. NULL if the layout is empty.
	 */
	struct lov_stripe_md   *lo_lsm;

	union lov_layout_state {
		struct lov_layout_raid0 {
			unsigned		lo_nr;
			/**
			 * When this is true, lov_object::lo_attr contains
			 * valid, up-to-date attributes for the top-level
			 * object. This field is reset to 0 when attributes
			 * of any sub-object change.
			 */
			int			lo_attr_valid;
			/**
			 * Array of sub-objects. Allocated when the
			 * top-object is created (lov_init_raid0()).
			 *
			 * The top-object is a strict master of its
			 * sub-objects: it is created before them, and
			 * outlives its children (the latter is necessary so
			 * that basic functions like cl_object_top() always
			 * work). The top-object keeps a reference on every
			 * sub-object.
			 *
			 * When the top-object is destroyed
			 * (lov_delete_raid0()) it releases its reference to
			 * each sub-object and waits until the latter is
			 * finally destroyed.
			 */
			struct lovsub_object  **lo_sub;
			/**
			 * protects lo_sub
			 */
			spinlock_t		lo_sub_lock;
			/**
			 * Cached object attributes, built from sub-object
			 * attributes.
			 */
			struct cl_attr		lo_attr;
		} raid0;
		struct lov_layout_state_empty {
		} empty;
		struct lov_layout_state_released {
		} released;
	} u;
	/**
	 * Thread that acquired lov_object::lo_type_guard in exclusive
	 * mode.
	 */
	struct task_struct     *lo_owner;
};
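/*
 * The double-dispatch described above is conventionally implemented with a
 * per-layout operations vector indexed by lov_object::lo_type. A condensed
 * sketch (hypothetical member names; the real table lives in lov_object.c):
 *
 * \code
 *	struct lov_layout_operations {
 *		int  (*llo_init)(const struct lu_env *env,
 *				 struct lov_device *dev,
 *				 struct lov_object *lov,
 *				 const struct cl_object_conf *conf,
 *				 union lov_layout_state *state);
 *		void (*llo_fini)(const struct lu_env *env,
 *				 struct lov_object *lov,
 *				 union lov_layout_state *state);
 *	};
 *
 *	static const struct lov_layout_operations lov_dispatch[] = {
 *		[LLT_EMPTY]    = { ... },
 *		[LLT_RAID0]    = { ... },
 *		[LLT_RELEASED] = { ... },
 *	};
 *
 *	// object methods then dispatch under lo_type_guard, e.g.:
 *	// lov_dispatch[lov->lo_type].llo_init(env, dev, lov, conf, &lov->u);
 * \endcode
 */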
/**
 * Flags that a top-lock can set on each of its sub-locks.
 */
enum lov_sub_flags {
	/** Top-lock acquired a hold (cl_lock_hold()) on a sub-lock. */
	LSF_HELD = 1 << 0
};

/**
 * State lov_lock keeps for each sub-lock.
 */
struct lov_lock_sub {
	/** sub-lock itself */
	struct lovsub_lock	*sub_lock;
	/** Per-sub-lock flags, taken from enum lov_sub_flags */
	unsigned		 sub_flags;
	struct cl_lock_descr	 sub_descr;
	struct cl_lock_descr	 sub_got;
};
/**
 * lov-specific lock state.
 */
struct lov_lock {
	struct cl_lock_slice	 lls_cl;
	/** Number of sub-locks in this lock */
	unsigned		 lls_nr;
	/**
	 * Number of existing sub-locks.
	 */
	unsigned		 lls_nr_filled;
	/**
	 * Set when a sub-lock was canceled while the top-lock was being
	 * unlocked.
	 */
	unsigned int		 lls_cancel_race:1;
	/**
	 * An array of sub-locks.
	 *
	 * There are two issues with managing sub-locks:
	 *
	 *     - sub-locks are concurrently canceled, and
	 *
	 *     - sub-locks are shared with other top-locks.
	 *
	 * To manage cancellation, the top-lock acquires a hold on a sub-lock
	 * (lov_sublock_adopt()) when the latter is inserted into
	 * lov_lock::lls_sub[]. This hold is released (lov_sublock_release())
	 * when the top-lock is going into the CLS_CACHED state or is
	 * destroyed. The hold prevents the sub-lock from being canceled.
	 *
	 * Sub-lock sharing means, among other things, that a top-lock that
	 * is in the process of creation (i.e., not yet inserted into the
	 * lock list) is already accessible to other threads once at least
	 * one of its sub-locks is created, see lov_lock_sub_init().
	 *
	 * A sub-lock can be in one of the following states:
	 *
	 *     - it doesn't exist: lov_lock::lls_sub[]::sub_lock == NULL.
	 *       Such a sub-lock was either never created (the top-lock is
	 *       in the CLS_NEW state), or it was created, then canceled,
	 *       then destroyed (lov_lock_unlink() cleared the sub-lock
	 *       pointer in the top-lock).
	 *
	 *     - the sub-lock exists and is on hold
	 *       (lov_lock::lls_sub[]::sub_flags & LSF_HELD). This is the
	 *       normal state of a sub-lock while the top-lock is in the
	 *       CLS_HELD or CLS_CACHED state.
	 *
	 *     - the sub-lock exists, but is not held by the top-lock. This
	 *       happens after the top-lock released its hold on sub-locks
	 *       before going into cache (lov_lock_unuse()).
	 *
	 * \todo To support wide-striping, the array has to be replaced with
	 * a set of queues to avoid scanning.
	 */
	struct lov_lock_sub	*lls_sub;
	/**
	 * Original description with which the lock was enqueued.
	 */
	struct cl_lock_descr	 lls_orig;
};
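/*
 * A consumer walking the sub-lock array typically distinguishes the three
 * states listed above. A sketch (assuming a top-lock 'lck' of type
 * struct lov_lock):
 *
 * \code
 *	for (i = 0; i < lck->lls_nr; ++i) {
 *		struct lov_lock_sub *lls = &lck->lls_sub[i];
 *
 *		if (lls->sub_lock == NULL)
 *			continue;	// never created, or destroyed
 *		if (lls->sub_flags & LSF_HELD)
 *			...		// held: guaranteed to exist
 *		else
 *			...		// weak reference only
 *	}
 * \endcode
 */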
struct lov_page {
	struct cl_page_slice	lps_cl;
};

/*
 * Bottom half.
 */
struct lovsub_device {
	struct cl_device	 acid_cl;
	struct lov_device	*acid_super;
	int			 acid_idx;
	struct cl_device	*acid_next;
};
struct lovsub_object {
	struct cl_object_header	 lso_header;
	struct cl_object	 lso_cl;
	struct lov_object	*lso_super;
	int			 lso_index;
};
/**
 * A link between a top-lock and a sub-lock. A separate data-structure is
 * necessary because top-locks and sub-locks are in an M:N relationship.
 *
 * \todo This can be optimized for the (by far) most frequent case of a
 * single top-lock per sub-lock.
 */
struct lov_lock_link {
	struct lov_lock		*lll_super;
	/** An index within the parent lock. */
	int			 lll_idx;
	/**
	 * A linkage into the per-sub-lock list of all corresponding
	 * top-locks, hanging off lovsub_lock::lss_parents.
	 */
	struct list_head	 lll_list;
};
/**
 * Lock state at the lovsub layer.
 */
struct lovsub_lock {
	struct cl_lock_slice	 lss_cl;
	/**
	 * List of top-locks that have the given sub-lock as their part.
	 * Protected by the cl_lock::cll_guard mutex.
	 */
	struct list_head	 lss_parents;
	/**
	 * Top-lock that initiated the current operation on this sub-lock.
	 * This is only set during top-to-bottom lock operations like enqueue,
	 * and is used to optimize state change notification. Protected by
	 * the cl_lock::cll_guard mutex.
	 *
	 * \see lovsub_lock_state_one().
	 */
	struct cl_lock		*lss_active;
};
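/*
 * Walking all top-locks of a sub-lock 'sub' then looks as follows (a
 * sketch; the sub-lock's cl_lock::cll_guard mutex must be held):
 *
 * \code
 *	struct lov_lock_link *scan;
 *
 *	list_for_each_entry(scan, &sub->lss_parents, lll_list) {
 *		struct lov_lock *top = scan->lll_super;
 *
 *		...	// 'top' is one of the M top-locks sharing 'sub'
 *	}
 * \endcode
 */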
/**
 * Describes the environment settings for sub-locks.
 */
struct lov_sublock_env {
	const struct lu_env	*lse_env;
	struct cl_io		*lse_io;
	struct lov_io_sub	*lse_sub;
};

struct lovsub_page {
	struct cl_page_slice	lsb_cl;
};
struct lov_thread_info {
	struct cl_object_conf	lti_stripe_conf;
	struct lu_fid		lti_fid;
	struct cl_lock_descr	lti_ldescr;
	struct ost_lvb		lti_lvb;
	struct cl_2queue	lti_cl2q;
	struct cl_lock_closure	lti_closure;
	wait_queue_t		lti_waiter;
};
/**
 * State that lov_io maintains for every sub-io.
 */
struct lov_io_sub {
	int			 sub_stripe;
	/**
	 * sub-io for a stripe. Ideally sub-ios can be stopped and resumed
	 * independently, with lov acting as a scheduler to maximize overall
	 * throughput.
	 */
	struct cl_io		*sub_io;
	/**
	 * Linkage into a list (hanging off lov_io::lis_active) of all
	 * sub-ios active for the current IO iteration.
	 */
	struct list_head	 sub_linkage;
	/**
	 * true, iff cl_io_init() was successfully executed against
	 * lov_io_sub::sub_io.
	 */
	int			 sub_io_initialized;
	/**
	 * True, iff lov_io_sub::sub_io and lov_io_sub::sub_env weren't
	 * allocated, but were borrowed from the per-device emergency pool.
	 */
	int			 sub_borrowed;
	/**
	 * environment, in which the sub-io executes.
	 */
	struct lu_env		*sub_env;
	/**
	 * environment's refcheck.
	 *
	 * \see cl_env_get()
	 */
	int			 sub_refcheck;
};
/**
 * IO state private to LOV.
 */
struct lov_io {
	/** super-class */
	struct cl_io_slice	 lis_cl;
	/**
	 * Pointer to the object slice. This is a duplicate of
	 * lov_io::lis_cl::cis_object.
	 */
	struct lov_object	*lis_object;
	/**
	 * Original end-of-io position for this IO, set by the upper layer as
	 * cl_io::u::ci_rw::pos + cl_io::u::ci_rw::count. lov remembers this,
	 * changes pos and count to fit the IO into a single stripe, and uses
	 * the saved value to determine when IO iterations have to stop.
	 *
	 * This is used only for CIT_READ and CIT_WRITE IOs.
	 */
	loff_t			 lis_io_endpos;

	/**
	 * Starting position within a file, for the current io loop
	 * iteration (stripe), used by cl_io_loop().
	 */
	loff_t			 lis_pos;
	/**
	 * End position within a file, for the current stripe io. This is
	 * exclusive (i.e., next offset after the last byte affected by io).
	 */
	loff_t			 lis_endpos;

	int			 lis_stripe_count;
	int			 lis_active_subios;

	/**
	 * the index of lis_single_subio in the lis_subs array
	 */
	int			 lis_single_subio_index;
	struct cl_io		 lis_single_subio;

	/**
	 * size of the lis_subs array, actually the highest stripe #
	 */
	int			 lis_nr_subios;
	struct lov_io_sub	*lis_subs;
	/**
	 * List of active sub-ios.
	 */
	struct list_head	 lis_active;
};
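/*
 * A sketch of the resulting iteration model for CIT_READ/CIT_WRITE (the
 * real loop is cl_io_loop(), with the per-stripe clipping done by the lov
 * io methods in lov_io.c):
 *
 * \code
 *	// lis_io_endpos remembers the original end of the IO, while
 *	// [lis_pos, lis_endpos) is clipped to one stripe per iteration:
 *	while (lio->lis_pos < lio->lis_io_endpos) {
 *		...	// set up sub-ios covering the current stripe
 *		...	// submit them, then advance lis_pos
 *	}
 * \endcode
 */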
struct lov_session {
	struct lov_io		ls_io;
	struct lov_sublock_env	ls_subenv;
};
/**
 * State of transfer for lov.
 */
struct lov_req {
	struct cl_req_slice	lr_cl;
};

/**
 * State of transfer for lovsub.
 */
struct lovsub_req {
	struct cl_req_slice	lsrq_cl;
};
extern struct lu_device_type lov_device_type;
extern struct lu_device_type lovsub_device_type;

extern struct lu_context_key lov_key;
extern struct lu_context_key lov_session_key;

extern struct kmem_cache *lov_lock_kmem;
extern struct kmem_cache *lov_object_kmem;
extern struct kmem_cache *lov_thread_kmem;
extern struct kmem_cache *lov_session_kmem;
extern struct kmem_cache *lov_req_kmem;

extern struct kmem_cache *lovsub_lock_kmem;
extern struct kmem_cache *lovsub_object_kmem;
extern struct kmem_cache *lovsub_req_kmem;

extern struct kmem_cache *lov_lock_link_kmem;
int lov_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf);
int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
		       const struct lu_object_conf *conf);
int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_lock *lock, const struct cl_io *io);
int lov_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io);
int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
		     struct cl_lock *lock, const struct cl_io *io);

int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
			struct cl_lock *lock, const struct cl_io *io);
int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
			struct cl_lock *lock, const struct cl_io *io);
int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
		      struct cl_io *io);
int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
		      struct cl_io *io);
int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
			 struct cl_io *io);
void lov_lock_unlink(const struct lu_env *env, struct lov_lock_link *link,
		     struct lovsub_lock *sub);

struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio,
			       int stripe);
void lov_sub_put(struct lov_io_sub *sub);
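/*
 * lov_sub_get() and lov_sub_put() are used in pairs; a minimal usage sketch
 * (assuming 'lio' and 'stripe' are already in scope):
 *
 * \code
 *	struct lov_io_sub *sub;
 *
 *	sub = lov_sub_get(env, lio, stripe);
 *	if (!IS_ERR(sub)) {
 *		...	// operate on sub->sub_io within sub->sub_env
 *		lov_sub_put(sub);
 *	}
 * \endcode
 */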
int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
		       struct lovsub_lock *sublock,
		       const struct cl_lock_descr *d, int idx);
int lov_page_init(const struct lu_env *env, struct cl_object *ob,
		  struct cl_page *page, struct page *vmpage);
int lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
		     struct cl_page *page, struct page *vmpage);

int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
			struct cl_page *page, struct page *vmpage);
int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
			struct cl_page *page, struct page *vmpage);
struct lu_object *lov_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *dev);
struct lu_object *lovsub_object_alloc(const struct lu_env *env,
				      const struct lu_object_header *hdr,
				      struct lu_device *dev);

struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
					 struct lov_lock *lck,
					 struct lovsub_lock *sub);
struct lov_io_sub *lov_page_subio(const struct lu_env *env,
				  struct lov_io *lio,
				  const struct cl_page_slice *slice);

void lov_lsm_decref(struct lov_object *lov, struct lov_stripe_md *lsm);
struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov);
#define lov_foreach_target(lov, var)			\
	for (var = 0; var < lov_targets_nr(lov); ++var)
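/*
 * Usage sketch: iterating over all sub-devices of a lov device 'ld':
 *
 * \code
 *	int i;
 *
 *	lov_foreach_target(ld, i) {
 *		struct lovsub_device *sub = ld->ld_target[i];
 *
 *		...
 *	}
 * \endcode
 */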
/*****************************************************************************
 *
 * Type conversions.
 *
 * Accessors.
 *
 */
static inline struct lov_session *lov_env_session(const struct lu_env *env)
{
	struct lov_session *ses;

	ses = lu_context_key_get(env->le_ses, &lov_session_key);
	LASSERT(ses != NULL);
	return ses;
}

static inline struct lov_io *lov_env_io(const struct lu_env *env)
{
	return &lov_env_session(env)->ls_io;
}
static inline int lov_is_object(const struct lu_object *obj)
{
	return obj->lo_dev->ld_type == &lov_device_type;
}

static inline int lovsub_is_object(const struct lu_object *obj)
{
	return obj->lo_dev->ld_type == &lovsub_device_type;
}
static inline struct lu_device *lov2lu_dev(struct lov_device *lov)
{
	return &lov->ld_cl.cd_lu_dev;
}

static inline struct lov_device *lu2lov_dev(const struct lu_device *d)
{
	LINVRNT(d->ld_type == &lov_device_type);
	return container_of0(d, struct lov_device, ld_cl.cd_lu_dev);
}

static inline struct cl_device *lovsub2cl_dev(struct lovsub_device *lovsub)
{
	return &lovsub->acid_cl;
}

static inline struct lu_device *lovsub2lu_dev(struct lovsub_device *lovsub)
{
	return &lovsub2cl_dev(lovsub)->cd_lu_dev;
}

static inline struct lovsub_device *lu2lovsub_dev(const struct lu_device *d)
{
	LINVRNT(d->ld_type == &lovsub_device_type);
	return container_of0(d, struct lovsub_device, acid_cl.cd_lu_dev);
}

static inline struct lovsub_device *cl2lovsub_dev(const struct cl_device *d)
{
	LINVRNT(d->cd_lu_dev.ld_type == &lovsub_device_type);
	return container_of0(d, struct lovsub_device, acid_cl);
}
static inline struct lu_object *lov2lu(struct lov_object *lov)
{
	return &lov->lo_cl.co_lu;
}

static inline struct cl_object *lov2cl(struct lov_object *lov)
{
	return &lov->lo_cl;
}

static inline struct lov_object *lu2lov(const struct lu_object *obj)
{
	LINVRNT(lov_is_object(obj));
	return container_of0(obj, struct lov_object, lo_cl.co_lu);
}

static inline struct lov_object *cl2lov(const struct cl_object *obj)
{
	LINVRNT(lov_is_object(&obj->co_lu));
	return container_of0(obj, struct lov_object, lo_cl);
}

static inline struct lu_object *lovsub2lu(struct lovsub_object *los)
{
	return &los->lso_cl.co_lu;
}

static inline struct cl_object *lovsub2cl(struct lovsub_object *los)
{
	return &los->lso_cl;
}

static inline struct lovsub_object *cl2lovsub(const struct cl_object *obj)
{
	LINVRNT(lovsub_is_object(&obj->co_lu));
	return container_of0(obj, struct lovsub_object, lso_cl);
}

static inline struct lovsub_object *lu2lovsub(const struct lu_object *obj)
{
	LINVRNT(lovsub_is_object(obj));
	return container_of0(obj, struct lovsub_object, lso_cl.co_lu);
}
static inline struct lovsub_lock *
cl2lovsub_lock(const struct cl_lock_slice *slice)
{
	LINVRNT(lovsub_is_object(&slice->cls_obj->co_lu));
	return container_of(slice, struct lovsub_lock, lss_cl);
}

static inline struct lovsub_lock *cl2sub_lock(const struct cl_lock *lock)
{
	const struct cl_lock_slice *slice;

	slice = cl_lock_at(lock, &lovsub_device_type);
	LASSERT(slice != NULL);
	return cl2lovsub_lock(slice);
}

static inline struct lov_lock *cl2lov_lock(const struct cl_lock_slice *slice)
{
	LINVRNT(lov_is_object(&slice->cls_obj->co_lu));
	return container_of(slice, struct lov_lock, lls_cl);
}

static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice)
{
	LINVRNT(lov_is_object(&slice->cpl_obj->co_lu));
	return container_of0(slice, struct lov_page, lps_cl);
}

static inline struct lov_req *cl2lov_req(const struct cl_req_slice *slice)
{
	return container_of0(slice, struct lov_req, lr_cl);
}

static inline struct lovsub_page *
cl2lovsub_page(const struct cl_page_slice *slice)
{
	LINVRNT(lovsub_is_object(&slice->cpl_obj->co_lu));
	return container_of0(slice, struct lovsub_page, lsb_cl);
}

static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
{
	return container_of0(slice, struct lovsub_req, lsrq_cl);
}

static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice)
{
	return slice->cpl_page->cp_child;
}
static inline struct lov_io *cl2lov_io(const struct lu_env *env,
				       const struct cl_io_slice *ios)
{
	struct lov_io *lio;

	lio = container_of(ios, struct lov_io, lis_cl);
	LASSERT(lio == lov_env_io(env));
	return lio;
}

static inline int lov_targets_nr(const struct lov_device *lov)
{
	return lov->ld_lov->desc.ld_tgt_count;
}
static inline struct lov_thread_info *lov_env_info(const struct lu_env *env)
{
	struct lov_thread_info *info;

	info = lu_context_key_get(&env->le_ctx, &lov_key);
	LASSERT(info != NULL);
	return info;
}
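/*
 * lov_env_info() returns per-thread scratch space, so large temporaries do
 * not have to live on the kernel stack or be allocated per call; for
 * example (a sketch):
 *
 * \code
 *	struct lov_thread_info *info = lov_env_info(env);
 *	struct cl_lock_descr *descr = &info->lti_ldescr;
 *
 *	...	// fill *descr in place of a stack or heap allocation
 * \endcode
 */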
static inline struct lov_layout_raid0 *lov_r0(struct lov_object *lov)
{
	LASSERT(lov->lo_type == LLT_RAID0);
	LASSERT(lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC ||
		lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC_V3);
	return &lov->u.raid0;
}

/** @} lov */

#endif