/*
 * In-kernel transcendent memory (generic implementation)
 *
 * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
 *
 * The primary purpose of Transcendent Memory ("tmem") is to map
 * object-oriented "handles" (triples containing a pool id, an object id,
 * and an index) to pages in page-accessible memory (PAM).  Tmem references
 * the PAM pages via an abstract "pampd" (PAM page-descriptor), which can be
 * operated on by a set of functions (pamops).  Each pampd contains some
 * representation of PAGE_SIZE bytes worth of data.  Tmem must support
 * potentially millions of pages and must be able to insert, find, and
 * delete these pages at a potential frequency of thousands per second
 * concurrently across many CPUs (and, if used with KVM, across many vcpus
 * across many guests).
 * Tmem is tracked with a hierarchy of data structures, organized by
 * the elements in a handle-tuple: pool_id, object_id, and page index.
 * One or more "clients" (e.g. guests) each provide one or more tmem_pools.
 * Each pool contains a hash table of rb_trees of tmem_objs.  Each
 * tmem_obj contains a radix-tree-like tree of pointers, with intermediate
 * nodes called tmem_objnodes.  Each leaf pointer in this tree points to
 * a pampd, which is accessible only through a small set of callbacks
 * registered by the PAM implementation (see tmem_register_pamops).  Tmem
 * does all memory allocation via a set of callbacks registered by the tmem
 * host implementation (e.g. see tmem_register_hostops).
 */

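/*
 * Conceptual sketch of a handle lookup (the "client" and "pools[]" names
 * here are hypothetical; in this file the host always passes the pool
 * pointer in directly):
 *
 *	pool  = client->pools[pool_id];      pool: hash table of rb_trees
 *	obj   = rb_tree search for object_id in one bucket of pool
 *	pampd = radix-tree-like search for index within obj
 */
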
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

#include "tmem.h"

/* data structure sentinels used for debugging... see tmem.h */
#define POOL_SENTINEL 0x87658765
#define OBJ_SENTINEL 0x12345678
#define OBJNODE_SENTINEL 0xfedcba09

/*
 * A tmem host implementation must use this function to register callbacks
 * for memory allocation.
 */
static struct tmem_hostops tmem_hostops;

static void tmem_objnode_tree_init(void);

void tmem_register_hostops(struct tmem_hostops *m)
{
	tmem_objnode_tree_init();
	tmem_hostops = *m;
}

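/*
 * Illustrative registration sketch (hypothetical "myhost_*" callbacks,
 * not part of this file; the field names are the ones this file invokes
 * through tmem_hostops):
 *
 *	static struct tmem_hostops myhost_hostops = {
 *		.obj_alloc = myhost_obj_alloc,
 *		.obj_free = myhost_obj_free,
 *		.objnode_alloc = myhost_objnode_alloc,
 *		.objnode_free = myhost_objnode_free,
 *	};
 *
 *	tmem_register_hostops(&myhost_hostops);
 */
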
/*
 * A tmem host implementation must use this function to register
 * callbacks for a page-accessible memory (PAM) implementation.
 */
static struct tmem_pamops tmem_pamops;

void tmem_register_pamops(struct tmem_pamops *m)
{
	tmem_pamops = *m;
}

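/*
 * Illustrative registration sketch (hypothetical "mypam_*" callbacks,
 * not part of this file): .create builds a pampd from a struct page,
 * .get_data copies a pampd back to a page, and .free discards a pampd;
 * these are the three pamops fields this file invokes.
 *
 *	static struct tmem_pamops mypam_pamops = {
 *		.create = mypam_create,
 *		.get_data = mypam_get_data,
 *		.free = mypam_free,
 *	};
 *
 *	tmem_register_pamops(&mypam_pamops);
 */
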
/*
 * Oids are potentially very sparse and tmem_objs may have an indeterminately
 * short life, being added and deleted at a relatively high frequency.
 * So an rb_tree is an ideal data structure to manage tmem_objs.  But because
 * of the potentially huge number of tmem_objs, each pool manages a hashtable
 * of rb_trees to reduce search, insert, delete, and rebalancing time.
 * Each hashbucket also has a lock to manage concurrent access.
 *
 * The following routines manage tmem_objs.  When any tmem_obj is accessed,
 * the hashbucket lock must be held.
 */

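/*
 * Locking sketch assumed by the routines below: the caller hashes the oid
 * to choose a bucket, then holds that bucket's lock across the rb_tree
 * operation, e.g.:
 *
 *	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
 *	spin_lock(&hb->lock);
 *	obj = tmem_obj_find(hb, oidp);
 *	... use or modify obj ...
 *	spin_unlock(&hb->lock);
 */
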
/* searches for object==oid in pool; caller must hold the hashbucket lock */
static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
					struct tmem_oid *oidp)
{
	struct rb_node *rbnode;
	struct tmem_obj *obj;

	rbnode = hb->obj_rb_root.rb_node;
	while (rbnode) {
		BUG_ON(RB_EMPTY_NODE(rbnode));
		obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
		switch (tmem_oid_compare(oidp, &obj->oid)) {
		case 0: /* equal */
			goto out;
		case -1:
			rbnode = rbnode->rb_left;
			break;
		case 1:
			rbnode = rbnode->rb_right;
			break;
		}
	}
	obj = NULL;
out:
	return obj;
}

static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *);

/* free an object that has no more pampds in it */
static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
{
	struct tmem_pool *pool;

	BUG_ON(obj == NULL);
	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pampd_count > 0);
	pool = obj->pool;
	BUG_ON(pool == NULL);
	if (obj->objnode_tree_root != NULL) /* may be "stump" with no leaves */
		tmem_pampd_destroy_all_in_obj(obj);
	BUG_ON(obj->objnode_tree_root != NULL);
	BUG_ON((long)obj->objnode_count != 0);
	atomic_dec(&pool->obj_count);
	BUG_ON(atomic_read(&pool->obj_count) < 0);
	INVERT_SENTINEL(obj, OBJ);
	obj->pool = NULL;
	tmem_oid_set_invalid(&obj->oid);
	rb_erase(&obj->rb_tree_node, &hb->obj_rb_root);
}

/*
 * initialize and insert a tmem_obj (called only if find failed)
 */
static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
					struct tmem_pool *pool,
					struct tmem_oid *oidp)
{
	struct rb_root *root = &hb->obj_rb_root;
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct tmem_obj *this;

	BUG_ON(pool == NULL);
	atomic_inc(&pool->obj_count);
	obj->objnode_tree_height = 0;
	obj->objnode_tree_root = NULL;
	obj->pool = pool;
	obj->oid = *oidp;
	obj->objnode_count = 0;
	obj->pampd_count = 0;
	SET_SENTINEL(obj, OBJ);
	while (*new) {
		BUG_ON(RB_EMPTY_NODE(*new));
		this = rb_entry(*new, struct tmem_obj, rb_tree_node);
		parent = *new;
		switch (tmem_oid_compare(oidp, &this->oid)) {
		case 0:
			BUG(); /* already present; should never happen! */
			break;
		case -1:
			new = &(*new)->rb_left;
			break;
		case 1:
			new = &(*new)->rb_right;
			break;
		}
	}
	rb_link_node(&obj->rb_tree_node, parent, new);
	rb_insert_color(&obj->rb_tree_node, root);
}

/*
 * Tmem is managed as a set of tmem_pools with certain attributes, such as
 * "ephemeral" vs "persistent".  These attributes apply to all tmem_objs
 * and all pampds that belong to a tmem_pool.  A tmem_pool is created
 * or deleted relatively rarely (for example, when a filesystem is
 * mounted or unmounted).
 */

/* flush all data from a pool and, optionally, free it */
static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
{
	struct rb_node *rbnode;
	struct tmem_obj *obj;
	struct tmem_hashbucket *hb = &pool->hashbucket[0];
	int i;

	BUG_ON(pool == NULL);
	for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
		spin_lock(&hb->lock);
		rbnode = rb_first(&hb->obj_rb_root);
		while (rbnode != NULL) {
			obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
			rbnode = rb_next(rbnode);
			tmem_pampd_destroy_all_in_obj(obj);
			tmem_obj_free(obj, hb);
			(*tmem_hostops.obj_free)(obj, pool);
		}
		spin_unlock(&hb->lock);
	}
	if (destroy)
		list_del(&pool->pool_list);
}

/*
 * A tmem_obj contains a radix-tree-like tree in which the intermediate
 * nodes are called tmem_objnodes.  (The kernel lib/radix-tree.c implementation
 * is very specialized and tuned for specific uses and is not particularly
 * suited for use from this code, though some code from the core algorithms has
 * been reused, thus the copyright notices below).  Each tmem_objnode contains
 * a set of pointers which point to either a set of intermediate tmem_objnodes
 * or a set of pampds.
 *
 * Portions Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Portions Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
 */

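/*
 * Worked example, assuming OBJNODE_TREE_MAP_SHIFT == 6 (64 slots per
 * objnode): a tree of height 2 covers indices 0..4095, and index 0x2c5
 * is resolved by consuming OBJNODE_TREE_MAP_SHIFT bits per level,
 * most-significant bits first:
 *
 *	height 2 (shift 6): offset = (0x2c5 >> 6) & 0x3f = 0x0b
 *	height 1 (shift 0): offset = (0x2c5 >> 0) & 0x3f = 0x05
 *
 * so the pampd lives at root->slots[0x0b]->slots[0x05].
 */
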
struct tmem_objnode_tree_path {
	struct tmem_objnode *objnode;
	int offset;
};

/* objnode height_to_maxindex translation */
static unsigned long tmem_objnode_tree_h2max[OBJNODE_TREE_MAX_PATH + 1];

static void tmem_objnode_tree_init(void)
{
	unsigned int ht, tmp;

	for (ht = 0; ht < ARRAY_SIZE(tmem_objnode_tree_h2max); ht++) {
		tmp = ht * OBJNODE_TREE_MAP_SHIFT;
		if (tmp >= OBJNODE_TREE_INDEX_BITS)
			tmem_objnode_tree_h2max[ht] = ~0UL;
		else
			tmem_objnode_tree_h2max[ht] =
			    (~0UL >> (OBJNODE_TREE_INDEX_BITS - tmp - 1)) >> 1;
	}
}

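/*
 * For example, with OBJNODE_TREE_MAP_SHIFT == 6 and 64-bit longs, this
 * yields h2max[0] = 0, h2max[1] = 63, h2max[2] = 4095, ...: a tree of
 * height ht can address indices up to 64^ht - 1, clamped to ~0UL once
 * ht * OBJNODE_TREE_MAP_SHIFT reaches OBJNODE_TREE_INDEX_BITS.
 */
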
static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
{
	struct tmem_objnode *objnode;

	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pool == NULL);
	ASSERT_SENTINEL(obj->pool, POOL);
	objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
	if (unlikely(objnode == NULL))
		goto out;
	objnode->obj = obj;
	SET_SENTINEL(objnode, OBJNODE);
	memset(&objnode->slots, 0, sizeof(objnode->slots));
	objnode->slots_in_use = 0;
	obj->objnode_count++;
out:
	return objnode;
}

static void tmem_objnode_free(struct tmem_objnode *objnode)
{
	struct tmem_pool *pool;
	int i;

	BUG_ON(objnode == NULL);
	for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++)
		BUG_ON(objnode->slots[i] != NULL);
	ASSERT_SENTINEL(objnode, OBJNODE);
	INVERT_SENTINEL(objnode, OBJNODE);
	BUG_ON(objnode->obj == NULL);
	ASSERT_SENTINEL(objnode->obj, OBJ);
	pool = objnode->obj->pool;
	BUG_ON(pool == NULL);
	ASSERT_SENTINEL(pool, POOL);
	objnode->obj->objnode_count--;
	objnode->obj = NULL;
	(*tmem_hostops.objnode_free)(objnode, pool);
}

/*
 * lookup index in object and return associated pampd (or NULL if not found)
 */
static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
{
	unsigned int height, shift;
	struct tmem_objnode **slot = NULL;

	BUG_ON(obj == NULL);
	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pool == NULL);
	ASSERT_SENTINEL(obj->pool, POOL);

	height = obj->objnode_tree_height;
	if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height])
		goto out;
	if (height == 0 && obj->objnode_tree_root) {
		slot = &obj->objnode_tree_root;
		goto out;
	}
	shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
	slot = &obj->objnode_tree_root;
	while (height > 0) {
		if (*slot == NULL)
			goto out;
		slot = (struct tmem_objnode **)
			((*slot)->slots +
				((index >> shift) & OBJNODE_TREE_MAP_MASK));
		shift -= OBJNODE_TREE_MAP_SHIFT;
		height--;
	}
out:
	return slot != NULL ? *slot : NULL;
}

static int tmem_pampd_add_to_obj(struct tmem_obj *obj, uint32_t index,
					void *pampd)
{
	int ret = 0;
	struct tmem_objnode *objnode = NULL, *newnode, *slot;
	unsigned int height, shift;
	int offset = 0;

	/* if necessary, extend the tree to be higher */
	if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height]) {
		height = obj->objnode_tree_height + 1;
		while (index > tmem_objnode_tree_h2max[height])
			height++;
		if (obj->objnode_tree_root == NULL) {
			obj->objnode_tree_height = height;
			goto insert;
		}
		do {
			newnode = tmem_objnode_alloc(obj);
			if (!newnode) {
				ret = -ENOMEM;
				goto out;
			}
			newnode->slots[0] = obj->objnode_tree_root;
			newnode->slots_in_use = 1;
			obj->objnode_tree_root = newnode;
			obj->objnode_tree_height++;
		} while (height > obj->objnode_tree_height);
	}
insert:
	slot = obj->objnode_tree_root;
	height = obj->objnode_tree_height;
	shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
	while (height > 0) {
		if (slot == NULL) {
			/* add a child objnode */
			slot = tmem_objnode_alloc(obj);
			if (!slot) {
				ret = -ENOMEM;
				goto out;
			}
			if (objnode) {
				objnode->slots[offset] = slot;
				objnode->slots_in_use++;
			} else
				obj->objnode_tree_root = slot;
		}
		/* go down a level */
		offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
		objnode = slot;
		slot = objnode->slots[offset];
		shift -= OBJNODE_TREE_MAP_SHIFT;
		height--;
	}
	BUG_ON(slot != NULL);
	if (objnode) {
		objnode->slots_in_use++;
		objnode->slots[offset] = pampd;
	} else
		obj->objnode_tree_root = pampd;
	obj->pampd_count++;
out:
	return ret;
}

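/*
 * Height-extension example: if a tree of height 1 (indices 0..63)
 * receives a put at index 64, the extension loop above allocates a new
 * root objnode, makes the old root its slots[0] child, and bumps the
 * height to 2, so existing entries keep their low-order slot positions.
 */
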
static void *tmem_pampd_delete_from_obj(struct tmem_obj *obj, uint32_t index)
{
	struct tmem_objnode_tree_path path[OBJNODE_TREE_MAX_PATH + 1];
	struct tmem_objnode_tree_path *pathp = path;
	struct tmem_objnode *slot = NULL;
	unsigned int height, shift;
	int offset;

	BUG_ON(obj == NULL);
	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pool == NULL);
	ASSERT_SENTINEL(obj->pool, POOL);
	height = obj->objnode_tree_height;
	if (index > tmem_objnode_tree_h2max[height])
		goto out;
	slot = obj->objnode_tree_root;
	if (height == 0 && obj->objnode_tree_root) {
		obj->objnode_tree_root = NULL;
		goto out;
	}
	shift = (height - 1) * OBJNODE_TREE_MAP_SHIFT;
	pathp->objnode = NULL;
	do {
		if (slot == NULL)
			goto out;
		pathp++;
		offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
		pathp->offset = offset;
		pathp->objnode = slot;
		slot = slot->slots[offset];
		shift -= OBJNODE_TREE_MAP_SHIFT;
		height--;
	} while (height > 0);
	if (slot == NULL)
		goto out;
	while (pathp->objnode) {
		pathp->objnode->slots[pathp->offset] = NULL;
		pathp->objnode->slots_in_use--;
		if (pathp->objnode->slots_in_use) {
			if (pathp->objnode == obj->objnode_tree_root) {
				while (obj->objnode_tree_height > 0 &&
				  obj->objnode_tree_root->slots_in_use == 1 &&
				  obj->objnode_tree_root->slots[0]) {
					struct tmem_objnode *to_free =
						obj->objnode_tree_root;

					obj->objnode_tree_root =
							to_free->slots[0];
					obj->objnode_tree_height--;
					to_free->slots[0] = NULL;
					to_free->slots_in_use = 0;
					tmem_objnode_free(to_free);
				}
			}
			goto out;
		}
		tmem_objnode_free(pathp->objnode); /* 0 slots used, free it */
		pathp--;
	}
	obj->objnode_tree_height = 0;
	obj->objnode_tree_root = NULL;
out:
	if (slot != NULL)
		obj->pampd_count--;
	BUG_ON(obj->pampd_count < 0);
	return slot;
}

/* recursively walk the objnode_tree destroying pampds and objnodes */
static void tmem_objnode_node_destroy(struct tmem_obj *obj,
					struct tmem_objnode *objnode,
					unsigned int ht)
{
	int i;

	if (ht == 0)
		return;
	for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++) {
		if (objnode->slots[i]) {
			if (ht == 1) {
				obj->pampd_count--;
				(*tmem_pamops.free)(objnode->slots[i],
							obj->pool);
				objnode->slots[i] = NULL;
				continue;
			}
			tmem_objnode_node_destroy(obj, objnode->slots[i], ht-1);
			tmem_objnode_free(objnode->slots[i]);
			objnode->slots[i] = NULL;
		}
	}
}

static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
{
	if (obj->objnode_tree_root == NULL)
		return;
	if (obj->objnode_tree_height == 0) {
		obj->pampd_count--;
		(*tmem_pamops.free)(obj->objnode_tree_root, obj->pool);
	} else {
		tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
						obj->objnode_tree_height);
		tmem_objnode_free(obj->objnode_tree_root);
		obj->objnode_tree_height = 0;
	}
	obj->objnode_tree_root = NULL;
}

/*
 * Tmem is operated on by a set of well-defined actions:
 * "put", "get", "flush", "flush_object", "new pool" and "destroy pool".
 * (The tmem ABI allows for subpages and exchanges but these operations
 * are not included in this implementation.)
 *
 * These "tmem core" operations are implemented in the following functions.
 */

/*
 * "Put" a page, i.e. copy a page from the kernel into newly allocated
 * PAM space (if such space is available).  tmem_put is complicated by
 * a corner case: What if a page with matching handle already exists in
 * tmem?  To guarantee coherency, one of two actions is necessary: Either
 * the data for the page must be overwritten, or the page must be
 * "flushed" so that the data is not accessible to a subsequent "get".
 * Since these "duplicate puts" are relatively rare, this implementation
 * always flushes for simplicity.
 */
int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
		struct page *page)
{
	struct tmem_obj *obj = NULL, *objfound = NULL, *objnew = NULL;
	void *pampd = NULL, *pampd_del = NULL;
	int ret = -ENOMEM;
	struct tmem_hashbucket *hb;

	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	obj = objfound = tmem_obj_find(hb, oidp);
	if (obj != NULL) {
		pampd = tmem_pampd_lookup_in_obj(objfound, index);
		if (pampd != NULL) {
			/* if found, is a dup put, flush the old one */
			pampd_del = tmem_pampd_delete_from_obj(obj, index);
			BUG_ON(pampd_del != pampd);
			(*tmem_pamops.free)(pampd, pool);
			if (obj->pampd_count == 0) {
				objnew = obj;
				objfound = NULL;
			}
			pampd = NULL;
		}
	} else {
		obj = objnew = (*tmem_hostops.obj_alloc)(pool);
		if (unlikely(obj == NULL)) {
			ret = -ENOMEM;
			goto out;
		}
		tmem_obj_init(obj, hb, pool, oidp);
	}
	BUG_ON(obj == NULL);
	BUG_ON(((objnew != obj) && (objfound != obj)) || (objnew == objfound));
	pampd = (*tmem_pamops.create)(obj->pool, &obj->oid, index, page);
	if (unlikely(pampd == NULL))
		goto free;
	ret = tmem_pampd_add_to_obj(obj, index, pampd);
	if (unlikely(ret == -ENOMEM))
		/* may have partially built objnode tree ("stump") */
		goto delete_and_free;
	goto out;

delete_and_free:
	(void)tmem_pampd_delete_from_obj(obj, index);
free:
	if (pampd)
		(*tmem_pamops.free)(pampd, pool);
	if (objnew) {
		tmem_obj_free(objnew, hb);
		(*tmem_hostops.obj_free)(objnew, pool);
	}
out:
	spin_unlock(&hb->lock);
	return ret;
}

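/*
 * Illustrative usage sketch (hypothetical caller code): a duplicate put
 * replaces the old data, and the always-flush policy guarantees that a
 * failed duplicate put leaves no stale data behind:
 *
 *	(void)tmem_put(pool, &oid, index, page1);
 *	(void)tmem_put(pool, &oid, index, page2);
 *	if (tmem_get(pool, &oid, index, page3) == 0)
 *		... page3 now holds page2's data ...
 *	else
 *		... the second put failed and flushed the handle ...
 */
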
/*
 * "Get" a page, i.e. if one can be found, copy the tmem page with the
 * matching handle from PAM space to the kernel.  By tmem definition,
 * when a "get" is successful on an ephemeral page, the page is "flushed",
 * and when a "get" is successful on a persistent page, the page is
 * retained in tmem.  Note that to preserve coherency, "get" can never
 * be skipped if tmem contains the data.  That is, if a get is done with
 * a certain handle and fails, any subsequent "get" must also fail (unless
 * of course there is a "put" done with the same handle).
 */
int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp,
				uint32_t index, struct page *page)
{
	struct tmem_obj *obj;
	void *pampd;
	bool ephemeral = is_ephemeral(pool);
	int ret = -1;
	struct tmem_hashbucket *hb;

	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	obj = tmem_obj_find(hb, oidp);
	if (obj == NULL)
		goto out;
	if (ephemeral)
		pampd = tmem_pampd_delete_from_obj(obj, index);
	else
		pampd = tmem_pampd_lookup_in_obj(obj, index);
	if (pampd == NULL)
		goto out;
	ret = (*tmem_pamops.get_data)(page, pampd, pool);
	if (ret < 0)
		goto out;
	if (ephemeral) {
		(*tmem_pamops.free)(pampd, pool);
		if (obj->pampd_count == 0) {
			tmem_obj_free(obj, hb);
			(*tmem_hostops.obj_free)(obj, pool);
			obj = NULL;
		}
	}
	ret = 0;
out:
	spin_unlock(&hb->lock);
	return ret;
}

/*
 * If a page in tmem matches the handle, "flush" this page from tmem such
 * that any subsequent "get" does not succeed (unless, of course, there
 * was another "put" with the same handle).
 */
int tmem_flush_page(struct tmem_pool *pool,
				struct tmem_oid *oidp, uint32_t index)
{
	struct tmem_obj *obj;
	void *pampd;
	int ret = -1;
	struct tmem_hashbucket *hb;

	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	obj = tmem_obj_find(hb, oidp);
	if (obj == NULL)
		goto out;
	pampd = tmem_pampd_delete_from_obj(obj, index);
	if (pampd == NULL)
		goto out;
	(*tmem_pamops.free)(pampd, pool);
	if (obj->pampd_count == 0) {
		tmem_obj_free(obj, hb);
		(*tmem_hostops.obj_free)(obj, pool);
	}
	ret = 0;

out:
	spin_unlock(&hb->lock);
	return ret;
}

/*
 * "Flush" all pages in tmem matching this oid.
 */
int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
{
	struct tmem_obj *obj;
	struct tmem_hashbucket *hb;
	int ret = -1;

	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	obj = tmem_obj_find(hb, oidp);
	if (obj == NULL)
		goto out;
	tmem_pampd_destroy_all_in_obj(obj);
	tmem_obj_free(obj, hb);
	(*tmem_hostops.obj_free)(obj, pool);
	ret = 0;

out:
	spin_unlock(&hb->lock);
	return ret;
}

/*
 * "Flush" all pages (and tmem_objs) from this tmem_pool and disable
 * all subsequent access to this tmem_pool.
 */
int tmem_destroy_pool(struct tmem_pool *pool)
{
	int ret = -1;

	if (pool == NULL)
		goto out;
	tmem_pool_flush(pool, 1);
	ret = 0;
out:
	return ret;
}

static LIST_HEAD(tmem_global_pool_list);

/*
 * Initialize a new tmem_pool with the provided flags and add it to the
 * global pool list; pool ids are managed by the tmem host implementation.
 */
void tmem_new_pool(struct tmem_pool *pool, uint32_t flags)
{
	int persistent = flags & TMEM_POOL_PERSIST;
	int shared = flags & TMEM_POOL_SHARED;
	struct tmem_hashbucket *hb = &pool->hashbucket[0];
	int i;

	for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
		hb->obj_rb_root = RB_ROOT;
		spin_lock_init(&hb->lock);
	}
	INIT_LIST_HEAD(&pool->pool_list);
	atomic_set(&pool->obj_count, 0);
	SET_SENTINEL(pool, POOL);
	list_add_tail(&pool->pool_list, &tmem_global_pool_list);
	pool->persistent = persistent;
	pool->shared = shared;
}
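
/*
 * Illustrative pool-creation sketch (hypothetical host code): the host
 * allocates the struct tmem_pool itself and manages its own pool ids;
 * this file only initializes and tracks the pool:
 *
 *	pool = myhost_pool_alloc();
 *	tmem_new_pool(pool, TMEM_POOL_PERSIST);
 *	...
 *	tmem_destroy_pool(pool);
 */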