/*
 * Compressed RAM based swap device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "ramzswap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/vmalloc.h>

#include "ramzswap_drv.h"

/* Globals */
static int ramzswap_major;
static struct ramzswap *devices;

/* Module params (documentation at end) */
static unsigned int num_devices;

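/*
 * Helpers to test/set/clear per-entry flags (e.g. RZS_ZERO,
 * RZS_UNCOMPRESSED) kept in the table[index].flags bitmap.
 */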
static int rzs_test_flag(struct ramzswap *rzs, u32 index,
                        enum rzs_pageflags flag)
{
        return rzs->table[index].flags & BIT(flag);
}

static void rzs_set_flag(struct ramzswap *rzs, u32 index,
                        enum rzs_pageflags flag)
{
        rzs->table[index].flags |= BIT(flag);
}

static void rzs_clear_flag(struct ramzswap *rzs, u32 index,
                        enum rzs_pageflags flag)
{
        rzs->table[index].flags &= ~BIT(flag);
}

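/* Return 1 if the page is filled entirely with zeroes, 0 otherwise. */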
static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

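/*
 * Apply a default disk size (a percentage of total RAM) when none was
 * given, warn when the chosen size looks wasteful, and round the final
 * size down to a page boundary.
 */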
static void ramzswap_set_disksize(struct ramzswap *rzs, size_t totalram_bytes)
{
        if (!rzs->disksize) {
                pr_info(
                "disk size not provided. You can use disksize_kb module "
                "param to specify size.\nUsing default: (%u%% of RAM).\n",
                default_disksize_perc_ram
                );
                rzs->disksize = default_disksize_perc_ram *
                                        (totalram_bytes / 100);
        }

        if (rzs->disksize > 2 * (totalram_bytes)) {
                pr_info(
                "There is little point in creating a ramzswap larger than "
                "twice the size of memory since we expect a 2:1 compression "
                "ratio. Note that ramzswap uses about 0.1%% of the size of "
                "the swap device when not in use so a huge ramzswap is "
                "wasteful.\n"
                "\tMemory Size: %zu kB\n"
                "\tSize you selected: %zu kB\n"
                "Continuing anyway ...\n",
                totalram_bytes >> 10, rzs->disksize >> 10
                );
        }

        rzs->disksize &= PAGE_MASK;
}

/*
 * The swap header (first page of the swap device) contains information
 * about a swap file/partition. Prepare such a header for the
 * given ramzswap device so that swapon can identify it as a
 * swap partition.
 */
static void setup_swap_header(struct ramzswap *rzs, union swap_header *s)
{
        s->info.version = 1;
        s->info.last_page = (rzs->disksize >> PAGE_SHIFT) - 1;
        s->info.nr_badpages = 0;
        memcpy(s->magic.magic, "SWAPSPACE2", 10);
}

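/* Fill the stats structure returned by the RZSIO_GET_STATS ioctl. */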
static void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
                        struct ramzswap_ioctl_stats *s)
{
        s->disksize = rzs->disksize;

#if defined(CONFIG_RAMZSWAP_STATS)
        {
        struct ramzswap_stats *rs = &rzs->stats;
        size_t succ_writes, mem_used;
        unsigned int good_compress_perc = 0, no_compress_perc = 0;

        mem_used = xv_get_total_size_bytes(rzs->mem_pool)
                        + (rs->pages_expand << PAGE_SHIFT);
        succ_writes = rzs_stat64_read(rzs, &rs->num_writes) -
                        rzs_stat64_read(rzs, &rs->failed_writes);

        if (succ_writes && rs->pages_stored) {
                good_compress_perc = rs->good_compress * 100
                                        / rs->pages_stored;
                no_compress_perc = rs->pages_expand * 100
                                        / rs->pages_stored;
        }

        s->num_reads = rzs_stat64_read(rzs, &rs->num_reads);
        s->num_writes = rzs_stat64_read(rzs, &rs->num_writes);
        s->failed_reads = rzs_stat64_read(rzs, &rs->failed_reads);
        s->failed_writes = rzs_stat64_read(rzs, &rs->failed_writes);
        s->invalid_io = rzs_stat64_read(rzs, &rs->invalid_io);
        s->notify_free = rzs_stat64_read(rzs, &rs->notify_free);
        s->pages_zero = rs->pages_zero;

        s->good_compress_pct = good_compress_perc;
        s->pages_expand_pct = no_compress_perc;

        s->pages_stored = rs->pages_stored;
        s->pages_used = mem_used >> PAGE_SHIFT;
        s->orig_data_size = rs->pages_stored << PAGE_SHIFT;
        s->compr_data_size = rs->compr_size;
        s->mem_used_total = mem_used;
        }
#endif /* CONFIG_RAMZSWAP_STATS */
}

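/*
 * Free the backing store for the page at the given table index.
 * Zero-filled pages have no memory allocated, so only the RZS_ZERO
 * flag needs clearing. Incompressible pages occupy a whole page of
 * their own; everything else lives in the xvmalloc pool.
 */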
static void ramzswap_free_page(struct ramzswap *rzs, size_t index)
{
        u32 clen;
        void *obj;

        struct page *page = rzs->table[index].page;
        u32 offset = rzs->table[index].offset;

        if (unlikely(!page)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (rzs_test_flag(rzs, index, RZS_ZERO)) {
                        rzs_clear_flag(rzs, index, RZS_ZERO);
                        rzs_stat_dec(&rzs->stats.pages_zero);
                }
                return;
        }

        if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED))) {
                clen = PAGE_SIZE;
                __free_page(page);
                rzs_clear_flag(rzs, index, RZS_UNCOMPRESSED);
                rzs_stat_dec(&rzs->stats.pages_expand);
                goto out;
        }

        obj = kmap_atomic(page, KM_USER0) + offset;
        clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
        kunmap_atomic(obj, KM_USER0);

        xv_free(rzs->mem_pool, page, offset);
        if (clen <= PAGE_SIZE / 2)
                rzs_stat_dec(&rzs->stats.good_compress);

out:
        rzs->stats.compr_size -= clen;
        rzs_stat_dec(&rzs->stats.pages_stored);

        rzs->table[index].page = NULL;
        rzs->table[index].offset = 0;
}

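/* Read handler for a zero-filled page: just clear the caller's page. */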
static int handle_zero_page(struct bio *bio)
{
        void *user_mem;
        struct page *page = bio->bi_io_vec[0].bv_page;

        user_mem = kmap_atomic(page, KM_USER0);
        memset(user_mem, 0, PAGE_SIZE);
        kunmap_atomic(user_mem, KM_USER0);

        flush_dcache_page(page);

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return 0;
}

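/*
 * Read handler for a page stored uncompressed: copy it straight into
 * the caller's page, no decompression needed.
 */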
static int handle_uncompressed_page(struct ramzswap *rzs, struct bio *bio)
{
        u32 index;
        struct page *page;
        unsigned char *user_mem, *cmem;

        page = bio->bi_io_vec[0].bv_page;
        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

        user_mem = kmap_atomic(page, KM_USER0);
        cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
                        rzs->table[index].offset;

        memcpy(user_mem, cmem, PAGE_SIZE);
        kunmap_atomic(user_mem, KM_USER0);
        kunmap_atomic(cmem, KM_USER1);

        flush_dcache_page(page);

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return 0;
}

/*
 * Called when the requested page is not present in ramzswap.
 * This is an attempt to read before any previous write
 * to this location - this happens due to readahead when
 * the swap device is read from user space (e.g. during swapon).
 */
static int handle_ramzswap_fault(struct ramzswap *rzs, struct bio *bio)
{
        pr_debug("Read before write on swap device: "
                "sector=%lu, size=%u, offset=%u\n",
                (ulong)(bio->bi_sector), bio->bi_size,
                bio->bi_io_vec[0].bv_offset);

        /* Do nothing. Just return success */
        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return 0;
}

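/*
 * Read handler: take the fast path for zero, absent, or uncompressed
 * pages; otherwise LZO-decompress the stored object into the caller's
 * page.
 */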
static int ramzswap_read(struct ramzswap *rzs, struct bio *bio)
{
        int ret;
        u32 index;
        size_t clen;
        struct page *page;
        struct zobj_header *zheader;
        unsigned char *user_mem, *cmem;

        rzs_stat64_inc(rzs, &rzs->stats.num_reads);

        page = bio->bi_io_vec[0].bv_page;
        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

        if (rzs_test_flag(rzs, index, RZS_ZERO))
                return handle_zero_page(bio);

        /* Requested page is not present in compressed area */
        if (!rzs->table[index].page)
                return handle_ramzswap_fault(rzs, bio);

        /* Page is stored uncompressed since it's incompressible */
        if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)))
                return handle_uncompressed_page(rzs, bio);

        user_mem = kmap_atomic(page, KM_USER0);
        clen = PAGE_SIZE;

        cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
                        rzs->table[index].offset;

        ret = lzo1x_decompress_safe(
                cmem + sizeof(*zheader),
                xv_get_object_size(cmem) - sizeof(*zheader),
                user_mem, &clen);

        kunmap_atomic(user_mem, KM_USER0);
        kunmap_atomic(cmem, KM_USER1);

        /* should NEVER happen */
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Decompression failed! err=%d, page=%u\n",
                        ret, index);
                rzs_stat64_inc(rzs, &rzs->stats.failed_reads);
                goto out;
        }

        flush_dcache_page(page);

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return 0;

out:
        bio_io_error(bio);
        return 0;
}

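/*
 * Write handler: detect zero-filled pages (which need no storage),
 * LZO-compress everything else, and store pages that do not compress
 * well in an uncompressed page of their own.
 */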
static int ramzswap_write(struct ramzswap *rzs, struct bio *bio)
{
        int ret;
        u32 offset, index;
        size_t clen;
        struct zobj_header *zheader;
        struct page *page, *page_store;
        unsigned char *user_mem, *cmem, *src;

        rzs_stat64_inc(rzs, &rzs->stats.num_writes);

        page = bio->bi_io_vec[0].bv_page;
        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

        src = rzs->compress_buffer;

        mutex_lock(&rzs->lock);

        user_mem = kmap_atomic(page, KM_USER0);
        if (page_zero_filled(user_mem)) {
                kunmap_atomic(user_mem, KM_USER0);
                mutex_unlock(&rzs->lock);
                rzs_stat_inc(&rzs->stats.pages_zero);
                rzs_set_flag(rzs, index, RZS_ZERO);

                set_bit(BIO_UPTODATE, &bio->bi_flags);
                bio_endio(bio, 0);
                return 0;
        }

        ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
                                rzs->compress_workmem);

        kunmap_atomic(user_mem, KM_USER0);

        if (unlikely(ret != LZO_E_OK)) {
                mutex_unlock(&rzs->lock);
                pr_err("Compression failed! err=%d\n", ret);
                rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
                goto out;
        }

        /*
         * Page is incompressible. Store it as-is (uncompressed)
         * since we do not want to return too many swap write
         * errors, which have the side effect of hanging the system.
         */
        if (unlikely(clen > max_zpage_size)) {
                clen = PAGE_SIZE;
                page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
                if (unlikely(!page_store)) {
                        mutex_unlock(&rzs->lock);
                        pr_info("Error allocating memory for incompressible "
                                "page: %u\n", index);
                        rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
                        goto out;
                }

                offset = 0;
                rzs_set_flag(rzs, index, RZS_UNCOMPRESSED);
                rzs_stat_inc(&rzs->stats.pages_expand);
                rzs->table[index].page = page_store;
                src = kmap_atomic(page, KM_USER0);
                goto memstore;
        }

        if (xv_malloc(rzs->mem_pool, clen + sizeof(*zheader),
                        &rzs->table[index].page, &offset,
                        GFP_NOIO | __GFP_HIGHMEM)) {
                mutex_unlock(&rzs->lock);
                pr_info("Error allocating memory for compressed "
                        "page: %u, size=%zu\n", index, clen);
                rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
                goto out;
        }

memstore:
        rzs->table[index].offset = offset;

        cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
                        rzs->table[index].offset;

#if 0
        /* Back-reference needed for memory defragmentation */
        if (!rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)) {
                zheader = (struct zobj_header *)cmem;
                zheader->table_idx = index;
                cmem += sizeof(*zheader);
        }
#endif

        memcpy(cmem, src, clen);

        kunmap_atomic(cmem, KM_USER1);
        if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)))
                kunmap_atomic(src, KM_USER0);

        /* Update stats */
        rzs->stats.compr_size += clen;
        rzs_stat_inc(&rzs->stats.pages_stored);
        if (clen <= PAGE_SIZE / 2)
                rzs_stat_inc(&rzs->stats.good_compress);

        mutex_unlock(&rzs->lock);

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return 0;

out:
        bio_io_error(bio);
        return 0;
}

/*
 * Check if request is within bounds and page aligned.
 */
static inline int valid_swap_request(struct ramzswap *rzs, struct bio *bio)
{
        if (unlikely(
                (bio->bi_sector >= (rzs->disksize >> SECTOR_SHIFT)) ||
                (bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
                (bio->bi_vcnt != 1) ||
                (bio->bi_size != PAGE_SIZE) ||
                (bio->bi_io_vec[0].bv_offset != 0))) {

                return 0;
        }

        /* swap request is valid */
        return 1;
}

/*
 * Handler function for all ramzswap I/O requests.
 */
static int ramzswap_make_request(struct request_queue *queue, struct bio *bio)
{
        int ret = 0;
        struct ramzswap *rzs = queue->queuedata;

        if (unlikely(!rzs->init_done)) {
                bio_io_error(bio);
                return 0;
        }

        if (!valid_swap_request(rzs, bio)) {
                rzs_stat64_inc(rzs, &rzs->stats.invalid_io);
                bio_io_error(bio);
                return 0;
        }

        switch (bio_data_dir(bio)) {
        case READ:
                ret = ramzswap_read(rzs, bio);
                break;

        case WRITE:
                ret = ramzswap_write(rzs, bio);
                break;
        }

        return ret;
}

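/*
 * Tear down an initialized device: stop accepting I/O, free the
 * compression buffers, release every stored page, then drop the table
 * and the memory pool and reset all stats.
 */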
static void reset_device(struct ramzswap *rzs)
{
        size_t index;

        /* Do not accept any new I/O request */
        rzs->init_done = 0;

        /* Free various per-device buffers */
        kfree(rzs->compress_workmem);
        free_pages((unsigned long)rzs->compress_buffer, 1);

        rzs->compress_workmem = NULL;
        rzs->compress_buffer = NULL;

        /* Free all pages that are still in this ramzswap device */
        for (index = 0; index < rzs->disksize >> PAGE_SHIFT; index++) {
                struct page *page;
                u16 offset;

                page = rzs->table[index].page;
                offset = rzs->table[index].offset;

                if (!page)
                        continue;

                if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)))
                        __free_page(page);
                else
                        xv_free(rzs->mem_pool, page, offset);
        }

        vfree(rzs->table);
        rzs->table = NULL;

        xv_destroy_pool(rzs->mem_pool);
        rzs->mem_pool = NULL;

        /* Reset stats */
        memset(&rzs->stats, 0, sizeof(rzs->stats));

        rzs->disksize = 0;
}

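/*
 * One-time device setup triggered by the RZSIO_INIT ioctl: size the
 * disk, allocate the compression workspace and page table, write the
 * swap header into the first page, and create the xvmalloc pool.
 */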
static int ramzswap_ioctl_init_device(struct ramzswap *rzs)
{
        int ret;
        size_t num_pages;
        struct page *page;
        union swap_header *swap_header;

        if (rzs->init_done) {
                pr_info("Device already initialized!\n");
                return -EBUSY;
        }

        ramzswap_set_disksize(rzs, totalram_pages << PAGE_SHIFT);

        rzs->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!rzs->compress_workmem) {
                pr_err("Error allocating compressor working memory!\n");
                ret = -ENOMEM;
                goto fail;
        }

        rzs->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
        if (!rzs->compress_buffer) {
                pr_err("Error allocating compressor buffer space\n");
                ret = -ENOMEM;
                goto fail;
        }

        num_pages = rzs->disksize >> PAGE_SHIFT;
        rzs->table = vmalloc(num_pages * sizeof(*rzs->table));
        if (!rzs->table) {
                pr_err("Error allocating ramzswap address table\n");
                /* To prevent accessing table entries during cleanup */
                rzs->disksize = 0;
                ret = -ENOMEM;
                goto fail;
        }
        memset(rzs->table, 0, num_pages * sizeof(*rzs->table));

        page = alloc_page(__GFP_ZERO);
        if (!page) {
                pr_err("Error allocating swap header page\n");
                ret = -ENOMEM;
                goto fail;
        }
        rzs->table[0].page = page;
        rzs_set_flag(rzs, 0, RZS_UNCOMPRESSED);

        swap_header = kmap(page);
        setup_swap_header(rzs, swap_header);
        kunmap(page);

        set_capacity(rzs->disk, rzs->disksize >> SECTOR_SHIFT);

        /* ramzswap devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rzs->disk->queue);

        rzs->mem_pool = xv_create_pool();
        if (!rzs->mem_pool) {
                pr_err("Error creating memory pool\n");
                ret = -ENOMEM;
                goto fail;
        }

        rzs->init_done = 1;

        pr_debug("Initialization done!\n");
        return 0;

fail:
        reset_device(rzs);

        pr_err("Initialization failed: err=%d\n", ret);
        return ret;
}

static int ramzswap_ioctl_reset_device(struct ramzswap *rzs)
{
        if (rzs->init_done)
                reset_device(rzs);

        return 0;
}

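/*
 * Dispatch the ramzswap control ioctls: set the disk size, fetch
 * stats, initialize the device, or reset it.
 */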
static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
{
        int ret = 0;
        size_t disksize_kb;

        struct ramzswap *rzs = bdev->bd_disk->private_data;

        switch (cmd) {
        case RZSIO_SET_DISKSIZE_KB:
                if (rzs->init_done) {
                        ret = -EBUSY;
                        goto out;
                }
                if (copy_from_user(&disksize_kb, (void *)arg,
                                                _IOC_SIZE(cmd))) {
                        ret = -EFAULT;
                        goto out;
                }
                rzs->disksize = disksize_kb << 10;
                pr_info("Disk size set to %zu kB\n", disksize_kb);
                break;

        case RZSIO_GET_STATS:
        {
                struct ramzswap_ioctl_stats *stats;
                if (!rzs->init_done) {
                        ret = -ENOTTY;
                        goto out;
                }
                stats = kzalloc(sizeof(*stats), GFP_KERNEL);
                if (!stats) {
                        ret = -ENOMEM;
                        goto out;
                }
                ramzswap_ioctl_get_stats(rzs, stats);
                if (copy_to_user((void *)arg, stats, sizeof(*stats))) {
                        kfree(stats);
                        ret = -EFAULT;
                        goto out;
                }
                kfree(stats);
                break;
        }
        case RZSIO_INIT:
                ret = ramzswap_ioctl_init_device(rzs);
                break;

        case RZSIO_RESET:
                /* Do not reset an active device! */
                if (bdev->bd_holders) {
                        ret = -EBUSY;
                        goto out;
                }

                /*
                 * Make sure all pending I/O is finished. The NULL check
                 * is unnecessary: bdev was already dereferenced above.
                 */
                fsync_bdev(bdev);

                ret = ramzswap_ioctl_reset_device(rzs);
                break;

        default:
                pr_info("Invalid ioctl %u\n", cmd);
                ret = -ENOTTY;
        }

out:
        return ret;
}

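/*
 * Swap slot free callback: invoked by the swap subsystem when a swap
 * slot is freed, so the page stored at that index can be released
 * immediately.
 */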
void ramzswap_slot_free_notify(struct block_device *bdev, unsigned long index)
{
        struct ramzswap *rzs;

        rzs = bdev->bd_disk->private_data;
        ramzswap_free_page(rzs, index);
        rzs_stat64_inc(rzs, &rzs->stats.notify_free);
}

static struct block_device_operations ramzswap_devops = {
        .ioctl = ramzswap_ioctl,
        .swap_slot_free_notify = ramzswap_slot_free_notify,
        .owner = THIS_MODULE
};

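/*
 * Allocate and register the request queue and gendisk for one
 * ramzswap device. Capacity starts at zero and is set once the device
 * is initialized via ioctl.
 */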
static int create_device(struct ramzswap *rzs, int device_id)
{
        int ret = 0;

        mutex_init(&rzs->lock);
        spin_lock_init(&rzs->stat64_lock);

        rzs->queue = blk_alloc_queue(GFP_KERNEL);
        if (!rzs->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        blk_queue_make_request(rzs->queue, ramzswap_make_request);
        rzs->queue->queuedata = rzs;

        /* gendisk structure */
        rzs->disk = alloc_disk(1);
        if (!rzs->disk) {
                blk_cleanup_queue(rzs->queue);
                pr_warning("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        rzs->disk->major = ramzswap_major;
        rzs->disk->first_minor = device_id;
        rzs->disk->fops = &ramzswap_devops;
        rzs->disk->queue = rzs->queue;
        rzs->disk->private_data = rzs;
        snprintf(rzs->disk->disk_name, 16, "ramzswap%d", device_id);

        /* Actual capacity set using RZSIO_SET_DISKSIZE_KB ioctl */
        set_capacity(rzs->disk, 0);

        blk_queue_physical_block_size(rzs->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(rzs->disk->queue, PAGE_SIZE);

        add_disk(rzs->disk);

        rzs->init_done = 0;

out:
        return ret;
}

static void destroy_device(struct ramzswap *rzs)
{
        if (rzs->disk) {
                del_gendisk(rzs->disk);
                put_disk(rzs->disk);
        }

        if (rzs->queue)
                blk_cleanup_queue(rzs->queue);
}

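/*
 * Module entry point: register the block major and create the
 * requested number of devices (default 1), unwinding on failure.
 */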
static int __init ramzswap_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warning("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        ramzswap_major = register_blkdev(0, "ramzswap");
        if (ramzswap_major <= 0) {
                pr_warning("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        if (!num_devices) {
                pr_info("num_devices not specified. Using default: 1\n");
                num_devices = 1;
        }

        /* Allocate the device array and initialize each one */
        pr_info("Creating %u devices ...\n", num_devices);
        devices = kzalloc(num_devices * sizeof(struct ramzswap), GFP_KERNEL);
        if (!devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&devices[--dev_id]);
unregister:
        unregister_blkdev(ramzswap_major, "ramzswap");
out:
        return ret;
}

static void __exit ramzswap_exit(void)
{
        int i;
        struct ramzswap *rzs;

        for (i = 0; i < num_devices; i++) {
                rzs = &devices[i];

                destroy_device(rzs);
                if (rzs->init_done)
                        reset_device(rzs);
        }

        unregister_blkdev(ramzswap_major, "ramzswap");

        kfree(devices);
        pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of ramzswap devices");

module_init(ramzswap_init);
module_exit(ramzswap_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Based Swap Device");