/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_rd.h"
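
/*
 * Two subsystem API templates are registered by this driver: rd_dr exposes
 * the ramdisk's internal scatterlist pages directly to the target core
 * (DIRECT), while rd_mcp copies data between the task's scatterlist and the
 * ramdisk pages (MEMCPY).
 */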
static struct se_subsystem_api rd_dr_template;
static struct se_subsystem_api rd_mcp_template;

/* #define DEBUG_RAMDISK_MCP */
/* #define DEBUG_RAMDISK_DR */
/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
	atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
	hba->hba_ptr = (void *) rd_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
	printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
		" Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
		rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
		RD_MAX_SECTORS);

	return 0;
}
static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}
/*	rd_release_device_space():
 */
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 i, j, page_count = 0, sg_per_table;
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	sg_table = rd_dev->sg_table_array;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	kfree(sg_table);
	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}
/*	rd_build_device_space():
 */
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (rd_dev->rd_page_count <= 0) {
		printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
			rd_dev->rd_page_count);
		return -EINVAL;
	}
	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		printk(KERN_ERR "Unable to allocate memory for Ramdisk"
			" scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;
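
	/*
	 * Allocate the backing pages in chunks: each rd_dev_sg_table holds
	 * at most max_sg_per_table scatterlist entries, one page per entry.
	 */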
	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!sg) {
			printk(KERN_ERR "Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table((struct scatterlist *)&sg[0], sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				printk(KERN_ERR "Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}
static void *rd_allocate_virtdevice(
	struct se_hba *hba,
	const char *name,
	int rd_direct)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;
	rd_dev->rd_direct = rd_direct;

	return rd_dev;
}
static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	return rd_allocate_virtdevice(hba, name, 1);
}

static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	return rd_allocate_virtdevice(hba, name, 0);
}
/*	rd_create_virtdevice():
 */
static struct se_device *rd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p,
	int rd_direct)
{
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct rd_dev *rd_dev = p;
	struct rd_host *rd_host = hba->hba_ptr;
	int dev_flags = 0, ret = -EINVAL;
	char prod[16], rev[4];

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	if (rd_build_device_space(rd_dev) < 0) {
		ret = -ENOMEM;
		goto fail;
	}

	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
						     RD_MCP_VERSION);

	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba,
			(rd_dev->rd_direct) ? &rd_dr_template :
			&rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
			&dev_limits, prod, rev);
	if (!dev)
		goto fail;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
	rd_dev->rd_queue_depth = dev->queue_depth;

	printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return dev;

fail:
	rd_release_device_space(rd_dev);
	return ERR_PTR(ret);
}
static struct se_device *rd_DIRECT_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	return rd_create_virtdevice(hba, se_dev, p, 1);
}

static struct se_device *rd_MEMCPY_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	return rd_create_virtdevice(hba, se_dev, p, 0);
}
/*	rd_free_device(): (Part of se_subsystem_api_t template)
 */
static void rd_free_device(void *p)
{
	struct rd_dev *rd_dev = p;

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}
static inline struct rd_request *RD_REQ(struct se_task *task)
{
	return container_of(task, struct rd_request, rd_task);
}
static struct se_task *
rd_alloc_task(struct se_cmd *cmd)
{
	struct rd_request *rd_req;

	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
	if (!rd_req) {
		printk(KERN_ERR "Unable to allocate struct rd_request\n");
		return NULL;
	}
	rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;

	return &rd_req->rd_task;
}
/*	rd_get_sg_table():
 */
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	u32 i;
	struct rd_dev_sg_table *sg_table;
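
	/*
	 * Each table covers the contiguous page range
	 * [page_start_offset, page_end_offset]; find the one containing @page.
	 */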
	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}
static int rd_MEMCPY_read(struct rd_request *req)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct scatterlist *sg_d, *sg_s;
	void *dst, *src;
	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
	u32 length, page_end = 0, table_sg_end;
	u32 rd_offset = req->rd_offset;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = task->task_sg;
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_MCP
	printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
		" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
		req->rd_page, req->rd_offset);
#endif
	src_offset = rd_offset;
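
	/*
	 * Walk the destination (task) and source (ramdisk) scatterlists in
	 * parallel, copying the smaller of the two remaining segment lengths
	 * on each pass until the request is satisfied.
	 */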
	while (req->rd_size) {
		if ((sg_d[i].length - dst_offset) <
		    (sg_s[j].length - src_offset)) {
			length = (sg_d[i].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
				" offset: %u sg_s[%d].length: %u\n", i,
				&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
				sg_s[j].length);
			printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
				" src_offset: %u\n", length, dst_offset,
				src_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			dst = sg_virt(&sg_d[i++]) + dst_offset;
			src = sg_virt(&sg_s[j]) + src_offset;

			dst_offset = 0;
			src_offset = length;
			page_end = 0;
		} else {
			length = (sg_s[j].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
				" offset: %u sg_s[%d].length: %u\n", i,
				&sg_d[i], sg_d[i].length, sg_d[i].offset,
				j, sg_s[j].length);
			printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
				" src_offset: %u\n", length, dst_offset,
				src_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			dst = sg_virt(&sg_d[i]) + dst_offset;

			if (sg_d[i].length == length) {
				i++;
				dst_offset = 0;
			} else
				dst_offset = length;

			src = sg_virt(&sg_s[j++]) + src_offset;

			src_offset = 0;
			page_end = 1;
		}

		memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
			" i: %u, j: %u\n", req->rd_page,
			(req->rd_size - length), length, i, j);
#endif
		req->rd_size -= length;
		if (!req->rd_size)
			return 0;

		if (!page_end)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "getting new page table for page: %u\n",
				req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!table)
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

	return 0;
}
/*	rd_MEMCPY_write():
 */
static int rd_MEMCPY_write(struct rd_request *req)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct scatterlist *sg_d, *sg_s;
	void *dst, *src;
	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
	u32 length, page_end = 0, table_sg_end;
	u32 rd_offset = req->rd_offset;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
	sg_s = task->task_sg;
#ifdef DEBUG_RAMDISK_MCP
	printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
		" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
		req->rd_page, req->rd_offset);
#endif
	dst_offset = rd_offset;
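
	/*
	 * Mirror of rd_MEMCPY_read(): the ramdisk pages are now the
	 * destination and the task's scatterlist is the source.
	 */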
	while (req->rd_size) {
		if ((sg_s[i].length - src_offset) <
		    (sg_d[j].length - dst_offset)) {
			length = (sg_s[i].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
				" offset: %d sg_d[%d].length: %u\n", i,
				&sg_s[i], sg_s[i].length, sg_s[i].offset,
				j, sg_d[j].length);
			printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
				" dst_offset: %u\n", length, src_offset,
				dst_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			src = sg_virt(&sg_s[i++]) + src_offset;
			dst = sg_virt(&sg_d[j]) + dst_offset;

			src_offset = 0;
			dst_offset = length;
			page_end = 0;
		} else {
			length = (sg_d[j].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
				" offset: %d sg_d[%d].length: %u\n", i,
				&sg_s[i], sg_s[i].length, sg_s[i].offset,
				j, sg_d[j].length);
			printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
				" dst_offset: %u\n", length, src_offset,
				dst_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			src = sg_virt(&sg_s[i]) + src_offset;

			if (sg_s[i].length == length) {
				i++;
				src_offset = 0;
			} else
				src_offset = length;

			dst = sg_virt(&sg_d[j++]) + dst_offset;

			dst_offset = 0;
			page_end = 1;
		}

		memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
			" i: %u, j: %u\n", req->rd_page,
			(req->rd_size - length), length, i, j);
#endif
		req->rd_size -= length;
		if (!req->rd_size)
			return 0;

		if (!page_end)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "getting new page table for page: %u\n",
				req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!table)
			return -1;

		sg_d = &table->sg_table[j = 0];
	}

	return 0;
}
/*	rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
 */
static int rd_MEMCPY_do_task(struct se_task *task)
{
	struct se_device *dev = task->se_dev;
	struct rd_request *req = RD_REQ(task);
	unsigned long long lba;
	int ret;
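
	/*
	 * Convert the task's starting LBA into a ramdisk page index plus a
	 * byte offset within that page, based on the device block size.
	 */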
	req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
	lba = task->task_lba;
	req->rd_offset = (do_div(lba,
			 (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
			  DEV_ATTRIB(dev)->block_size;
	req->rd_size = task->task_size;

	if (task->task_data_direction == DMA_FROM_DEVICE)
		ret = rd_MEMCPY_read(req);
	else
		ret = rd_MEMCPY_write(req);

	if (ret != 0)
		return ret;

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/*	rd_DIRECT_with_offset():
 */
static int rd_DIRECT_with_offset(
	struct se_task *task,
	struct list_head *se_mem_list,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct rd_request *req = RD_REQ(task);
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct se_mem *se_mem;
	struct scatterlist *sg_s;
	u32 j = 0, set_offset = 1;
	u32 get_next_table = 0, offset_length, table_sg_end;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
		(task->task_data_direction == DMA_TO_DEVICE) ?
			"Write" : "Read",
		task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
#endif
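	/*
	 * DIRECT mode: build struct se_mem entries that reference the
	 * ramdisk's own pages instead of copying data, honouring the
	 * initial intra-page offset.
	 */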
	while (req->rd_size) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!se_mem) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			return -1;
		}
		INIT_LIST_HEAD(&se_mem->se_list);

		if (set_offset) {
			offset_length = sg_s[j].length - req->rd_offset;
			if (offset_length > req->rd_size)
				offset_length = req->rd_size;

			se_mem->se_page = sg_page(&sg_s[j++]);
			se_mem->se_off = req->rd_offset;
			se_mem->se_len = offset_length;

			set_offset = 0;
			get_next_table = (j > table_sg_end);
			goto check_eot;
		}

		offset_length = (req->rd_size < req->rd_offset) ?
			req->rd_size : req->rd_offset;

		se_mem->se_page = sg_page(&sg_s[j]);
		se_mem->se_len = offset_length;

		set_offset = 1;

check_eot:
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
			" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
			req->rd_page, req->rd_size, offset_length, j, se_mem,
			se_mem->se_page, se_mem->se_off, se_mem->se_len);
#endif
		list_add_tail(&se_mem->se_list, se_mem_list);
		(*se_mem_cnt)++;

		req->rd_size -= offset_length;
		if (!req->rd_size)
			goto out;

		if (!set_offset && !get_next_table)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "getting new page table for page: %u\n",
				req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!table)
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

out:
	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
			*se_mem_cnt);
#endif
	return 0;
}
/*	rd_DIRECT_without_offset():
 */
static int rd_DIRECT_without_offset(
	struct se_task *task,
	struct list_head *se_mem_list,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct rd_request *req = RD_REQ(task);
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct se_mem *se_mem;
	struct scatterlist *sg_s;
	u32 length, j = 0;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -1;

	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
		(task->task_data_direction == DMA_TO_DEVICE) ?
			"Write" : "Read",
		task->task_lba, req->rd_size, req->rd_page);
#endif
	while (req->rd_size) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!se_mem) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			return -1;
		}
		INIT_LIST_HEAD(&se_mem->se_list);

		length = (req->rd_size < sg_s[j].length) ?
			req->rd_size : sg_s[j].length;

		se_mem->se_page = sg_page(&sg_s[j++]);
		se_mem->se_len = length;

#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
			" se_page: %p se_off: %u se_len: %u\n", req->rd_page,
			req->rd_size, j, se_mem, se_mem->se_page,
			se_mem->se_off, se_mem->se_len);
#endif
		list_add_tail(&se_mem->se_list, se_mem_list);
		(*se_mem_cnt)++;

		req->rd_size -= length;
		if (!req->rd_size)
			goto out;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "getting new page table for page: %u\n",
				req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!table)
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

out:
	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
			*se_mem_cnt);
#endif
	return 0;
}
/*	rd_DIRECT_do_se_mem_map():
 */
static int rd_DIRECT_do_se_mem_map(
	struct se_task *task,
	struct list_head *se_mem_list,
	void *in_mem,
	struct se_mem *in_se_mem,
	struct se_mem **out_se_mem,
	u32 *se_mem_cnt,
	u32 *task_offset_in)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct rd_request *req = RD_REQ(task);
	u32 task_offset = *task_offset_in;
	unsigned long long lba;
	int ret;

	req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
			PAGE_SIZE);
	lba = task->task_lba;
	req->rd_offset = (do_div(lba,
			 (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
			  DEV_ATTRIB(task->se_dev)->block_size;
	req->rd_size = task->task_size;
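
	/*
	 * Use the offset variant when the request does not start on a
	 * page boundary (req->rd_offset != 0).
	 */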
	if (req->rd_offset)
		ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
				task_offset_in);
	else
		ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
				task_offset_in);

	if (ret < 0)
		return ret;

	if (CMD_TFO(cmd)->task_sg_chaining == 0)
		return 0;
	/*
	 * Currently prevent writers from multiple HW fabrics doing
	 * pci_map_sg() to RD_DR's internal scatterlist memory.
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		printk(KERN_ERR "DMA_TO_DEVICE not supported for"
				" RAMDISK_DR with task_sg_chaining=1\n");
		return -ENOSYS;
	}
	/*
	 * Special case when task_sg_chaining is enabled: set up
	 * struct se_task->task_sg[], as it will be used by
	 * transport_do_task_sg_chain() for creating chained SGLs
	 * across multiple struct se_task->task_sg[].
	 */
	if (!(transport_calc_sg_num(task,
			list_entry(T_TASK(cmd)->t_mem_list->next,
				   struct se_mem, se_list),
			task_offset)))
		return -ENOMEM;

	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
			list_entry(T_TASK(cmd)->t_mem_list->next,
				   struct se_mem, se_list),
			out_se_mem, se_mem_cnt, task_offset_in);
}
/*	rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
 */
static int rd_DIRECT_do_task(struct se_task *task)
{
	/*
	 * At this point the locally allocated RD tables have been mapped
	 * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
	 */
	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/*	rd_free_task(): (Part of se_subsystem_api_t template)
 */
static void rd_free_task(struct se_task *task)
{
	kfree(RD_REQ(task));
}

enum {
	Opt_rd_pages, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_err, NULL}
};
static ssize_t rd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page,
	ssize_t count)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			printk(KERN_INFO "RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}
static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		printk(KERN_INFO "Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	return 0;
}
static ssize_t rd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: %s\n",
			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
			"rd_direct" : "rd_mcp");
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count);

	return bl;
}
/*	rd_get_cdb(): (Part of se_subsystem_api_t template)
 */
static unsigned char *rd_get_cdb(struct se_task *task)
{
	struct rd_request *req = RD_REQ(task);

	return req->rd_scsi_cdb;
}
static u32 rd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 rd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
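
/* Report the last addressable LBA: (rd_page_count * PAGE_SIZE / block_size) - 1. */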
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = dev->dev_ptr;
	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			DEV_ATTRIB(dev)->block_size) - 1;

	return blocks_long;
}
static struct se_subsystem_api rd_dr_template = {
	.name			= "rd_dr",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.allocate_virtdevice	= rd_DIRECT_allocate_virtdevice,
	.create_virtdevice	= rd_DIRECT_create_virtdevice,
	.free_device		= rd_free_device,
	.alloc_task		= rd_alloc_task,
	.do_task		= rd_DIRECT_do_task,
	.free_task		= rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_cdb		= rd_get_cdb,
	.get_device_rev		= rd_get_device_rev,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
	.do_se_mem_map		= rd_DIRECT_do_se_mem_map,
};
static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
	.free_device		= rd_free_device,
	.alloc_task		= rd_alloc_task,
	.do_task		= rd_MEMCPY_do_task,
	.free_task		= rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_cdb		= rd_get_cdb,
	.get_device_rev		= rd_get_device_rev,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
};
int __init rd_module_init(void)
{
	int ret;

	ret = transport_subsystem_register(&rd_dr_template);
	if (ret < 0)
		return ret;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0) {
		transport_subsystem_release(&rd_dr_template);
		return ret;
	}

	return 0;
}
void rd_module_exit(void)
{
	transport_subsystem_release(&rd_dr_template);
	transport_subsystem_release(&rd_mcp_template);
}