1 /**
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11  *
12  * Contact Information:
13  * linux-drivers@serverengines.com
14  *
15  *  ServerEngines
16  * 209 N. Fair Oaks Ave
17  * Sunnyvale, CA 94085
18  *
19  */
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/string.h>
27 #include <linux/kernel.h>
28 #include <linux/semaphore.h>
29
30 #include <scsi/libiscsi.h>
31 #include <scsi/scsi_transport_iscsi.h>
32 #include <scsi/scsi_transport.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi.h>
37 #include "be_main.h"
38 #include "be_iscsi.h"
39 #include "be_mgmt.h"
40
41 static unsigned int be_iopoll_budget = 10;
42 static unsigned int be_max_phys_size = 64;
43 static unsigned int enable_msix = 1;
44 static unsigned int gcrashmode;
45 static unsigned int num_hba;
46
48 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
49 MODULE_AUTHOR("ServerEngines Corporation");
50 MODULE_LICENSE("GPL");
51 module_param(be_iopoll_budget, int, 0);
52 module_param(enable_msix, int, 0);
53 module_param(be_max_phys_size, uint, S_IRUGO);
54 MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
55                                    " contiguous memory that can be allocated."
56                                    " Range is 16 - 128");
57
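/**
 * beiscsi_slave_configure - per-device setup called by the SCSI midlayer
 * @sdev: SCSI device being configured
 *
 * Caps the maximum DMA segment size of the device's request queue at
 * 64KB, presumably matching what a single adapter SGE can address.
 */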
58 static int beiscsi_slave_configure(struct scsi_device *sdev)
59 {
60         blk_queue_max_segment_size(sdev->request_queue, 65536);
61         return 0;
62 }
63
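/**
 * beiscsi_eh_abort - eh_abort_handler for the driver
 * @sc: SCSI command to be aborted
 *
 * Builds a single-entry invalidate table for the command's ICD, asks
 * the firmware to invalidate it through mgmt_invalidate_icds(), waits
 * for the MCC completion and then hands the abort to libiscsi via
 * iscsi_eh_abort().
 */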
64 static int beiscsi_eh_abort(struct scsi_cmnd *sc)
65 {
66         struct iscsi_cls_session *cls_session;
67         struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
68         struct beiscsi_io_task *aborted_io_task;
69         struct iscsi_conn *conn;
70         struct beiscsi_conn *beiscsi_conn;
71         struct beiscsi_hba *phba;
72         struct iscsi_session *session;
73         struct invalidate_command_table *inv_tbl;
74         struct be_dma_mem nonemb_cmd;
75         unsigned int cid, tag, num_invalidate;
76
77         cls_session = starget_to_session(scsi_target(sc->device));
78         session = cls_session->dd_data;
79
80         spin_lock_bh(&session->lock);
81         if (!aborted_task || !aborted_task->sc) {
82                 /* we raced */
83                 spin_unlock_bh(&session->lock);
84                 return SUCCESS;
85         }
86
87         aborted_io_task = aborted_task->dd_data;
88         if (!aborted_io_task->scsi_cmnd) {
89                 /* raced or invalid command */
90                 spin_unlock_bh(&session->lock);
91                 return SUCCESS;
92         }
93         spin_unlock_bh(&session->lock);
94         conn = aborted_task->conn;
95         beiscsi_conn = conn->dd_data;
96         phba = beiscsi_conn->phba;
97
98         /* invalidate iocb */
99         cid = beiscsi_conn->beiscsi_conn_cid;
100         inv_tbl = phba->inv_tbl;
101         memset(inv_tbl, 0x0, sizeof(*inv_tbl));
102         inv_tbl->cid = cid;
103         inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
104         num_invalidate = 1;
105         nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
106                                 sizeof(struct invalidate_commands_params_in),
107                                 &nonemb_cmd.dma);
108         if (nonemb_cmd.va == NULL) {
109                 SE_DEBUG(DBG_LVL_1,
110                          "Failed to allocate memory for "
111                          "mgmt_invalidate_icds\n");
112                 return FAILED;
113         }
114         nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
115
116         tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
117                                    cid, &nonemb_cmd);
118         if (!tag) {
119                 shost_printk(KERN_WARNING, phba->shost,
120                              "mgmt_invalidate_icds could not be"
121                              " submitted\n");
122                 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
123                                     nonemb_cmd.va, nonemb_cmd.dma);
124
125                 return FAILED;
126         } else {
127                 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
128                                          phba->ctrl.mcc_numtag[tag]);
129                 free_mcc_tag(&phba->ctrl, tag);
130         }
131         pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
132                             nonemb_cmd.va, nonemb_cmd.dma);
133         return iscsi_eh_abort(sc);
134 }
135
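/**
 * beiscsi_eh_device_reset - eh_device_reset_handler for the driver
 * @sc: SCSI command that triggered the LUN reset
 *
 * Collects the ICDs of all outstanding tasks on the target LUN into
 * the invalidate table, asks the firmware to invalidate them and then
 * completes the reset through iscsi_eh_device_reset().
 */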
136 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
137 {
138         struct iscsi_task *abrt_task;
139         struct beiscsi_io_task *abrt_io_task;
140         struct iscsi_conn *conn;
141         struct beiscsi_conn *beiscsi_conn;
142         struct beiscsi_hba *phba;
143         struct iscsi_session *session;
144         struct iscsi_cls_session *cls_session;
145         struct invalidate_command_table *inv_tbl;
146         struct be_dma_mem nonemb_cmd;
147         unsigned int cid, tag, i, num_invalidate;
148         int rc = FAILED;
149
150         /* invalidate iocbs */
151         cls_session = starget_to_session(scsi_target(sc->device));
152         session = cls_session->dd_data;
153         spin_lock_bh(&session->lock);
154         if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
155                 goto unlock;
156
157         conn = session->leadconn;
158         beiscsi_conn = conn->dd_data;
159         phba = beiscsi_conn->phba;
160         cid = beiscsi_conn->beiscsi_conn_cid;
161         inv_tbl = phba->inv_tbl;
162         memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
163         num_invalidate = 0;
164         for (i = 0; i < conn->session->cmds_max; i++) {
165                 abrt_task = conn->session->cmds[i];
166                 abrt_io_task = abrt_task->dd_data;
167                 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
168                         continue;
169
170                 if (sc->device->lun != abrt_task->sc->device->lun)
171                         continue;
172
173                 inv_tbl->cid = cid;
174                 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
175                 num_invalidate++;
176                 inv_tbl++;
177         }
178         spin_unlock_bh(&session->lock);
179         inv_tbl = phba->inv_tbl;
180
181         nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
182                                 sizeof(struct invalidate_commands_params_in),
183                                 &nonemb_cmd.dma);
184         if (nonemb_cmd.va == NULL) {
185                 SE_DEBUG(DBG_LVL_1,
186                          "Failed to allocate memory for "
187                          "mgmt_invalidate_icds\n");
188                 return FAILED;
189         }
190         nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
191         memset(nonemb_cmd.va, 0, nonemb_cmd.size);
192         tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
193                                    cid, &nonemb_cmd);
194         if (!tag) {
195                 shost_printk(KERN_WARNING, phba->shost,
196                              "mgmt_invalidate_icds could not be"
197                              " submitted\n");
198                 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
199                                     nonemb_cmd.va, nonemb_cmd.dma);
200                 return FAILED;
201         } else {
202                 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
203                                          phba->ctrl.mcc_numtag[tag]);
204                 free_mcc_tag(&phba->ctrl, tag);
205         }
206         pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
207                             nonemb_cmd.va, nonemb_cmd.dma);
208         return iscsi_eh_device_reset(sc);
209 unlock:
210         spin_unlock_bh(&session->lock);
211         return rc;
212 }
213
214 /*------------------- PCI Driver operations and data ----------------- */
215 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
216         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
217         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
218         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
219         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
220         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
221         { 0 }
222 };
223 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
224
225 static struct scsi_host_template beiscsi_sht = {
226         .module = THIS_MODULE,
227         .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
228         .proc_name = DRV_NAME,
229         .queuecommand = iscsi_queuecommand,
230         .change_queue_depth = iscsi_change_queue_depth,
231         .slave_configure = beiscsi_slave_configure,
232         .target_alloc = iscsi_target_alloc,
233         .eh_abort_handler = beiscsi_eh_abort,
234         .eh_device_reset_handler = beiscsi_eh_device_reset,
235         .eh_target_reset_handler = iscsi_eh_session_reset,
236         .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
237         .can_queue = BE2_IO_DEPTH,
238         .this_id = -1,
239         .max_sectors = BEISCSI_MAX_SECTORS,
240         .cmd_per_lun = BEISCSI_CMD_PER_LUN,
241         .use_clustering = ENABLE_CLUSTERING,
242 };
243
244 static struct scsi_transport_template *beiscsi_scsi_transport;
245
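/**
 * beiscsi_hba_alloc - allocate and register a Scsi_Host for the adapter
 * @pcidev: PCI device backing the host
 *
 * Allocates an iSCSI host with the beiscsi_hba private area, fills in
 * the host limits (sessions, LUNs, command length) and adds it to the
 * SCSI midlayer. Returns the new beiscsi_hba, or NULL on failure.
 */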
246 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
247 {
248         struct beiscsi_hba *phba;
249         struct Scsi_Host *shost;
250
251         shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
252         if (!shost) {
253         dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
254                         "iscsi_host_alloc failed\n");
255                 return NULL;
256         }
257         shost->dma_boundary = pcidev->dma_mask;
258         shost->max_id = BE2_MAX_SESSIONS;
259         shost->max_channel = 0;
260         shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
261         shost->max_lun = BEISCSI_NUM_MAX_LUN;
262         shost->transportt = beiscsi_scsi_transport;
263         phba = iscsi_host_priv(shost);
264         memset(phba, 0, sizeof(*phba));
265         phba->shost = shost;
266         phba->pcidev = pci_dev_get(pcidev);
267         pci_set_drvdata(pcidev, phba);
268
269         if (iscsi_host_add(shost, &phba->pcidev->dev))
270                 goto free_devices;
271         return phba;
272
273 free_devices:
274         pci_dev_put(phba->pcidev);
275         iscsi_host_free(phba->shost);
276         return NULL;
277 }
278
279 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
280 {
281         if (phba->csr_va) {
282                 iounmap(phba->csr_va);
283                 phba->csr_va = NULL;
284         }
285         if (phba->db_va) {
286                 iounmap(phba->db_va);
287                 phba->db_va = NULL;
288         }
289         if (phba->pci_va) {
290                 iounmap(phba->pci_va);
291                 phba->pci_va = NULL;
292         }
293 }
294
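/**
 * beiscsi_map_pci_bars - ioremap the CSR, doorbell and pcicfg BARs
 * @phba: driver instance being initialised
 * @pcidev: PCI device whose BARs are mapped
 *
 * BAR2 holds the CSR block and BAR4 the doorbells; the pcicfg region
 * is BAR1 on BE-gen2 parts and BAR0 otherwise. Returns 0 on success,
 * or -ENOMEM after unmapping anything already mapped.
 */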
295 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
296                                 struct pci_dev *pcidev)
297 {
298         u8 __iomem *addr;
299         int pcicfg_reg;
300
301         addr = ioremap_nocache(pci_resource_start(pcidev, 2),
302                                pci_resource_len(pcidev, 2));
303         if (addr == NULL)
304                 return -ENOMEM;
305         phba->ctrl.csr = addr;
306         phba->csr_va = addr;
307         phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
308
309         addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
310         if (addr == NULL)
311                 goto pci_map_err;
312         phba->ctrl.db = addr;
313         phba->db_va = addr;
314         phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);
315
316         if (phba->generation == BE_GEN2)
317                 pcicfg_reg = 1;
318         else
319                 pcicfg_reg = 0;
320
321         addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
322                                pci_resource_len(pcidev, pcicfg_reg));
323
324         if (addr == NULL)
325                 goto pci_map_err;
326         phba->ctrl.pcicfg = addr;
327         phba->pci_va = addr;
328         phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
329         return 0;
330
331 pci_map_err:
332         beiscsi_unmap_pci_function(phba);
333         return -ENOMEM;
334 }
335
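/**
 * beiscsi_enable_pci - enable the PCI device and set up DMA masking
 * @pcidev: PCI device to bring up
 *
 * Enables the device, makes it bus master and selects a 64-bit
 * consistent DMA mask, falling back to 32-bit if that fails.
 */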
336 static int beiscsi_enable_pci(struct pci_dev *pcidev)
337 {
338         int ret;
339
340         ret = pci_enable_device(pcidev);
341         if (ret) {
342                 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
343                         "failed. Returning -ENODEV\n");
344                 return ret;
345         }
346
347         pci_set_master(pcidev);
348         if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
349                 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
350                 if (ret) {
351                         dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
352                         pci_disable_device(pcidev);
353                         return ret;
354                 }
355         }
356         return 0;
357 }
358
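/**
 * be_ctrl_init - initialise the adapter control structure
 * @phba: driver instance
 * @pdev: underlying PCI device
 *
 * Maps the PCI BARs, allocates the 16-byte aligned mailbox used for
 * mbox/MCC commands and initialises the mailbox and MCC locks.
 */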
359 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
360 {
361         struct be_ctrl_info *ctrl = &phba->ctrl;
362         struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
363         struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
364         int status = 0;
365
366         ctrl->pdev = pdev;
367         status = beiscsi_map_pci_bars(phba, pdev);
368         if (status)
369                 return status;
370         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
371         mbox_mem_alloc->va = pci_alloc_consistent(pdev,
372                                                   mbox_mem_alloc->size,
373                                                   &mbox_mem_alloc->dma);
374         if (!mbox_mem_alloc->va) {
375                 beiscsi_unmap_pci_function(phba);
376                 status = -ENOMEM;
377                 return status;
378         }
379
380         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
381         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
382         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
383         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
384         spin_lock_init(&ctrl->mbox_lock);
385         spin_lock_init(&phba->ctrl.mcc_lock);
386         spin_lock_init(&phba->ctrl.mcc_cq_lock);
387
388         return status;
389 }
390
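/**
 * beiscsi_get_params - derive driver limits from the firmware config
 * @phba: driver instance
 *
 * Computes the per-controller I/O, connection, ICD and async PDU
 * counts from the values reported by the firmware, and sizes the
 * event and completion queues (rounded up to a multiple of 512, with
 * at least 1024 EQ entries).
 */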
391 static void beiscsi_get_params(struct beiscsi_hba *phba)
392 {
393         phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
394                                     - (phba->fw_config.iscsi_cid_count
395                                     + BE2_TMFS
396                                     + BE2_NOPOUT_REQ));
397         phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
398         phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
399         phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
400         phba->params.num_sge_per_io = BE2_SGE;
401         phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
402         phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
403         phba->params.eq_timer = 64;
404         phba->params.num_eq_entries =
405             (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
406                                     + BE2_TMFS) / 512) + 1) * 512;
407         phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
408                                 ? 1024 : phba->params.num_eq_entries;
409         SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
410                              phba->params.num_eq_entries);
411         phba->params.num_cq_entries =
412             (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
413                                     + BE2_TMFS) / 512) + 1) * 512;
414         phba->params.wrbs_per_cxn = 256;
415 }
416
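/**
 * hwi_ring_eq_db - ring the event queue doorbell
 * @phba: driver instance
 * @id: EQ ring id
 * @clr_interrupt: clear the interrupt on the chip
 * @num_processed: number of EQEs consumed since the last ring
 * @rearm: re-arm the EQ for further interrupts
 * @event: set the event bit in the doorbell
 */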
417 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
418                            unsigned int id, unsigned int clr_interrupt,
419                            unsigned int num_processed,
420                            unsigned char rearm, unsigned char event)
421 {
422         u32 val = 0;
423         val |= id & DB_EQ_RING_ID_MASK;
424         if (rearm)
425                 val |= 1 << DB_EQ_REARM_SHIFT;
426         if (clr_interrupt)
427                 val |= 1 << DB_EQ_CLR_SHIFT;
428         if (event)
429                 val |= 1 << DB_EQ_EVNT_SHIFT;
430         val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
431         iowrite32(val, phba->db_va + DB_EQ_OFFSET);
432 }
433
434 /**
435  * be_isr_mcc - The MSI-X isr for the MCC/asynchronous event queue
436  * @irq: Not used
437  * @dev_id: Pointer to the be_eq_obj servicing the MCC event queue
438  */
439 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
440 {
441         struct beiscsi_hba *phba;
442         struct be_eq_entry *eqe = NULL;
443         struct be_queue_info *eq;
444         struct be_queue_info *mcc;
445         unsigned int num_eq_processed;
446         struct be_eq_obj *pbe_eq;
447         unsigned long flags;
448
449         pbe_eq = dev_id;
450         eq = &pbe_eq->q;
451         phba =  pbe_eq->phba;
452         mcc = &phba->ctrl.mcc_obj.cq;
453         eqe = queue_tail_node(eq);
454         if (!eqe)
455                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
456
457         num_eq_processed = 0;
458
459         while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
460                                 & EQE_VALID_MASK) {
461                 if (((eqe->dw[offsetof(struct amap_eq_entry,
462                      resource_id) / 32] &
463                      EQE_RESID_MASK) >> 16) == mcc->id) {
464                         spin_lock_irqsave(&phba->isr_lock, flags);
465                         phba->todo_mcc_cq = 1;
466                         spin_unlock_irqrestore(&phba->isr_lock, flags);
467                 }
468                 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
469                 queue_tail_inc(eq);
470                 eqe = queue_tail_node(eq);
471                 num_eq_processed++;
472         }
473         if (phba->todo_mcc_cq)
474                 queue_work(phba->wq, &phba->work_cqs);
475         if (num_eq_processed)
476                 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
477
478         return IRQ_HANDLED;
479 }
480
481 /**
482  * be_isr_msix - The MSI-X isr for an I/O event queue
483  * @irq: Not used
484  * @dev_id: Pointer to the be_eq_obj for this vector's event queue
485  */
486 static irqreturn_t be_isr_msix(int irq, void *dev_id)
487 {
488         struct beiscsi_hba *phba;
489         struct be_eq_entry *eqe = NULL;
490         struct be_queue_info *eq;
491         struct be_queue_info *cq;
492         unsigned int num_eq_processed;
493         struct be_eq_obj *pbe_eq;
494         unsigned long flags;
495
496         pbe_eq = dev_id;
497         eq = &pbe_eq->q;
498         cq = pbe_eq->cq;
499         eqe = queue_tail_node(eq);
500         if (!eqe)
501                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
502
503         phba = pbe_eq->phba;
504         num_eq_processed = 0;
505         if (blk_iopoll_enabled) {
506                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
507                                         & EQE_VALID_MASK) {
508                         if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
509                                 blk_iopoll_sched(&pbe_eq->iopoll);
510
511                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
512                         queue_tail_inc(eq);
513                         eqe = queue_tail_node(eq);
514                         num_eq_processed++;
515                 }
516                 if (num_eq_processed)
517                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
518
519                 return IRQ_HANDLED;
520         } else {
521                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
522                                                 & EQE_VALID_MASK) {
523                         spin_lock_irqsave(&phba->isr_lock, flags);
524                         phba->todo_cq = 1;
525                         spin_unlock_irqrestore(&phba->isr_lock, flags);
526                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
527                         queue_tail_inc(eq);
528                         eqe = queue_tail_node(eq);
529                         num_eq_processed++;
530                 }
531                 if (phba->todo_cq)
532                         queue_work(phba->wq, &phba->work_cqs);
533
534                 if (num_eq_processed)
535                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
536
537                 return IRQ_HANDLED;
538         }
539 }
540
541 /**
542  * be_isr - The isr used when MSI-X is not enabled
543  * @irq: Not used
544  * @dev_id: Pointer to host adapter structure
545  */
546 static irqreturn_t be_isr(int irq, void *dev_id)
547 {
548         struct beiscsi_hba *phba;
549         struct hwi_controller *phwi_ctrlr;
550         struct hwi_context_memory *phwi_context;
551         struct be_eq_entry *eqe = NULL;
552         struct be_queue_info *eq;
553         struct be_queue_info *cq;
554         struct be_queue_info *mcc;
555         unsigned long flags, index;
556         unsigned int num_mcceq_processed, num_ioeq_processed;
557         struct be_ctrl_info *ctrl;
558         struct be_eq_obj *pbe_eq;
559         int isr;
560
561         phba = dev_id;
562         ctrl = &phba->ctrl;
563         isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
564                        (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
565         if (!isr)
566                 return IRQ_NONE;
567
568         phwi_ctrlr = phba->phwi_ctrlr;
569         phwi_context = phwi_ctrlr->phwi_ctxt;
570         pbe_eq = &phwi_context->be_eq[0];
571
572         eq = &phwi_context->be_eq[0].q;
573         mcc = &phba->ctrl.mcc_obj.cq;
574         index = 0;
575         eqe = queue_tail_node(eq);
576         if (!eqe)
577                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
578
579         num_ioeq_processed = 0;
580         num_mcceq_processed = 0;
581         if (blk_iopoll_enabled) {
582                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
583                                         & EQE_VALID_MASK) {
584                         if (((eqe->dw[offsetof(struct amap_eq_entry,
585                              resource_id) / 32] &
586                              EQE_RESID_MASK) >> 16) == mcc->id) {
587                                 spin_lock_irqsave(&phba->isr_lock, flags);
588                                 phba->todo_mcc_cq = 1;
589                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
590                                 num_mcceq_processed++;
591                         } else {
592                                 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
593                                         blk_iopoll_sched(&pbe_eq->iopoll);
594                                 num_ioeq_processed++;
595                         }
596                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
597                         queue_tail_inc(eq);
598                         eqe = queue_tail_node(eq);
599                 }
600                 if (num_ioeq_processed || num_mcceq_processed) {
601                         if (phba->todo_mcc_cq)
602                                 queue_work(phba->wq, &phba->work_cqs);
603
604                         if ((num_mcceq_processed) && (!num_ioeq_processed))
605                                 hwi_ring_eq_db(phba, eq->id, 0,
606                                               (num_ioeq_processed +
607                                                num_mcceq_processed), 1, 1);
608                         else
609                                 hwi_ring_eq_db(phba, eq->id, 0,
610                                                (num_ioeq_processed +
611                                                 num_mcceq_processed), 0, 1);
612
613                         return IRQ_HANDLED;
614                 } else
615                         return IRQ_NONE;
616         } else {
617                 cq = &phwi_context->be_cq[0];
618                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
619                                                 & EQE_VALID_MASK) {
620
621                         if (((eqe->dw[offsetof(struct amap_eq_entry,
622                              resource_id) / 32] &
623                              EQE_RESID_MASK) >> 16) != cq->id) {
624                                 spin_lock_irqsave(&phba->isr_lock, flags);
625                                 phba->todo_mcc_cq = 1;
626                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
627                         } else {
628                                 spin_lock_irqsave(&phba->isr_lock, flags);
629                                 phba->todo_cq = 1;
630                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
631                         }
632                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
633                         queue_tail_inc(eq);
634                         eqe = queue_tail_node(eq);
635                         num_ioeq_processed++;
636                 }
637                 if (phba->todo_cq || phba->todo_mcc_cq)
638                         queue_work(phba->wq, &phba->work_cqs);
639
640                 if (num_ioeq_processed) {
641                         hwi_ring_eq_db(phba, eq->id, 0,
642                                        num_ioeq_processed, 1, 1);
643                         return IRQ_HANDLED;
644                 } else
645                         return IRQ_NONE;
646         }
647 }
648
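/**
 * beiscsi_init_irqs - register the driver's interrupt handlers
 * @phba: driver instance
 *
 * In MSI-X mode one vector per CPU is registered for the I/O event
 * queues plus one for the MCC queue; otherwise a single shared
 * handler is requested on the device's legacy irq. On failure any
 * vectors already requested are freed again.
 */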
649 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
650 {
651         struct pci_dev *pcidev = phba->pcidev;
652         struct hwi_controller *phwi_ctrlr;
653         struct hwi_context_memory *phwi_context;
654         int ret, msix_vec, i, j;
655         char desc[32];
656
657         phwi_ctrlr = phba->phwi_ctrlr;
658         phwi_context = phwi_ctrlr->phwi_ctxt;
659
660         if (phba->msix_enabled) {
661                 for (i = 0; i < phba->num_cpus; i++) {
662                         sprintf(desc, "beiscsi_msix_%04x", i);
663                         msix_vec = phba->msix_entries[i].vector;
664                         ret = request_irq(msix_vec, be_isr_msix, 0, desc,
665                                           &phwi_context->be_eq[i]);
666                         if (ret) {
667                                 shost_printk(KERN_ERR, phba->shost,
668                                              "beiscsi_init_irqs-Failed to "
669                                              "register msix for i = %d\n", i);
670                                 if (!i)
671                                         return ret;
672                                 goto free_msix_irqs;
673                         }
674                 }
675                 msix_vec = phba->msix_entries[i].vector;
676                 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
677                                   &phwi_context->be_eq[i]);
678                 if (ret) {
679                         shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
680                                      "Failed to register beiscsi_msix_mcc\n");
682                         goto free_msix_irqs;
683                 }
684
685         } else {
686                 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
687                                   "beiscsi", phba);
688                 if (ret) {
689                         shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
690                                      "Failed to register irq\n");
691                         return ret;
692                 }
693         }
694         return 0;
695 free_msix_irqs:
696         for (j = i - 1; j >= 0; j--)
697                 free_irq(phba->msix_entries[j].vector, &phwi_context->be_eq[j]);
698         return ret;
699 }
700
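/**
 * hwi_ring_cq_db - ring the completion queue doorbell
 * @phba: driver instance
 * @id: CQ ring id
 * @num_processed: number of CQEs consumed since the last ring
 * @rearm: re-arm the CQ for further completions
 * @event: unused by this helper
 */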
701 static void hwi_ring_cq_db(struct beiscsi_hba *phba,
702                            unsigned int id, unsigned int num_processed,
703                            unsigned char rearm, unsigned char event)
704 {
705         u32 val = 0;
706         val |= id & DB_CQ_RING_ID_MASK;
707         if (rearm)
708                 val |= 1 << DB_CQ_REARM_SHIFT;
709         val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
710         iowrite32(val, phba->db_va + DB_CQ_OFFSET);
711 }
712
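/**
 * beiscsi_process_async_pdu - hand an unsolicited PDU to libiscsi
 * @beiscsi_conn: connection the PDU arrived on
 * @phba: driver instance
 * @cid: connection id
 * @ppdu: PDU header as posted by the hardware
 * @pdu_len: length of the PDU header
 * @pbuffer: data segment of the PDU, if any
 * @buf_len: length of the data segment
 *
 * Fixes up the ITT for login/text responses, drops the data segment
 * for NOP-In and completes the PDU under the session lock. Returns 1
 * for an unrecognised opcode, 0 otherwise.
 */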
713 static unsigned int
714 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
715                           struct beiscsi_hba *phba,
716                           unsigned short cid,
717                           struct pdu_base *ppdu,
718                           unsigned long pdu_len,
719                           void *pbuffer, unsigned long buf_len)
720 {
721         struct iscsi_conn *conn = beiscsi_conn->conn;
722         struct iscsi_session *session = conn->session;
723         struct iscsi_task *task;
724         struct beiscsi_io_task *io_task;
725         struct iscsi_hdr *login_hdr;
726
727         switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
728                                                 PDUBASE_OPCODE_MASK) {
729         case ISCSI_OP_NOOP_IN:
730                 pbuffer = NULL;
731                 buf_len = 0;
732                 break;
733         case ISCSI_OP_ASYNC_EVENT:
734                 break;
735         case ISCSI_OP_REJECT:
736                 WARN_ON(!pbuffer);
737                 WARN_ON(!(buf_len == 48));
738                 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
739                 break;
740         case ISCSI_OP_LOGIN_RSP:
741         case ISCSI_OP_TEXT_RSP:
742                 task = conn->login_task;
743                 io_task = task->dd_data;
744                 login_hdr = (struct iscsi_hdr *)ppdu;
745                 login_hdr->itt = io_task->libiscsi_itt;
746                 break;
747         default:
748                 shost_printk(KERN_WARNING, phba->shost,
749                              "Unrecognized opcode 0x%x in async msg\n",
750                              (ppdu->
751                              dw[offsetof(struct amap_pdu_base, opcode) / 32]
752                                                 & PDUBASE_OPCODE_MASK));
753                 return 1;
754         }
755
756         spin_lock_bh(&session->lock);
757         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
758         spin_unlock_bh(&session->lock);
759         return 0;
760 }
761
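/**
 * alloc_io_sgl_handle - take an SGL handle from the I/O pool
 * @phba: driver instance
 *
 * Hands out the handle at io_sgl_alloc_index and advances the index,
 * wrapping at ios_per_ctrl. Returns NULL when the pool is exhausted.
 */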
762 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
763 {
764         struct sgl_handle *psgl_handle;
765
766         if (phba->io_sgl_hndl_avbl) {
767                 SE_DEBUG(DBG_LVL_8,
768                          "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
769                          phba->io_sgl_alloc_index);
770                 psgl_handle = phba->io_sgl_hndl_base[phba->
771                                                 io_sgl_alloc_index];
772                 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
773                 phba->io_sgl_hndl_avbl--;
774                 if (phba->io_sgl_alloc_index == (phba->params.
775                                                  ios_per_ctrl - 1))
776                         phba->io_sgl_alloc_index = 0;
777                 else
778                         phba->io_sgl_alloc_index++;
779         } else
780                 psgl_handle = NULL;
781         return psgl_handle;
782 }
783
784 static void
785 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
786 {
787         SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle,io_sgl_free_index=%d\n",
788                  phba->io_sgl_free_index);
789         if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
790                 /*
791                  * this can happen if clean_task is called on a task that
792                  * failed in xmit_task or alloc_pdu.
793                  */
794                  SE_DEBUG(DBG_LVL_8,
795                          "Double Free in IO SGL io_sgl_free_index=%d,"
796                          "value there=%p\n", phba->io_sgl_free_index,
797                          phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
798                 return;
799         }
800         phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
801         phba->io_sgl_hndl_avbl++;
802         if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
803                 phba->io_sgl_free_index = 0;
804         else
805                 phba->io_sgl_free_index++;
806 }
807
808 /**
809  * alloc_wrb_handle - To allocate a wrb handle
810  * @phba: The hba pointer
811  * @cid: The cid to use for allocation
812  *
813  * This happens under session_lock until submission to chip
814  */
815 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
816 {
817         struct hwi_wrb_context *pwrb_context;
818         struct hwi_controller *phwi_ctrlr;
819         struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
820
821         phwi_ctrlr = phba->phwi_ctrlr;
822         pwrb_context = &phwi_ctrlr->wrb_context[cid];
823         if (pwrb_context->wrb_handles_available >= 2) {
824                 pwrb_handle = pwrb_context->pwrb_handle_base[
825                                             pwrb_context->alloc_index];
826                 pwrb_context->wrb_handles_available--;
827                 if (pwrb_context->alloc_index ==
828                                                 (phba->params.wrbs_per_cxn - 1))
829                         pwrb_context->alloc_index = 0;
830                 else
831                         pwrb_context->alloc_index++;
832                 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
833                                                 pwrb_context->alloc_index];
834                 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
835         } else
836                 pwrb_handle = NULL;
837         return pwrb_handle;
838 }
839
840 /**
841  * free_wrb_handle - To free the wrb handle back to pool
842  * @phba: The hba pointer
843  * @pwrb_context: The context to free from
844  * @pwrb_handle: The wrb_handle to free
845  *
846  * This happens under session_lock until submission to chip
847  */
848 static void
849 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
850                 struct wrb_handle *pwrb_handle)
851 {
852         pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
853         pwrb_context->wrb_handles_available++;
854         if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
855                 pwrb_context->free_index = 0;
856         else
857                 pwrb_context->free_index++;
858
859         SE_DEBUG(DBG_LVL_8,
860                  "FREE WRB: pwrb_handle=%p free_index=0x%x "
861                  "wrb_handles_available=%d\n",
862                  pwrb_handle, pwrb_context->free_index,
863                  pwrb_context->wrb_handles_available);
864 }
865
866 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
867 {
868         struct sgl_handle *psgl_handle;
869
870         if (phba->eh_sgl_hndl_avbl) {
871                 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
872                 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
873                 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
874                          phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
875                 phba->eh_sgl_hndl_avbl--;
876                 if (phba->eh_sgl_alloc_index ==
877                     (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
878                      1))
879                         phba->eh_sgl_alloc_index = 0;
880                 else
881                         phba->eh_sgl_alloc_index++;
882         } else
883                 psgl_handle = NULL;
884         return psgl_handle;
885 }
886
887 void
888 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
889 {
890
891         SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
892                              phba->eh_sgl_free_index);
893         if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
894                 /*
895                  * this can happen if clean_task is called on a task that
896                  * failed in xmit_task or alloc_pdu.
897                  */
898                 SE_DEBUG(DBG_LVL_8,
899                          "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
900                          phba->eh_sgl_free_index);
901                 return;
902         }
903         phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
904         phba->eh_sgl_hndl_avbl++;
905         if (phba->eh_sgl_free_index ==
906             (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
907                 phba->eh_sgl_free_index = 0;
908         else
909                 phba->eh_sgl_free_index++;
910 }
911
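/**
 * be_complete_io - complete a SCSI command from a solicited CQE
 * @beiscsi_conn: connection the command ran on
 * @task: iSCSI task being completed
 * @psol: solicited completion entry from the hardware
 *
 * Extracts status, flags and residual counts from the CQE, copies
 * sense data on CHECK CONDITION, updates the rx byte counters for
 * reads, unmaps the DMA mapping and finishes the command through
 * iscsi_complete_scsi_task().
 */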
912 static void
913 be_complete_io(struct beiscsi_conn *beiscsi_conn,
914                struct iscsi_task *task, struct sol_cqe *psol)
915 {
916         struct beiscsi_io_task *io_task = task->dd_data;
917         struct be_status_bhs *sts_bhs =
918                                 (struct be_status_bhs *)io_task->cmd_bhs;
919         struct iscsi_conn *conn = beiscsi_conn->conn;
920         unsigned int sense_len;
921         unsigned char *sense;
922         u32 resid = 0, exp_cmdsn, max_cmdsn;
923         u8 rsp, status, flags;
924
925         exp_cmdsn = (psol->
926                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
927                         & SOL_EXP_CMD_SN_MASK);
928         max_cmdsn = ((psol->
929                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
930                         & SOL_EXP_CMD_SN_MASK) +
931                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
932                                 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
933         rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
934                                                 & SOL_RESP_MASK) >> 16);
935         status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
936                                                 & SOL_STS_MASK) >> 8);
937         flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
938                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
939
940         task->sc->result = (DID_OK << 16) | status;
941         if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
942                 task->sc->result = DID_ERROR << 16;
943                 goto unmap;
944         }
945
946         /* bidi not initially supported */
947         if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
948                 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
949                                 32] & SOL_RES_CNT_MASK);
950
951                 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
952                         task->sc->result = DID_ERROR << 16;
953
954                 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
955                         scsi_set_resid(task->sc, resid);
956                         if (!status && (scsi_bufflen(task->sc) - resid <
957                             task->sc->underflow))
958                                 task->sc->result = DID_ERROR << 16;
959                 }
960         }
961
962         if (status == SAM_STAT_CHECK_CONDITION) {
963                 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
964                 sense = sts_bhs->sense_info + sizeof(unsigned short);
965                 sense_len = be16_to_cpu(*slen);
966                 memcpy(task->sc->sense_buffer, sense,
967                        min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
968         }
969
970         if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
971                 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
972                                                         & SOL_RES_CNT_MASK)
973                          conn->rxdata_octets += (psol->
974                              dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
975                              & SOL_RES_CNT_MASK);
976         }
977 unmap:
978         scsi_dma_unmap(io_task->scsi_cmnd);
979         iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
980 }
981
982 static void
983 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
984                    struct iscsi_task *task, struct sol_cqe *psol)
985 {
986         struct iscsi_logout_rsp *hdr;
987         struct beiscsi_io_task *io_task = task->dd_data;
988         struct iscsi_conn *conn = beiscsi_conn->conn;
989
990         hdr = (struct iscsi_logout_rsp *)task->hdr;
991         hdr->opcode = ISCSI_OP_LOGOUT_RSP;
992         hdr->t2wait = 5;
993         hdr->t2retain = 0;
994         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
995                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
996         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
997                                         32] & SOL_RESP_MASK);
998         hdr->exp_cmdsn = cpu_to_be32(psol->
999                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1000                                         & SOL_EXP_CMD_SN_MASK);
1001         hdr->max_cmdsn = be32_to_cpu((psol->
1002                          dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1003                                         & SOL_EXP_CMD_SN_MASK) +
1004                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1005                                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1006         hdr->dlength[0] = 0;
1007         hdr->dlength[1] = 0;
1008         hdr->dlength[2] = 0;
1009         hdr->hlength = 0;
1010         hdr->itt = io_task->libiscsi_itt;
1011         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1012 }
1013
1014 static void
1015 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
1016                 struct iscsi_task *task, struct sol_cqe *psol)
1017 {
1018         struct iscsi_tm_rsp *hdr;
1019         struct iscsi_conn *conn = beiscsi_conn->conn;
1020         struct beiscsi_io_task *io_task = task->dd_data;
1021
1022         hdr = (struct iscsi_tm_rsp *)task->hdr;
1023         hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
1024         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1025                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
1026         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
1027                                         32] & SOL_RESP_MASK);
1028         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
1029                                     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
1030         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
1031                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
1032                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1033                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1034         hdr->itt = io_task->libiscsi_itt;
1035         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1036 }
1037
1038 static void
1039 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1040                        struct beiscsi_hba *phba, struct sol_cqe *psol)
1041 {
1042         struct hwi_wrb_context *pwrb_context;
1043         struct wrb_handle *pwrb_handle = NULL;
1044         struct hwi_controller *phwi_ctrlr;
1045         struct iscsi_task *task;
1046         struct beiscsi_io_task *io_task;
1047         struct iscsi_conn *conn = beiscsi_conn->conn;
1048         struct iscsi_session *session = conn->session;
1049
1050         phwi_ctrlr = phba->phwi_ctrlr;
1051         pwrb_context = &phwi_ctrlr->wrb_context[((psol->
1052                                 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1053                                 SOL_CID_MASK) >> 6) -
1054                                 phba->fw_config.iscsi_cid_start];
1055         pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1056                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
1057                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
1058         task = pwrb_handle->pio_handle;
1059
1060         io_task = task->dd_data;
1061         spin_lock(&phba->mgmt_sgl_lock);
1062         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
1063         spin_unlock(&phba->mgmt_sgl_lock);
1064         spin_lock_bh(&session->lock);
1065         free_wrb_handle(phba, pwrb_context, pwrb_handle);
1066         spin_unlock_bh(&session->lock);
1067 }
1068
1069 static void
1070 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1071                        struct iscsi_task *task, struct sol_cqe *psol)
1072 {
1073         struct iscsi_nopin *hdr;
1074         struct iscsi_conn *conn = beiscsi_conn->conn;
1075         struct beiscsi_io_task *io_task = task->dd_data;
1076
1077         hdr = (struct iscsi_nopin *)task->hdr;
1078         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1079                         & SOL_FLAGS_MASK) >> 24) | 0x80;
1080         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
1081                                      i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
1082         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
1083                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
1084                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1085                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1086         hdr->opcode = ISCSI_OP_NOOP_IN;
1087         hdr->itt = io_task->libiscsi_itt;
1088         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1089 }
1090
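/**
 * hwi_complete_cmd - dispatch a solicited completion to its handler
 * @beiscsi_conn: connection the completion belongs to
 * @phba: driver instance
 * @psol: solicited CQE
 *
 * Looks up the WRB handle (and thus the iSCSI task) referenced by the
 * CQE and, based on the WRB type, completes it as I/O, logout, TMF or
 * NOP-In under the session lock.
 */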
1091 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1092                              struct beiscsi_hba *phba, struct sol_cqe *psol)
1093 {
1094         struct hwi_wrb_context *pwrb_context;
1095         struct wrb_handle *pwrb_handle;
1096         struct iscsi_wrb *pwrb = NULL;
1097         struct hwi_controller *phwi_ctrlr;
1098         struct iscsi_task *task;
1099         unsigned int type;
1100         struct iscsi_conn *conn = beiscsi_conn->conn;
1101         struct iscsi_session *session = conn->session;
1102
1103         phwi_ctrlr = phba->phwi_ctrlr;
1104         pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
1105                                 (struct amap_sol_cqe, cid) / 32]
1106                                 & SOL_CID_MASK) >> 6) -
1107                                 phba->fw_config.iscsi_cid_start];
1108         pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1109                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
1110                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
1111         task = pwrb_handle->pio_handle;
1112         pwrb = pwrb_handle->pwrb;
1113         type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
1114                                  WRB_TYPE_MASK) >> 28;
1115
1116         spin_lock_bh(&session->lock);
1117         switch (type) {
1118         case HWH_TYPE_IO:
1119         case HWH_TYPE_IO_RD:
1120                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
1121                      ISCSI_OP_NOOP_OUT)
1122                         be_complete_nopin_resp(beiscsi_conn, task, psol);
1123                 else
1124                         be_complete_io(beiscsi_conn, task, psol);
1125                 break;
1126
1127         case HWH_TYPE_LOGOUT:
1128                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1129                         be_complete_logout(beiscsi_conn, task, psol);
1130                 else
1131                         be_complete_tmf(beiscsi_conn, task, psol);
1132
1133                 break;
1134
1135         case HWH_TYPE_LOGIN:
1136                 SE_DEBUG(DBG_LVL_1,
1137                          "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
1138                          "- Solicited path\n");
1139                 break;
1140
1141         case HWH_TYPE_NOP:
1142                 be_complete_nopin_resp(beiscsi_conn, task, psol);
1143                 break;
1144
1145         default:
1146                 shost_printk(KERN_WARNING, phba->shost,
1147                                 "In hwi_complete_cmd, unknown type = %d "
1148                                 "wrb_index 0x%x CID 0x%x\n", type,
1149                                 ((psol->dw[offsetof(struct amap_iscsi_wrb,
1150                                 type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1151                                 ((psol->dw[offsetof(struct amap_sol_cqe,
1152                                 cid) / 32] & SOL_CID_MASK) >> 6));
1153                 break;
1154         }
1155
1156         spin_unlock_bh(&session->lock);
1157 }
1158
1159 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1160                                           *pasync_ctx, unsigned int is_header,
1161                                           unsigned int host_write_ptr)
1162 {
1163         if (is_header)
1164                 return &pasync_ctx->async_entry[host_write_ptr].
1165                     header_busy_list;
1166         else
1167                 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1168 }
1169
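/**
 * hwi_get_async_handle - find the async PDU handle for a default PDU CQE
 * @phba: driver instance
 * @beiscsi_conn: connection the PDU belongs to
 * @pasync_ctx: async PDU context of the controller
 * @pdpdu_cqe: default PDU completion entry
 * @pcq_index: out parameter, CQE index reported by the hardware
 *
 * Reconstructs the buffer address from the CQE, locates the matching
 * handle on the header or data busy list and records its CRI, length
 * and type before returning it, or NULL for an unexpected CQE code.
 */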
1170 static struct async_pdu_handle *
1171 hwi_get_async_handle(struct beiscsi_hba *phba,
1172                      struct beiscsi_conn *beiscsi_conn,
1173                      struct hwi_async_pdu_context *pasync_ctx,
1174                      struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1175 {
1176         struct be_bus_address phys_addr;
1177         struct list_head *pbusy_list;
1178         struct async_pdu_handle *pasync_handle = NULL;
1179         int buffer_len = 0;
1180         unsigned char buffer_index = -1;
1181         unsigned char is_header = 0;
1182
1183         phys_addr.u.a32.address_lo =
1184             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1185             ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1186                                                 & PDUCQE_DPL_MASK) >> 16);
1187         phys_addr.u.a32.address_hi =
1188             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1189
1190         phys_addr.u.a64.address =
1191                         *((unsigned long long *)(&phys_addr.u.a64.address));
1192
1193         switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1194                         & PDUCQE_CODE_MASK) {
1195         case UNSOL_HDR_NOTIFY:
1196                 is_header = 1;
1197
1198                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1199                         (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1200                         index) / 32] & PDUCQE_INDEX_MASK));
1201
1202                 buffer_len = (unsigned int)(phys_addr.u.a64.address -
1203                                 pasync_ctx->async_header.pa_base.u.a64.address);
1204
1205                 buffer_index = buffer_len /
1206                                 pasync_ctx->async_header.buffer_size;
1207
1208                 break;
1209         case UNSOL_DATA_NOTIFY:
1210                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1211                                         dw[offsetof(struct amap_i_t_dpdu_cqe,
1212                                         index) / 32] & PDUCQE_INDEX_MASK));
1213                 buffer_len = (unsigned long)(phys_addr.u.a64.address -
1214                                         pasync_ctx->async_data.pa_base.u.
1215                                         a64.address);
1216                 buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
1217                 break;
1218         default:
1219                 pbusy_list = NULL;
1220                 shost_printk(KERN_WARNING, phba->shost,
1221                         "Unexpected code=%d\n",
1222                          pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1223                                         code) / 32] & PDUCQE_CODE_MASK);
1224                 return NULL;
1225         }
1226
1227         WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
1228         WARN_ON(list_empty(pbusy_list));
1229         list_for_each_entry(pasync_handle, pbusy_list, link) {
1230                 WARN_ON(pasync_handle->consumed);
1231                 if (pasync_handle->index == buffer_index)
1232                         break;
1233         }
1234
1235         WARN_ON(!pasync_handle);
1236
1237         pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1238                                              phba->fw_config.iscsi_cid_start;
1239         pasync_handle->is_header = is_header;
1240         pasync_handle->buffer_len = ((pdpdu_cqe->
1241                         dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1242                         & PDUCQE_DPL_MASK) >> 16);
1243
1244         *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1245                         index) / 32] & PDUCQE_INDEX_MASK);
1246         return pasync_handle;
1247 }
1248
1249 static unsigned int
1250 hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1251                            unsigned int is_header, unsigned int cq_index)
1252 {
1253         struct list_head *pbusy_list;
1254         struct async_pdu_handle *pasync_handle;
1255         unsigned int num_entries, writables = 0;
1256         unsigned int *pep_read_ptr, *pwritables;
1257
1258
1259         if (is_header) {
1260                 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1261                 pwritables = &pasync_ctx->async_header.writables;
1262                 num_entries = pasync_ctx->async_header.num_entries;
1263         } else {
1264                 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1265                 pwritables = &pasync_ctx->async_data.writables;
1266                 num_entries = pasync_ctx->async_data.num_entries;
1267         }
1268
1269         while ((*pep_read_ptr) != cq_index) {
1270                 (*pep_read_ptr)++;
1271                 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1272
1273                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1274                                                      *pep_read_ptr);
1275                 if (writables == 0)
1276                         WARN_ON(list_empty(pbusy_list));
1277
1278                 if (!list_empty(pbusy_list)) {
1279                         pasync_handle = list_entry(pbusy_list->next,
1280                                                    struct async_pdu_handle,
1281                                                    link);
1282                         WARN_ON(!pasync_handle);
1283                         pasync_handle->consumed = 1;
1284                 }
1285
1286                 writables++;
1287         }
1288
1289         if (!writables) {
1290                 SE_DEBUG(DBG_LVL_1,
1291                          "Duplicate notification received - index 0x%x!!\n",
1292                          cq_index);
1293                 WARN_ON(1);
1294         }
1295
1296         *pwritables = *pwritables + writables;
1297         return 0;
1298 }
1299
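/*
 * hwi_free_async_msg - release all buffers gathered for connection @cri.
 * The first handle on the wait queue (the PDU header) goes back to the
 * header free list, the remaining handles to the data free list, and the
 * per-connection wait-queue bookkeeping is reset.
 */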
1300 static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1301                                        unsigned int cri)
1302 {
1303         struct hwi_controller *phwi_ctrlr;
1304         struct hwi_async_pdu_context *pasync_ctx;
1305         struct async_pdu_handle *pasync_handle, *tmp_handle;
1306         struct list_head *plist;
1307         unsigned int i = 0;
1308
1309         phwi_ctrlr = phba->phwi_ctrlr;
1310         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1311
1312         plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1313
1314         list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1315                 list_del(&pasync_handle->link);
1316
1317                 if (i == 0) {
1318                         list_add_tail(&pasync_handle->link,
1319                                       &pasync_ctx->async_header.free_list);
1320                         pasync_ctx->async_header.free_entries++;
1321                         i++;
1322                 } else {
1323                         list_add_tail(&pasync_handle->link,
1324                                       &pasync_ctx->async_data.free_list);
1325                         pasync_ctx->async_data.free_entries++;
1326                         i++;
1327                 }
1328         }
1329
1330         INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1331         pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1332         pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1333         return 0;
1334 }
1335
1336 static struct phys_addr *
1337 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1338                      unsigned int is_header, unsigned int host_write_ptr)
1339 {
1340         struct phys_addr *pasync_sge = NULL;
1341
1342         if (is_header)
1343                 pasync_sge = pasync_ctx->async_header.ring_base;
1344         else
1345                 pasync_sge = pasync_ctx->async_data.ring_base;
1346
1347         return pasync_sge + host_write_ptr;
1348 }
1349
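/*
 * hwi_post_async_buffers - replenish a default PDU ring.
 * Takes min(writables, free_entries) handles, rounded down to a multiple
 * of eight (presumably a chip requirement), writes their DMA addresses
 * into the ring SGEs, moves them to the busy lists, updates the ring
 * accounting and rings the RXULP doorbell to hand the buffers back to
 * the adapter.
 */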
1350 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1351                                    unsigned int is_header)
1352 {
1353         struct hwi_controller *phwi_ctrlr;
1354         struct hwi_async_pdu_context *pasync_ctx;
1355         struct async_pdu_handle *pasync_handle;
1356         struct list_head *pfree_link, *pbusy_list;
1357         struct phys_addr *pasync_sge;
1358         unsigned int ring_id, num_entries;
1359         unsigned int host_write_num;
1360         unsigned int writables;
1361         unsigned int i = 0;
1362         u32 doorbell = 0;
1363
1364         phwi_ctrlr = phba->phwi_ctrlr;
1365         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1366
1367         if (is_header) {
1368                 num_entries = pasync_ctx->async_header.num_entries;
1369                 writables = min(pasync_ctx->async_header.writables,
1370                                 pasync_ctx->async_header.free_entries);
1371                 pfree_link = pasync_ctx->async_header.free_list.next;
1372                 host_write_num = pasync_ctx->async_header.host_write_ptr;
1373                 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1374         } else {
1375                 num_entries = pasync_ctx->async_data.num_entries;
1376                 writables = min(pasync_ctx->async_data.writables,
1377                                 pasync_ctx->async_data.free_entries);
1378                 pfree_link = pasync_ctx->async_data.free_list.next;
1379                 host_write_num = pasync_ctx->async_data.host_write_ptr;
1380                 ring_id = phwi_ctrlr->default_pdu_data.id;
1381         }
1382
1383         writables = (writables / 8) * 8;
1384         if (writables) {
1385                 for (i = 0; i < writables; i++) {
1386                         pbusy_list =
1387                             hwi_get_async_busy_list(pasync_ctx, is_header,
1388                                                     host_write_num);
1389                         pasync_handle =
1390                             list_entry(pfree_link, struct async_pdu_handle,
1391                                                                 link);
1392                         WARN_ON(!pasync_handle);
1393                         pasync_handle->consumed = 0;
1394
1395                         pfree_link = pfree_link->next;
1396
1397                         pasync_sge = hwi_get_ring_address(pasync_ctx,
1398                                                 is_header, host_write_num);
1399
1400                         pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1401                         pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1402
1403                         list_move(&pasync_handle->link, pbusy_list);
1404
1405                         host_write_num++;
1406                         host_write_num = host_write_num % num_entries;
1407                 }
1408
1409                 if (is_header) {
1410                         pasync_ctx->async_header.host_write_ptr =
1411                                                         host_write_num;
1412                         pasync_ctx->async_header.free_entries -= writables;
1413                         pasync_ctx->async_header.writables -= writables;
1414                         pasync_ctx->async_header.busy_entries += writables;
1415                 } else {
1416                         pasync_ctx->async_data.host_write_ptr = host_write_num;
1417                         pasync_ctx->async_data.free_entries -= writables;
1418                         pasync_ctx->async_data.writables -= writables;
1419                         pasync_ctx->async_data.busy_entries += writables;
1420                 }
1421
1422                 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1423                 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1424                 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1425                 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1426                                         << DB_DEF_PDU_CQPROC_SHIFT;
1427
1428                 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1429         }
1430 }
1431
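/*
 * hwi_flush_default_pdu_buffer - drop a default PDU data buffer.
 * Used on digest errors: the CQE's handle is looked up, any unconsumed
 * slot is credited back, everything queued for the connection is freed
 * and fresh buffers are posted to the ring.
 */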
1432 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1433                                          struct beiscsi_conn *beiscsi_conn,
1434                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1435 {
1436         struct hwi_controller *phwi_ctrlr;
1437         struct hwi_async_pdu_context *pasync_ctx;
1438         struct async_pdu_handle *pasync_handle = NULL;
1439         unsigned int cq_index = -1;
1440
1441         phwi_ctrlr = phba->phwi_ctrlr;
1442         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1443
1444         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1445                                              pdpdu_cqe, &cq_index);
1446         BUG_ON(pasync_handle->is_header != 0);
1447         if (pasync_handle->consumed == 0)
1448                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1449                                            cq_index);
1450
1451         hwi_free_async_msg(phba, pasync_handle->cri);
1452         hwi_post_async_buffers(phba, pasync_handle->is_header);
1453 }
1454
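/*
 * hwi_fwd_async_msg - hand a reassembled unsolicited PDU to libiscsi.
 * The first handle on the wait queue holds the header; any following
 * data buffers are coalesced into the first data buffer before the PDU
 * is passed to beiscsi_process_async_pdu().  The handles are freed once
 * the PDU has been accepted.
 */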
1455 static unsigned int
1456 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1457                   struct beiscsi_hba *phba,
1458                   struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1459 {
1460         struct list_head *plist;
1461         struct async_pdu_handle *pasync_handle;
1462         void *phdr = NULL;
1463         unsigned int hdr_len = 0, buf_len = 0;
1464         unsigned int status, index = 0, offset = 0;
1465         void *pfirst_buffer = NULL;
1466         unsigned int num_buf = 0;
1467
1468         plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1469
1470         list_for_each_entry(pasync_handle, plist, link) {
1471                 if (index == 0) {
1472                         phdr = pasync_handle->pbuffer;
1473                         hdr_len = pasync_handle->buffer_len;
1474                 } else {
1475                         buf_len = pasync_handle->buffer_len;
1476                         if (!num_buf) {
1477                                 pfirst_buffer = pasync_handle->pbuffer;
1478                                 num_buf++;
1479                         }
1480                         memcpy(pfirst_buffer + offset,
1481                                pasync_handle->pbuffer, buf_len);
1482                         offset += buf_len;
1483                 }
1484                 index++;
1485         }
1486
1487         status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1488                                            (beiscsi_conn->beiscsi_conn_cid -
1489                                             phba->fw_config.iscsi_cid_start),
1490                                             phdr, hdr_len, pfirst_buffer,
1491                                             buf_len);
1492
1493         if (status == 0)
1494                 hwi_free_async_msg(phba, cri);
1495         return 0;
1496 }
1497
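/*
 * hwi_gather_async_pdu - account for one received async buffer.
 * A header buffer records the expected payload length taken from the
 * PDU's DataSegmentLength field and is forwarded immediately when no
 * payload follows; a data buffer is appended to the connection's wait
 * queue (provided a header has been seen) and the PDU is forwarded once
 * all expected bytes have arrived.
 */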
1498 static unsigned int
1499 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1500                      struct beiscsi_hba *phba,
1501                      struct async_pdu_handle *pasync_handle)
1502 {
1503         struct hwi_async_pdu_context *pasync_ctx;
1504         struct hwi_controller *phwi_ctrlr;
1505         unsigned int bytes_needed = 0, status = 0;
1506         unsigned short cri = pasync_handle->cri;
1507         struct pdu_base *ppdu;
1508
1509         phwi_ctrlr = phba->phwi_ctrlr;
1510         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1511
1512         list_del(&pasync_handle->link);
1513         if (pasync_handle->is_header) {
1514                 pasync_ctx->async_header.busy_entries--;
1515                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1516                         hwi_free_async_msg(phba, cri);
1517                         BUG();
1518                 }
1519
1520                 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1521                 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1522                 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1523                                 (unsigned short)pasync_handle->buffer_len;
1524                 list_add_tail(&pasync_handle->link,
1525                               &pasync_ctx->async_entry[cri].wait_queue.list);
1526
1527                 ppdu = pasync_handle->pbuffer;
1528                 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1529                         data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1530                         0xFFFF0000) | ((be16_to_cpu((ppdu->
1531                         dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1532                         & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1533
1534                 if (status == 0) {
1535                         pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1536                             bytes_needed;
1537
1538                         if (bytes_needed == 0)
1539                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1540                                                            pasync_ctx, cri);
1541                 }
1542         } else {
1543                 pasync_ctx->async_data.busy_entries--;
1544                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1545                         list_add_tail(&pasync_handle->link,
1546                                       &pasync_ctx->async_entry[cri].wait_queue.
1547                                       list);
1548                         pasync_ctx->async_entry[cri].wait_queue.
1549                                 bytes_received +=
1550                                 (unsigned short)pasync_handle->buffer_len;
1551
1552                         if (pasync_ctx->async_entry[cri].wait_queue.
1553                             bytes_received >=
1554                             pasync_ctx->async_entry[cri].wait_queue.
1555                             bytes_needed)
1556                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1557                                                            pasync_ctx, cri);
1558                 }
1559         }
1560         return status;
1561 }
1562
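/*
 * hwi_process_default_pdu_ring - handle an UNSOL_HDR/UNSOL_DATA CQE:
 * resolve the async handle, credit the consumed slot if needed, gather
 * or forward the PDU and re-post buffers to the default PDU ring.
 */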
1563 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1564                                          struct beiscsi_hba *phba,
1565                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1566 {
1567         struct hwi_controller *phwi_ctrlr;
1568         struct hwi_async_pdu_context *pasync_ctx;
1569         struct async_pdu_handle *pasync_handle = NULL;
1570         unsigned int cq_index = -1;
1571
1572         phwi_ctrlr = phba->phwi_ctrlr;
1573         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1574         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1575                                              pdpdu_cqe, &cq_index);
1576
1577         if (pasync_handle->consumed == 0)
1578                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1579                                            cq_index);
1580         hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1581         hwi_post_async_buffers(phba, pasync_handle->is_header);
1582 }
1583
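/*
 * beiscsi_process_mcc_isr - drain the MCC completion queue.
 * Async link-state events are dispatched to
 * beiscsi_async_link_state_process(), ordinary completions to
 * be_mcc_compl_process_isr(); the CQ doorbell is rung every 32 entries
 * and re-armed once at the end.
 */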
1584 static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1585 {
1586         struct be_queue_info *mcc_cq;
1587         struct  be_mcc_compl *mcc_compl;
1588         unsigned int num_processed = 0;
1589
1590         mcc_cq = &phba->ctrl.mcc_obj.cq;
1591         mcc_compl = queue_tail_node(mcc_cq);
1592         mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1593         while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1594
1595                 if (num_processed >= 32) {
1596                         hwi_ring_cq_db(phba, mcc_cq->id,
1597                                         num_processed, 0, 0);
1598                         num_processed = 0;
1599                 }
1600                 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1601                         /* Interpret flags as an async trailer */
1602                         if (is_link_state_evt(mcc_compl->flags))
1603                                 /* Interpret compl as an async link evt */
1604                                 beiscsi_async_link_state_process(phba,
1605                                 (struct be_async_event_link_state *) mcc_compl);
1606                         else
1607                                 SE_DEBUG(DBG_LVL_1,
1608                                         " Unsupported Async Event, flags"
1609                                         " = 0x%08x\n", mcc_compl->flags);
1610                 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1611                         be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1612                         atomic_dec(&phba->ctrl.mcc_obj.q.used);
1613                 }
1614
1615                 mcc_compl->flags = 0;
1616                 queue_tail_inc(mcc_cq);
1617                 mcc_compl = queue_tail_node(mcc_cq);
1618                 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1619                 num_processed++;
1620         }
1621
1622         if (num_processed > 0)
1623                 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1624
1625 }
1626
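/*
 * beiscsi_process_cq - main completion path for an event queue's CQ.
 * Each valid CQE's CID is mapped back to its connection, then the
 * completion code is dispatched: solicited command completions, driver
 * messages, unsolicited header/data PDUs, and a range of error codes
 * that are either logged or escalated via iscsi_conn_failure().  The
 * doorbell is rung every 32 CQEs and re-armed at the end; returns the
 * total number of CQEs processed.
 */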
1627 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1628 {
1629         struct be_queue_info *cq;
1630         struct sol_cqe *sol;
1631         struct dmsg_cqe *dmsg;
1632         unsigned int num_processed = 0;
1633         unsigned int tot_nump = 0;
1634         struct beiscsi_conn *beiscsi_conn;
1635         struct beiscsi_endpoint *beiscsi_ep;
1636         struct iscsi_endpoint *ep;
1637         struct beiscsi_hba *phba;
1638
1639         cq = pbe_eq->cq;
1640         sol = queue_tail_node(cq);
1641         phba = pbe_eq->phba;
1642
1643         while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1644                CQE_VALID_MASK) {
1645                 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1646
1647                 ep = phba->ep_array[(u32) ((sol->
1648                                    dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1649                                    SOL_CID_MASK) >> 6) -
1650                                    phba->fw_config.iscsi_cid_start];
1651
1652                 beiscsi_ep = ep->dd_data;
1653                 beiscsi_conn = beiscsi_ep->conn;
1654
1655                 if (num_processed >= 32) {
1656                         hwi_ring_cq_db(phba, cq->id,
1657                                         num_processed, 0, 0);
1658                         tot_nump += num_processed;
1659                         num_processed = 0;
1660                 }
1661
1662                 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1663                         32] & CQE_CODE_MASK) {
1664                 case SOL_CMD_COMPLETE:
1665                         hwi_complete_cmd(beiscsi_conn, phba, sol);
1666                         break;
1667                 case DRIVERMSG_NOTIFY:
1668                         SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
1669                         dmsg = (struct dmsg_cqe *)sol;
1670                         hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1671                         break;
1672                 case UNSOL_HDR_NOTIFY:
1673                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
1674                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1675                                              (struct i_t_dpdu_cqe *)sol);
1676                         break;
1677                 case UNSOL_DATA_NOTIFY:
1678                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1679                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1680                                              (struct i_t_dpdu_cqe *)sol);
1681                         break;
1682                 case CXN_INVALIDATE_INDEX_NOTIFY:
1683                 case CMD_INVALIDATED_NOTIFY:
1684                 case CXN_INVALIDATE_NOTIFY:
1685                         SE_DEBUG(DBG_LVL_1,
1686                                  "Ignoring CQ Error notification for cmd/cxn"
1687                                  " invalidate\n");
1688                         break;
1689                 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1690                 case CMD_KILLED_INVALID_STATSN_RCVD:
1691                 case CMD_KILLED_INVALID_R2T_RCVD:
1692                 case CMD_CXN_KILLED_LUN_INVALID:
1693                 case CMD_CXN_KILLED_ICD_INVALID:
1694                 case CMD_CXN_KILLED_ITT_INVALID:
1695                 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1696                 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1697                         SE_DEBUG(DBG_LVL_1,
1698                                  "CQ Error notification for cmd.. "
1699                                  "code %d cid 0x%x\n",
1700                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1701                                  32] & CQE_CODE_MASK,
1702                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1703                                  32] & SOL_CID_MASK));
1704                         break;
1705                 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1706                         SE_DEBUG(DBG_LVL_1,
1707                                  "Digest error on def pdu ring, dropping..\n");
1708                         hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1709                                              (struct i_t_dpdu_cqe *) sol);
1710                         break;
1711                 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1712                 case CXN_KILLED_BURST_LEN_MISMATCH:
1713                 case CXN_KILLED_AHS_RCVD:
1714                 case CXN_KILLED_HDR_DIGEST_ERR:
1715                 case CXN_KILLED_UNKNOWN_HDR:
1716                 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1717                 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1718                 case CXN_KILLED_TIMED_OUT:
1719                 case CXN_KILLED_FIN_RCVD:
1720                 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1721                 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1722                 case CXN_KILLED_OVER_RUN_RESIDUAL:
1723                 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1724                 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1725                         SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1726                                  "0x%x...\n",
1727                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1728                                  32] & CQE_CODE_MASK,
1729                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1730                                  32] & CQE_CID_MASK));
1731                         iscsi_conn_failure(beiscsi_conn->conn,
1732                                            ISCSI_ERR_CONN_FAILED);
1733                         break;
1734                 case CXN_KILLED_RST_SENT:
1735                 case CXN_KILLED_RST_RCVD:
1736                         SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
1737                                 " received/sent on CID 0x%x...\n",
1738                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1739                                  32] & CQE_CODE_MASK,
1740                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1741                                  32] & CQE_CID_MASK));
1742                         iscsi_conn_failure(beiscsi_conn->conn,
1743                                            ISCSI_ERR_CONN_FAILED);
1744                         break;
1745                 default:
1746                         SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
1747                                  "received on CID 0x%x...\n",
1748                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1749                                  32] & CQE_CODE_MASK,
1750                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1751                                  32] & CQE_CID_MASK));
1752                         break;
1753                 }
1754
1755                 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1756                 queue_tail_inc(cq);
1757                 sol = queue_tail_node(cq);
1758                 num_processed++;
1759         }
1760
1761         if (num_processed > 0) {
1762                 tot_nump += num_processed;
1763                 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1764         }
1765         return tot_nump;
1766 }
1767
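/*
 * beiscsi_process_all_cqs - work item kicked by the ISR.  Clears the
 * todo flags under isr_lock and processes whichever of the MCC CQ and
 * the I/O CQ was pending, using the extra (MCC) event queue when MSI-X
 * is enabled and EQ 0 otherwise.
 */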
1768 void beiscsi_process_all_cqs(struct work_struct *work)
1769 {
1770         unsigned long flags;
1771         struct hwi_controller *phwi_ctrlr;
1772         struct hwi_context_memory *phwi_context;
1773         struct be_eq_obj *pbe_eq;
1774         struct beiscsi_hba *phba =
1775             container_of(work, struct beiscsi_hba, work_cqs);
1776
1777         phwi_ctrlr = phba->phwi_ctrlr;
1778         phwi_context = phwi_ctrlr->phwi_ctxt;
1779         if (phba->msix_enabled)
1780                 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1781         else
1782                 pbe_eq = &phwi_context->be_eq[0];
1783
1784         if (phba->todo_mcc_cq) {
1785                 spin_lock_irqsave(&phba->isr_lock, flags);
1786                 phba->todo_mcc_cq = 0;
1787                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1788                 beiscsi_process_mcc_isr(phba);
1789         }
1790
1791         if (phba->todo_cq) {
1792                 spin_lock_irqsave(&phba->isr_lock, flags);
1793                 phba->todo_cq = 0;
1794                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1795                 beiscsi_process_cq(pbe_eq);
1796         }
1797 }
1798
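/*
 * be_iopoll - blk_iopoll callback.  Processes the EQ's completion queue
 * and, when fewer entries than @budget were handled, completes the poll
 * and re-arms the event queue.
 */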
1799 static int be_iopoll(struct blk_iopoll *iop, int budget)
1800 {
1801         unsigned int ret;
1802         struct beiscsi_hba *phba;
1803         struct be_eq_obj *pbe_eq;
1804
1805         pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1806         ret = beiscsi_process_cq(pbe_eq);
1807         if (ret < budget) {
1808                 phba = pbe_eq->phba;
1809                 blk_iopoll_complete(iop);
1810                 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1811                 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1812         }
1813         return ret;
1814 }
1815
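/*
 * hwi_write_sgl - build the WRB and SGL for a SCSI I/O task.
 * The BHS address and up to the first two scatterlist elements are
 * written directly into the WRB (sge0/sge1); the task's SGL fragment is
 * then filled with the BHS entry, a zeroed slot, and one SGE per
 * scatterlist element, with the last SGE flagged.
 */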
1816 static void
1817 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1818               unsigned int num_sg, struct beiscsi_io_task *io_task)
1819 {
1820         struct iscsi_sge *psgl;
1821         unsigned short sg_len, index;
1822         unsigned int sge_len = 0;
1823         unsigned long long addr;
1824         struct scatterlist *l_sg;
1825         unsigned int offset;
1826
1827         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1828                                       io_task->bhs_pa.u.a32.address_lo);
1829         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1830                                       io_task->bhs_pa.u.a32.address_hi);
1831
1832         l_sg = sg;
1833         for (index = 0; (index < num_sg) && (index < 2); index++,
1834                                                          sg = sg_next(sg)) {
1835                 if (index == 0) {
1836                         sg_len = sg_dma_len(sg);
1837                         addr = (u64) sg_dma_address(sg);
1838                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1839                                                 ((u32)(addr & 0xFFFFFFFF)));
1840                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1841                                                         ((u32)(addr >> 32)));
1842                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1843                                                         sg_len);
1844                         sge_len = sg_len;
1845                 } else {
1846                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1847                                                         pwrb, sge_len);
1848                         sg_len = sg_dma_len(sg);
1849                         addr = (u64) sg_dma_address(sg);
1850                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1851                                                 ((u32)(addr & 0xFFFFFFFF)));
1852                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1853                                                         ((u32)(addr >> 32)));
1854                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1855                                                         sg_len);
1856                 }
1857         }
1858         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1859         memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1860
1861         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1862
1863         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1864                         io_task->bhs_pa.u.a32.address_hi);
1865         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1866                         io_task->bhs_pa.u.a32.address_lo);
1867
1868         if (num_sg == 1) {
1869                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1870                                                                 1);
1871                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1872                                                                 0);
1873         } else if (num_sg == 2) {
1874                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1875                                                                 0);
1876                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1877                                                                 1);
1878         } else {
1879                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1880                                                                 0);
1881                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1882                                                                 0);
1883         }
1884         sg = l_sg;
1885         psgl++;
1886         psgl++;
1887         offset = 0;
1888         for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1889                 sg_len = sg_dma_len(sg);
1890                 addr = (u64) sg_dma_address(sg);
1891                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1892                                                 (addr & 0xFFFFFFFF));
1893                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1894                                                 (addr >> 32));
1895                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1896                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1897                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1898                 offset += sg_len;
1899         }
1900         psgl--;
1901         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1902 }
1903
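/*
 * hwi_write_buffer - build the WRB and SGL for a non-I/O (mgmt) task.
 * Immediate data, when present, is DMA-mapped with pci_map_single() and
 * programmed into sge0; the SGL fragment starts with the BHS entry and
 * is extended when immediate data is carried.
 */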
1904 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1905 {
1906         struct iscsi_sge *psgl;
1907         unsigned long long addr;
1908         struct beiscsi_io_task *io_task = task->dd_data;
1909         struct beiscsi_conn *beiscsi_conn = io_task->conn;
1910         struct beiscsi_hba *phba = beiscsi_conn->phba;
1911
1912         io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1913         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1914                                 io_task->bhs_pa.u.a32.address_lo);
1915         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1916                                 io_task->bhs_pa.u.a32.address_hi);
1917
1918         if (task->data) {
1919                 if (task->data_count) {
1920                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1921                         addr = (u64) pci_map_single(phba->pcidev,
1922                                                     task->data,
1923                                                     task->data_count, 1);
1924                 } else {
1925                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1926                         addr = 0;
1927                 }
1928                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1929                                                 ((u32)(addr & 0xFFFFFFFF)));
1930                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1931                                                 ((u32)(addr >> 32)));
1932                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1933                                                 task->data_count);
1934
1935                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1936         } else {
1937                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1938                 addr = 0;
1939         }
1940
1941         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1942
1943         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1944
1945         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1946                       io_task->bhs_pa.u.a32.address_hi);
1947         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1948                       io_task->bhs_pa.u.a32.address_lo);
1949         if (task->data) {
1950                 psgl++;
1951                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1952                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1953                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1954                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1955                 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1956                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1957
1958                 psgl++;
1959                 if (task->data) {
1960                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1961                                                 ((u32)(addr & 0xFFFFFFFF)));
1962                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1963                                                 ((u32)(addr >> 32)));
1964                 }
1965                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1966         }
1967         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1968 }
1969
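/*
 * beiscsi_find_mem_req - size every region in phba->mem_req[] (WRBs and
 * WRB handles, SGL handles and SGEs, default PDU buffers, rings and
 * handles, global header templates and the HWI context) ahead of the
 * allocations done by beiscsi_alloc_mem().
 */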
1970 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1971 {
1972         unsigned int num_cq_pages, num_async_pdu_buf_pages;
1973         unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1974         unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1975
1976         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1977                                       sizeof(struct sol_cqe));
1978         num_async_pdu_buf_pages =
1979                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1980                                        phba->params.defpdu_hdr_sz);
1981         num_async_pdu_buf_sgl_pages =
1982                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1983                                        sizeof(struct phys_addr));
1984         num_async_pdu_data_pages =
1985                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1986                                        phba->params.defpdu_data_sz);
1987         num_async_pdu_data_sgl_pages =
1988                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1989                                        sizeof(struct phys_addr));
1990
1991         phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1992
1993         phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1994                                                  BE_ISCSI_PDU_HEADER_SIZE;
1995         phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1996                                             sizeof(struct hwi_context_memory);
1997
1998
1999         phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2000             * (phba->params.wrbs_per_cxn)
2001             * phba->params.cxns_per_ctrl;
2002         wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
2003                                  (phba->params.wrbs_per_cxn);
2004         phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2005                                 phba->params.cxns_per_ctrl);
2006
2007         phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2008                 phba->params.icds_per_ctrl;
2009         phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2010                 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2011
2012         phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
2013                 num_async_pdu_buf_pages * PAGE_SIZE;
2014         phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
2015                 num_async_pdu_data_pages * PAGE_SIZE;
2016         phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
2017                 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
2018         phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
2019                 num_async_pdu_data_sgl_pages * PAGE_SIZE;
2020         phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
2021                 phba->params.asyncpdus_per_ctrl *
2022                 sizeof(struct async_pdu_handle);
2023         phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
2024                 phba->params.asyncpdus_per_ctrl *
2025                 sizeof(struct async_pdu_handle);
2026         phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
2027                 sizeof(struct hwi_async_pdu_context) +
2028                 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
2029 }
2030
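/*
 * beiscsi_alloc_mem - allocate the HWI workspace and every region sized
 * by beiscsi_find_mem_req().  Each region is carved out of up to
 * BEISCSI_MAX_FRAGS_INIT DMA-coherent fragments; when an allocation
 * fails the chunk size is reduced (rounded down to a power of two or
 * halved) and retried, giving up once the attempt is already at or
 * below BE_MIN_MEM_SIZE.  Everything allocated so far is unwound on
 * failure.
 */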
2031 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2032 {
2033         struct be_mem_descriptor *mem_descr;
2034         dma_addr_t bus_add;
2035         struct mem_array *mem_arr, *mem_arr_orig;
2036         unsigned int i, j, alloc_size, curr_alloc_size;
2037
2038         phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2039         if (!phba->phwi_ctrlr)
2040                 return -ENOMEM;
2041
2042         phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2043                                  GFP_KERNEL);
2044         if (!phba->init_mem) {
2045                 kfree(phba->phwi_ctrlr);
2046                 return -ENOMEM;
2047         }
2048
2049         mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2050                                GFP_KERNEL);
2051         if (!mem_arr_orig) {
2052                 kfree(phba->init_mem);
2053                 kfree(phba->phwi_ctrlr);
2054                 return -ENOMEM;
2055         }
2056
2057         mem_descr = phba->init_mem;
2058         for (i = 0; i < SE_MEM_MAX; i++) {
2059                 j = 0;
2060                 mem_arr = mem_arr_orig;
2061                 alloc_size = phba->mem_req[i];
2062                 memset(mem_arr, 0, sizeof(struct mem_array) *
2063                        BEISCSI_MAX_FRAGS_INIT);
2064                 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2065                 do {
2066                         mem_arr->virtual_address = pci_alloc_consistent(
2067                                                         phba->pcidev,
2068                                                         curr_alloc_size,
2069                                                         &bus_add);
2070                         if (!mem_arr->virtual_address) {
2071                                 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2072                                         goto free_mem;
2073                                 if (curr_alloc_size -
2074                                         rounddown_pow_of_two(curr_alloc_size))
2075                                         curr_alloc_size = rounddown_pow_of_two
2076                                                              (curr_alloc_size);
2077                                 else
2078                                         curr_alloc_size = curr_alloc_size / 2;
2079                         } else {
2080                                 mem_arr->bus_address.u.
2081                                     a64.address = (__u64) bus_add;
2082                                 mem_arr->size = curr_alloc_size;
2083                                 alloc_size -= curr_alloc_size;
2084                                 curr_alloc_size = min(be_max_phys_size *
2085                                                       1024, alloc_size);
2086                                 j++;
2087                                 mem_arr++;
2088                         }
2089                 } while (alloc_size);
2090                 mem_descr->num_elements = j;
2091                 mem_descr->size_in_bytes = phba->mem_req[i];
2092                 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2093                                                GFP_KERNEL);
2094                 if (!mem_descr->mem_array)
2095                         goto free_mem;
2096
2097                 memcpy(mem_descr->mem_array, mem_arr_orig,
2098                        sizeof(struct mem_array) * j);
2099                 mem_descr++;
2100         }
2101         kfree(mem_arr_orig);
2102         return 0;
2103 free_mem:
2104         mem_descr->num_elements = j;
2105         while ((i) || (j)) {
2106                 for (j = mem_descr->num_elements; j > 0; j--) {
2107                         pci_free_consistent(phba->pcidev,
2108                                             mem_descr->mem_array[j - 1].size,
2109                                             mem_descr->mem_array[j - 1].
2110                                             virtual_address,
2111                                             (unsigned long)mem_descr->
2112                                             mem_array[j - 1].
2113                                             bus_address.u.a64.address);
2114                 }
2115                 if (i) {
2116                         i--;
2117                         kfree(mem_descr->mem_array);
2118                         mem_descr--;
2119                 }
2120         }
2121         kfree(mem_arr_orig);
2122         kfree(phba->init_mem);
2123         kfree(phba->phwi_ctrlr);
2124         return -ENOMEM;
2125 }
2126
2127 static int beiscsi_get_memory(struct beiscsi_hba *phba)
2128 {
2129         beiscsi_find_mem_req(phba);
2130         return beiscsi_alloc_mem(phba);
2131 }
2132
2133 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2134 {
2135         struct pdu_data_out *pdata_out;
2136         struct pdu_nop_out *pnop_out;
2137         struct be_mem_descriptor *mem_descr;
2138
2139         mem_descr = phba->init_mem;
2140         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2141         pdata_out =
2142             (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2143         memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2144
2145         AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2146                       IIOC_SCSI_DATA);
2147
2148         pnop_out =
2149             (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2150                                    virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2151
2152         memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2153         AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2154         AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2155         AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2156 }
2157
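/*
 * beiscsi_init_wrb_handle - carve the HWI_MEM_WRBH and HWI_MEM_WRB
 * regions into per-connection arrays: every wrb_context gets
 * wrbs_per_cxn handles and each handle is pointed at its iscsi_wrb.
 */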
2158 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2159 {
2160         struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2161         struct wrb_handle *pwrb_handle;
2162         struct hwi_controller *phwi_ctrlr;
2163         struct hwi_wrb_context *pwrb_context;
2164         struct iscsi_wrb *pwrb;
2165         unsigned int num_cxn_wrbh;
2166         unsigned int num_cxn_wrb, j, idx, index;
2167
2168         mem_descr_wrbh = phba->init_mem;
2169         mem_descr_wrbh += HWI_MEM_WRBH;
2170
2171         mem_descr_wrb = phba->init_mem;
2172         mem_descr_wrb += HWI_MEM_WRB;
2173
2174         idx = 0;
2175         pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
2176         num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2177                         ((sizeof(struct wrb_handle)) *
2178                          phba->params.wrbs_per_cxn));
2179         phwi_ctrlr = phba->phwi_ctrlr;
2180
2181         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2182                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2183                 pwrb_context->pwrb_handle_base =
2184                                 kzalloc(sizeof(struct wrb_handle *) *
2185                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2186                 pwrb_context->pwrb_handle_basestd =
2187                                 kzalloc(sizeof(struct wrb_handle *) *
2188                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2189                 if (num_cxn_wrbh) {
2190                         pwrb_context->alloc_index = 0;
2191                         pwrb_context->wrb_handles_available = 0;
2192                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2193                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2194                                 pwrb_context->pwrb_handle_basestd[j] =
2195                                                                 pwrb_handle;
2196                                 pwrb_context->wrb_handles_available++;
2197                                 pwrb_handle->wrb_index = j;
2198                                 pwrb_handle++;
2199                         }
2200                         pwrb_context->free_index = 0;
2201                         num_cxn_wrbh--;
2202                 } else {
2203                         idx++;
2204                         pwrb_handle =
2205                             mem_descr_wrbh->mem_array[idx].virtual_address;
2206                         num_cxn_wrbh =
2207                             ((mem_descr_wrbh->mem_array[idx].size) /
2208                              ((sizeof(struct wrb_handle)) *
2209                               phba->params.wrbs_per_cxn));
2210                         pwrb_context->alloc_index = 0;
2211                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2212                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2213                                 pwrb_context->pwrb_handle_basestd[j] =
2214                                     pwrb_handle;
2215                                 pwrb_context->wrb_handles_available++;
2216                                 pwrb_handle->wrb_index = j;
2217                                 pwrb_handle++;
2218                         }
2219                         pwrb_context->free_index = 0;
2220                         num_cxn_wrbh--;
2221                 }
2222         }
2223         idx = 0;
2224         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2225         num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2226                       ((sizeof(struct iscsi_wrb) *
2227                         phba->params.wrbs_per_cxn));
2228         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2229                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2230                 if (num_cxn_wrb) {
2231                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2232                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2233                                 pwrb_handle->pwrb = pwrb;
2234                                 pwrb++;
2235                         }
2236                         num_cxn_wrb--;
2237                 } else {
2238                         idx++;
2239                         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2240                         num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2241                                       ((sizeof(struct iscsi_wrb) *
2242                                         phba->params.wrbs_per_cxn));
2243                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2244                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2245                                 pwrb_handle->pwrb = pwrb;
2246                                 pwrb++;
2247                         }
2248                         num_cxn_wrb--;
2249                 }
2250         }
2251 }
2252
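/*
 * hwi_init_async_pdu_ctx - wire the async PDU context to the header and
 * data buffer, ring and handle regions allocated earlier, and seed the
 * free lists with one handle per async PDU.
 */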
2253 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2254 {
2255         struct hwi_controller *phwi_ctrlr;
2256         struct hba_parameters *p = &phba->params;
2257         struct hwi_async_pdu_context *pasync_ctx;
2258         struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2259         unsigned int index;
2260         struct be_mem_descriptor *mem_descr;
2261
2262         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2263         mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2264
2265         phwi_ctrlr = phba->phwi_ctrlr;
2266         phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2267                                 mem_descr->mem_array[0].virtual_address;
2268         pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2269         memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2270
2271         pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2272         pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2273         pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2274         pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2275
2276         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2277         mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2278         if (mem_descr->mem_array[0].virtual_address) {
2279                 SE_DEBUG(DBG_LVL_8,
2280                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
2281                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2282         } else
2283                 shost_printk(KERN_WARNING, phba->shost,
2284                              "No Virtual address\n");
2285
2286         pasync_ctx->async_header.va_base =
2287                         mem_descr->mem_array[0].virtual_address;
2288
2289         pasync_ctx->async_header.pa_base.u.a64.address =
2290                         mem_descr->mem_array[0].bus_address.u.a64.address;
2291
2292         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2293         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2294         if (mem_descr->mem_array[0].virtual_address) {
2295                 SE_DEBUG(DBG_LVL_8,
2296                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
2297                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2298         } else
2299                 shost_printk(KERN_WARNING, phba->shost,
2300                             "No Virtual address\n");
2301         pasync_ctx->async_header.ring_base =
2302                         mem_descr->mem_array[0].virtual_address;
2303
2304         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2305         mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2306         if (mem_descr->mem_array[0].virtual_address) {
2307                 SE_DEBUG(DBG_LVL_8,
2308                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
2309                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2310         } else
2311                 shost_printk(KERN_WARNING, phba->shost,
2312                             "No Virtual address\n");
2313
2314         pasync_ctx->async_header.handle_base =
2315                         mem_descr->mem_array[0].virtual_address;
2316         pasync_ctx->async_header.writables = 0;
2317         INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2318
2319         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2320         mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2321         if (mem_descr->mem_array[0].virtual_address) {
2322                 SE_DEBUG(DBG_LVL_8,
2323                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
2324                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2325         } else
2326                 shost_printk(KERN_WARNING, phba->shost,
2327                             "No Virtual address\n");
2328         pasync_ctx->async_data.va_base =
2329                         mem_descr->mem_array[0].virtual_address;
2330         pasync_ctx->async_data.pa_base.u.a64.address =
2331                         mem_descr->mem_array[0].bus_address.u.a64.address;
2332
2333         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2334         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2335         if (mem_descr->mem_array[0].virtual_address) {
2336                 SE_DEBUG(DBG_LVL_8,
2337                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
2338                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2339         } else
2340                 shost_printk(KERN_WARNING, phba->shost,
2341                              "No Virtual address\n");
2342
2343         pasync_ctx->async_data.ring_base =
2344                         mem_descr->mem_array[0].virtual_address;
2345
2346         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2347         mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2348         if (!mem_descr->mem_array[0].virtual_address)
2349                 shost_printk(KERN_WARNING, phba->shost,
2350                             "No Virtual address\n");
2351
2352         pasync_ctx->async_data.handle_base =
2353                         mem_descr->mem_array[0].virtual_address;
2354         pasync_ctx->async_data.writables = 0;
2355         INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2356
2357         pasync_header_h =
2358                 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2359         pasync_data_h =
2360                 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2361
2362         for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2363                 pasync_header_h->cri = -1;
2364                 pasync_header_h->index = (char)index;
2365                 INIT_LIST_HEAD(&pasync_header_h->link);
2366                 pasync_header_h->pbuffer =
2367                         (void *)((unsigned long)
2368                         (pasync_ctx->async_header.va_base) +
2369                         (p->defpdu_hdr_sz * index));
2370
2371                 pasync_header_h->pa.u.a64.address =
2372                         pasync_ctx->async_header.pa_base.u.a64.address +
2373                         (p->defpdu_hdr_sz * index);
2374
2375                 list_add_tail(&pasync_header_h->link,
2376                                 &pasync_ctx->async_header.free_list);
2377                 pasync_header_h++;
2378                 pasync_ctx->async_header.free_entries++;
2379                 pasync_ctx->async_header.writables++;
2380
2381                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2382                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2383                                header_busy_list);
2384                 pasync_data_h->cri = -1;
2385                 pasync_data_h->index = (char)index;
2386                 INIT_LIST_HEAD(&pasync_data_h->link);
2387                 pasync_data_h->pbuffer =
2388                         (void *)((unsigned long)
2389                         (pasync_ctx->async_data.va_base) +
2390                         (p->defpdu_data_sz * index));
2391
2392                 pasync_data_h->pa.u.a64.address =
2393                     pasync_ctx->async_data.pa_base.u.a64.address +
2394                     (p->defpdu_data_sz * index);
2395
2396                 list_add_tail(&pasync_data_h->link,
2397                               &pasync_ctx->async_data.free_list);
2398                 pasync_data_h++;
2399                 pasync_ctx->async_data.free_entries++;
2400                 pasync_ctx->async_data.writables++;
2401
2402                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2403         }
2404
2405         pasync_ctx->async_header.host_write_ptr = 0;
2406         pasync_ctx->async_header.ep_read_ptr = -1;
2407         pasync_ctx->async_data.host_write_ptr = 0;
2408         pasync_ctx->async_data.ep_read_ptr = -1;
2409 }
2410
2411 static int
2412 be_sgl_create_contiguous(void *virtual_address,
2413                          u64 physical_address, u32 length,
2414                          struct be_dma_mem *sgl)
2415 {
2416         WARN_ON(!virtual_address);
2417         WARN_ON(!physical_address);
2418         WARN_ON(length == 0);
2419         WARN_ON(!sgl);
2420
2421         sgl->va = virtual_address;
2422         sgl->dma = (unsigned long)physical_address;
2423         sgl->size = length;
2424
2425         return 0;
2426 }
2427
2428 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2429 {
2430         memset(sgl, 0, sizeof(*sgl));
2431 }
2432
2433 static void
2434 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2435                      struct mem_array *pmem, struct be_dma_mem *sgl)
2436 {
2437         if (sgl->va)
2438                 be_sgl_destroy_contiguous(sgl);
2439
2440         be_sgl_create_contiguous(pmem->virtual_address,
2441                                  pmem->bus_address.u.a64.address,
2442                                  pmem->size, sgl);
2443 }
2444
2445 static void
2446 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2447                            struct mem_array *pmem, struct be_dma_mem *sgl)
2448 {
2449         if (sgl->va)
2450                 be_sgl_destroy_contiguous(sgl);
2451
2452         be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2453                                  pmem->bus_address.u.a64.address,
2454                                  pmem->size, sgl);
2455 }
2456
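/*
 * be_fill_queue - initialize a be_queue_info over pre-allocated ring memory
 * The caller supplies the ring in vaddress; this records the length and
 * entry size, zeroes the ring, and leaves the DMA address for the caller
 * to fill in.
 */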
2457 static int be_fill_queue(struct be_queue_info *q,
2458                 u16 len, u16 entry_size, void *vaddress)
2459 {
2460         struct be_dma_mem *mem = &q->dma_mem;
2461
2462         memset(q, 0, sizeof(*q));
2463         q->len = len;
2464         q->entry_size = entry_size;
2465         mem->size = len * entry_size;
2466         mem->va = vaddress;
2467         if (!mem->va)
2468                 return -ENOMEM;
2469         memset(mem->va, 0, mem->size);
2470         return 0;
2471 }
2472
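/*
 * beiscsi_create_eqs - allocate and create the event queues
 * One EQ is created per CPU in use, plus one extra for MCC completions
 * when MSI-X is enabled.  On failure, any EQ ring memory already
 * allocated is freed before returning.
 */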
2473 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2474                              struct hwi_context_memory *phwi_context)
2475 {
2476         unsigned int i, num_eq_pages;
2477         int ret, eq_for_mcc;
2478         struct be_queue_info *eq;
2479         struct be_dma_mem *mem;
2480         void *eq_vaddress;
2481         dma_addr_t paddr;
2482
2483         num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries *
2484                                       sizeof(struct be_eq_entry));
2485
2486         if (phba->msix_enabled)
2487                 eq_for_mcc = 1;
2488         else
2489                 eq_for_mcc = 0;
2490         for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2491                 eq = &phwi_context->be_eq[i].q;
2492                 mem = &eq->dma_mem;
2493                 phwi_context->be_eq[i].phba = phba;
2494                 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2495                                                      num_eq_pages * PAGE_SIZE,
2496                                                      &paddr);
2497                 if (!eq_vaddress)
2498                         goto create_eq_error;
2499
2500                 mem->va = eq_vaddress;
2501                 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2502                                     sizeof(struct be_eq_entry), eq_vaddress);
2503                 if (ret) {
2504                         shost_printk(KERN_ERR, phba->shost,
2505                                      "be_fill_queue Failed for EQ\n");
2506                         goto create_eq_error;
2507                 }
2508
2509                 mem->dma = paddr;
2510                 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2511                                             phwi_context->cur_eqd);
2512                 if (ret) {
2513                         shost_printk(KERN_ERR, phba->shost,
2514                                      "beiscsi_cmd_eq_create"
2515                                      " Failed for EQ\n");
2516                         goto create_eq_error;
2517                 }
2518                 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2519         }
2520         return 0;
2521 create_eq_error:
2522         for (i = 0; i < (phba->num_cpus + 1); i++) {
2523                 eq = &phwi_context->be_eq[i].q;
2524                 mem = &eq->dma_mem;
2525                 if (mem->va)
2526                         pci_free_consistent(phba->pcidev, num_eq_pages
2527                                             * PAGE_SIZE,
2528                                             mem->va, mem->dma);
2529         }
2530         return ret;
2531 }
2532
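/*
 * beiscsi_create_cqs - allocate and create the iSCSI completion queues
 * One CQ of sol_cqe entries is created per CPU and bound to the matching
 * event queue.
 */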
2533 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2534                              struct hwi_context_memory *phwi_context)
2535 {
2536         unsigned int i, num_cq_pages;
2537         int ret;
2538         struct be_queue_info *cq, *eq;
2539         struct be_dma_mem *mem;
2540         struct be_eq_obj *pbe_eq;
2541         void *cq_vaddress;
2542         dma_addr_t paddr;
2543
2544         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
2545                                       sizeof(struct sol_cqe));
2546
2547         for (i = 0; i < phba->num_cpus; i++) {
2548                 cq = &phwi_context->be_cq[i];
2549                 eq = &phwi_context->be_eq[i].q;
2550                 pbe_eq = &phwi_context->be_eq[i];
2551                 pbe_eq->cq = cq;
2552                 pbe_eq->phba = phba;
2553                 mem = &cq->dma_mem;
2554                 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2555                                                      num_cq_pages * PAGE_SIZE,
2556                                                      &paddr);
2557                 if (!cq_vaddress)
2558                         goto create_cq_error;
2559                 ret = be_fill_queue(cq, phba->params.num_cq_entries,
2560                                     sizeof(struct sol_cqe), cq_vaddress);
2561                 if (ret) {
2562                         shost_printk(KERN_ERR, phba->shost,
2563                                      "be_fill_queue Failed for ISCSI CQ\n");
2564                         goto create_cq_error;
2565                 }
2566
2567                 mem->dma = paddr;
2568                 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2569                                             false, 0);
2570                 if (ret) {
2571                         shost_printk(KERN_ERR, phba->shost,
2572                                      "beiscsi_cmd_cq_create"
2573                                      " Failed for ISCSI CQ\n");
2574                         goto create_cq_error;
2575                 }
2576                 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2577                                                  cq->id, eq->id);
2578                 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2579         }
2580         return 0;
2581
2582 create_cq_error:
2583         for (i = 0; i < phba->num_cpus; i++) {
2584                 cq = &phwi_context->be_cq[i];
2585                 mem = &cq->dma_mem;
2586                 if (mem->va)
2587                         pci_free_consistent(phba->pcidev, num_cq_pages
2588                                             * PAGE_SIZE,
2589                                             mem->va, mem->dma);
2590         }
2591         return ret;
2592
2593 }
2594
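/*
 * beiscsi_create_def_hdr - create the default PDU header ring
 * The ring entries are physical addresses taken from the
 * HWI_MEM_ASYNC_HEADER_RING allocation; once the queue exists the async
 * header buffers are posted to the adapter.
 */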
2595 static int
2596 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2597                        struct hwi_context_memory *phwi_context,
2598                        struct hwi_controller *phwi_ctrlr,
2599                        unsigned int def_pdu_ring_sz)
2600 {
2601         unsigned int idx;
2602         int ret;
2603         struct be_queue_info *dq, *cq;
2604         struct be_dma_mem *mem;
2605         struct be_mem_descriptor *mem_descr;
2606         void *dq_vaddress;
2607
2608         idx = 0;
2609         dq = &phwi_context->be_def_hdrq;
2610         cq = &phwi_context->be_cq[0];
2611         mem = &dq->dma_mem;
2612         mem_descr = phba->init_mem;
2613         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2614         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2615         ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2616                             sizeof(struct phys_addr),
2617                             sizeof(struct phys_addr), dq_vaddress);
2618         if (ret) {
2619                 shost_printk(KERN_ERR, phba->shost,
2620                              "be_fill_queue Failed for DEF PDU HDR\n");
2621                 return ret;
2622         }
2623         mem->dma = (unsigned long)mem_descr->mem_array[idx].
2624                                   bus_address.u.a64.address;
2625         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2626                                               def_pdu_ring_sz,
2627                                               phba->params.defpdu_hdr_sz);
2628         if (ret) {
2629                 shost_printk(KERN_ERR, phba->shost,
2630                              "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2631                 return ret;
2632         }
2633         phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2634         SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2635                  phwi_context->be_def_hdrq.id);
2636         hwi_post_async_buffers(phba, 1);
2637         return 0;
2638 }
2639
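/*
 * beiscsi_create_def_data - create the default PDU data ring, the
 * data-buffer counterpart of beiscsi_create_def_hdr.
 */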
2640 static int
2641 beiscsi_create_def_data(struct beiscsi_hba *phba,
2642                         struct hwi_context_memory *phwi_context,
2643                         struct hwi_controller *phwi_ctrlr,
2644                         unsigned int def_pdu_ring_sz)
2645 {
2646         unsigned int idx;
2647         int ret;
2648         struct be_queue_info *dataq, *cq;
2649         struct be_dma_mem *mem;
2650         struct be_mem_descriptor *mem_descr;
2651         void *dq_vaddress;
2652
2653         idx = 0;
2654         dataq = &phwi_context->be_def_dataq;
2655         cq = &phwi_context->be_cq[0];
2656         mem = &dataq->dma_mem;
2657         mem_descr = phba->init_mem;
2658         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2659         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2660         ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2661                             sizeof(struct phys_addr),
2662                             sizeof(struct phys_addr), dq_vaddress);
2663         if (ret) {
2664                 shost_printk(KERN_ERR, phba->shost,
2665                              "be_fill_queue Failed for DEF PDU DATA\n");
2666                 return ret;
2667         }
2668         mem->dma = (unsigned long)mem_descr->mem_array[idx].
2669                                   bus_address.u.a64.address;
2670         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2671                                               def_pdu_ring_sz,
2672                                               phba->params.defpdu_data_sz);
2673         if (ret) {
2674                 shost_printk(KERN_ERR, phba->shost,
2675                              "be_cmd_create_default_pdu_queue Failed"
2676                              " for DEF PDU DATA\n");
2677                 return ret;
2678         }
2679         phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2680         SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2681                  phwi_context->be_def_dataq.id);
2682         hwi_post_async_buffers(phba, 0);
2683         SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
2684         return 0;
2685 }
2686
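/*
 * beiscsi_post_pages - post the SGE pages backing this function's ICD
 * range to the adapter, one HWI_MEM_SGE element at a time.
 */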
2687 static int
2688 beiscsi_post_pages(struct beiscsi_hba *phba)
2689 {
2690         struct be_mem_descriptor *mem_descr;
2691         struct mem_array *pm_arr;
2692         unsigned int page_offset, i;
2693         struct be_dma_mem sgl;
2694         int status;
2695
2696         mem_descr = phba->init_mem;
2697         mem_descr += HWI_MEM_SGE;
2698         pm_arr = mem_descr->mem_array;
2699
2700         page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2701                         phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2702         for (i = 0; i < mem_descr->num_elements; i++) {
2703                 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2704                 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2705                                                 page_offset,
2706                                                 (pm_arr->size / PAGE_SIZE));
2707                 page_offset += pm_arr->size / PAGE_SIZE;
2708                 if (status != 0) {
2709                         shost_printk(KERN_ERR, phba->shost,
2710                                      "post sgl failed.\n");
2711                         return status;
2712                 }
2713                 pm_arr++;
2714         }
2715         SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
2716         return 0;
2717 }
2718
2719 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2720 {
2721         struct be_dma_mem *mem = &q->dma_mem;
2722         if (mem->va)
2723                 pci_free_consistent(phba->pcidev, mem->size,
2724                         mem->va, mem->dma);
2725 }
2726
2727 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2728                 u16 len, u16 entry_size)
2729 {
2730         struct be_dma_mem *mem = &q->dma_mem;
2731
2732         memset(q, 0, sizeof(*q));
2733         q->len = len;
2734         q->entry_size = entry_size;
2735         mem->size = len * entry_size;
2736         mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2737         if (!mem->va)
2738                 return -ENOMEM;
2739         memset(mem->va, 0, mem->size);
2740         return 0;
2741 }
2742
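/*
 * beiscsi_create_wrb_rings - carve the HWI_MEM_WRB allocation into one
 * WRB ring per connection and create a WRB queue for each ring.  The id
 * returned for each queue is recorded as the cid of the matching
 * wrb_context.
 */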
2743 static int
2744 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2745                          struct hwi_context_memory *phwi_context,
2746                          struct hwi_controller *phwi_ctrlr)
2747 {
2748         unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2749         u64 pa_addr_lo;
2750         unsigned int idx, num, i;
2751         struct mem_array *pwrb_arr;
2752         void *wrb_vaddr;
2753         struct be_dma_mem sgl;
2754         struct be_mem_descriptor *mem_descr;
2755         int status;
2756
2757         idx = 0;
2758         mem_descr = phba->init_mem;
2759         mem_descr += HWI_MEM_WRB;
2760         pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2761                            GFP_KERNEL);
2762         if (!pwrb_arr) {
2763                 shost_printk(KERN_ERR, phba->shost,
2764                              "Memory alloc failed in create wrb ring.\n");
2765                 return -ENOMEM;
2766         }
2767         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2768         pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2769         num_wrb_rings = mem_descr->mem_array[idx].size /
2770                 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2771
2772         for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2773                 if (num_wrb_rings) {
2774                         pwrb_arr[num].virtual_address = wrb_vaddr;
2775                         pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2776                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2777                                             sizeof(struct iscsi_wrb);
2778                         wrb_vaddr += pwrb_arr[num].size;
2779                         pa_addr_lo += pwrb_arr[num].size;
2780                         num_wrb_rings--;
2781                 } else {
2782                         idx++;
2783                         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2784                         pa_addr_lo = mem_descr->mem_array[idx].
2785                                         bus_address.u.a64.address;
2786                         num_wrb_rings = mem_descr->mem_array[idx].size /
2787                                         (phba->params.wrbs_per_cxn *
2788                                         sizeof(struct iscsi_wrb));
2789                         pwrb_arr[num].virtual_address = wrb_vaddr;
2790                         pwrb_arr[num].bus_address.u.a64.address =
2791                                                 pa_addr_lo;
2792                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2793                                                  sizeof(struct iscsi_wrb);
2794                         wrb_vaddr += pwrb_arr[num].size;
2795                         pa_addr_lo += pwrb_arr[num].size;
2796                         num_wrb_rings--;
2797                 }
2798         }
2799         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2800                 wrb_mem_index = 0;
2801                 offset = 0;
2802                 size = 0;
2803
2804                 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2805                 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2806                                             &phwi_context->be_wrbq[i]);
2807                 if (status != 0) {
2808                         shost_printk(KERN_ERR, phba->shost,
2809                                      "wrbq create failed.\n");
2810                         kfree(pwrb_arr);
2811                         return status;
2812                 }
2813                 phwi_ctrlr->wrb_context[i * 2].cid =
2814                                         phwi_context->be_wrbq[i].id;
2815         }
2816         kfree(pwrb_arr);
2817         return 0;
2818 }
2819
2820 static void free_wrb_handles(struct beiscsi_hba *phba)
2821 {
2822         unsigned int index;
2823         struct hwi_controller *phwi_ctrlr;
2824         struct hwi_wrb_context *pwrb_context;
2825
2826         phwi_ctrlr = phba->phwi_ctrlr;
2827         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2828                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2829                 kfree(pwrb_context->pwrb_handle_base);
2830                 kfree(pwrb_context->pwrb_handle_basestd);
2831         }
2832 }
2833
2834 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2835 {
2836         struct be_queue_info *q;
2837         struct be_ctrl_info *ctrl = &phba->ctrl;
2838
2839         q = &phba->ctrl.mcc_obj.q;
2840         if (q->created)
2841                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2842         be_queue_free(phba, q);
2843
2844         q = &phba->ctrl.mcc_obj.cq;
2845         if (q->created)
2846                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2847         be_queue_free(phba, q);
2848 }
2849
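/*
 * hwi_cleanup - tear down everything created by hwi_init_port: the WRB
 * queues, default PDU queues, posted SGL pages, completion queues, event
 * queues and finally the MCC queues.
 */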
2850 static void hwi_cleanup(struct beiscsi_hba *phba)
2851 {
2852         struct be_queue_info *q;
2853         struct be_ctrl_info *ctrl = &phba->ctrl;
2854         struct hwi_controller *phwi_ctrlr;
2855         struct hwi_context_memory *phwi_context;
2856         int i, eq_num;
2857
2858         phwi_ctrlr = phba->phwi_ctrlr;
2859         phwi_context = phwi_ctrlr->phwi_ctxt;
2860         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2861                 q = &phwi_context->be_wrbq[i];
2862                 if (q->created)
2863                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2864         }
2865         free_wrb_handles(phba);
2866
2867         q = &phwi_context->be_def_hdrq;
2868         if (q->created)
2869                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2870
2871         q = &phwi_context->be_def_dataq;
2872         if (q->created)
2873                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2874
2875         beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2876
2877         for (i = 0; i < (phba->num_cpus); i++) {
2878                 q = &phwi_context->be_cq[i];
2879                 if (q->created)
2880                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2881         }
2882         if (phba->msix_enabled)
2883                 eq_num = 1;
2884         else
2885                 eq_num = 0;
2886         for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2887                 q = &phwi_context->be_eq[i].q;
2888                 if (q->created)
2889                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2890         }
2891         be_mcc_queues_destroy(phba);
2892 }
2893
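/*
 * be_mcc_queues_create - allocate and create the MCC queue and its CQ.
 * The MCC CQ is bound to the extra event queue when MSI-X is enabled,
 * otherwise to EQ 0.
 */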
2894 static int be_mcc_queues_create(struct beiscsi_hba *phba,
2895                                 struct hwi_context_memory *phwi_context)
2896 {
2897         struct be_queue_info *q, *cq;
2898         struct be_ctrl_info *ctrl = &phba->ctrl;
2899
2900         /* Alloc MCC compl queue */
2901         cq = &phba->ctrl.mcc_obj.cq;
2902         if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2903                         sizeof(struct be_mcc_compl)))
2904                 goto err;
2905         /* Ask BE to create MCC compl queue; */
2906         if (phba->msix_enabled) {
2907                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2908                                          [phba->num_cpus].q, false, true, 0))
2909                         goto mcc_cq_free;
2910         } else {
2911                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2912                                           false, true, 0))
2913                         goto mcc_cq_free;
2914         }
2915
2916         /* Alloc MCC queue */
2917         q = &phba->ctrl.mcc_obj.q;
2918         if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2919                 goto mcc_cq_destroy;
2920
2921         /* Ask BE to create MCC queue */
2922         if (beiscsi_cmd_mccq_create(phba, q, cq))
2923                 goto mcc_q_free;
2924
2925         return 0;
2926
2927 mcc_q_free:
2928         be_queue_free(phba, q);
2929 mcc_cq_destroy:
2930         beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2931 mcc_cq_free:
2932         be_queue_free(phba, cq);
2933 err:
2934         return -ENOMEM;
2935 }
2936
2937 static int find_num_cpus(void)
2938 {
2939         int  num_cpus = 0;
2940
2941         num_cpus = num_online_cpus();
2942         if (num_cpus >= MAX_CPUS)
2943                 num_cpus = MAX_CPUS - 1;
2944
2945         SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
2946         return num_cpus;
2947 }
2948
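/*
 * hwi_init_port - bring up the adapter rings in order: event queues, MCC
 * queues, firmware check, completion queues, default PDU header and data
 * rings, SGE pages and the per-connection WRB rings.  Any failure unwinds
 * through hwi_cleanup().
 */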
2949 static int hwi_init_port(struct beiscsi_hba *phba)
2950 {
2951         struct hwi_controller *phwi_ctrlr;
2952         struct hwi_context_memory *phwi_context;
2953         unsigned int def_pdu_ring_sz;
2954         struct be_ctrl_info *ctrl = &phba->ctrl;
2955         int status;
2956
2957         def_pdu_ring_sz =
2958                 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2959         phwi_ctrlr = phba->phwi_ctrlr;
2960         phwi_context = phwi_ctrlr->phwi_ctxt;
2961         phwi_context->max_eqd = 0;
2962         phwi_context->min_eqd = 0;
2963         phwi_context->cur_eqd = 64;
2964         be_cmd_fw_initialize(&phba->ctrl);
2965
2966         status = beiscsi_create_eqs(phba, phwi_context);
2967         if (status != 0) {
2968                 shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
2969                 goto error;
2970         }
2971
2972         status = be_mcc_queues_create(phba, phwi_context);
2973         if (status != 0)
2974                 goto error;
2975
2976         status = mgmt_check_supported_fw(ctrl, phba);
2977         if (status != 0) {
2978                 shost_printk(KERN_ERR, phba->shost,
2979                              "Unsupported fw version\n");
2980                 goto error;
2981         }
2982
2983         status = beiscsi_create_cqs(phba, phwi_context);
2984         if (status != 0) {
2985                 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2986                 goto error;
2987         }
2988
2989         status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2990                                         def_pdu_ring_sz);
2991         if (status != 0) {
2992                 shost_printk(KERN_ERR, phba->shost,
2993                              "Default Header not created\n");
2994                 goto error;
2995         }
2996
2997         status = beiscsi_create_def_data(phba, phwi_context,
2998                                          phwi_ctrlr, def_pdu_ring_sz);
2999         if (status != 0) {
3000                 shost_printk(KERN_ERR, phba->shost,
3001                              "Default Data not created\n");
3002                 goto error;
3003         }
3004
3005         status = beiscsi_post_pages(phba);
3006         if (status != 0) {
3007                 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
3008                 goto error;
3009         }
3010
3011         status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3012         if (status != 0) {
3013                 shost_printk(KERN_ERR, phba->shost,
3014                              "WRB Rings not created\n");
3015                 goto error;
3016         }
3017
3018         SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
3019         return 0;
3020
3021 error:
3022         shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
3023         hwi_cleanup(phba);
3024         return -ENOMEM;
3025 }
3026
3027 static int hwi_init_controller(struct beiscsi_hba *phba)
3028 {
3029         struct hwi_controller *phwi_ctrlr;
3030
3031         phwi_ctrlr = phba->phwi_ctrlr;
3032         if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3033                 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3034                     init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3035                 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
3036                          phwi_ctrlr->phwi_ctxt);
3037         } else {
3038                 shost_printk(KERN_ERR, phba->shost,
3039                              "HWI_MEM_ADDN_CONTEXT is more than one element."
3040                              " Failing to load\n");
3041                 return -ENOMEM;
3042         }
3043
3044         iscsi_init_global_templates(phba);
3045         beiscsi_init_wrb_handle(phba);
3046         hwi_init_async_pdu_ctx(phba);
3047         if (hwi_init_port(phba) != 0) {
3048                 shost_printk(KERN_ERR, phba->shost,
3049                              "hwi_init_controller failed\n");
3050                 return -ENOMEM;
3051         }
3052         return 0;
3053 }
3054
3055 static void beiscsi_free_mem(struct beiscsi_hba *phba)
3056 {
3057         struct be_mem_descriptor *mem_descr;
3058         int i, j;
3059
3060         mem_descr = phba->init_mem;
3061         i = 0;
3062         j = 0;
3063         for (i = 0; i < SE_MEM_MAX; i++) {
3064                 for (j = mem_descr->num_elements; j > 0; j--) {
3065                         pci_free_consistent(phba->pcidev,
3066                           mem_descr->mem_array[j - 1].size,
3067                           mem_descr->mem_array[j - 1].virtual_address,
3068                           (unsigned long)mem_descr->mem_array[j - 1].
3069                           bus_address.u.a64.address);
3070                 }
3071                 kfree(mem_descr->mem_array);
3072                 mem_descr++;
3073         }
3074         kfree(phba->init_mem);
3075         kfree(phba->phwi_ctrlr);
3076 }
3077
3078 static int beiscsi_init_controller(struct beiscsi_hba *phba)
3079 {
3080         int ret = -ENOMEM;
3081
3082         ret = beiscsi_get_memory(phba);
3083         if (ret < 0) {
3084                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
3085                              " Failed in beiscsi_get_memory\n");
3086                 return ret;
3087         }
3088
3089         ret = hwi_init_controller(phba);
3090         if (ret)
3091                 goto free_init;
3092         SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller\n");
3093         return 0;
3094
3095 free_init:
3096         beiscsi_free_mem(phba);
3097         return -ENOMEM;
3098 }
3099
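/*
 * beiscsi_init_sgl_handle - build the I/O and eh SGL handle tables.
 * The first ios_per_ctrl handles form the I/O pool and the remainder the
 * eh (management) pool; each handle is then pointed at its slice of the
 * HWI_MEM_SGE fragment area and given an ICD index starting at
 * fw_config.iscsi_icd_start.
 */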
3100 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3101 {
3102         struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3103         struct sgl_handle *psgl_handle;
3104         struct iscsi_sge *pfrag;
3105         unsigned int arr_index, i, idx;
3106
3107         phba->io_sgl_hndl_avbl = 0;
3108         phba->eh_sgl_hndl_avbl = 0;
3109
3110         mem_descr_sglh = phba->init_mem;
3111         mem_descr_sglh += HWI_MEM_SGLH;
3112         if (1 == mem_descr_sglh->num_elements) {
3113                 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3114                                                  phba->params.ios_per_ctrl,
3115                                                  GFP_KERNEL);
3116                 if (!phba->io_sgl_hndl_base) {
3117                         shost_printk(KERN_ERR, phba->shost,
3118                                      "Mem Alloc Failed. Failing to load\n");
3119                         return -ENOMEM;
3120                 }
3121                 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3122                                                  (phba->params.icds_per_ctrl -
3123                                                  phba->params.ios_per_ctrl),
3124                                                  GFP_KERNEL);
3125                 if (!phba->eh_sgl_hndl_base) {
3126                         kfree(phba->io_sgl_hndl_base);
3127                         shost_printk(KERN_ERR, phba->shost,
3128                                      "Mem Alloc Failed. Failing to load\n");
3129                         return -ENOMEM;
3130                 }
3131         } else {
3132                 shost_printk(KERN_ERR, phba->shost,
3133                              "HWI_MEM_SGLH is more than one element."
3134                              " Failing to load\n");
3135                 return -ENOMEM;
3136         }
3137
3138         arr_index = 0;
3139         idx = 0;
3140         while (idx < mem_descr_sglh->num_elements) {
3141                 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3142
3143                 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3144                       sizeof(struct sgl_handle)); i++) {
3145                         if (arr_index < phba->params.ios_per_ctrl) {
3146                                 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3147                                 phba->io_sgl_hndl_avbl++;
3148                                 arr_index++;
3149                         } else {
3150                                 phba->eh_sgl_hndl_base[arr_index -
3151                                         phba->params.ios_per_ctrl] =
3152                                                                 psgl_handle;
3153                                 arr_index++;
3154                                 phba->eh_sgl_hndl_avbl++;
3155                         }
3156                         psgl_handle++;
3157                 }
3158                 idx++;
3159         }
3160         SE_DEBUG(DBG_LVL_8,
3161                  "phba->io_sgl_hndl_avbl=%d "
3162                  "phba->eh_sgl_hndl_avbl=%d\n",
3163                  phba->io_sgl_hndl_avbl,
3164                  phba->eh_sgl_hndl_avbl);
3165         mem_descr_sg = phba->init_mem;
3166         mem_descr_sg += HWI_MEM_SGE;
3167         SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n",
3168                  mem_descr_sg->num_elements);
3169         arr_index = 0;
3170         idx = 0;
3171         while (idx < mem_descr_sg->num_elements) {
3172                 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3173
3174                 for (i = 0;
3175                      i < (mem_descr_sg->mem_array[idx].size) /
3176                      (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3177                      i++) {
3178                         if (arr_index < phba->params.ios_per_ctrl)
3179                                 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3180                         else
3181                                 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3182                                                 phba->params.ios_per_ctrl];
3183                         psgl_handle->pfrag = pfrag;
3184                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3185                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3186                         pfrag += phba->params.num_sge_per_io;
3187                         psgl_handle->sgl_index =
3188                                 phba->fw_config.iscsi_icd_start + arr_index++;
3189                 }
3190                 idx++;
3191         }
3192         phba->io_sgl_free_index = 0;
3193         phba->io_sgl_alloc_index = 0;
3194         phba->eh_sgl_free_index = 0;
3195         phba->eh_sgl_alloc_index = 0;
3196         return 0;
3197 }
3198
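/*
 * hba_setup_cid_tbls - allocate the free-CID array and the endpoint
 * lookup table.  CIDs start at fw_config.iscsi_cid_start and advance in
 * steps of two.
 */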
3199 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3200 {
3201         int i, new_cid;
3202
3203         phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3204                                   GFP_KERNEL);
3205         if (!phba->cid_array) {
3206                 shost_printk(KERN_ERR, phba->shost,
3207                              "Failed to allocate memory in "
3208                              "hba_setup_cid_tbls\n");
3209                 return -ENOMEM;
3210         }
3211         phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3212                                  phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3213         if (!phba->ep_array) {
3214                 shost_printk(KERN_ERR, phba->shost,
3215                              "Failed to allocate memory in "
3216                              "hba_setup_cid_tbls\n");
3217                 kfree(phba->cid_array);
3218                 return -ENOMEM;
3219         }
3220         new_cid = phba->fw_config.iscsi_cid_start;
3221         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3222                 phba->cid_array[i] = new_cid;
3223                 new_cid += 2;
3224         }
3225         phba->avlbl_cids = phba->params.cxns_per_ctrl;
3226         return 0;
3227 }
3228
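/*
 * hwi_enable_intr - set the host interrupt enable bit in the PCICFG BAR
 * if it is not already set and rearm the event queue doorbell(s).
 */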
3229 static void hwi_enable_intr(struct beiscsi_hba *phba)
3230 {
3231         struct be_ctrl_info *ctrl = &phba->ctrl;
3232         struct hwi_controller *phwi_ctrlr;
3233         struct hwi_context_memory *phwi_context;
3234         struct be_queue_info *eq;
3235         u8 __iomem *addr;
3236         u32 reg, i;
3237         u32 enabled;
3238
3239         phwi_ctrlr = phba->phwi_ctrlr;
3240         phwi_context = phwi_ctrlr->phwi_ctxt;
3241
3242         addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3243                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3244         reg = ioread32(addr);
3245         SE_DEBUG(DBG_LVL_8, "reg = 0x%08x\n", reg);
3246
3247         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3248         if (!enabled) {
3249                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3250                 SE_DEBUG(DBG_LVL_8, "reg = 0x%08x addr=%p\n", reg, addr);
3251                 iowrite32(reg, addr);
3252                 if (!phba->msix_enabled) {
3253                         eq = &phwi_context->be_eq[0].q;
3254                         SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3255                         hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3256                 } else {
3257                         for (i = 0; i <= phba->num_cpus; i++) {
3258                                 eq = &phwi_context->be_eq[i].q;
3259                                 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3260                                 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3261                         }
3262                 }
3263         }
3264 }
3265
3266 static void hwi_disable_intr(struct beiscsi_hba *phba)
3267 {
3268         struct be_ctrl_info *ctrl = &phba->ctrl;
3269
3270         u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3271         u32 reg = ioread32(addr);
3272
3273         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3274         if (enabled) {
3275                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3276                 iowrite32(reg, addr);
3277         } else
3278                 shost_printk(KERN_WARNING, phba->shost,
3279                              "In hwi_disable_intr, Already Disabled\n");
3280 }
3281
3282 static int beiscsi_init_port(struct beiscsi_hba *phba)
3283 {
3284         int ret;
3285
3286         ret = beiscsi_init_controller(phba);
3287         if (ret < 0) {
3288                 shost_printk(KERN_ERR, phba->shost,
3289                              "beiscsi_dev_probe - Failed in "
3290                              "beiscsi_init_controller\n");
3291                 return ret;
3292         }
3293         ret = beiscsi_init_sgl_handle(phba);
3294         if (ret < 0) {
3295                 shost_printk(KERN_ERR, phba->shost,
3296                              "beiscsi_dev_probe - Failed in "
3297                              "beiscsi_init_sgl_handle\n");
3298                 goto do_cleanup_ctrlr;
3299         }
3300
3301         if (hba_setup_cid_tbls(phba)) {
3302                 shost_printk(KERN_ERR, phba->shost,
3303                              "Failed in hba_setup_cid_tbls\n");
3304                 kfree(phba->io_sgl_hndl_base);
3305                 kfree(phba->eh_sgl_hndl_base);
3306                 goto do_cleanup_ctrlr;
3307         }
3308
3309         return ret;
3310
3311 do_cleanup_ctrlr:
3312         hwi_cleanup(phba);
3313         return ret;
3314 }
3315
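/*
 * hwi_purge_eq - consume and acknowledge any valid entries left on the
 * event queues so the rings are empty before teardown.
 */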
3316 static void hwi_purge_eq(struct beiscsi_hba *phba)
3317 {
3318         struct hwi_controller *phwi_ctrlr;
3319         struct hwi_context_memory *phwi_context;
3320         struct be_queue_info *eq;
3321         struct be_eq_entry *eqe = NULL;
3322         int i, eq_msix;
3323         unsigned int num_processed;
3324
3325         phwi_ctrlr = phba->phwi_ctrlr;
3326         phwi_context = phwi_ctrlr->phwi_ctxt;
3327         if (phba->msix_enabled)
3328                 eq_msix = 1;
3329         else
3330                 eq_msix = 0;
3331
3332         for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3333                 eq = &phwi_context->be_eq[i].q;
3334                 eqe = queue_tail_node(eq);
3335                 num_processed = 0;
3336                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3337                                         & EQE_VALID_MASK) {
3338                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3339                         queue_tail_inc(eq);
3340                         eqe = queue_tail_node(eq);
3341                         num_processed++;
3342                 }
3343
3344                 if (num_processed)
3345                         hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
3346         }
3347 }
3348
3349 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3350 {
3351         int mgmt_status;
3352
3353         mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3354         if (mgmt_status)
3355                 shost_printk(KERN_WARNING, phba->shost,
3356                              "mgmt_epfw_cleanup FAILED\n");
3357
3358         hwi_purge_eq(phba);
3359         hwi_cleanup(phba);
3360         kfree(phba->io_sgl_hndl_base);
3361         kfree(phba->eh_sgl_hndl_base);
3362         kfree(phba->cid_array);
3363         kfree(phba->ep_array);
3364 }
3365
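/*
 * beiscsi_offload_connection - push the negotiated login parameters to
 * the adapter by building a target-context-update WRB for the connection
 * and ringing the TXULP doorbell.
 */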
3366 void
3367 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3368                            struct beiscsi_offload_params *params)
3369 {
3370         struct wrb_handle *pwrb_handle;
3371         struct iscsi_target_context_update_wrb *pwrb = NULL;
3372         struct be_mem_descriptor *mem_descr;
3373         struct beiscsi_hba *phba = beiscsi_conn->phba;
3374         u32 doorbell = 0;
3375
3376         /*
3377          * We can always use 0 here because it is reserved by libiscsi for
3378          * login/startup related tasks.
3379          */
3380         pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3381                                        phba->fw_config.iscsi_cid_start));
3382         pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3383         memset(pwrb, 0, sizeof(*pwrb));
3384         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3385                       max_burst_length, pwrb, params->dw[offsetof
3386                       (struct amap_beiscsi_offload_params,
3387                       max_burst_length) / 32]);
3388         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3389                       max_send_data_segment_length, pwrb,
3390                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3391                       max_send_data_segment_length) / 32]);
3392         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3393                       first_burst_length,
3394                       pwrb,
3395                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3396                       first_burst_length) / 32]);
3397
3398         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3399                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3400                       erl) / 32] & OFFLD_PARAMS_ERL));
3401         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3402                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3403                       dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3404         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3405                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3406                       hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3407         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3408                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3409                       ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3410         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3411                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3412                        imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3413         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3414                       pwrb,
3415                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3416                       exp_statsn) / 32] + 1));
3417         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3418                       0x7);
3419         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3420                       pwrb, pwrb_handle->wrb_index);
3421         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3422                       pwrb, pwrb_handle->nxt_wrb_index);
3423         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3424                         session_state, pwrb, 0);
3425         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3426                       pwrb, 1);
3427         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3428                       pwrb, 0);
3429         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3430                       0);
3431
3432         mem_descr = phba->init_mem;
3433         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3434
3435         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3436                         pad_buffer_addr_hi, pwrb,
3437                       mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3438         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3439                         pad_buffer_addr_lo, pwrb,
3440                       mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3441
3442         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3443
3444         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3445         doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3446                              << DB_DEF_PDU_WRB_INDEX_SHIFT;
3447         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3448
3449         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3450 }
3451
3452 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3453                               int *index, int *age)
3454 {
3455         *index = (int)itt;
3456         if (age)
3457                 *age = conn->session->age;
3458 }
3459
3460 /**
3461  * beiscsi_alloc_pdu - allocates pdu and related resources
3462  * @task: libiscsi task
3463  * @opcode: opcode of pdu for task
3464  *
3465  * This is called with the session lock held. It will allocate
3466  * the wrb and sgl if needed for the command. And it will prep
3467  * the pdu's itt. beiscsi_parse_pdu will later translate
3468  * the pdu itt to the libiscsi task itt.
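 * The itt written to hardware packs the WRB index into the upper 16 bits
 * and the SGL handle index into the lower 16 bits.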
3469  */
3470 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3471 {
3472         struct beiscsi_io_task *io_task = task->dd_data;
3473         struct iscsi_conn *conn = task->conn;
3474         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3475         struct beiscsi_hba *phba = beiscsi_conn->phba;
3476         struct hwi_wrb_context *pwrb_context;
3477         struct hwi_controller *phwi_ctrlr;
3478         itt_t itt;
3479         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3480         dma_addr_t paddr;
3481
3482         io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3483                                           GFP_KERNEL, &paddr);
3484         if (!io_task->cmd_bhs)
3485                 return -ENOMEM;
3486         io_task->bhs_pa.u.a64.address = paddr;
3487         io_task->libiscsi_itt = (itt_t)task->itt;
3488         io_task->conn = beiscsi_conn;
3489
3490         task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3491         task->hdr_max = sizeof(struct be_cmd_bhs);
3492         io_task->psgl_handle = NULL;
3493         io_task->pwrb_handle = NULL;
3494
3495         if (task->sc) {
3496                 spin_lock(&phba->io_sgl_lock);
3497                 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3498                 spin_unlock(&phba->io_sgl_lock);
3499                 if (!io_task->psgl_handle)
3500                         goto free_hndls;
3501                 io_task->pwrb_handle = alloc_wrb_handle(phba,
3502                                         beiscsi_conn->beiscsi_conn_cid -
3503                                         phba->fw_config.iscsi_cid_start);
3504                 if (!io_task->pwrb_handle)
3505                         goto free_io_hndls;
3506         } else {
3507                 io_task->scsi_cmnd = NULL;
3508                 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3509                         if (!beiscsi_conn->login_in_progress) {
3510                                 spin_lock(&phba->mgmt_sgl_lock);
3511                                 io_task->psgl_handle = (struct sgl_handle *)
3512                                                 alloc_mgmt_sgl_handle(phba);
3513                                 spin_unlock(&phba->mgmt_sgl_lock);
3514                                 if (!io_task->psgl_handle)
3515                                         goto free_hndls;
3516
3517                                 beiscsi_conn->login_in_progress = 1;
3518                                 beiscsi_conn->plogin_sgl_handle =
3519                                                         io_task->psgl_handle;
3520                                 io_task->pwrb_handle =
3521                                         alloc_wrb_handle(phba,
3522                                         beiscsi_conn->beiscsi_conn_cid -
3523                                         phba->fw_config.iscsi_cid_start);
3524                                 if (!io_task->pwrb_handle)
3525                                         goto free_io_hndls;
3526                                 beiscsi_conn->plogin_wrb_handle =
3527                                                         io_task->pwrb_handle;
3528
3529                         } else {
3530                                 io_task->psgl_handle =
3531                                                 beiscsi_conn->plogin_sgl_handle;
3532                                 io_task->pwrb_handle =
3533                                                 beiscsi_conn->plogin_wrb_handle;
3534                         }
3535                 } else {
3536                         spin_lock(&phba->mgmt_sgl_lock);
3537                         io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3538                         spin_unlock(&phba->mgmt_sgl_lock);
3539                         if (!io_task->psgl_handle)
3540                                 goto free_hndls;
3541                         io_task->pwrb_handle =
3542                                         alloc_wrb_handle(phba,
3543                                         beiscsi_conn->beiscsi_conn_cid -
3544                                         phba->fw_config.iscsi_cid_start);
3545                         if (!io_task->pwrb_handle)
3546                                 goto free_mgmt_hndls;
3547
3548                 }
3549         }
3550         itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3551                                  wrb_index << 16) | (unsigned int)
3552                                 (io_task->psgl_handle->sgl_index));
3553         io_task->pwrb_handle->pio_handle = task;
3554
3555         io_task->cmd_bhs->iscsi_hdr.itt = itt;
3556         return 0;
3557
3558 free_io_hndls:
3559         spin_lock(&phba->io_sgl_lock);
3560         free_io_sgl_handle(phba, io_task->psgl_handle);
3561         spin_unlock(&phba->io_sgl_lock);
3562         goto free_hndls;
3563 free_mgmt_hndls:
3564         spin_lock(&phba->mgmt_sgl_lock);
3565         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3566         spin_unlock(&phba->mgmt_sgl_lock);
3567 free_hndls:
3568         phwi_ctrlr = phba->phwi_ctrlr;
3569         pwrb_context = &phwi_ctrlr->wrb_context[
3570                         beiscsi_conn->beiscsi_conn_cid -
3571                         phba->fw_config.iscsi_cid_start];
3572         if (io_task->pwrb_handle)
3573                 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3574         io_task->pwrb_handle = NULL;
3575         pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3576                       io_task->bhs_pa.u.a64.address);
3577         SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
3578         return -ENOMEM;
3579 }
3580
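/*
 * beiscsi_cleanup_task - release per-task resources: the WRB handle, the
 * BHS buffer and the I/O or management SGL handle.  Login tasks keep
 * their management SGL handle so it can be reused for the whole login
 * phase.
 */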
3581 static void beiscsi_cleanup_task(struct iscsi_task *task)
3582 {
3583         struct beiscsi_io_task *io_task = task->dd_data;
3584         struct iscsi_conn *conn = task->conn;
3585         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3586         struct beiscsi_hba *phba = beiscsi_conn->phba;
3587         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3588         struct hwi_wrb_context *pwrb_context;
3589         struct hwi_controller *phwi_ctrlr;
3590
3591         phwi_ctrlr = phba->phwi_ctrlr;
3592         pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3593                         - phba->fw_config.iscsi_cid_start];
3594         if (io_task->pwrb_handle) {
3595                 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3596                 io_task->pwrb_handle = NULL;
3597         }
3598
3599         if (io_task->cmd_bhs) {
3600                 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3601                               io_task->bhs_pa.u.a64.address);
3602         }
3603
3604         if (task->sc) {
3605                 if (io_task->psgl_handle) {
3606                         spin_lock(&phba->io_sgl_lock);
3607                         free_io_sgl_handle(phba, io_task->psgl_handle);
3608                         spin_unlock(&phba->io_sgl_lock);
3609                         io_task->psgl_handle = NULL;
3610                 }
3611         } else {
3612                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3613                         return;
3614                 if (io_task->psgl_handle) {
3615                         spin_lock(&phba->mgmt_sgl_lock);
3616                         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3617                         spin_unlock(&phba->mgmt_sgl_lock);
3618                         io_task->psgl_handle = NULL;
3619                 }
3620         }
3621 }
3622
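/*
 * beiscsi_iotask - build and post the WRB for a SCSI data command.  The
 * WRB type and the embedded data-out header depend on the transfer
 * direction; the scatterlist is written into the SGL and the TXULP
 * doorbell is rung.
 */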
3623 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3624                           unsigned int num_sg, unsigned int xferlen,
3625                           unsigned int writedir)
3626 {
3627
3628         struct beiscsi_io_task *io_task = task->dd_data;
3629         struct iscsi_conn *conn = task->conn;
3630         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3631         struct beiscsi_hba *phba = beiscsi_conn->phba;
3632         struct iscsi_wrb *pwrb = NULL;
3633         unsigned int doorbell = 0;
3634
3635         pwrb = io_task->pwrb_handle->pwrb;
3636         io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3637         io_task->bhs_len = sizeof(struct be_cmd_bhs);
3638
3639         if (writedir) {
3640                 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3641                 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3642                               &io_task->cmd_bhs->iscsi_data_pdu,
3643                               (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3644                 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3645                               &io_task->cmd_bhs->iscsi_data_pdu,
3646                               ISCSI_OPCODE_SCSI_DATA_OUT);
3647                 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3648                               &io_task->cmd_bhs->iscsi_data_pdu, 1);
3649                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3650                               INI_WR_CMD);
3651                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3652         } else {
3653                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3654                               INI_RD_CMD);
3655                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3656         }
3657         memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3658                dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3659                io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3660
3661         AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3662                       cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
3663                                   lun[0]));
3664         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3665         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3666                       io_task->pwrb_handle->wrb_index);
3667         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3668                       be32_to_cpu(task->cmdsn));
3669         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3670                       io_task->psgl_handle->sgl_index);
3671
3672         hwi_write_sgl(pwrb, sg, num_sg, io_task);
3673
3674         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3675                       io_task->pwrb_handle->nxt_wrb_index);
3676         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3677
3678         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3679         doorbell |= (io_task->pwrb_handle->wrb_index &
3680                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3681         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3682
3683         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3684         return 0;
3685 }
3686
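/*
 * beiscsi_mtask - build and post the WRB for a non-data PDU: login,
 * nop-out, text, TMF or logout.
 */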
3687 static int beiscsi_mtask(struct iscsi_task *task)
3688 {
3689         struct beiscsi_io_task *io_task = task->dd_data;
3690         struct iscsi_conn *conn = task->conn;
3691         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3692         struct beiscsi_hba *phba = beiscsi_conn->phba;
3693         struct iscsi_wrb *pwrb = NULL;
3694         unsigned int doorbell = 0;
3695         unsigned int cid;
3696
3697         cid = beiscsi_conn->beiscsi_conn_cid;
3698         pwrb = io_task->pwrb_handle->pwrb;
3699         memset(pwrb, 0, sizeof(*pwrb));
3700         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3701                       be32_to_cpu(task->cmdsn));
3702         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3703                       io_task->pwrb_handle->wrb_index);
3704         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3705                       io_task->psgl_handle->sgl_index);
3706
3707         switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3708         case ISCSI_OP_LOGIN:
3709                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3710                               TGT_DM_CMD);
3711                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3712                 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3713                 hwi_write_buffer(pwrb, task);
3714                 break;
3715         case ISCSI_OP_NOOP_OUT:
3716                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3717                               INI_RD_CMD);
3718                 if (task->hdr->ttt == ISCSI_RESERVED_TAG)
3719                         AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3720                 else
3721                         AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
3722                 hwi_write_buffer(pwrb, task);
3723                 break;
3724         case ISCSI_OP_TEXT:
3725                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3726                               TGT_DM_CMD);
3727                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3728                 hwi_write_buffer(pwrb, task);
3729                 break;
3730         case ISCSI_OP_SCSI_TMFUNC:
3731                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3732                               INI_TMF_CMD);
3733                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3734                 hwi_write_buffer(pwrb, task);
3735                 break;
3736         case ISCSI_OP_LOGOUT:
3737                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3738                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3739                               HWH_TYPE_LOGOUT);
3740                 hwi_write_buffer(pwrb, task);
3741                 break;
3742
3743         default:
3744                 SE_DEBUG(DBG_LVL_1, "opcode=%d not supported\n",
3745                          task->hdr->opcode & ISCSI_OPCODE_MASK);
3746                 return -EINVAL;
3747         }
3748
3749         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3750                       task->data_count);
3751         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3752                       io_task->pwrb_handle->nxt_wrb_index);
3753         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3754
3755         doorbell |= cid & DB_WRB_POST_CID_MASK;
3756         doorbell |= (io_task->pwrb_handle->wrb_index &
3757                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3758         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3759         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3760         return 0;
3761 }
3762
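/*
 * beiscsi_task_xmit - libiscsi xmit_task entry point.  Tasks without a
 * scsi_cmnd are management PDUs and go through beiscsi_mtask(); SCSI
 * commands are DMA-mapped and handed to beiscsi_iotask() together with
 * the transfer length and data direction.
 */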
3763 static int beiscsi_task_xmit(struct iscsi_task *task)
3764 {
3765         struct beiscsi_io_task *io_task = task->dd_data;
3766         struct scsi_cmnd *sc = task->sc;
3767         struct scatterlist *sg;
3768         int num_sg;
3769         unsigned int  writedir = 0, xferlen = 0;
3770
3771         if (!sc)
3772                 return beiscsi_mtask(task);
3773
3774         io_task->scsi_cmnd = sc;
3775         num_sg = scsi_dma_map(sc);
3776         if (num_sg < 0) {
3777                 SE_DEBUG(DBG_LVL_1, "scsi_dma_map failed\n");
3778                 return num_sg;
3779         }
3780         xferlen = scsi_bufflen(sc);
3781         sg = scsi_sglist(sc);
3782         if (sc->sc_data_direction == DMA_TO_DEVICE) {
3783                 writedir = 1;
3784                 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
3785                          task->imm_count);
3786         } else
3787                 writedir = 0;
3788         return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3789 }
3790
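/*
 * beiscsi_remove - PCI remove callback.  Tears down in roughly the
 * reverse order of probe: disable interrupts, free the MSI-X or INTx
 * IRQs, destroy the workqueue, stop blk-iopoll, clean up the port and
 * driver memory, clear the semaphore bit set at probe time in the
 * MPU_EP_SEMAPHORE register, unmap the PCI BARs, free the mailbox DMA
 * buffer and release the SCSI host.
 */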
3791 static void beiscsi_remove(struct pci_dev *pcidev)
3792 {
3793         struct beiscsi_hba *phba = NULL;
3794         struct hwi_controller *phwi_ctrlr;
3795         struct hwi_context_memory *phwi_context;
3796         struct be_eq_obj *pbe_eq;
3797         unsigned int i, msix_vec;
3798         u8 *real_offset = 0;
3799         u32 value = 0;
3800
3801         phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3802         if (!phba) {
3803                 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
3804                 return;
3805         }
3806
3807         phwi_ctrlr = phba->phwi_ctrlr;
3808         phwi_context = phwi_ctrlr->phwi_ctxt;
3809         hwi_disable_intr(phba);
3810         if (phba->msix_enabled) {
3811                 for (i = 0; i <= phba->num_cpus; i++) {
3812                         msix_vec = phba->msix_entries[i].vector;
3813                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3814                 }
3815         } else
3816                 if (phba->pcidev->irq)
3817                         free_irq(phba->pcidev->irq, phba);
3818         pci_disable_msix(phba->pcidev);
3819         destroy_workqueue(phba->wq);
3820         if (blk_iopoll_enabled)
3821                 for (i = 0; i < phba->num_cpus; i++) {
3822                         pbe_eq = &phwi_context->be_eq[i];
3823                         blk_iopoll_disable(&pbe_eq->iopoll);
3824                 }
3825
3826         beiscsi_clean_port(phba);
3827         beiscsi_free_mem(phba);
3828         real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
3829
3830         value = readl((void *)real_offset);
3831
3832         if (value & 0x00010000) {
3833                 value &= 0xfffeffff;
3834                 writel(value, (void *)real_offset);
3835         }
3836         beiscsi_unmap_pci_function(phba);
3837         pci_free_consistent(phba->pcidev,
3838                             phba->ctrl.mbox_mem_alloced.size,
3839                             phba->ctrl.mbox_mem_alloced.va,
3840                             phba->ctrl.mbox_mem_alloced.dma);
3841         iscsi_host_remove(phba->shost);
3842         pci_dev_put(phba->pcidev);
3843         iscsi_host_free(phba->shost);
3844 }
3845
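/*
 * beiscsi_msix_enable - request num_cpus + 1 MSI-X vectors, one per CPU
 * event queue plus (presumably) one for the default/MCC event queue.
 * msix_enabled is only set when the full allocation succeeds.
 */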
3846 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3847 {
3848         int i, status;
3849
3850         for (i = 0; i <= phba->num_cpus; i++)
3851                 phba->msix_entries[i].entry = i;
3852
3853         status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3854                                  (phba->num_cpus + 1));
3855         if (!status)
3856                 phba->msix_enabled = true;
3857
3858         return;
3859 }
3860
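/*
 * beiscsi_dev_probe - PCI probe callback.  Enables the device, allocates
 * the HBA and derives the chip generation from the PCI device ID, sets
 * up MSI-X and the control path, handles the crashdump semaphore, reads
 * the firmware config, initialises the port, the MCC tag pool, the
 * workqueue, blk-iopoll and the IRQs, and finally enables interrupts.
 * Each failure point unwinds through the labels at the end of the
 * function.
 */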
3861 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3862                                 const struct pci_device_id *id)
3863 {
3864         struct beiscsi_hba *phba = NULL;
3865         struct hwi_controller *phwi_ctrlr;
3866         struct hwi_context_memory *phwi_context;
3867         struct be_eq_obj *pbe_eq;
3868         int ret, num_cpus, i;
3869         u8 *real_offset = 0;
3870         u32 value = 0;
3871
3872         ret = beiscsi_enable_pci(pcidev);
3873         if (ret < 0) {
3874                 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3875                         " Failed to enable pci device\n");
3876                 return ret;
3877         }
3878
3879         phba = beiscsi_hba_alloc(pcidev);
3880         if (!phba) {
3881                 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3882                         " Failed in beiscsi_hba_alloc\n");
3883                 goto disable_pci;
3884         }
3885
3886         switch (pcidev->device) {
3887         case BE_DEVICE_ID1:
3888         case OC_DEVICE_ID1:
3889         case OC_DEVICE_ID2:
3890                 phba->generation = BE_GEN2;
3891                 break;
3892         case BE_DEVICE_ID2:
3893         case OC_DEVICE_ID3:
3894                 phba->generation = BE_GEN3;
3895                 break;
3896         default:
3897                 phba->generation = 0;
3898         }
3899
3900         if (enable_msix)
3901                 num_cpus = find_num_cpus();
3902         else
3903                 num_cpus = 1;
3904         phba->num_cpus = num_cpus;
3905         SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
3906
3907         if (enable_msix)
3908                 beiscsi_msix_enable(phba);
3909         ret = be_ctrl_init(phba, pcidev);
3910         if (ret) {
3911                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3912                                 "Failed in be_ctrl_init\n");
3913                 goto hba_free;
3914         }
3915
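        /*
         * Only the first function probed looks at bit 16 of the
         * MPU_EP_SEMAPHORE register.  If it is already set, a previous
         * driver instance apparently did not shut down cleanly (e.g. a
         * crashdump/kdump boot), so the controller is soft-reset before
         * use; otherwise the bit is set to mark the adapter as in use.
         */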
3916         if (!num_hba) {
3917                 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
3918                 value = readl((void *)real_offset);
3919                 if (value & 0x00010000) {
3920                         gcrashmode++;
3921                         shost_printk(KERN_ERR, phba->shost,
3922                                 "Loading Driver in crashdump mode\n");
3923                         ret = beiscsi_pci_soft_reset(phba);
3924                         if (ret) {
3925                                 shost_printk(KERN_ERR, phba->shost,
3926                                         "Reset Failed. Aborting Crashdump\n");
3927                                 goto hba_free;
3928                         }
3929                         ret = be_chk_reset_complete(phba);
3930                         if (ret) {
3931                                 shost_printk(KERN_ERR, phba->shost,
3932                                         "Failed to get out of reset. "
3933                                         "Aborting Crashdump\n");
3934                                 goto hba_free;
3935                         }
3936                 } else {
3937                         value |= 0x00010000;
3938                         writel(value, (void *)real_offset);
3939                         num_hba++;
3940                 }
3941         }
3942
3943         spin_lock_init(&phba->io_sgl_lock);
3944         spin_lock_init(&phba->mgmt_sgl_lock);
3945         spin_lock_init(&phba->isr_lock);
3946         ret = mgmt_get_fw_config(&phba->ctrl, phba);
3947         if (ret != 0) {
3948                 shost_printk(KERN_ERR, phba->shost,
3949                              "Error getting fw config\n");
3950                 goto free_port;
3951         }
3952         phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3953         beiscsi_get_params(phba);
3954         phba->shost->can_queue = phba->params.ios_per_ctrl;
3955         ret = beiscsi_init_port(phba);
3956         if (ret < 0) {
3957                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3958                              "Failed in beiscsi_init_port\n");
3959                 goto free_port;
3960         }
3961
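        /*
         * Set up the MCC tag pool: tags 1..MAX_MCC_CMD, each with its own
         * wait queue for command completion, plus the alloc/free indices.
         */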
3962         for (i = 0; i < MAX_MCC_CMD; i++) {
3963                 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
3964                 phba->ctrl.mcc_tag[i] = i + 1;
3965                 phba->ctrl.mcc_numtag[i + 1] = 0;
3966                 phba->ctrl.mcc_tag_available++;
3967         }
3968
3969         phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
3970
3971         snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3972                  phba->shost->host_no);
3973         phba->wq = create_workqueue(phba->wq_name);
3974         if (!phba->wq) {
3975                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3976                                 "Failed to allocate work queue\n");
3977                 goto free_twq;
3978         }
3979
3980         INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3981
3982         phwi_ctrlr = phba->phwi_ctrlr;
3983         phwi_context = phwi_ctrlr->phwi_ctxt;
3984         if (blk_iopoll_enabled) {
3985                 for (i = 0; i < phba->num_cpus; i++) {
3986                         pbe_eq = &phwi_context->be_eq[i];
3987                         blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3988                                         be_iopoll);
3989                         blk_iopoll_enable(&pbe_eq->iopoll);
3990                 }
3991         }
3992         ret = beiscsi_init_irqs(phba);
3993         if (ret < 0) {
3994                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3995                              "Failed in beiscsi_init_irqs\n");
3996                 goto free_blkenbld;
3997         }
3998         hwi_enable_intr(phba);
3999         SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
4000         return 0;
4001
4002 free_blkenbld:
4003         destroy_workqueue(phba->wq);
4004         if (blk_iopoll_enabled)
4005                 for (i = 0; i < phba->num_cpus; i++) {
4006                         pbe_eq = &phwi_context->be_eq[i];
4007                         blk_iopoll_disable(&pbe_eq->iopoll);
4008                 }
4009 free_twq:
4010         beiscsi_clean_port(phba);
4011         beiscsi_free_mem(phba);
4012 free_port:
4013         real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4014
4015         value = readl((void *)real_offset);
4016
4017         if (value & 0x00010000) {
4018                 value &= 0xfffeffff;
4019                 writel(value, (void *)real_offset);
4020         }
4021
4022         pci_free_consistent(phba->pcidev,
4023                             phba->ctrl.mbox_mem_alloced.size,
4024                             phba->ctrl.mbox_mem_alloced.va,
4025                            phba->ctrl.mbox_mem_alloced.dma);
4026         beiscsi_unmap_pci_function(phba);
4027 hba_free:
4028         if (phba->msix_enabled)
4029                 pci_disable_msix(phba->pcidev);
4030         iscsi_host_remove(phba->shost);
4031         pci_dev_put(phba->pcidev);
4032         iscsi_host_free(phba->shost);
4033 disable_pci:
4034         pci_disable_device(pcidev);
4035         return ret;
4036 }
4037
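/*
 * iSCSI transport template registered with the iSCSI transport class.
 * The capability flags advertise full data-path offload with header and
 * data digest, text negotiation, multiple R2T and L0 recovery; the
 * callbacks mix beiscsi-specific handlers with generic libiscsi helpers
 * (connection teardown, stop_conn, send_pdu, session parameter access,
 * recovery timeout).
 */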
4038 struct iscsi_transport beiscsi_iscsi_transport = {
4039         .owner = THIS_MODULE,
4040         .name = DRV_NAME,
4041         .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
4042                 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
4043         .param_mask = ISCSI_MAX_RECV_DLENGTH |
4044                 ISCSI_MAX_XMIT_DLENGTH |
4045                 ISCSI_HDRDGST_EN |
4046                 ISCSI_DATADGST_EN |
4047                 ISCSI_INITIAL_R2T_EN |
4048                 ISCSI_MAX_R2T |
4049                 ISCSI_IMM_DATA_EN |
4050                 ISCSI_FIRST_BURST |
4051                 ISCSI_MAX_BURST |
4052                 ISCSI_PDU_INORDER_EN |
4053                 ISCSI_DATASEQ_INORDER_EN |
4054                 ISCSI_ERL |
4055                 ISCSI_CONN_PORT |
4056                 ISCSI_CONN_ADDRESS |
4057                 ISCSI_EXP_STATSN |
4058                 ISCSI_PERSISTENT_PORT |
4059                 ISCSI_PERSISTENT_ADDRESS |
4060                 ISCSI_TARGET_NAME | ISCSI_TPGT |
4061                 ISCSI_USERNAME | ISCSI_PASSWORD |
4062                 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
4063                 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
4064                 ISCSI_LU_RESET_TMO |
4065                 ISCSI_PING_TMO | ISCSI_RECV_TMO |
4066                 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
4067         .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
4068                                 ISCSI_HOST_INITIATOR_NAME,
4069         .create_session = beiscsi_session_create,
4070         .destroy_session = beiscsi_session_destroy,
4071         .create_conn = beiscsi_conn_create,
4072         .bind_conn = beiscsi_conn_bind,
4073         .destroy_conn = iscsi_conn_teardown,
4074         .set_param = beiscsi_set_param,
4075         .get_conn_param = beiscsi_conn_get_param,
4076         .get_session_param = iscsi_session_get_param,
4077         .get_host_param = beiscsi_get_host_param,
4078         .start_conn = beiscsi_conn_start,
4079         .stop_conn = iscsi_conn_stop,
4080         .send_pdu = iscsi_conn_send_pdu,
4081         .xmit_task = beiscsi_task_xmit,
4082         .cleanup_task = beiscsi_cleanup_task,
4083         .alloc_pdu = beiscsi_alloc_pdu,
4084         .parse_pdu_itt = beiscsi_parse_pdu,
4085         .get_stats = beiscsi_conn_get_stats,
4086         .ep_connect = beiscsi_ep_connect,
4087         .ep_poll = beiscsi_ep_poll,
4088         .ep_disconnect = beiscsi_ep_disconnect,
4089         .session_recovery_timedout = iscsi_session_recovery_timedout,
4090 };
4091
4092 static struct pci_driver beiscsi_pci_driver = {
4093         .name = DRV_NAME,
4094         .probe = beiscsi_dev_probe,
4095         .remove = beiscsi_remove,
4096         .id_table = beiscsi_pci_id_table,
4097 };
4098
4099
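/*
 * Module init registers the iSCSI transport first and then the PCI
 * driver, unregistering the transport again if PCI registration fails;
 * module exit reverses the two steps in the opposite order.
 */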
4100 static int __init beiscsi_module_init(void)
4101 {
4102         int ret;
4103
4104         beiscsi_scsi_transport =
4105                         iscsi_register_transport(&beiscsi_iscsi_transport);
4106         if (!beiscsi_scsi_transport) {
4107                 SE_DEBUG(DBG_LVL_1,
4108                          "beiscsi_module_init - Unable to register beiscsi "
4109                          "transport.\n");
4110                 return -ENOMEM;
4111         }
4112         SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
4113                  &beiscsi_iscsi_transport);
4114
4115         ret = pci_register_driver(&beiscsi_pci_driver);
4116         if (ret) {
4117                 SE_DEBUG(DBG_LVL_1,
4118                          "beiscsi_module_init - Unable to register "
4119                          "beiscsi pci driver.\n");
4120                 goto unregister_iscsi_transport;
4121         }
4122         return 0;
4123
4124 unregister_iscsi_transport:
4125         iscsi_unregister_transport(&beiscsi_iscsi_transport);
4126         return ret;
4127 }
4128
4129 static void __exit beiscsi_module_exit(void)
4130 {
4131         pci_unregister_driver(&beiscsi_pci_driver);
4132         iscsi_unregister_transport(&beiscsi_iscsi_transport);
4133 }
4134
4135 module_init(beiscsi_module_init);
4136 module_exit(beiscsi_module_exit);