[SCSI] be2iscsi: Fix to handle request_irq failure
drivers/scsi/be2iscsi/be_main.c
1 /**
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11  *
12  * Contact Information:
13  * linux-drivers@serverengines.com
14  *
15  *  ServerEngines
16  * 209 N. Fair Oaks Ave
17  * Sunnyvale, CA 94085
18  *
19  */
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/string.h>
27 #include <linux/kernel.h>
28 #include <linux/semaphore.h>
29
30 #include <scsi/libiscsi.h>
31 #include <scsi/scsi_transport_iscsi.h>
32 #include <scsi/scsi_transport.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi.h>
37 #include "be_main.h"
38 #include "be_iscsi.h"
39 #include "be_mgmt.h"
40
41 static unsigned int be_iopoll_budget = 10;
42 static unsigned int be_max_phys_size = 64;
43 static unsigned int enable_msix = 1;
44
45 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
46 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
47 MODULE_AUTHOR("ServerEngines Corporation");
48 MODULE_LICENSE("GPL");
49 module_param(be_iopoll_budget, int, 0);
50 module_param(enable_msix, int, 0);
51 module_param(be_max_phys_size, uint, S_IRUGO);
52 MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
53                                    " contiguous memory that can be allocated."
54                                    " Range is 16 - 128");
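
/*
 * Illustrative usage only (not part of the original source): these
 * parameters are supplied at module load time, e.g.
 *
 *   modprobe be2iscsi enable_msix=0 be_max_phys_size=64
 *
 * enable_msix and be_iopoll_budget use permission 0, so they are settable
 * only at load time; be_max_phys_size is S_IRUGO and can also be read back
 * through sysfs.
 */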
55
56 static int beiscsi_slave_configure(struct scsi_device *sdev)
57 {
58         blk_queue_max_segment_size(sdev->request_queue, 65536);
59         return 0;
60 }
61
62 static int beiscsi_eh_abort(struct scsi_cmnd *sc)
63 {
64         struct iscsi_cls_session *cls_session;
65         struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
66         struct beiscsi_io_task *aborted_io_task;
67         struct iscsi_conn *conn;
68         struct beiscsi_conn *beiscsi_conn;
69         struct beiscsi_hba *phba;
70         struct iscsi_session *session;
71         struct invalidate_command_table *inv_tbl;
72         unsigned int cid, tag, num_invalidate;
73
74         cls_session = starget_to_session(scsi_target(sc->device));
75         session = cls_session->dd_data;
76
77         spin_lock_bh(&session->lock);
78         if (!aborted_task || !aborted_task->sc) {
79                 /* we raced */
80                 spin_unlock_bh(&session->lock);
81                 return SUCCESS;
82         }
83
84         aborted_io_task = aborted_task->dd_data;
85         if (!aborted_io_task->scsi_cmnd) {
86                 /* raced or invalid command */
87                 spin_unlock_bh(&session->lock);
88                 return SUCCESS;
89         }
90         spin_unlock_bh(&session->lock);
91         conn = aborted_task->conn;
92         beiscsi_conn = conn->dd_data;
93         phba = beiscsi_conn->phba;
94
95         /* invalidate iocb */
96         cid = beiscsi_conn->beiscsi_conn_cid;
97         inv_tbl = phba->inv_tbl;
98         memset(inv_tbl, 0x0, sizeof(*inv_tbl));
99         inv_tbl->cid = cid;
100         inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
101         num_invalidate = 1;
102         tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
103         if (!tag) {
104                 shost_printk(KERN_WARNING, phba->shost,
105                              "mgmt_invalidate_icds could not be"
106                              " submitted\n");
107                 return FAILED;
108         } else {
109                 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
110                                          phba->ctrl.mcc_numtag[tag]);
111                 free_mcc_tag(&phba->ctrl, tag);
112         }
113
114         return iscsi_eh_abort(sc);
115 }
116
117 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
118 {
119         struct iscsi_task *abrt_task;
120         struct beiscsi_io_task *abrt_io_task;
121         struct iscsi_conn *conn;
122         struct beiscsi_conn *beiscsi_conn;
123         struct beiscsi_hba *phba;
124         struct iscsi_session *session;
125         struct iscsi_cls_session *cls_session;
126         struct invalidate_command_table *inv_tbl;
127         unsigned int cid, tag, i, num_invalidate;
128         int rc = FAILED;
129
130         /* invalidate iocbs */
131         cls_session = starget_to_session(scsi_target(sc->device));
132         session = cls_session->dd_data;
133         spin_lock_bh(&session->lock);
134         if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
135                 goto unlock;
136
137         conn = session->leadconn;
138         beiscsi_conn = conn->dd_data;
139         phba = beiscsi_conn->phba;
140         cid = beiscsi_conn->beiscsi_conn_cid;
141         inv_tbl = phba->inv_tbl;
142         memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
143         num_invalidate = 0;
144         for (i = 0; i < conn->session->cmds_max; i++) {
145                 abrt_task = conn->session->cmds[i];
146                 abrt_io_task = abrt_task->dd_data;
147                 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
148                         continue;
149
150                 if (sc->device->lun != abrt_task->sc->device->lun)
151                         continue;
152
153                 inv_tbl->cid = cid;
154                 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
155                 num_invalidate++;
156                 inv_tbl++;
157         }
158         spin_unlock_bh(&session->lock);
159         inv_tbl = phba->inv_tbl;
160
161         tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
162         if (!tag) {
163                 shost_printk(KERN_WARNING, phba->shost,
164                              "mgmt_invalidate_icds could not be"
165                              " submitted\n");
166                 return FAILED;
167         } else {
168                 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
169                                          phba->ctrl.mcc_numtag[tag]);
170                 free_mcc_tag(&phba->ctrl, tag);
171         }
172
173         return iscsi_eh_device_reset(sc);
174 unlock:
175         spin_unlock_bh(&session->lock);
176         return rc;
177 }
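
/*
 * Both error handlers above follow the same pattern: build an
 * invalidate_command_table entry (or entries) for the affected ICDs, submit
 * it to the firmware with mgmt_invalidate_icds(), wait interruptibly on the
 * returned MCC tag, free the tag, and only then fall through to the generic
 * libiscsi handler (iscsi_eh_abort() / iscsi_eh_device_reset()).
 */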
178
179 /*------------------- PCI Driver operations and data ----------------- */
180 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
181         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
182         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
183         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
184         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
185         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
186         { 0 }
187 };
188 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
189
190 static struct scsi_host_template beiscsi_sht = {
191         .module = THIS_MODULE,
192         .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
193         .proc_name = DRV_NAME,
194         .queuecommand = iscsi_queuecommand,
195         .change_queue_depth = iscsi_change_queue_depth,
196         .slave_configure = beiscsi_slave_configure,
197         .target_alloc = iscsi_target_alloc,
198         .eh_abort_handler = beiscsi_eh_abort,
199         .eh_device_reset_handler = beiscsi_eh_device_reset,
200         .eh_target_reset_handler = iscsi_eh_session_reset,
201         .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
202         .can_queue = BE2_IO_DEPTH,
203         .this_id = -1,
204         .max_sectors = BEISCSI_MAX_SECTORS,
205         .cmd_per_lun = BEISCSI_CMD_PER_LUN,
206         .use_clustering = ENABLE_CLUSTERING,
207 };
208
209 static struct scsi_transport_template *beiscsi_scsi_transport;
210
211 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
212 {
213         struct beiscsi_hba *phba;
214         struct Scsi_Host *shost;
215
216         shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
217         if (!shost) {
218                 dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
219                         "iscsi_host_alloc failed\n");
220                 return NULL;
221         }
222         shost->dma_boundary = pcidev->dma_mask;
223         shost->max_id = BE2_MAX_SESSIONS;
224         shost->max_channel = 0;
225         shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
226         shost->max_lun = BEISCSI_NUM_MAX_LUN;
227         shost->transportt = beiscsi_scsi_transport;
228         phba = iscsi_host_priv(shost);
229         memset(phba, 0, sizeof(*phba));
230         phba->shost = shost;
231         phba->pcidev = pci_dev_get(pcidev);
232         pci_set_drvdata(pcidev, phba);
233
234         if (iscsi_host_add(shost, &phba->pcidev->dev))
235                 goto free_devices;
236         return phba;
237
238 free_devices:
239         pci_dev_put(phba->pcidev);
240         iscsi_host_free(phba->shost);
241         return NULL;
242 }
243
244 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
245 {
246         if (phba->csr_va) {
247                 iounmap(phba->csr_va);
248                 phba->csr_va = NULL;
249         }
250         if (phba->db_va) {
251                 iounmap(phba->db_va);
252                 phba->db_va = NULL;
253         }
254         if (phba->pci_va) {
255                 iounmap(phba->pci_va);
256                 phba->pci_va = NULL;
257         }
258 }
259
260 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
261                                 struct pci_dev *pcidev)
262 {
263         u8 __iomem *addr;
264         int pcicfg_reg;
265
266         addr = ioremap_nocache(pci_resource_start(pcidev, 2),
267                                pci_resource_len(pcidev, 2));
268         if (addr == NULL)
269                 return -ENOMEM;
270         phba->ctrl.csr = addr;
271         phba->csr_va = addr;
272         phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
273
274         addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
275         if (addr == NULL)
276                 goto pci_map_err;
277         phba->ctrl.db = addr;
278         phba->db_va = addr;
279         phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);
280
281         if (phba->generation == BE_GEN2)
282                 pcicfg_reg = 1;
283         else
284                 pcicfg_reg = 0;
285
286         addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
287                                pci_resource_len(pcidev, pcicfg_reg));
288
289         if (addr == NULL)
290                 goto pci_map_err;
291         phba->ctrl.pcicfg = addr;
292         phba->pci_va = addr;
293         phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
294         return 0;
295
296 pci_map_err:
297         beiscsi_unmap_pci_function(phba);
298         return -ENOMEM;
299 }
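
/*
 * BAR usage established above: BAR 2 maps the CSR block, BAR 4 the doorbell
 * area (only the first 128 KB are mapped), and the PCI config shadow sits in
 * BAR 1 on BE_GEN2 adapters or BAR 0 otherwise.  A failure after the first
 * mapping unwinds everything through beiscsi_unmap_pci_function().
 */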
300
301 static int beiscsi_enable_pci(struct pci_dev *pcidev)
302 {
303         int ret;
304
305         ret = pci_enable_device(pcidev);
306         if (ret) {
307                 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
308                         "failed. Returning -ENODEV\n");
309                 return ret;
310         }
311
312         pci_set_master(pcidev);
313         if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
314                 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
315                 if (ret) {
316                         dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
317                         pci_disable_device(pcidev);
318                         return ret;
319                 }
320         }
321         return 0;
322 }
323
324 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
325 {
326         struct be_ctrl_info *ctrl = &phba->ctrl;
327         struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
328         struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
329         int status = 0;
330
331         ctrl->pdev = pdev;
332         status = beiscsi_map_pci_bars(phba, pdev);
333         if (status)
334                 return status;
335         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
336         mbox_mem_alloc->va = pci_alloc_consistent(pdev,
337                                                   mbox_mem_alloc->size,
338                                                   &mbox_mem_alloc->dma);
339         if (!mbox_mem_alloc->va) {
340                 beiscsi_unmap_pci_function(phba);
341                 status = -ENOMEM;
342                 return status;
343         }
344
345         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
346         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
347         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
348         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
349         spin_lock_init(&ctrl->mbox_lock);
350         spin_lock_init(&phba->ctrl.mcc_lock);
351         spin_lock_init(&phba->ctrl.mcc_cq_lock);
352
353         return status;
354 }
355
356 static void beiscsi_get_params(struct beiscsi_hba *phba)
357 {
358         phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
359                                     - (phba->fw_config.iscsi_cid_count
360                                     + BE2_TMFS
361                                     + BE2_NOPOUT_REQ));
362         phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
363         phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
364         phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
365         phba->params.num_sge_per_io = BE2_SGE;
366         phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
367         phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
368         phba->params.eq_timer = 64;
369         phba->params.num_eq_entries =
370             (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
371                                     + BE2_TMFS) / 512) + 1) * 512;
372         phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
373                                 ? 1024 : phba->params.num_eq_entries;
374         SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
375                              phba->params.num_eq_entries);
376         phba->params.num_cq_entries =
377             (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
378                                     + BE2_TMFS) / 512) + 1) * 512;
379         phba->params.wrbs_per_cxn = 256;
380 }
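
/*
 * The EQ/CQ sizing above rounds a worst-case entry count up to the next
 * multiple of 512 and then enforces a floor of 1024 EQ entries.  As a worked
 * example with illustrative values (the real ones come from be_main.h and
 * the firmware config): BE2_CMDS_PER_CXN = 64, iscsi_cid_count = 64 and
 * BE2_TMFS = 16 give 64 * 2 + 64 * 2 + 16 = 272 raw entries,
 * (272 / 512 + 1) * 512 = 512, which the floor then raises to 1024.
 */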
381
382 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
383                            unsigned int id, unsigned int clr_interrupt,
384                            unsigned int num_processed,
385                            unsigned char rearm, unsigned char event)
386 {
387         u32 val = 0;
388         val |= id & DB_EQ_RING_ID_MASK;
389         if (rearm)
390                 val |= 1 << DB_EQ_REARM_SHIFT;
391         if (clr_interrupt)
392                 val |= 1 << DB_EQ_CLR_SHIFT;
393         if (event)
394                 val |= 1 << DB_EQ_EVNT_SHIFT;
395         val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
396         iowrite32(val, phba->db_va + DB_EQ_OFFSET);
397 }
398
399 /**
400  * be_isr_mcc - MSI-X interrupt handler for the MCC event queue
401  * @irq: Not used
402  * @dev_id: Pointer to the be_eq_obj serving the MCC event queue
403  */
404 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
405 {
406         struct beiscsi_hba *phba;
407         struct be_eq_entry *eqe = NULL;
408         struct be_queue_info *eq;
409         struct be_queue_info *mcc;
410         unsigned int num_eq_processed;
411         struct be_eq_obj *pbe_eq;
412         unsigned long flags;
413
414         pbe_eq = dev_id;
415         eq = &pbe_eq->q;
416         phba =  pbe_eq->phba;
417         mcc = &phba->ctrl.mcc_obj.cq;
418         eqe = queue_tail_node(eq);
419         if (!eqe)
420                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
421
422         num_eq_processed = 0;
423
424         while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
425                                 & EQE_VALID_MASK) {
426                 if (((eqe->dw[offsetof(struct amap_eq_entry,
427                      resource_id) / 32] &
428                      EQE_RESID_MASK) >> 16) == mcc->id) {
429                         spin_lock_irqsave(&phba->isr_lock, flags);
430                         phba->todo_mcc_cq = 1;
431                         spin_unlock_irqrestore(&phba->isr_lock, flags);
432                 }
433                 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
434                 queue_tail_inc(eq);
435                 eqe = queue_tail_node(eq);
436                 num_eq_processed++;
437         }
438         if (phba->todo_mcc_cq)
439                 queue_work(phba->wq, &phba->work_cqs);
440         if (num_eq_processed)
441                 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
442
443         return IRQ_HANDLED;
444 }
445
446 /**
447  * be_isr_msix - MSI-X interrupt handler for the per-CPU I/O event queues
448  * @irq: Not used
449  * @dev_id: Pointer to the be_eq_obj for this vector
450  */
451 static irqreturn_t be_isr_msix(int irq, void *dev_id)
452 {
453         struct beiscsi_hba *phba;
454         struct be_eq_entry *eqe = NULL;
455         struct be_queue_info *eq;
456         struct be_queue_info *cq;
457         unsigned int num_eq_processed;
458         struct be_eq_obj *pbe_eq;
459         unsigned long flags;
460
461         pbe_eq = dev_id;
462         eq = &pbe_eq->q;
463         cq = pbe_eq->cq;
464         eqe = queue_tail_node(eq);
465         if (!eqe)
466                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
467
468         phba = pbe_eq->phba;
469         num_eq_processed = 0;
470         if (blk_iopoll_enabled) {
471                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
472                                         & EQE_VALID_MASK) {
473                         if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
474                                 blk_iopoll_sched(&pbe_eq->iopoll);
475
476                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
477                         queue_tail_inc(eq);
478                         eqe = queue_tail_node(eq);
479                         num_eq_processed++;
480                 }
481                 if (num_eq_processed)
482                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
483
484                 return IRQ_HANDLED;
485         } else {
486                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
487                                                 & EQE_VALID_MASK) {
488                         spin_lock_irqsave(&phba->isr_lock, flags);
489                         phba->todo_cq = 1;
490                         spin_unlock_irqrestore(&phba->isr_lock, flags);
491                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
492                         queue_tail_inc(eq);
493                         eqe = queue_tail_node(eq);
494                         num_eq_processed++;
495                 }
496                 if (phba->todo_cq)
497                         queue_work(phba->wq, &phba->work_cqs);
498
499                 if (num_eq_processed)
500                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
501
502                 return IRQ_HANDLED;
503         }
504 }
505
506 /**
507  * be_isr - Interrupt handler used when MSI-X is not enabled
508  * @irq: Not used
509  * @dev_id: Pointer to host adapter structure
510  */
511 static irqreturn_t be_isr(int irq, void *dev_id)
512 {
513         struct beiscsi_hba *phba;
514         struct hwi_controller *phwi_ctrlr;
515         struct hwi_context_memory *phwi_context;
516         struct be_eq_entry *eqe = NULL;
517         struct be_queue_info *eq;
518         struct be_queue_info *cq;
519         struct be_queue_info *mcc;
520         unsigned long flags, index;
521         unsigned int num_mcceq_processed, num_ioeq_processed;
522         struct be_ctrl_info *ctrl;
523         struct be_eq_obj *pbe_eq;
524         int isr;
525
526         phba = dev_id;
527         ctrl = &phba->ctrl;
528         isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
529                        (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
530         if (!isr)
531                 return IRQ_NONE;
532
533         phwi_ctrlr = phba->phwi_ctrlr;
534         phwi_context = phwi_ctrlr->phwi_ctxt;
535         pbe_eq = &phwi_context->be_eq[0];
536
537         eq = &phwi_context->be_eq[0].q;
538         mcc = &phba->ctrl.mcc_obj.cq;
539         index = 0;
540         eqe = queue_tail_node(eq);
541         if (!eqe)
542                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
543
544         num_ioeq_processed = 0;
545         num_mcceq_processed = 0;
546         if (blk_iopoll_enabled) {
547                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
548                                         & EQE_VALID_MASK) {
549                         if (((eqe->dw[offsetof(struct amap_eq_entry,
550                              resource_id) / 32] &
551                              EQE_RESID_MASK) >> 16) == mcc->id) {
552                                 spin_lock_irqsave(&phba->isr_lock, flags);
553                                 phba->todo_mcc_cq = 1;
554                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
555                                 num_mcceq_processed++;
556                         } else {
557                                 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
558                                         blk_iopoll_sched(&pbe_eq->iopoll);
559                                 num_ioeq_processed++;
560                         }
561                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
562                         queue_tail_inc(eq);
563                         eqe = queue_tail_node(eq);
564                 }
565                 if (num_ioeq_processed || num_mcceq_processed) {
566                         if (phba->todo_mcc_cq)
567                                 queue_work(phba->wq, &phba->work_cqs);
568
569                         if ((num_mcceq_processed) && (!num_ioeq_processed))
570                                 hwi_ring_eq_db(phba, eq->id, 0,
571                                               (num_ioeq_processed +
572                                                num_mcceq_processed) , 1, 1);
573                         else
574                                 hwi_ring_eq_db(phba, eq->id, 0,
575                                                (num_ioeq_processed +
576                                                 num_mcceq_processed), 0, 1);
577
578                         return IRQ_HANDLED;
579                 } else
580                         return IRQ_NONE;
581         } else {
582                 cq = &phwi_context->be_cq[0];
583                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
584                                                 & EQE_VALID_MASK) {
585
586                         if (((eqe->dw[offsetof(struct amap_eq_entry,
587                              resource_id) / 32] &
588                              EQE_RESID_MASK) >> 16) != cq->id) {
589                                 spin_lock_irqsave(&phba->isr_lock, flags);
590                                 phba->todo_mcc_cq = 1;
591                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
592                         } else {
593                                 spin_lock_irqsave(&phba->isr_lock, flags);
594                                 phba->todo_cq = 1;
595                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
596                         }
597                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
598                         queue_tail_inc(eq);
599                         eqe = queue_tail_node(eq);
600                         num_ioeq_processed++;
601                 }
602                 if (phba->todo_cq || phba->todo_mcc_cq)
603                         queue_work(phba->wq, &phba->work_cqs);
604
605                 if (num_ioeq_processed) {
606                         hwi_ring_eq_db(phba, eq->id, 0,
607                                        num_ioeq_processed, 1, 1);
608                         return IRQ_HANDLED;
609                 } else
610                         return IRQ_NONE;
611         }
612 }
613
614 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
615 {
616         struct pci_dev *pcidev = phba->pcidev;
617         struct hwi_controller *phwi_ctrlr;
618         struct hwi_context_memory *phwi_context;
619         int ret, msix_vec, i, j;
620         char desc[32];
621
622         phwi_ctrlr = phba->phwi_ctrlr;
623         phwi_context = phwi_ctrlr->phwi_ctxt;
624
625         if (phba->msix_enabled) {
626                 for (i = 0; i < phba->num_cpus; i++) {
627                         sprintf(desc, "beiscsi_msix_%04x", i);
628                         msix_vec = phba->msix_entries[i].vector;
629                         ret = request_irq(msix_vec, be_isr_msix, 0, desc,
630                                           &phwi_context->be_eq[i]);
631                         if (ret) {
632                                 shost_printk(KERN_ERR, phba->shost,
633                                              "beiscsi_init_irqs-Failed to "
634                                              "register msix for i = %d\n", i);
635                                 if (!i)
636                                         return ret;
637                                 goto free_msix_irqs;
638                         }
639                 }
640                 msix_vec = phba->msix_entries[i].vector;
641                 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
642                                   &phwi_context->be_eq[i]);
643                 if (ret) {
644                         shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
645                                      "Failed to register beiscsi_msix_mcc\n");
646                         i++;
647                         goto free_msix_irqs;
648                 }
649
650         } else {
651                 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
652                                   "beiscsi", phba);
653                 if (ret) {
654                         shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
655                                      "Failed to register irq\n");
656                         return ret;
657                 }
658         }
659         return 0;
660 free_msix_irqs:
661         for (j = i - 1; j >= 0; j--)
662                 free_irq(phba->msix_entries[j].vector, &phwi_context->be_eq[j]);
663         return ret;
664 }
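
/*
 * Interrupt layout set up above: with MSI-X, one vector per CPU is wired to
 * be_isr_msix() for the I/O event queues and one additional vector to
 * be_isr_mcc() for the MCC event queue; without MSI-X a single (possibly
 * shared) INTx line is serviced by be_isr().  On a request_irq() failure the
 * free_msix_irqs path releases only the vectors registered so far before
 * propagating the error.
 */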
665
666 static void hwi_ring_cq_db(struct beiscsi_hba *phba,
667                            unsigned int id, unsigned int num_processed,
668                            unsigned char rearm, unsigned char event)
669 {
670         u32 val = 0;
671         val |= id & DB_CQ_RING_ID_MASK;
672         if (rearm)
673                 val |= 1 << DB_CQ_REARM_SHIFT;
674         val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
675         iowrite32(val, phba->db_va + DB_CQ_OFFSET);
676 }
677
678 static unsigned int
679 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
680                           struct beiscsi_hba *phba,
681                           unsigned short cid,
682                           struct pdu_base *ppdu,
683                           unsigned long pdu_len,
684                           void *pbuffer, unsigned long buf_len)
685 {
686         struct iscsi_conn *conn = beiscsi_conn->conn;
687         struct iscsi_session *session = conn->session;
688         struct iscsi_task *task;
689         struct beiscsi_io_task *io_task;
690         struct iscsi_hdr *login_hdr;
691
692         switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
693                                                 PDUBASE_OPCODE_MASK) {
694         case ISCSI_OP_NOOP_IN:
695                 pbuffer = NULL;
696                 buf_len = 0;
697                 break;
698         case ISCSI_OP_ASYNC_EVENT:
699                 break;
700         case ISCSI_OP_REJECT:
701                 WARN_ON(!pbuffer);
702                 WARN_ON(buf_len != 48);
703                 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
704                 break;
705         case ISCSI_OP_LOGIN_RSP:
706         case ISCSI_OP_TEXT_RSP:
707                 task = conn->login_task;
708                 io_task = task->dd_data;
709                 login_hdr = (struct iscsi_hdr *)ppdu;
710                 login_hdr->itt = io_task->libiscsi_itt;
711                 break;
712         default:
713                 shost_printk(KERN_WARNING, phba->shost,
714                              "Unrecognized opcode 0x%x in async msg\n",
715                              (ppdu->
716                              dw[offsetof(struct amap_pdu_base, opcode) / 32]
717                                                 & PDUBASE_OPCODE_MASK));
718                 return 1;
719         }
720
721         spin_lock_bh(&session->lock);
722         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
723         spin_unlock_bh(&session->lock);
724         return 0;
725 }
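
/*
 * Unsolicited PDUs handled above: NOP-In (any data is dropped), async
 * events, Reject (a 48-byte payload is expected), and Login/Text responses,
 * whose ITT is rewritten to the libiscsi ITT before completion.  Recognized
 * PDUs are completed to libiscsi under the session lock via
 * __iscsi_complete_pdu(); anything else is logged and rejected (return 1).
 */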
726
727 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
728 {
729         struct sgl_handle *psgl_handle;
730
731         if (phba->io_sgl_hndl_avbl) {
732                 SE_DEBUG(DBG_LVL_8,
733                          "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
734                          phba->io_sgl_alloc_index);
735                 psgl_handle = phba->io_sgl_hndl_base[phba->
736                                                 io_sgl_alloc_index];
737                 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
738                 phba->io_sgl_hndl_avbl--;
739                 if (phba->io_sgl_alloc_index == (phba->params.
740                                                  ios_per_ctrl - 1))
741                         phba->io_sgl_alloc_index = 0;
742                 else
743                         phba->io_sgl_alloc_index++;
744         } else
745                 psgl_handle = NULL;
746         return psgl_handle;
747 }
748
749 static void
750 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
751 {
752         SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
753                  phba->io_sgl_free_index);
754         if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
755                 /*
756                  * this can happen if clean_task is called on a task that
757                  * failed in xmit_task or alloc_pdu.
758                  */
759                  SE_DEBUG(DBG_LVL_8,
760                          "Double Free in IO SGL io_sgl_free_index=%d, "
761                          "value there=%p\n", phba->io_sgl_free_index,
762                          phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
763                 return;
764         }
765         phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
766         phba->io_sgl_hndl_avbl++;
767         if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
768                 phba->io_sgl_free_index = 0;
769         else
770                 phba->io_sgl_free_index++;
771 }
772
773 /**
774  * alloc_wrb_handle - To allocate a wrb handle
775  * @phba: The hba pointer
776  * @cid: The cid to use for allocation
777  *
778  * This happens under session_lock until submission to chip
779  */
780 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
781 {
782         struct hwi_wrb_context *pwrb_context;
783         struct hwi_controller *phwi_ctrlr;
784         struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
785
786         phwi_ctrlr = phba->phwi_ctrlr;
787         pwrb_context = &phwi_ctrlr->wrb_context[cid];
788         if (pwrb_context->wrb_handles_available >= 2) {
789                 pwrb_handle = pwrb_context->pwrb_handle_base[
790                                             pwrb_context->alloc_index];
791                 pwrb_context->wrb_handles_available--;
792                 if (pwrb_context->alloc_index ==
793                                                 (phba->params.wrbs_per_cxn - 1))
794                         pwrb_context->alloc_index = 0;
795                 else
796                         pwrb_context->alloc_index++;
797                 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
798                                                 pwrb_context->alloc_index];
799                 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
800         } else
801                 pwrb_handle = NULL;
802         return pwrb_handle;
803 }
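
/*
 * alloc_wrb_handle() insists on at least two free handles because, besides
 * handing out the handle at alloc_index, it reads the following slot to
 * pre-link nxt_wrb_index; one handle therefore always stays in the ring.
 */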
804
805 /**
806  * free_wrb_handle - To free the wrb handle back to pool
807  * @phba: The hba pointer
808  * @pwrb_context: The context to free from
809  * @pwrb_handle: The wrb_handle to free
810  *
811  * This happens under session_lock until submission to chip
812  */
813 static void
814 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
815                 struct wrb_handle *pwrb_handle)
816 {
817         pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
818         pwrb_context->wrb_handles_available++;
819         if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
820                 pwrb_context->free_index = 0;
821         else
822                 pwrb_context->free_index++;
823
824         SE_DEBUG(DBG_LVL_8,
825                  "FREE WRB: pwrb_handle=%p free_index=0x%x "
826                  "wrb_handles_available=%d\n",
827                  pwrb_handle, pwrb_context->free_index,
828                  pwrb_context->wrb_handles_available);
829 }
830
831 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
832 {
833         struct sgl_handle *psgl_handle;
834
835         if (phba->eh_sgl_hndl_avbl) {
836                 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
837                 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
838                 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
839                          phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
840                 phba->eh_sgl_hndl_avbl--;
841                 if (phba->eh_sgl_alloc_index ==
842                     (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
843                      1))
844                         phba->eh_sgl_alloc_index = 0;
845                 else
846                         phba->eh_sgl_alloc_index++;
847         } else
848                 psgl_handle = NULL;
849         return psgl_handle;
850 }
851
852 void
853 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
854 {
855
856         SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle, eh_sgl_free_index=%d\n",
857                              phba->eh_sgl_free_index);
858         if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
859                 /*
860                  * this can happen if clean_task is called on a task that
861                  * failed in xmit_task or alloc_pdu.
862                  */
863                 SE_DEBUG(DBG_LVL_8,
864                          "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
865                          phba->eh_sgl_free_index);
866                 return;
867         }
868         phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
869         phba->eh_sgl_hndl_avbl++;
870         if (phba->eh_sgl_free_index ==
871             (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
872                 phba->eh_sgl_free_index = 0;
873         else
874                 phba->eh_sgl_free_index++;
875 }
876
877 static void
878 be_complete_io(struct beiscsi_conn *beiscsi_conn,
879                struct iscsi_task *task, struct sol_cqe *psol)
880 {
881         struct beiscsi_io_task *io_task = task->dd_data;
882         struct be_status_bhs *sts_bhs =
883                                 (struct be_status_bhs *)io_task->cmd_bhs;
884         struct iscsi_conn *conn = beiscsi_conn->conn;
885         unsigned int sense_len;
886         unsigned char *sense;
887         u32 resid = 0, exp_cmdsn, max_cmdsn;
888         u8 rsp, status, flags;
889
890         exp_cmdsn = (psol->
891                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
892                         & SOL_EXP_CMD_SN_MASK);
893         max_cmdsn = ((psol->
894                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
895                         & SOL_EXP_CMD_SN_MASK) +
896                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
897                                 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
898         rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
899                                                 & SOL_RESP_MASK) >> 16);
900         status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
901                                                 & SOL_STS_MASK) >> 8);
902         flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
903                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
904
905         task->sc->result = (DID_OK << 16) | status;
906         if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
907                 task->sc->result = DID_ERROR << 16;
908                 goto unmap;
909         }
910
911         /* bidi not initially supported */
912         if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
913                 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
914                                 32] & SOL_RES_CNT_MASK);
915
916                 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
917                         task->sc->result = DID_ERROR << 16;
918
919                 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
920                         scsi_set_resid(task->sc, resid);
921                         if (!status && (scsi_bufflen(task->sc) - resid <
922                             task->sc->underflow))
923                                 task->sc->result = DID_ERROR << 16;
924                 }
925         }
926
927         if (status == SAM_STAT_CHECK_CONDITION) {
928                 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
929                 sense = sts_bhs->sense_info + sizeof(unsigned short);
930                 sense_len = be16_to_cpu(*slen);
931                 memcpy(task->sc->sense_buffer, sense,
932                        min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
933         }
934
935         if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
936                 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
937                                                         & SOL_RES_CNT_MASK)
938                          conn->rxdata_octets += (psol->
939                              dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
940                              & SOL_RES_CNT_MASK);
941         }
942 unmap:
943         scsi_dma_unmap(io_task->scsi_cmnd);
944         iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
945 }
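
/*
 * Sequence-number math used above: the solicited CQE carries ExpCmdSN and
 * the command window, and MaxCmdSN is reconstructed as
 *
 *   max_cmdsn = exp_cmdsn + cmd_wnd - 1
 *
 * e.g. ExpCmdSN 100 with a window of 32 yields MaxCmdSN 131, matching the
 * usual iSCSI definition of the command window.
 */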
946
947 static void
948 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
949                    struct iscsi_task *task, struct sol_cqe *psol)
950 {
951         struct iscsi_logout_rsp *hdr;
952         struct beiscsi_io_task *io_task = task->dd_data;
953         struct iscsi_conn *conn = beiscsi_conn->conn;
954
955         hdr = (struct iscsi_logout_rsp *)task->hdr;
956         hdr->opcode = ISCSI_OP_LOGOUT_RSP;
957         hdr->t2wait = 5;
958         hdr->t2retain = 0;
959         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
960                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
961         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
962                                         32] & SOL_RESP_MASK);
963         hdr->exp_cmdsn = cpu_to_be32(psol->
964                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
965                                         & SOL_EXP_CMD_SN_MASK);
966         hdr->max_cmdsn = be32_to_cpu((psol->
967                          dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
968                                         & SOL_EXP_CMD_SN_MASK) +
969                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
970                                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
971         hdr->dlength[0] = 0;
972         hdr->dlength[1] = 0;
973         hdr->dlength[2] = 0;
974         hdr->hlength = 0;
975         hdr->itt = io_task->libiscsi_itt;
976         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
977 }
978
979 static void
980 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
981                 struct iscsi_task *task, struct sol_cqe *psol)
982 {
983         struct iscsi_tm_rsp *hdr;
984         struct iscsi_conn *conn = beiscsi_conn->conn;
985         struct beiscsi_io_task *io_task = task->dd_data;
986
987         hdr = (struct iscsi_tm_rsp *)task->hdr;
988         hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
989         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
990                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
991         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
992                                         32] & SOL_RESP_MASK);
993         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
994                                     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
995         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
996                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
997                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
998                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
999         hdr->itt = io_task->libiscsi_itt;
1000         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1001 }
1002
1003 static void
1004 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1005                        struct beiscsi_hba *phba, struct sol_cqe *psol)
1006 {
1007         struct hwi_wrb_context *pwrb_context;
1008         struct wrb_handle *pwrb_handle = NULL;
1009         struct hwi_controller *phwi_ctrlr;
1010         struct iscsi_task *task;
1011         struct beiscsi_io_task *io_task;
1012         struct iscsi_conn *conn = beiscsi_conn->conn;
1013         struct iscsi_session *session = conn->session;
1014
1015         phwi_ctrlr = phba->phwi_ctrlr;
1016         pwrb_context = &phwi_ctrlr->wrb_context[((psol->
1017                                 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1018                                 SOL_CID_MASK) >> 6) -
1019                                 phba->fw_config.iscsi_cid_start];
1020         pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1021                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
1022                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
1023         task = pwrb_handle->pio_handle;
1024
1025         io_task = task->dd_data;
1026         spin_lock(&phba->mgmt_sgl_lock);
1027         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
1028         spin_unlock(&phba->mgmt_sgl_lock);
1029         spin_lock_bh(&session->lock);
1030         free_wrb_handle(phba, pwrb_context, pwrb_handle);
1031         spin_unlock_bh(&session->lock);
1032 }
1033
1034 static void
1035 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1036                        struct iscsi_task *task, struct sol_cqe *psol)
1037 {
1038         struct iscsi_nopin *hdr;
1039         struct iscsi_conn *conn = beiscsi_conn->conn;
1040         struct beiscsi_io_task *io_task = task->dd_data;
1041
1042         hdr = (struct iscsi_nopin *)task->hdr;
1043         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1044                         & SOL_FLAGS_MASK) >> 24) | 0x80;
1045         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
1046                                      i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
1047         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
1048                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
1049                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1050                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1051         hdr->opcode = ISCSI_OP_NOOP_IN;
1052         hdr->itt = io_task->libiscsi_itt;
1053         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1054 }
1055
1056 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1057                              struct beiscsi_hba *phba, struct sol_cqe *psol)
1058 {
1059         struct hwi_wrb_context *pwrb_context;
1060         struct wrb_handle *pwrb_handle;
1061         struct iscsi_wrb *pwrb = NULL;
1062         struct hwi_controller *phwi_ctrlr;
1063         struct iscsi_task *task;
1064         unsigned int type;
1065         struct iscsi_conn *conn = beiscsi_conn->conn;
1066         struct iscsi_session *session = conn->session;
1067
1068         phwi_ctrlr = phba->phwi_ctrlr;
1069         pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
1070                                 (struct amap_sol_cqe, cid) / 32]
1071                                 & SOL_CID_MASK) >> 6) -
1072                                 phba->fw_config.iscsi_cid_start];
1073         pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1074                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
1075                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
1076         task = pwrb_handle->pio_handle;
1077         pwrb = pwrb_handle->pwrb;
1078         type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
1079                                  WRB_TYPE_MASK) >> 28;
1080
1081         spin_lock_bh(&session->lock);
1082         switch (type) {
1083         case HWH_TYPE_IO:
1084         case HWH_TYPE_IO_RD:
1085                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
1086                      ISCSI_OP_NOOP_OUT)
1087                         be_complete_nopin_resp(beiscsi_conn, task, psol);
1088                 else
1089                         be_complete_io(beiscsi_conn, task, psol);
1090                 break;
1091
1092         case HWH_TYPE_LOGOUT:
1093                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1094                         be_complete_logout(beiscsi_conn, task, psol);
1095                 else
1096                         be_complete_tmf(beiscsi_conn, task, psol);
1097
1098                 break;
1099
1100         case HWH_TYPE_LOGIN:
1101                 SE_DEBUG(DBG_LVL_1,
1102                          "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
1103                          "- Solicited path\n");
1104                 break;
1105
1106         case HWH_TYPE_NOP:
1107                 be_complete_nopin_resp(beiscsi_conn, task, psol);
1108                 break;
1109
1110         default:
1111                 shost_printk(KERN_WARNING, phba->shost,
1112                                 "In hwi_complete_cmd, unknown type = %d "
1113                                 "wrb_index 0x%x CID 0x%x\n", type,
1114                                 ((psol->dw[offsetof(struct amap_iscsi_wrb,
1115                                 type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1116                                 ((psol->dw[offsetof(struct amap_sol_cqe,
1117                                 cid) / 32] & SOL_CID_MASK) >> 6));
1118                 break;
1119         }
1120
1121         spin_unlock_bh(&session->lock);
1122 }
1123
1124 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1125                                           *pasync_ctx, unsigned int is_header,
1126                                           unsigned int host_write_ptr)
1127 {
1128         if (is_header)
1129                 return &pasync_ctx->async_entry[host_write_ptr].
1130                     header_busy_list;
1131         else
1132                 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1133 }
1134
1135 static struct async_pdu_handle *
1136 hwi_get_async_handle(struct beiscsi_hba *phba,
1137                      struct beiscsi_conn *beiscsi_conn,
1138                      struct hwi_async_pdu_context *pasync_ctx,
1139                      struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1140 {
1141         struct be_bus_address phys_addr;
1142         struct list_head *pbusy_list;
1143         struct async_pdu_handle *pasync_handle = NULL;
1144         int buffer_len = 0;
1145         unsigned char buffer_index = -1;
1146         unsigned char is_header = 0;
1147
1148         phys_addr.u.a32.address_lo =
1149             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1150             ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1151                                                 & PDUCQE_DPL_MASK) >> 16);
1152         phys_addr.u.a32.address_hi =
1153             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1154
1155         phys_addr.u.a64.address =
1156                         *((unsigned long long *)(&phys_addr.u.a64.address));
1157
1158         switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1159                         & PDUCQE_CODE_MASK) {
1160         case UNSOL_HDR_NOTIFY:
1161                 is_header = 1;
1162
1163                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1164                         (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1165                         index) / 32] & PDUCQE_INDEX_MASK));
1166
1167                 buffer_len = (unsigned int)(phys_addr.u.a64.address -
1168                                 pasync_ctx->async_header.pa_base.u.a64.address);
1169
1170                 buffer_index = buffer_len /
1171                                 pasync_ctx->async_header.buffer_size;
1172
1173                 break;
1174         case UNSOL_DATA_NOTIFY:
1175                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1176                                         dw[offsetof(struct amap_i_t_dpdu_cqe,
1177                                         index) / 32] & PDUCQE_INDEX_MASK));
1178                 buffer_len = (unsigned long)(phys_addr.u.a64.address -
1179                                         pasync_ctx->async_data.pa_base.u.
1180                                         a64.address);
1181                 buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
1182                 break;
1183         default:
1184                 pbusy_list = NULL;
1185                 shost_printk(KERN_WARNING, phba->shost,
1186                         "Unexpected code=%d\n",
1187                          pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1188                                         code) / 32] & PDUCQE_CODE_MASK);
1189                 return NULL;
1190         }
1191
1192         WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
1193         WARN_ON(list_empty(pbusy_list));
1194         list_for_each_entry(pasync_handle, pbusy_list, link) {
1195                 WARN_ON(pasync_handle->consumed);
1196                 if (pasync_handle->index == buffer_index)
1197                         break;
1198         }
1199
1200         WARN_ON(!pasync_handle);
1201
1202         pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1203                                              phba->fw_config.iscsi_cid_start;
1204         pasync_handle->is_header = is_header;
1205         pasync_handle->buffer_len = ((pdpdu_cqe->
1206                         dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1207                         & PDUCQE_DPL_MASK) >> 16);
1208
1209         *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1210                         index) / 32] & PDUCQE_INDEX_MASK);
1211         return pasync_handle;
1212 }
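
/*
 * hwi_get_async_handle() maps the CQE back to a buffer by taking the
 * physical address reported in the CQE (less the data placement length),
 * subtracting the ring's base physical address, and dividing by the
 * per-entry buffer size; the resulting index is matched against the handles
 * queued on the busy list for that ring slot.
 */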
1213
1214 static unsigned int
1215 hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1216                            unsigned int is_header, unsigned int cq_index)
1217 {
1218         struct list_head *pbusy_list;
1219         struct async_pdu_handle *pasync_handle;
1220         unsigned int num_entries, writables = 0;
1221         unsigned int *pep_read_ptr, *pwritables;
1222
1223
1224         if (is_header) {
1225                 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1226                 pwritables = &pasync_ctx->async_header.writables;
1227                 num_entries = pasync_ctx->async_header.num_entries;
1228         } else {
1229                 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1230                 pwritables = &pasync_ctx->async_data.writables;
1231                 num_entries = pasync_ctx->async_data.num_entries;
1232         }
1233
1234         while ((*pep_read_ptr) != cq_index) {
1235                 (*pep_read_ptr)++;
1236                 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1237
1238                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1239                                                      *pep_read_ptr);
1240                 if (writables == 0)
1241                         WARN_ON(list_empty(pbusy_list));
1242
1243                 if (!list_empty(pbusy_list)) {
1244                         pasync_handle = list_entry(pbusy_list->next,
1245                                                    struct async_pdu_handle,
1246                                                    link);
1247                         WARN_ON(!pasync_handle);
1248                         pasync_handle->consumed = 1;
1249                 }
1250
1251                 writables++;
1252         }
1253
1254         if (!writables) {
1255                 SE_DEBUG(DBG_LVL_1,
1256                          "Duplicate notification received - index 0x%x!!\n",
1257                          cq_index);
1258                 WARN_ON(1);
1259         }
1260
1261         *pwritables = *pwritables + writables;
1262         return 0;
1263 }
1264
1265 static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1266                                        unsigned int cri)
1267 {
1268         struct hwi_controller *phwi_ctrlr;
1269         struct hwi_async_pdu_context *pasync_ctx;
1270         struct async_pdu_handle *pasync_handle, *tmp_handle;
1271         struct list_head *plist;
1272         unsigned int i = 0;
1273
1274         phwi_ctrlr = phba->phwi_ctrlr;
1275         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1276
1277         plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1278
1279         list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1280                 list_del(&pasync_handle->link);
1281
1282                 if (i == 0) {
1283                         list_add_tail(&pasync_handle->link,
1284                                       &pasync_ctx->async_header.free_list);
1285                         pasync_ctx->async_header.free_entries++;
1286                         i++;
1287                 } else {
1288                         list_add_tail(&pasync_handle->link,
1289                                       &pasync_ctx->async_data.free_list);
1290                         pasync_ctx->async_data.free_entries++;
1291                         i++;
1292                 }
1293         }
1294
1295         INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1296         pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1297         pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1298         return 0;
1299 }
1300
1301 static struct phys_addr *
1302 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1303                      unsigned int is_header, unsigned int host_write_ptr)
1304 {
1305         struct phys_addr *pasync_sge = NULL;
1306
1307         if (is_header)
1308                 pasync_sge = pasync_ctx->async_header.ring_base;
1309         else
1310                 pasync_sge = pasync_ctx->async_data.ring_base;
1311
1312         return pasync_sge + host_write_ptr;
1313 }
1314
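     /*
      * Refill the default PDU header or data ring from the free list:
      * handles are moved onto the busy list, their physical addresses are
      * written into the ring SGEs, and the RXULP doorbell is rung with the
      * number of entries posted.
      */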
1315 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1316                                    unsigned int is_header)
1317 {
1318         struct hwi_controller *phwi_ctrlr;
1319         struct hwi_async_pdu_context *pasync_ctx;
1320         struct async_pdu_handle *pasync_handle;
1321         struct list_head *pfree_link, *pbusy_list;
1322         struct phys_addr *pasync_sge;
1323         unsigned int ring_id, num_entries;
1324         unsigned int host_write_num;
1325         unsigned int writables;
1326         unsigned int i = 0;
1327         u32 doorbell = 0;
1328
1329         phwi_ctrlr = phba->phwi_ctrlr;
1330         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1331
1332         if (is_header) {
1333                 num_entries = pasync_ctx->async_header.num_entries;
1334                 writables = min(pasync_ctx->async_header.writables,
1335                                 pasync_ctx->async_header.free_entries);
1336                 pfree_link = pasync_ctx->async_header.free_list.next;
1337                 host_write_num = pasync_ctx->async_header.host_write_ptr;
1338                 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1339         } else {
1340                 num_entries = pasync_ctx->async_data.num_entries;
1341                 writables = min(pasync_ctx->async_data.writables,
1342                                 pasync_ctx->async_data.free_entries);
1343                 pfree_link = pasync_ctx->async_data.free_list.next;
1344                 host_write_num = pasync_ctx->async_data.host_write_ptr;
1345                 ring_id = phwi_ctrlr->default_pdu_data.id;
1346         }
1347
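             /*
              * Buffers are posted in batches of 8; anything smaller is
              * deferred (presumably to keep the doorbell CQPROC count
              * aligned).
              */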
1348         writables = (writables / 8) * 8;
1349         if (writables) {
1350                 for (i = 0; i < writables; i++) {
1351                         pbusy_list =
1352                             hwi_get_async_busy_list(pasync_ctx, is_header,
1353                                                     host_write_num);
1354                         pasync_handle =
1355                             list_entry(pfree_link, struct async_pdu_handle,
1356                                                                 link);
1357                         WARN_ON(!pasync_handle);
1358                         pasync_handle->consumed = 0;
1359
1360                         pfree_link = pfree_link->next;
1361
1362                         pasync_sge = hwi_get_ring_address(pasync_ctx,
1363                                                 is_header, host_write_num);
1364
1365                         pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1366                         pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1367
1368                         list_move(&pasync_handle->link, pbusy_list);
1369
1370                         host_write_num++;
1371                         host_write_num = host_write_num % num_entries;
1372                 }
1373
1374                 if (is_header) {
1375                         pasync_ctx->async_header.host_write_ptr =
1376                                                         host_write_num;
1377                         pasync_ctx->async_header.free_entries -= writables;
1378                         pasync_ctx->async_header.writables -= writables;
1379                         pasync_ctx->async_header.busy_entries += writables;
1380                 } else {
1381                         pasync_ctx->async_data.host_write_ptr = host_write_num;
1382                         pasync_ctx->async_data.free_entries -= writables;
1383                         pasync_ctx->async_data.writables -= writables;
1384                         pasync_ctx->async_data.busy_entries += writables;
1385                 }
1386
1387                 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1388                 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1389                 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1390                 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1391                                         << DB_DEF_PDU_CQPROC_SHIFT;
1392
1393                 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1394         }
1395 }
1396
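     /*
      * Called on a digest error for an unsolicited PDU: the buffers already
      * queued on this connection's CRI are dropped back to the free lists
      * and fresh buffers are posted, without handing anything up to the
      * iSCSI layer.
      */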
1397 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1398                                          struct beiscsi_conn *beiscsi_conn,
1399                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1400 {
1401         struct hwi_controller *phwi_ctrlr;
1402         struct hwi_async_pdu_context *pasync_ctx;
1403         struct async_pdu_handle *pasync_handle = NULL;
1404         unsigned int cq_index = -1;
1405
1406         phwi_ctrlr = phba->phwi_ctrlr;
1407         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1408
1409         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1410                                              pdpdu_cqe, &cq_index);
1411         BUG_ON(pasync_handle->is_header != 0);
1412         if (pasync_handle->consumed == 0)
1413                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1414                                            cq_index);
1415
1416         hwi_free_async_msg(phba, pasync_handle->cri);
1417         hwi_post_async_buffers(phba, pasync_handle->is_header);
1418 }
1419
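     /*
      * Reassemble the unsolicited PDU collected on this CRI: the first
      * handle on the wait list holds the header, the remaining handles hold
      * data that is copied back-to-back into the first data buffer, and the
      * result is handed to beiscsi_process_async_pdu().
      */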
1420 static unsigned int
1421 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1422                   struct beiscsi_hba *phba,
1423                   struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1424 {
1425         struct list_head *plist;
1426         struct async_pdu_handle *pasync_handle;
1427         void *phdr = NULL;
1428         unsigned int hdr_len = 0, buf_len = 0;
1429         unsigned int status, index = 0, offset = 0;
1430         void *pfirst_buffer = NULL;
1431         unsigned int num_buf = 0;
1432
1433         plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1434
1435         list_for_each_entry(pasync_handle, plist, link) {
1436                 if (index == 0) {
1437                         phdr = pasync_handle->pbuffer;
1438                         hdr_len = pasync_handle->buffer_len;
1439                 } else {
1440                         buf_len = pasync_handle->buffer_len;
1441                         if (!num_buf) {
1442                                 pfirst_buffer = pasync_handle->pbuffer;
1443                                 num_buf++;
1444                         }
1445                         memcpy(pfirst_buffer + offset,
1446                                pasync_handle->pbuffer, buf_len);
1447                         offset += buf_len;
1448                 }
1449                 index++;
1450         }
1451
1452         status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1453                                            (beiscsi_conn->beiscsi_conn_cid -
1454                                             phba->fw_config.iscsi_cid_start),
1455                                             phdr, hdr_len, pfirst_buffer,
1456                                             offset);
1457
1458         if (status == 0)
1459                 hwi_free_async_msg(phba, cri);
1460         return 0;
1461 }
1462
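     /*
      * Account for one received default-PDU buffer.  A header buffer records
      * how many data bytes to expect (taken from the BHS data segment
      * length); data buffers are queued until that many bytes have arrived,
      * at which point the whole PDU is forwarded.
      */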
1463 static unsigned int
1464 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1465                      struct beiscsi_hba *phba,
1466                      struct async_pdu_handle *pasync_handle)
1467 {
1468         struct hwi_async_pdu_context *pasync_ctx;
1469         struct hwi_controller *phwi_ctrlr;
1470         unsigned int bytes_needed = 0, status = 0;
1471         unsigned short cri = pasync_handle->cri;
1472         struct pdu_base *ppdu;
1473
1474         phwi_ctrlr = phba->phwi_ctrlr;
1475         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1476
1477         list_del(&pasync_handle->link);
1478         if (pasync_handle->is_header) {
1479                 pasync_ctx->async_header.busy_entries--;
1480                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1481                         hwi_free_async_msg(phba, cri);
1482                         BUG();
1483                 }
1484
1485                 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1486                 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1487                 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1488                                 (unsigned short)pasync_handle->buffer_len;
1489                 list_add_tail(&pasync_handle->link,
1490                               &pasync_ctx->async_entry[cri].wait_queue.list);
1491
1492                 ppdu = pasync_handle->pbuffer;
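                     /* Data segment length is split across the hi/lo fields
                      * of the BHS. */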
1493                 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1494                         data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1495                         0xFFFF0000) | ((be16_to_cpu((ppdu->
1496                         dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1497                         & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1498
1499                 if (status == 0) {
1500                         pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1501                             bytes_needed;
1502
1503                         if (bytes_needed == 0)
1504                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1505                                                            pasync_ctx, cri);
1506                 }
1507         } else {
1508                 pasync_ctx->async_data.busy_entries--;
1509                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1510                         list_add_tail(&pasync_handle->link,
1511                                       &pasync_ctx->async_entry[cri].wait_queue.
1512                                       list);
1513                         pasync_ctx->async_entry[cri].wait_queue.
1514                                 bytes_received +=
1515                                 (unsigned short)pasync_handle->buffer_len;
1516
1517                         if (pasync_ctx->async_entry[cri].wait_queue.
1518                             bytes_received >=
1519                             pasync_ctx->async_entry[cri].wait_queue.
1520                             bytes_needed)
1521                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1522                                                            pasync_ctx, cri);
1523                 }
1524         }
1525         return status;
1526 }
1527
1528 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1529                                          struct beiscsi_hba *phba,
1530                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1531 {
1532         struct hwi_controller *phwi_ctrlr;
1533         struct hwi_async_pdu_context *pasync_ctx;
1534         struct async_pdu_handle *pasync_handle = NULL;
1535         unsigned int cq_index = -1;
1536
1537         phwi_ctrlr = phba->phwi_ctrlr;
1538         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1539         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1540                                              pdpdu_cqe, &cq_index);
1541
1542         if (pasync_handle->consumed == 0)
1543                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1544                                            cq_index);
1545         hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1546         hwi_post_async_buffers(phba, pasync_handle->is_header);
1547 }
1548
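     /*
      * Drain the MCC completion queue: async entries (currently only link
      * state events) are handled here, command completions are passed to
      * be_mcc_compl_process_isr().  The CQ doorbell is rung every 32 entries
      * and re-armed once the queue is empty.
      */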
1549 static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1550 {
1551         struct be_queue_info *mcc_cq;
1552         struct  be_mcc_compl *mcc_compl;
1553         unsigned int num_processed = 0;
1554
1555         mcc_cq = &phba->ctrl.mcc_obj.cq;
1556         mcc_compl = queue_tail_node(mcc_cq);
1557         mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1558         while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1559
1560                 if (num_processed >= 32) {
1561                         hwi_ring_cq_db(phba, mcc_cq->id,
1562                                         num_processed, 0, 0);
1563                         num_processed = 0;
1564                 }
1565                 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1566                         /* Interpret flags as an async trailer */
1567                         if (is_link_state_evt(mcc_compl->flags))
1568                                 /* Interpret compl as an async link evt */
1569                                 beiscsi_async_link_state_process(phba,
1570                                 (struct be_async_event_link_state *) mcc_compl);
1571                         else
1572                                 SE_DEBUG(DBG_LVL_1,
1573                                         " Unsupported Async Event, flags"
1574                                         " = 0x%08x\n", mcc_compl->flags);
1575                 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1576                         be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1577                         atomic_dec(&phba->ctrl.mcc_obj.q.used);
1578                 }
1579
1580                 mcc_compl->flags = 0;
1581                 queue_tail_inc(mcc_cq);
1582                 mcc_compl = queue_tail_node(mcc_cq);
1583                 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1584                 num_processed++;
1585         }
1586
1587         if (num_processed > 0)
1588                 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1589
1590 }
1591
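     /*
      * Main completion path: walk the solicited CQ, dispatch each CQE by its
      * code (command completion, driver message, unsolicited header/data,
      * connection error) and return the total number of entries processed so
      * the caller can decide whether to keep polling.
      */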
1592 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1593 {
1594         struct be_queue_info *cq;
1595         struct sol_cqe *sol;
1596         struct dmsg_cqe *dmsg;
1597         unsigned int num_processed = 0;
1598         unsigned int tot_nump = 0;
1599         struct beiscsi_conn *beiscsi_conn;
1600         struct beiscsi_endpoint *beiscsi_ep;
1601         struct iscsi_endpoint *ep;
1602         struct beiscsi_hba *phba;
1603
1604         cq = pbe_eq->cq;
1605         sol = queue_tail_node(cq);
1606         phba = pbe_eq->phba;
1607
1608         while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1609                CQE_VALID_MASK) {
1610                 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1611
1612                 ep = phba->ep_array[(u32) ((sol->
1613                                    dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1614                                    SOL_CID_MASK) >> 6) -
1615                                    phba->fw_config.iscsi_cid_start];
1616
1617                 beiscsi_ep = ep->dd_data;
1618                 beiscsi_conn = beiscsi_ep->conn;
1619
1620                 if (num_processed >= 32) {
1621                         hwi_ring_cq_db(phba, cq->id,
1622                                         num_processed, 0, 0);
1623                         tot_nump += num_processed;
1624                         num_processed = 0;
1625                 }
1626
1627                 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1628                         32] & CQE_CODE_MASK) {
1629                 case SOL_CMD_COMPLETE:
1630                         hwi_complete_cmd(beiscsi_conn, phba, sol);
1631                         break;
1632                 case DRIVERMSG_NOTIFY:
1633                         SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
1634                         dmsg = (struct dmsg_cqe *)sol;
1635                         hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1636                         break;
1637                 case UNSOL_HDR_NOTIFY:
1638                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
1639                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1640                                              (struct i_t_dpdu_cqe *)sol);
1641                         break;
1642                 case UNSOL_DATA_NOTIFY:
1643                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1644                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1645                                              (struct i_t_dpdu_cqe *)sol);
1646                         break;
1647                 case CXN_INVALIDATE_INDEX_NOTIFY:
1648                 case CMD_INVALIDATED_NOTIFY:
1649                 case CXN_INVALIDATE_NOTIFY:
1650                         SE_DEBUG(DBG_LVL_1,
1651                                  "Ignoring CQ Error notification for cmd/cxn "
1652                                  "invalidate\n");
1653                         break;
1654                 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1655                 case CMD_KILLED_INVALID_STATSN_RCVD:
1656                 case CMD_KILLED_INVALID_R2T_RCVD:
1657                 case CMD_CXN_KILLED_LUN_INVALID:
1658                 case CMD_CXN_KILLED_ICD_INVALID:
1659                 case CMD_CXN_KILLED_ITT_INVALID:
1660                 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1661                 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1662                         SE_DEBUG(DBG_LVL_1,
1663                                  "CQ Error notification for cmd.. "
1664                                  "code %d cid 0x%x\n",
1665                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1666                                  32] & CQE_CODE_MASK,
1667                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1668                                  32] & SOL_CID_MASK));
1669                         break;
1670                 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1671                         SE_DEBUG(DBG_LVL_1,
1672                                  "Digest error on def pdu ring, dropping..\n");
1673                         hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1674                                              (struct i_t_dpdu_cqe *) sol);
1675                         break;
1676                 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1677                 case CXN_KILLED_BURST_LEN_MISMATCH:
1678                 case CXN_KILLED_AHS_RCVD:
1679                 case CXN_KILLED_HDR_DIGEST_ERR:
1680                 case CXN_KILLED_UNKNOWN_HDR:
1681                 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1682                 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1683                 case CXN_KILLED_TIMED_OUT:
1684                 case CXN_KILLED_FIN_RCVD:
1685                 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1686                 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1687                 case CXN_KILLED_OVER_RUN_RESIDUAL:
1688                 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1689                 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1690                         SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1691                                  "0x%x...\n",
1692                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1693                                  32] & CQE_CODE_MASK,
1694                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1695                                  32] & CQE_CID_MASK));
1696                         iscsi_conn_failure(beiscsi_conn->conn,
1697                                            ISCSI_ERR_CONN_FAILED);
1698                         break;
1699                 case CXN_KILLED_RST_SENT:
1700                 case CXN_KILLED_RST_RCVD:
1701                         SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1702                                  "received/sent on CID 0x%x...\n",
1703                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1704                                  32] & CQE_CODE_MASK,
1705                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1706                                  32] & CQE_CID_MASK));
1707                         iscsi_conn_failure(beiscsi_conn->conn,
1708                                            ISCSI_ERR_CONN_FAILED);
1709                         break;
1710                 default:
1711                         SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
1712                                  "received on CID 0x%x...\n",
1713                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1714                                  32] & CQE_CODE_MASK,
1715                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1716                                  32] & CQE_CID_MASK));
1717                         break;
1718                 }
1719
1720                 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1721                 queue_tail_inc(cq);
1722                 sol = queue_tail_node(cq);
1723                 num_processed++;
1724         }
1725
1726         if (num_processed > 0) {
1727                 tot_nump += num_processed;
1728                 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1729         }
1730         return tot_nump;
1731 }
1732
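     /*
      * Work-queue handler for phba->work_cqs: services the MCC CQ and/or the
      * default CQ when the todo_mcc_cq/todo_cq flags have been set
      * (presumably from the interrupt path), so the bulk of the processing
      * happens outside hard-irq context.
      */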
1733 void beiscsi_process_all_cqs(struct work_struct *work)
1734 {
1735         unsigned long flags;
1736         struct hwi_controller *phwi_ctrlr;
1737         struct hwi_context_memory *phwi_context;
1738         struct be_eq_obj *pbe_eq;
1739         struct beiscsi_hba *phba =
1740             container_of(work, struct beiscsi_hba, work_cqs);
1741
1742         phwi_ctrlr = phba->phwi_ctrlr;
1743         phwi_context = phwi_ctrlr->phwi_ctxt;
1744         if (phba->msix_enabled)
1745                 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1746         else
1747                 pbe_eq = &phwi_context->be_eq[0];
1748
1749         if (phba->todo_mcc_cq) {
1750                 spin_lock_irqsave(&phba->isr_lock, flags);
1751                 phba->todo_mcc_cq = 0;
1752                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1753                 beiscsi_process_mcc_isr(phba);
1754         }
1755
1756         if (phba->todo_cq) {
1757                 spin_lock_irqsave(&phba->isr_lock, flags);
1758                 phba->todo_cq = 0;
1759                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1760                 beiscsi_process_cq(pbe_eq);
1761         }
1762 }
1763
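     /*
      * blk_iopoll callback: process completions for this EQ's CQ and, if the
      * budget was not exhausted, complete the poll and re-arm the event
      * queue so interrupts resume.
      */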
1764 static int be_iopoll(struct blk_iopoll *iop, int budget)
1765 {
1766         unsigned int ret;
1767         struct beiscsi_hba *phba;
1768         struct be_eq_obj *pbe_eq;
1769
1770         pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1771         ret = beiscsi_process_cq(pbe_eq);
1772         if (ret < budget) {
1773                 phba = pbe_eq->phba;
1774                 blk_iopoll_complete(iop);
1775                 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1776                 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1777         }
1778         return ret;
1779 }
1780
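     /*
      * Build the SGL for a SCSI I/O task.  The first two scatterlist
      * elements are written inline into the WRB (sge0/sge1); the ICD SGL
      * gets the BHS as its first entry and the full scatterlist starting at
      * the third entry, with last_sge set on the final element.
      */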
1781 static void
1782 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1783               unsigned int num_sg, struct beiscsi_io_task *io_task)
1784 {
1785         struct iscsi_sge *psgl;
1786         unsigned short sg_len, index;
1787         unsigned int sge_len = 0;
1788         unsigned long long addr;
1789         struct scatterlist *l_sg;
1790         unsigned int offset;
1791
1792         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1793                                       io_task->bhs_pa.u.a32.address_lo);
1794         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1795                                       io_task->bhs_pa.u.a32.address_hi);
1796
1797         l_sg = sg;
1798         for (index = 0; (index < num_sg) && (index < 2); index++,
1799                                                          sg = sg_next(sg)) {
1800                 if (index == 0) {
1801                         sg_len = sg_dma_len(sg);
1802                         addr = (u64) sg_dma_address(sg);
1803                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1804                                                 ((u32)(addr & 0xFFFFFFFF)));
1805                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1806                                                         ((u32)(addr >> 32)));
1807                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1808                                                         sg_len);
1809                         sge_len = sg_len;
1810                 } else {
1811                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1812                                                         pwrb, sge_len);
1813                         sg_len = sg_dma_len(sg);
1814                         addr = (u64) sg_dma_address(sg);
1815                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1816                                                 ((u32)(addr & 0xFFFFFFFF)));
1817                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1818                                                         ((u32)(addr >> 32)));
1819                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1820                                                         sg_len);
1821                 }
1822         }
1823         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1824         memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1825
1826         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1827
1828         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1829                         io_task->bhs_pa.u.a32.address_hi);
1830         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1831                         io_task->bhs_pa.u.a32.address_lo);
1832
1833         if (num_sg == 1) {
1834                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1835                                                                 1);
1836                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1837                                                                 0);
1838         } else if (num_sg == 2) {
1839                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1840                                                                 0);
1841                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1842                                                                 1);
1843         } else {
1844                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1845                                                                 0);
1846                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1847                                                                 0);
1848         }
1849         sg = l_sg;
1850         psgl++;
1851         psgl++;
1852         offset = 0;
1853         for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1854                 sg_len = sg_dma_len(sg);
1855                 addr = (u64) sg_dma_address(sg);
1856                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1857                                                 (addr & 0xFFFFFFFF));
1858                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1859                                                 (addr >> 32));
1860                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1861                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1862                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1863                 offset += sg_len;
1864         }
1865         psgl--;
1866         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1867 }
1868
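     /*
      * Set up the WRB and ICD SGL for a non-I/O (management) task.  Any
      * immediate data in task->data is DMA-mapped with pci_map_single() and
      * described both inline in the WRB and in the SGL.
      */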
1869 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1870 {
1871         struct iscsi_sge *psgl;
1872         unsigned long long addr;
1873         struct beiscsi_io_task *io_task = task->dd_data;
1874         struct beiscsi_conn *beiscsi_conn = io_task->conn;
1875         struct beiscsi_hba *phba = beiscsi_conn->phba;
1876
1877         io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1878         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1879                                 io_task->bhs_pa.u.a32.address_lo);
1880         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1881                                 io_task->bhs_pa.u.a32.address_hi);
1882
1883         if (task->data) {
1884                 if (task->data_count) {
1885                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1886                         addr = (u64) pci_map_single(phba->pcidev,
1887                                                     task->data,
1888                                                     task->data_count, PCI_DMA_TODEVICE);
1889                 } else {
1890                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1891                         addr = 0;
1892                 }
1893                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1894                                                 ((u32)(addr & 0xFFFFFFFF)));
1895                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1896                                                 ((u32)(addr >> 32)));
1897                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1898                                                 task->data_count);
1899
1900                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1901         } else {
1902                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1903                 addr = 0;
1904         }
1905
1906         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1907
1908         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1909
1910         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1911                       io_task->bhs_pa.u.a32.address_hi);
1912         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1913                       io_task->bhs_pa.u.a32.address_lo);
1914         if (task->data) {
1915                 psgl++;
1916                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1917                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1918                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1919                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1920                 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1921                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1922
1923                 psgl++;
1924                 if (task->data) {
1925                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1926                                                 ((u32)(addr & 0xFFFFFFFF)));
1927                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1928                                                 ((u32)(addr >> 32)));
1929                 }
1930                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1931         }
1932         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1933 }
1934
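     /*
      * Work out how much memory each region needs (WRBs, SGLs, default PDU
      * buffers/rings/handles, ...) and record the sizes in phba->mem_req[]
      * for beiscsi_alloc_mem() to allocate.
      */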
1935 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1936 {
1937         unsigned int num_cq_pages, num_async_pdu_buf_pages;
1938         unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1939         unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1940
1941         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1942                                       sizeof(struct sol_cqe));
1943         num_async_pdu_buf_pages =
1944                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1945                                        phba->params.defpdu_hdr_sz);
1946         num_async_pdu_buf_sgl_pages =
1947                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1948                                        sizeof(struct phys_addr));
1949         num_async_pdu_data_pages =
1950                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1951                                        phba->params.defpdu_data_sz);
1952         num_async_pdu_data_sgl_pages =
1953                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1954                                        sizeof(struct phys_addr));
1955
1956         phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1957
1958         phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1959                                                  BE_ISCSI_PDU_HEADER_SIZE;
1960         phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1961                                             sizeof(struct hwi_context_memory);
1962
1963
1964         phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1965             * (phba->params.wrbs_per_cxn)
1966             * phba->params.cxns_per_ctrl;
1967         wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
1968                                  (phba->params.wrbs_per_cxn);
1969         phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1970                                 phba->params.cxns_per_ctrl);
1971
1972         phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1973                 phba->params.icds_per_ctrl;
1974         phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1975                 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1976
1977         phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1978                 num_async_pdu_buf_pages * PAGE_SIZE;
1979         phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1980                 num_async_pdu_data_pages * PAGE_SIZE;
1981         phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1982                 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1983         phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1984                 num_async_pdu_data_sgl_pages * PAGE_SIZE;
1985         phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1986                 phba->params.asyncpdus_per_ctrl *
1987                 sizeof(struct async_pdu_handle);
1988         phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1989                 phba->params.asyncpdus_per_ctrl *
1990                 sizeof(struct async_pdu_handle);
1991         phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1992                 sizeof(struct hwi_async_pdu_context) +
1993                 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1994 }
1995
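     /*
      * Allocate every region recorded in phba->mem_req[].  Each region is
      * built from one or more physically contiguous chunks: allocation
      * starts at be_max_phys_size and the chunk size is reduced (rounded
      * down to a power of two, then halved) on failure until BE_MIN_MEM_SIZE
      * is reached, at which point everything allocated so far is freed and
      * -ENOMEM is returned.
      */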
1996 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1997 {
1998         struct be_mem_descriptor *mem_descr;
1999         dma_addr_t bus_add;
2000         struct mem_array *mem_arr, *mem_arr_orig;
2001         unsigned int i, j, alloc_size, curr_alloc_size;
2002
2003         phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2004         if (!phba->phwi_ctrlr)
2005                 return -ENOMEM;
2006
2007         phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2008                                  GFP_KERNEL);
2009         if (!phba->init_mem) {
2010                 kfree(phba->phwi_ctrlr);
2011                 return -ENOMEM;
2012         }
2013
2014         mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2015                                GFP_KERNEL);
2016         if (!mem_arr_orig) {
2017                 kfree(phba->init_mem);
2018                 kfree(phba->phwi_ctrlr);
2019                 return -ENOMEM;
2020         }
2021
2022         mem_descr = phba->init_mem;
2023         for (i = 0; i < SE_MEM_MAX; i++) {
2024                 j = 0;
2025                 mem_arr = mem_arr_orig;
2026                 alloc_size = phba->mem_req[i];
2027                 memset(mem_arr, 0, sizeof(struct mem_array) *
2028                        BEISCSI_MAX_FRAGS_INIT);
2029                 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2030                 do {
2031                         mem_arr->virtual_address = pci_alloc_consistent(
2032                                                         phba->pcidev,
2033                                                         curr_alloc_size,
2034                                                         &bus_add);
2035                         if (!mem_arr->virtual_address) {
2036                                 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2037                                         goto free_mem;
2038                                 if (curr_alloc_size -
2039                                         rounddown_pow_of_two(curr_alloc_size))
2040                                         curr_alloc_size = rounddown_pow_of_two
2041                                                              (curr_alloc_size);
2042                                 else
2043                                         curr_alloc_size = curr_alloc_size / 2;
2044                         } else {
2045                                 mem_arr->bus_address.u.
2046                                     a64.address = (__u64) bus_add;
2047                                 mem_arr->size = curr_alloc_size;
2048                                 alloc_size -= curr_alloc_size;
2049                                 curr_alloc_size = min(be_max_phys_size *
2050                                                       1024, alloc_size);
2051                                 j++;
2052                                 mem_arr++;
2053                         }
2054                 } while (alloc_size);
2055                 mem_descr->num_elements = j;
2056                 mem_descr->size_in_bytes = phba->mem_req[i];
2057                 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2058                                                GFP_KERNEL);
2059                 if (!mem_descr->mem_array)
2060                         goto free_mem;
2061
2062                 memcpy(mem_descr->mem_array, mem_arr_orig,
2063                        sizeof(struct mem_array) * j);
2064                 mem_descr++;
2065         }
2066         kfree(mem_arr_orig);
2067         return 0;
2068 free_mem:
2069         mem_descr->num_elements = j;
2070         while ((i) || (j)) {
2071                 for (j = mem_descr->num_elements; j > 0; j--) {
2072                         pci_free_consistent(phba->pcidev,
2073                                             mem_descr->mem_array[j - 1].size,
2074                                             mem_descr->mem_array[j - 1].
2075                                             virtual_address,
2076                                             (unsigned long)mem_descr->
2077                                             mem_array[j - 1].
2078                                             bus_address.u.a64.address);
2079                 }
2080                 if (i) {
2081                         i--;
2082                         kfree(mem_descr->mem_array);
2083                         mem_descr--;
2084                 }
2085         }
2086         kfree(mem_arr_orig);
2087         kfree(phba->init_mem);
2088         kfree(phba->phwi_ctrlr);
2089         return -ENOMEM;
2090 }
2091
2092 static int beiscsi_get_memory(struct beiscsi_hba *phba)
2093 {
2094         beiscsi_find_mem_req(phba);
2095         return beiscsi_alloc_mem(phba);
2096 }
2097
2098 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2099 {
2100         struct pdu_data_out *pdata_out;
2101         struct pdu_nop_out *pnop_out;
2102         struct be_mem_descriptor *mem_descr;
2103
2104         mem_descr = phba->init_mem;
2105         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2106         pdata_out =
2107             (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2108         memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2109
2110         AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2111                       IIOC_SCSI_DATA);
2112
2113         pnop_out =
2114             (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2115                                    virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2116
2117         memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2118         AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2119         AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2120         AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2121 }
2122
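     /*
      * Carve the HWI_MEM_WRBH and HWI_MEM_WRB regions into per-connection
      * WRB handle arrays and WRBs, switching to the next mem_array fragment
      * whenever the current one is exhausted.
      */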
2123 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2124 {
2125         struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2126         struct wrb_handle *pwrb_handle;
2127         struct hwi_controller *phwi_ctrlr;
2128         struct hwi_wrb_context *pwrb_context;
2129         struct iscsi_wrb *pwrb;
2130         unsigned int num_cxn_wrbh;
2131         unsigned int num_cxn_wrb, j, idx, index;
2132
2133         mem_descr_wrbh = phba->init_mem;
2134         mem_descr_wrbh += HWI_MEM_WRBH;
2135
2136         mem_descr_wrb = phba->init_mem;
2137         mem_descr_wrb += HWI_MEM_WRB;
2138
2139         idx = 0;
2140         pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
2141         num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2142                         ((sizeof(struct wrb_handle)) *
2143                          phba->params.wrbs_per_cxn));
2144         phwi_ctrlr = phba->phwi_ctrlr;
2145
2146         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2147                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2148                 pwrb_context->pwrb_handle_base =
2149                                 kzalloc(sizeof(struct wrb_handle *) *
2150                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2151                 pwrb_context->pwrb_handle_basestd =
2152                                 kzalloc(sizeof(struct wrb_handle *) *
2153                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2154                 if (num_cxn_wrbh) {
2155                         pwrb_context->alloc_index = 0;
2156                         pwrb_context->wrb_handles_available = 0;
2157                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2158                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2159                                 pwrb_context->pwrb_handle_basestd[j] =
2160                                                                 pwrb_handle;
2161                                 pwrb_context->wrb_handles_available++;
2162                                 pwrb_handle->wrb_index = j;
2163                                 pwrb_handle++;
2164                         }
2165                         pwrb_context->free_index = 0;
2166                         num_cxn_wrbh--;
2167                 } else {
2168                         idx++;
2169                         pwrb_handle =
2170                             mem_descr_wrbh->mem_array[idx].virtual_address;
2171                         num_cxn_wrbh =
2172                             ((mem_descr_wrbh->mem_array[idx].size) /
2173                              ((sizeof(struct wrb_handle)) *
2174                               phba->params.wrbs_per_cxn));
2175                         pwrb_context->alloc_index = 0;
                             pwrb_context->wrb_handles_available = 0;
2176                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2177                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2178                                 pwrb_context->pwrb_handle_basestd[j] =
2179                                     pwrb_handle;
2180                                 pwrb_context->wrb_handles_available++;
2181                                 pwrb_handle->wrb_index = j;
2182                                 pwrb_handle++;
2183                         }
2184                         pwrb_context->free_index = 0;
2185                         num_cxn_wrbh--;
2186                 }
2187         }
2188         idx = 0;
2189         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2190         num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2191                       ((sizeof(struct iscsi_wrb) *
2192                         phba->params.wrbs_per_cxn));
2193         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2194                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2195                 if (num_cxn_wrb) {
2196                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2197                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2198                                 pwrb_handle->pwrb = pwrb;
2199                                 pwrb++;
2200                         }
2201                         num_cxn_wrb--;
2202                 } else {
2203                         idx++;
2204                         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2205                         num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2206                                       ((sizeof(struct iscsi_wrb) *
2207                                         phba->params.wrbs_per_cxn));
2208                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2209                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2210                                 pwrb_handle->pwrb = pwrb;
2211                                 pwrb++;
2212                         }
2213                         num_cxn_wrb--;
2214                 }
2215         }
2216 }
2217
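     /*
      * Initialise the default PDU context: point the header and data sides
      * at their buffer, ring and handle regions, then build the free lists
      * of async_pdu_handles (one per asyncpdus_per_ctrl on each side).
      */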
2218 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2219 {
2220         struct hwi_controller *phwi_ctrlr;
2221         struct hba_parameters *p = &phba->params;
2222         struct hwi_async_pdu_context *pasync_ctx;
2223         struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2224         unsigned int index;
2225         struct be_mem_descriptor *mem_descr;
2226
2227         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2228         mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2229
2230         phwi_ctrlr = phba->phwi_ctrlr;
2231         phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2232                                 mem_descr->mem_array[0].virtual_address;
2233         pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2234         memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2235
2236         pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2237         pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2238         pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2239         pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2240
2241         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2242         mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2243         if (mem_descr->mem_array[0].virtual_address) {
2244                 SE_DEBUG(DBG_LVL_8,
2245                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
2246                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2247         } else
2248                 shost_printk(KERN_WARNING, phba->shost,
2249                              "No Virtual address\n");
2250
2251         pasync_ctx->async_header.va_base =
2252                         mem_descr->mem_array[0].virtual_address;
2253
2254         pasync_ctx->async_header.pa_base.u.a64.address =
2255                         mem_descr->mem_array[0].bus_address.u.a64.address;
2256
2257         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2258         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2259         if (mem_descr->mem_array[0].virtual_address) {
2260                 SE_DEBUG(DBG_LVL_8,
2261                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
2262                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2263         } else
2264                 shost_printk(KERN_WARNING, phba->shost,
2265                             "No Virtual address\n");
2266         pasync_ctx->async_header.ring_base =
2267                         mem_descr->mem_array[0].virtual_address;
2268
2269         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2270         mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2271         if (mem_descr->mem_array[0].virtual_address) {
2272                 SE_DEBUG(DBG_LVL_8,
2273                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
2274                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2275         } else
2276                 shost_printk(KERN_WARNING, phba->shost,
2277                             "No Virtual address\n");
2278
2279         pasync_ctx->async_header.handle_base =
2280                         mem_descr->mem_array[0].virtual_address;
2281         pasync_ctx->async_header.writables = 0;
2282         INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2283
2284         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2285         mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2286         if (mem_descr->mem_array[0].virtual_address) {
2287                 SE_DEBUG(DBG_LVL_8,
2288                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
2289                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2290         } else
2291                 shost_printk(KERN_WARNING, phba->shost,
2292                             "No Virtual address\n");
2293         pasync_ctx->async_data.va_base =
2294                         mem_descr->mem_array[0].virtual_address;
2295         pasync_ctx->async_data.pa_base.u.a64.address =
2296                         mem_descr->mem_array[0].bus_address.u.a64.address;
2297
2298         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2299         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2300         if (mem_descr->mem_array[0].virtual_address) {
2301                 SE_DEBUG(DBG_LVL_8,
2302                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
2303                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2304         } else
2305                 shost_printk(KERN_WARNING, phba->shost,
2306                              "No Virtual address\n");
2307
2308         pasync_ctx->async_data.ring_base =
2309                         mem_descr->mem_array[0].virtual_address;
2310
2311         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2312         mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2313         if (!mem_descr->mem_array[0].virtual_address)
2314                 shost_printk(KERN_WARNING, phba->shost,
2315                             "No Virtual address\n");
2316
2317         pasync_ctx->async_data.handle_base =
2318                         mem_descr->mem_array[0].virtual_address;
2319         pasync_ctx->async_data.writables = 0;
2320         INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2321
2322         pasync_header_h =
2323                 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2324         pasync_data_h =
2325                 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2326
2327         for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2328                 pasync_header_h->cri = -1;
2329                 pasync_header_h->index = (char)index;
2330                 INIT_LIST_HEAD(&pasync_header_h->link);
2331                 pasync_header_h->pbuffer =
2332                         (void *)((unsigned long)
2333                         (pasync_ctx->async_header.va_base) +
2334                         (p->defpdu_hdr_sz * index));
2335
2336                 pasync_header_h->pa.u.a64.address =
2337                         pasync_ctx->async_header.pa_base.u.a64.address +
2338                         (p->defpdu_hdr_sz * index);
2339
2340                 list_add_tail(&pasync_header_h->link,
2341                                 &pasync_ctx->async_header.free_list);
2342                 pasync_header_h++;
2343                 pasync_ctx->async_header.free_entries++;
2344                 pasync_ctx->async_header.writables++;
2345
2346                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2347                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2348                                header_busy_list);
2349                 pasync_data_h->cri = -1;
2350                 pasync_data_h->index = (char)index;
2351                 INIT_LIST_HEAD(&pasync_data_h->link);
2352                 pasync_data_h->pbuffer =
2353                         (void *)((unsigned long)
2354                         (pasync_ctx->async_data.va_base) +
2355                         (p->defpdu_data_sz * index));
2356
2357                 pasync_data_h->pa.u.a64.address =
2358                     pasync_ctx->async_data.pa_base.u.a64.address +
2359                     (p->defpdu_data_sz * index);
2360
2361                 list_add_tail(&pasync_data_h->link,
2362                               &pasync_ctx->async_data.free_list);
2363                 pasync_data_h++;
2364                 pasync_ctx->async_data.free_entries++;
2365                 pasync_ctx->async_data.writables++;
2366
2367                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2368         }
2369
2370         pasync_ctx->async_header.host_write_ptr = 0;
2371         pasync_ctx->async_header.ep_read_ptr = -1;
2372         pasync_ctx->async_data.host_write_ptr = 0;
2373         pasync_ctx->async_data.ep_read_ptr = -1;
2374 }
2375
2376 static int
2377 be_sgl_create_contiguous(void *virtual_address,
2378                          u64 physical_address, u32 length,
2379                          struct be_dma_mem *sgl)
2380 {
2381         WARN_ON(!virtual_address);
2382         WARN_ON(!physical_address);
2383         WARN_ON(length == 0);
2384         WARN_ON(!sgl);
2385
2386         sgl->va = virtual_address;
2387         sgl->dma = (unsigned long)physical_address;
2388         sgl->size = length;
2389
2390         return 0;
2391 }
2392
2393 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2394 {
2395         memset(sgl, 0, sizeof(*sgl));
2396 }
2397
2398 static void
2399 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2400                      struct mem_array *pmem, struct be_dma_mem *sgl)
2401 {
2402         if (sgl->va)
2403                 be_sgl_destroy_contiguous(sgl);
2404
2405         be_sgl_create_contiguous(pmem->virtual_address,
2406                                  pmem->bus_address.u.a64.address,
2407                                  pmem->size, sgl);
2408 }
2409
2410 static void
2411 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2412                            struct mem_array *pmem, struct be_dma_mem *sgl)
2413 {
2414         if (sgl->va)
2415                 be_sgl_destroy_contiguous(sgl);
2416
2417         be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2418                                  pmem->bus_address.u.a64.address,
2419                                  pmem->size, sgl);
2420 }
2421
2422 static int be_fill_queue(struct be_queue_info *q,
2423                 u16 len, u16 entry_size, void *vaddress)
2424 {
2425         struct be_dma_mem *mem = &q->dma_mem;
2426
2427         memset(q, 0, sizeof(*q));
2428         q->len = len;
2429         q->entry_size = entry_size;
2430         mem->size = len * entry_size;
2431         mem->va = vaddress;
2432         if (!mem->va)
2433                 return -ENOMEM;
2434         memset(mem->va, 0, mem->size);
2435         return 0;
2436 }
2437
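     /*
      * Allocate and create the event queues: one per CPU, plus an extra EQ
      * for MCC completions when MSI-X is enabled.  On failure, any EQ memory
      * already allocated is freed.
      */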
2438 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2439                              struct hwi_context_memory *phwi_context)
2440 {
2441         unsigned int i, num_eq_pages;
2442         int ret, eq_for_mcc;
2443         struct be_queue_info *eq;
2444         struct be_dma_mem *mem;
2445         void *eq_vaddress;
2446         dma_addr_t paddr;
2447
2448         num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2449                                       sizeof(struct be_eq_entry));
2450
2451         if (phba->msix_enabled)
2452                 eq_for_mcc = 1;
2453         else
2454                 eq_for_mcc = 0;
2455         for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2456                 eq = &phwi_context->be_eq[i].q;
2457                 mem = &eq->dma_mem;
2458                 phwi_context->be_eq[i].phba = phba;
2459                 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2460                                                      num_eq_pages * PAGE_SIZE,
2461                                                      &paddr);
2462                 if (!eq_vaddress)
2463                         goto create_eq_error;
2464
2465                 mem->va = eq_vaddress;
2466                 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2467                                     sizeof(struct be_eq_entry), eq_vaddress);
2468                 if (ret) {
2469                         shost_printk(KERN_ERR, phba->shost,
2470                                      "be_fill_queue Failed for EQ\n");
2471                         goto create_eq_error;
2472                 }
2473
2474                 mem->dma = paddr;
2475                 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2476                                             phwi_context->cur_eqd);
2477                 if (ret) {
2478                         shost_printk(KERN_ERR, phba->shost,
2479                                      "beiscsi_cmd_eq_create "
2480                                      "Failed for EQ\n");
2481                         goto create_eq_error;
2482                 }
2483                 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2484         }
2485         return 0;
2486 create_eq_error:
2487         for (i = 0; i < (phba->num_cpus + 1); i++) {
2488                 eq = &phwi_context->be_eq[i].q;
2489                 mem = &eq->dma_mem;
2490                 if (mem->va)
2491                         pci_free_consistent(phba->pcidev, num_eq_pages
2492                                             * PAGE_SIZE,
2493                                             mem->va, mem->dma);
2494         }
2495         return ret;
2496 }
2497
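/**
 * beiscsi_create_cqs - allocate and create the completion queues
 * @phba: driver priv structure for the adapter
 * @phwi_context: hardware context holding the CQ/EQ arrays
 *
 * Creates one solicited-completion CQ per CPU and attaches each CQ to
 * the EQ with the same index.  Backing memory is DMA-coherent and
 * sized by num_cq_entries; on failure the already-allocated CQ memory
 * is freed and the error is returned.
 */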
2498 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2499                              struct hwi_context_memory *phwi_context)
2500 {
2501         unsigned int i, num_cq_pages;
2502         int ret;
2503         struct be_queue_info *cq, *eq;
2504         struct be_dma_mem *mem;
2505         struct be_eq_obj *pbe_eq;
2506         void *cq_vaddress;
2507         dma_addr_t paddr;
2508
2509         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2510                                       sizeof(struct sol_cqe));
2511
2512         for (i = 0; i < phba->num_cpus; i++) {
2513                 cq = &phwi_context->be_cq[i];
2514                 eq = &phwi_context->be_eq[i].q;
2515                 pbe_eq = &phwi_context->be_eq[i];
2516                 pbe_eq->cq = cq;
2517                 pbe_eq->phba = phba;
2518                 mem = &cq->dma_mem;
2519                 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2520                                                      num_cq_pages * PAGE_SIZE,
2521                                                      &paddr);
2522                 if (!cq_vaddress)
2523                         goto create_cq_error;
2524                 ret = be_fill_queue(cq, phba->params.num_cq_entries,
2525                                     sizeof(struct sol_cqe), cq_vaddress);
2526                 if (ret) {
2527                         shost_printk(KERN_ERR, phba->shost,
2528                                      "be_fill_queue Failed for ISCSI CQ\n");
2529                         goto create_cq_error;
2530                 }
2531
2532                 mem->dma = paddr;
2533                 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2534                                             false, 0);
2535                 if (ret) {
2536                         shost_printk(KERN_ERR, phba->shost,
2537                                      "beiscsi_cmd_cq_create "
2538                                      "Failed for ISCSI CQ\n");
2539                         goto create_cq_error;
2540                 }
2541                 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2542                                                  cq->id, eq->id);
2543                 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2544         }
2545         return 0;
2546
2547 create_cq_error:
2548         for (i = 0; i < phba->num_cpus; i++) {
2549                 cq = &phwi_context->be_cq[i];
2550                 mem = &cq->dma_mem;
2551                 if (mem->va)
2552                         pci_free_consistent(phba->pcidev, num_cq_pages
2553                                             * PAGE_SIZE,
2554                                             mem->va, mem->dma);
2555         }
2556         return ret;
2557
2558 }
2559
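/**
 * beiscsi_create_def_hdr - create the default PDU header ring
 * @phba: driver priv structure for the adapter
 * @phwi_context: hardware context holding the default header queue
 * @phwi_ctrlr: host controller structure tracking the ring id
 * @def_pdu_ring_sz: ring size in bytes (asyncpdus_per_ctrl entries)
 *
 * The ring is backed by the HWI_MEM_ASYNC_HEADER_RING region reserved
 * at init time and is attached to CQ 0.  Once created, the async
 * header buffers are posted to firmware via hwi_post_async_buffers().
 * beiscsi_create_def_data() below does the same for the data ring.
 */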
2560 static int
2561 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2562                        struct hwi_context_memory *phwi_context,
2563                        struct hwi_controller *phwi_ctrlr,
2564                        unsigned int def_pdu_ring_sz)
2565 {
2566         unsigned int idx;
2567         int ret;
2568         struct be_queue_info *dq, *cq;
2569         struct be_dma_mem *mem;
2570         struct be_mem_descriptor *mem_descr;
2571         void *dq_vaddress;
2572
2573         idx = 0;
2574         dq = &phwi_context->be_def_hdrq;
2575         cq = &phwi_context->be_cq[0];
2576         mem = &dq->dma_mem;
2577         mem_descr = phba->init_mem;
2578         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2579         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2580         ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2581                             sizeof(struct phys_addr),
2582                             sizeof(struct phys_addr), dq_vaddress);
2583         if (ret) {
2584                 shost_printk(KERN_ERR, phba->shost,
2585                              "be_fill_queue Failed for DEF PDU HDR\n");
2586                 return ret;
2587         }
2588         mem->dma = (unsigned long)mem_descr->mem_array[idx].
2589                                   bus_address.u.a64.address;
2590         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2591                                               def_pdu_ring_sz,
2592                                               phba->params.defpdu_hdr_sz);
2593         if (ret) {
2594                 shost_printk(KERN_ERR, phba->shost,
2595                              "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2596                 return ret;
2597         }
2598         phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2599         SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2600                  phwi_context->be_def_hdrq.id);
2601         hwi_post_async_buffers(phba, 1);
2602         return 0;
2603 }
2604
2605 static int
2606 beiscsi_create_def_data(struct beiscsi_hba *phba,
2607                         struct hwi_context_memory *phwi_context,
2608                         struct hwi_controller *phwi_ctrlr,
2609                         unsigned int def_pdu_ring_sz)
2610 {
2611         unsigned int idx;
2612         int ret;
2613         struct be_queue_info *dataq, *cq;
2614         struct be_dma_mem *mem;
2615         struct be_mem_descriptor *mem_descr;
2616         void *dq_vaddress;
2617
2618         idx = 0;
2619         dataq = &phwi_context->be_def_dataq;
2620         cq = &phwi_context->be_cq[0];
2621         mem = &dataq->dma_mem;
2622         mem_descr = phba->init_mem;
2623         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2624         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2625         ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2626                             sizeof(struct phys_addr),
2627                             sizeof(struct phys_addr), dq_vaddress);
2628         if (ret) {
2629                 shost_printk(KERN_ERR, phba->shost,
2630                              "be_fill_queue Failed for DEF PDU DATA\n");
2631                 return ret;
2632         }
2633         mem->dma = (unsigned long)mem_descr->mem_array[idx].
2634                                   bus_address.u.a64.address;
2635         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2636                                               def_pdu_ring_sz,
2637                                               phba->params.defpdu_data_sz);
2638         if (ret) {
2639                 shost_printk(KERN_ERR, phba->shost,
2640                              "be_cmd_create_default_pdu_queue Failed"
2641                              " for DEF PDU DATA\n");
2642                 return ret;
2643         }
2644         phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2645         SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2646                  phwi_context->be_def_dataq.id);
2647         hwi_post_async_buffers(phba, 0);
2648         SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
2649         return 0;
2650 }
2651
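/**
 * beiscsi_post_pages - post the SGE pages to firmware
 * @phba: driver priv structure for the adapter
 *
 * Walks the HWI_MEM_SGE memory descriptor and posts each chunk as SGL
 * pages, starting at the page offset that corresponds to the
 * firmware's iscsi_icd_start, presumably so that ICD indexes line up
 * with the posted pages.
 */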
2652 static int
2653 beiscsi_post_pages(struct beiscsi_hba *phba)
2654 {
2655         struct be_mem_descriptor *mem_descr;
2656         struct mem_array *pm_arr;
2657         unsigned int page_offset, i;
2658         struct be_dma_mem sgl;
2659         int status;
2660
2661         mem_descr = phba->init_mem;
2662         mem_descr += HWI_MEM_SGE;
2663         pm_arr = mem_descr->mem_array;
2664
2665         page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2666                         phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2667         for (i = 0; i < mem_descr->num_elements; i++) {
2668                 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2669                 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2670                                                 page_offset,
2671                                                 (pm_arr->size / PAGE_SIZE));
2672                 page_offset += pm_arr->size / PAGE_SIZE;
2673                 if (status != 0) {
2674                         shost_printk(KERN_ERR, phba->shost,
2675                                      "post sgl failed.\n");
2676                         return status;
2677                 }
2678                 pm_arr++;
2679         }
2680         SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
2681         return 0;
2682 }
2683
2684 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2685 {
2686         struct be_dma_mem *mem = &q->dma_mem;
2687         if (mem->va)
2688                 pci_free_consistent(phba->pcidev, mem->size,
2689                         mem->va, mem->dma);
2690 }
2691
2692 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2693                 u16 len, u16 entry_size)
2694 {
2695         struct be_dma_mem *mem = &q->dma_mem;
2696
2697         memset(q, 0, sizeof(*q));
2698         q->len = len;
2699         q->entry_size = entry_size;
2700         mem->size = len * entry_size;
2701         mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2702         if (!mem->va)
2703                 return -ENOMEM;
2704         memset(mem->va, 0, mem->size);
2705         return 0;
2706 }
2707
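/**
 * beiscsi_create_wrb_rings - carve up and create per-connection WRB rings
 * @phba: driver priv structure for the adapter
 * @phwi_context: hardware context holding the WRB queue array
 * @phwi_ctrlr: host controller structure tracking ring CIDs
 *
 * The HWI_MEM_WRB region is split into cxns_per_ctrl rings of
 * wrbs_per_cxn WRBs each, spilling over to the next mem_array element
 * when the current one is exhausted; each ring is then created in
 * firmware with be_cmd_wrbq_create() and its CID recorded.
 */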
2708 static int
2709 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2710                          struct hwi_context_memory *phwi_context,
2711                          struct hwi_controller *phwi_ctrlr)
2712 {
2713         unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2714         u64 pa_addr_lo;
2715         unsigned int idx, num, i;
2716         struct mem_array *pwrb_arr;
2717         void *wrb_vaddr;
2718         struct be_dma_mem sgl;
2719         struct be_mem_descriptor *mem_descr;
2720         int status;
2721
2722         idx = 0;
2723         mem_descr = phba->init_mem;
2724         mem_descr += HWI_MEM_WRB;
2725         pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2726                            GFP_KERNEL);
2727         if (!pwrb_arr) {
2728                 shost_printk(KERN_ERR, phba->shost,
2729                              "Memory alloc failed in create wrb ring.\n");
2730                 return -ENOMEM;
2731         }
2732         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2733         pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2734         num_wrb_rings = mem_descr->mem_array[idx].size /
2735                 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2736
2737         for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2738                 if (num_wrb_rings) {
2739                         pwrb_arr[num].virtual_address = wrb_vaddr;
2740                         pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2741                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2742                                             sizeof(struct iscsi_wrb);
2743                         wrb_vaddr += pwrb_arr[num].size;
2744                         pa_addr_lo += pwrb_arr[num].size;
2745                         num_wrb_rings--;
2746                 } else {
2747                         idx++;
2748                         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2749                         pa_addr_lo = mem_descr->mem_array[idx].
2750                                         bus_address.u.a64.address;
2751                         num_wrb_rings = mem_descr->mem_array[idx].size /
2752                                         (phba->params.wrbs_per_cxn *
2753                                         sizeof(struct iscsi_wrb));
2754                         pwrb_arr[num].virtual_address = wrb_vaddr;
2755                         pwrb_arr[num].bus_address.u.a64.address =
2756                                                 pa_addr_lo;
2757                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2758                                                  sizeof(struct iscsi_wrb);
2759                         wrb_vaddr += pwrb_arr[num].size;
2760                         pa_addr_lo   += pwrb_arr[num].size;
2761                         num_wrb_rings--;
2762                 }
2763         }
2764         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2765                 wrb_mem_index = 0;
2766                 offset = 0;
2767                 size = 0;
2768
2769                 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2770                 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2771                                             &phwi_context->be_wrbq[i]);
2772                 if (status != 0) {
2773                         shost_printk(KERN_ERR, phba->shost,
2774                                      "wrbq create failed.\n");
2775                         kfree(pwrb_arr);
2776                         return status;
2777                 }
2778                 phwi_ctrlr->wrb_context[i * 2].cid =
2779                                                 phwi_context->be_wrbq[i].id;
2780         }
2781         kfree(pwrb_arr);
2782         return 0;
2783 }
2784
2785 static void free_wrb_handles(struct beiscsi_hba *phba)
2786 {
2787         unsigned int index;
2788         struct hwi_controller *phwi_ctrlr;
2789         struct hwi_wrb_context *pwrb_context;
2790
2791         phwi_ctrlr = phba->phwi_ctrlr;
2792         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2793                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2794                 kfree(pwrb_context->pwrb_handle_base);
2795                 kfree(pwrb_context->pwrb_handle_basestd);
2796         }
2797 }
2798
2799 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2800 {
2801         struct be_queue_info *q;
2802         struct be_ctrl_info *ctrl = &phba->ctrl;
2803
2804         q = &phba->ctrl.mcc_obj.q;
2805         if (q->created)
2806                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2807         be_queue_free(phba, q);
2808
2809         q = &phba->ctrl.mcc_obj.cq;
2810         if (q->created)
2811                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2812         be_queue_free(phba, q);
2813 }
2814
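/**
 * hwi_cleanup - tear down all queues created by hwi_init_port
 * @phba: driver priv structure for the adapter
 *
 * Destroys the WRB queues, default PDU header/data queues, posted SGL
 * pages, per-CPU CQs and EQs (including the MCC EQ when MSI-X is
 * enabled), and finally the MCC queues.  Queues are only destroyed if
 * they were marked created, so this is safe from partial-init error
 * paths.
 */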
2815 static void hwi_cleanup(struct beiscsi_hba *phba)
2816 {
2817         struct be_queue_info *q;
2818         struct be_ctrl_info *ctrl = &phba->ctrl;
2819         struct hwi_controller *phwi_ctrlr;
2820         struct hwi_context_memory *phwi_context;
2821         int i, eq_num;
2822
2823         phwi_ctrlr = phba->phwi_ctrlr;
2824         phwi_context = phwi_ctrlr->phwi_ctxt;
2825         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2826                 q = &phwi_context->be_wrbq[i];
2827                 if (q->created)
2828                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2829         }
2830         free_wrb_handles(phba);
2831
2832         q = &phwi_context->be_def_hdrq;
2833         if (q->created)
2834                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2835
2836         q = &phwi_context->be_def_dataq;
2837         if (q->created)
2838                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2839
2840         beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2841
2842         for (i = 0; i < (phba->num_cpus); i++) {
2843                 q = &phwi_context->be_cq[i];
2844                 if (q->created)
2845                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2846         }
2847         if (phba->msix_enabled)
2848                 eq_num = 1;
2849         else
2850                 eq_num = 0;
2851         for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2852                 q = &phwi_context->be_eq[i].q;
2853                 if (q->created)
2854                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2855         }
2856         be_mcc_queues_destroy(phba);
2857 }
2858
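/**
 * be_mcc_queues_create - allocate and create the MCC queue pair
 * @phba: driver priv structure for the adapter
 * @phwi_context: hardware context holding the EQs
 *
 * The MCC completion queue is bound to the extra EQ (index num_cpus)
 * when MSI-X is enabled, otherwise to EQ 0, and the MCC work queue is
 * then created on top of it.  Unwinds in reverse order on failure.
 */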
2859 static int be_mcc_queues_create(struct beiscsi_hba *phba,
2860                                 struct hwi_context_memory *phwi_context)
2861 {
2862         struct be_queue_info *q, *cq;
2863         struct be_ctrl_info *ctrl = &phba->ctrl;
2864
2865         /* Alloc MCC compl queue */
2866         cq = &phba->ctrl.mcc_obj.cq;
2867         if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2868                         sizeof(struct be_mcc_compl)))
2869                 goto err;
2870         /* Ask BE to create MCC compl queue; */
2871         if (phba->msix_enabled) {
2872                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2873                                          [phba->num_cpus].q, false, true, 0))
2874                         goto mcc_cq_free;
2875         } else {
2876                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2877                                           false, true, 0))
2878                         goto mcc_cq_free;
2879         }
2880
2881         /* Alloc MCC queue */
2882         q = &phba->ctrl.mcc_obj.q;
2883         if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2884                 goto mcc_cq_destroy;
2885
2886         /* Ask BE to create MCC queue */
2887         if (beiscsi_cmd_mccq_create(phba, q, cq))
2888                 goto mcc_q_free;
2889
2890         return 0;
2891
2892 mcc_q_free:
2893         be_queue_free(phba, q);
2894 mcc_cq_destroy:
2895         beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2896 mcc_cq_free:
2897         be_queue_free(phba, cq);
2898 err:
2899         return -ENOMEM;
2900 }
2901
2902 static int find_num_cpus(void)
2903 {
2904         int  num_cpus = 0;
2905
2906         num_cpus = num_online_cpus();
2907         if (num_cpus >= MAX_CPUS)
2908                 num_cpus = MAX_CPUS - 1;
2909
2910         SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
2911         return num_cpus;
2912 }
2913
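/**
 * hwi_init_port - bring up all hardware queues for the port
 * @phba: driver priv structure for the adapter
 *
 * Issues the firmware init command and then creates, in order, the
 * EQs, MCC queues, CQs, default PDU header and data rings, posted SGL
 * pages and WRB rings, after checking that the firmware version is
 * supported.  Any failure rolls everything back through hwi_cleanup().
 */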
2914 static int hwi_init_port(struct beiscsi_hba *phba)
2915 {
2916         struct hwi_controller *phwi_ctrlr;
2917         struct hwi_context_memory *phwi_context;
2918         unsigned int def_pdu_ring_sz;
2919         struct be_ctrl_info *ctrl = &phba->ctrl;
2920         int status;
2921
2922         def_pdu_ring_sz =
2923                 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2924         phwi_ctrlr = phba->phwi_ctrlr;
2925         phwi_context = phwi_ctrlr->phwi_ctxt;
2926         phwi_context->max_eqd = 0;
2927         phwi_context->min_eqd = 0;
2928         phwi_context->cur_eqd = 64;
2929         be_cmd_fw_initialize(&phba->ctrl);
2930
2931         status = beiscsi_create_eqs(phba, phwi_context);
2932         if (status != 0) {
2933                 shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
2934                 goto error;
2935         }
2936
2937         status = be_mcc_queues_create(phba, phwi_context);
2938         if (status != 0)
2939                 goto error;
2940
2941         status = mgmt_check_supported_fw(ctrl, phba);
2942         if (status != 0) {
2943                 shost_printk(KERN_ERR, phba->shost,
2944                              "Unsupported fw version\n");
2945                 goto error;
2946         }
2947
2948         status = beiscsi_create_cqs(phba, phwi_context);
2949         if (status != 0) {
2950                 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2951                 goto error;
2952         }
2953
2954         status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2955                                         def_pdu_ring_sz);
2956         if (status != 0) {
2957                 shost_printk(KERN_ERR, phba->shost,
2958                              "Default Header not created\n");
2959                 goto error;
2960         }
2961
2962         status = beiscsi_create_def_data(phba, phwi_context,
2963                                          phwi_ctrlr, def_pdu_ring_sz);
2964         if (status != 0) {
2965                 shost_printk(KERN_ERR, phba->shost,
2966                              "Default Data not created\n");
2967                 goto error;
2968         }
2969
2970         status = beiscsi_post_pages(phba);
2971         if (status != 0) {
2972                 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2973                 goto error;
2974         }
2975
2976         status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
2977         if (status != 0) {
2978                 shost_printk(KERN_ERR, phba->shost,
2979                              "WRB Rings not created\n");
2980                 goto error;
2981         }
2982
2983         SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2984         return 0;
2985
2986 error:
2987         shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
2988         hwi_cleanup(phba);
2989         return -ENOMEM;
2990 }
2991
2992 static int hwi_init_controller(struct beiscsi_hba *phba)
2993 {
2994         struct hwi_controller *phwi_ctrlr;
2995
2996         phwi_ctrlr = phba->phwi_ctrlr;
2997         if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2998                 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2999                     init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3000                 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
3001                          phwi_ctrlr->phwi_ctxt);
3002         } else {
3003                 shost_printk(KERN_ERR, phba->shost,
3004                              "HWI_MEM_ADDN_CONTEXT has more than one "
3005                              "element. Failing to load\n");
3006                 return -ENOMEM;
3007         }
3008
3009         iscsi_init_global_templates(phba);
3010         beiscsi_init_wrb_handle(phba);
3011         hwi_init_async_pdu_ctx(phba);
3012         if (hwi_init_port(phba) != 0) {
3013                 shost_printk(KERN_ERR, phba->shost,
3014                              "hwi_init_controller failed\n");
3015                 return -ENOMEM;
3016         }
3017         return 0;
3018 }
3019
3020 static void beiscsi_free_mem(struct beiscsi_hba *phba)
3021 {
3022         struct be_mem_descriptor *mem_descr;
3023         int i, j;
3024
3025         mem_descr = phba->init_mem;
3026         i = 0;
3027         j = 0;
3028         for (i = 0; i < SE_MEM_MAX; i++) {
3029                 for (j = mem_descr->num_elements; j > 0; j--) {
3030                         pci_free_consistent(phba->pcidev,
3031                           mem_descr->mem_array[j - 1].size,
3032                           mem_descr->mem_array[j - 1].virtual_address,
3033                           (unsigned long)mem_descr->mem_array[j - 1].
3034                           bus_address.u.a64.address);
3035                 }
3036                 kfree(mem_descr->mem_array);
3037                 mem_descr++;
3038         }
3039         kfree(phba->init_mem);
3040         kfree(phba->phwi_ctrlr);
3041 }
3042
3043 static int beiscsi_init_controller(struct beiscsi_hba *phba)
3044 {
3045         int ret = -ENOMEM;
3046
3047         ret = beiscsi_get_memory(phba);
3048         if (ret < 0) {
3049                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3050                              "Failed in beiscsi_get_memory\n");
3051                 return ret;
3052         }
3053
3054         ret = hwi_init_controller(phba);
3055         if (ret)
3056                 goto free_init;
3057         SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
3058         return 0;
3059
3060 free_init:
3061         beiscsi_free_mem(phba);
3062         return -ENOMEM;
3063 }
3064
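/**
 * beiscsi_init_sgl_handle - set up the IO and eh SGL handle pools
 * @phba: driver priv structure for the adapter
 *
 * The HWI_MEM_SGLH region provides the sgl_handle structures: the
 * first ios_per_ctrl handles go to the IO pool and the remainder to
 * the eh (non-IO) pool.  Each handle is then pointed at its slice of
 * the HWI_MEM_SGE fragment area and given an sgl_index offset by the
 * firmware's iscsi_icd_start.
 */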
3065 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3066 {
3067         struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3068         struct sgl_handle *psgl_handle;
3069         struct iscsi_sge *pfrag;
3070         unsigned int arr_index, i, idx;
3071
3072         phba->io_sgl_hndl_avbl = 0;
3073         phba->eh_sgl_hndl_avbl = 0;
3074
3075         mem_descr_sglh = phba->init_mem;
3076         mem_descr_sglh += HWI_MEM_SGLH;
3077         if (1 == mem_descr_sglh->num_elements) {
3078                 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3079                                                  phba->params.ios_per_ctrl,
3080                                                  GFP_KERNEL);
3081                 if (!phba->io_sgl_hndl_base) {
3082                         shost_printk(KERN_ERR, phba->shost,
3083                                      "Mem Alloc Failed. Failing to load\n");
3084                         return -ENOMEM;
3085                 }
3086                 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3087                                                  (phba->params.icds_per_ctrl -
3088                                                  phba->params.ios_per_ctrl),
3089                                                  GFP_KERNEL);
3090                 if (!phba->eh_sgl_hndl_base) {
3091                         kfree(phba->io_sgl_hndl_base);
3092                         shost_printk(KERN_ERR, phba->shost,
3093                                      "Mem Alloc Failed. Failing to load\n");
3094                         return -ENOMEM;
3095                 }
3096         } else {
3097                 shost_printk(KERN_ERR, phba->shost,
3098                              "HWI_MEM_SGLH has more than one element. "
3099                              "Failing to load\n");
3100                 return -ENOMEM;
3101         }
3102
3103         arr_index = 0;
3104         idx = 0;
3105         while (idx < mem_descr_sglh->num_elements) {
3106                 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3107
3108                 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3109                       sizeof(struct sgl_handle)); i++) {
3110                         if (arr_index < phba->params.ios_per_ctrl) {
3111                                 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3112                                 phba->io_sgl_hndl_avbl++;
3113                                 arr_index++;
3114                         } else {
3115                                 phba->eh_sgl_hndl_base[arr_index -
3116                                         phba->params.ios_per_ctrl] =
3117                                                                 psgl_handle;
3118                                 arr_index++;
3119                                 phba->eh_sgl_hndl_avbl++;
3120                         }
3121                         psgl_handle++;
3122                 }
3123                 idx++;
3124         }
3125         SE_DEBUG(DBG_LVL_8,
3126                  "phba->io_sgl_hndl_avbl=%d "
3127                  "phba->eh_sgl_hndl_avbl=%d\n",
3128                  phba->io_sgl_hndl_avbl,
3129                  phba->eh_sgl_hndl_avbl);
3130         mem_descr_sg = phba->init_mem;
3131         mem_descr_sg += HWI_MEM_SGE;
3132         SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n",
3133                  mem_descr_sg->num_elements);
3134         arr_index = 0;
3135         idx = 0;
3136         while (idx < mem_descr_sg->num_elements) {
3137                 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3138
3139                 for (i = 0;
3140                      i < (mem_descr_sg->mem_array[idx].size) /
3141                      (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3142                      i++) {
3143                         if (arr_index < phba->params.ios_per_ctrl)
3144                                 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3145                         else
3146                                 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3147                                                 phba->params.ios_per_ctrl];
3148                         psgl_handle->pfrag = pfrag;
3149                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3150                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3151                         pfrag += phba->params.num_sge_per_io;
3152                         psgl_handle->sgl_index =
3153                                 phba->fw_config.iscsi_icd_start + arr_index++;
3154                 }
3155                 idx++;
3156         }
3157         phba->io_sgl_free_index = 0;
3158         phba->io_sgl_alloc_index = 0;
3159         phba->eh_sgl_free_index = 0;
3160         phba->eh_sgl_alloc_index = 0;
3161         return 0;
3162 }
3163
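/**
 * hba_setup_cid_tbls - allocate the CID and endpoint lookup tables
 * @phba: driver priv structure for the adapter
 *
 * cid_array holds the pool of free connection IDs, seeded from the
 * firmware's iscsi_cid_start in steps of two; ep_array, with two slots
 * per connection, appears to map a CID back to its iscsi_endpoint for
 * connection lookup.
 */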
3164 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3165 {
3166         int i, new_cid;
3167
3168         phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3169                                   GFP_KERNEL);
3170         if (!phba->cid_array) {
3171                 shost_printk(KERN_ERR, phba->shost,
3172                              "Failed to allocate memory in "
3173                              "hba_setup_cid_tbls\n");
3174                 return -ENOMEM;
3175         }
3176         phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3177                                  phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3178         if (!phba->ep_array) {
3179                 shost_printk(KERN_ERR, phba->shost,
3180                              "Failed to allocate memory in "
3181                              "hba_setup_cid_tbls\n");
3182                 kfree(phba->cid_array);
3183                 return -ENOMEM;
3184         }
3185         new_cid = phba->fw_config.iscsi_cid_start;
3186         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3187                 phba->cid_array[i] = new_cid;
3188                 new_cid += 2;
3189         }
3190         phba->avlbl_cids = phba->params.cxns_per_ctrl;
3191         return 0;
3192 }
3193
3194 static void hwi_enable_intr(struct beiscsi_hba *phba)
3195 {
3196         struct be_ctrl_info *ctrl = &phba->ctrl;
3197         struct hwi_controller *phwi_ctrlr;
3198         struct hwi_context_memory *phwi_context;
3199         struct be_queue_info *eq;
3200         u8 __iomem *addr;
3201         u32 reg, i;
3202         u32 enabled;
3203
3204         phwi_ctrlr = phba->phwi_ctrlr;
3205         phwi_context = phwi_ctrlr->phwi_ctxt;
3206
3207         addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3208                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3209         reg = ioread32(addr);
3210         SE_DEBUG(DBG_LVL_8, "reg = 0x%08x\n", reg);
3211
3212         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3213         if (!enabled) {
3214                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3215                 SE_DEBUG(DBG_LVL_8, "reg = 0x%08x addr=%p\n", reg, addr);
3216                 iowrite32(reg, addr);
3217                 if (!phba->msix_enabled) {
3218                         eq = &phwi_context->be_eq[0].q;
3219                         SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3220                         hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3221                 } else {
3222                         for (i = 0; i <= phba->num_cpus; i++) {
3223                                 eq = &phwi_context->be_eq[i].q;
3224                                 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3225                                 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3226                         }
3227                 }
3228         }
3229 }
3230
3231 static void hwi_disable_intr(struct beiscsi_hba *phba)
3232 {
3233         struct be_ctrl_info *ctrl = &phba->ctrl;
3234
3235         u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3236         u32 reg = ioread32(addr);
3237
3238         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3239         if (enabled) {
3240                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3241                 iowrite32(reg, addr);
3242         } else
3243                 shost_printk(KERN_WARNING, phba->shost,
3244                              "In hwi_disable_intr, Already Disabled\n");
3245 }
3246
3247 static int beiscsi_init_port(struct beiscsi_hba *phba)
3248 {
3249         int ret;
3250
3251         ret = beiscsi_init_controller(phba);
3252         if (ret < 0) {
3253                 shost_printk(KERN_ERR, phba->shost,
3254                              "beiscsi_dev_probe - Failed in "
3255                              "beiscsi_init_controller\n");
3256                 return ret;
3257         }
3258         ret = beiscsi_init_sgl_handle(phba);
3259         if (ret < 0) {
3260                 shost_printk(KERN_ERR, phba->shost,
3261                              "beiscsi_dev_probe - Failed in "
3262                              "beiscsi_init_sgl_handle\n");
3263                 goto do_cleanup_ctrlr;
3264         }
3265
3266         if (hba_setup_cid_tbls(phba)) {
3267                 shost_printk(KERN_ERR, phba->shost,
3268                              "Failed in hba_setup_cid_tbls\n");
3269                 kfree(phba->io_sgl_hndl_base);
3270                 kfree(phba->eh_sgl_hndl_base);
3271                 goto do_cleanup_ctrlr;
3272         }
3273
3274         return ret;
3275
3276 do_cleanup_ctrlr:
3277         hwi_cleanup(phba);
3278         return ret;
3279 }
3280
3281 static void hwi_purge_eq(struct beiscsi_hba *phba)
3282 {
3283         struct hwi_controller *phwi_ctrlr;
3284         struct hwi_context_memory *phwi_context;
3285         struct be_queue_info *eq;
3286         struct be_eq_entry *eqe = NULL;
3287         int i, eq_msix;
3288         unsigned int num_processed;
3289
3290         phwi_ctrlr = phba->phwi_ctrlr;
3291         phwi_context = phwi_ctrlr->phwi_ctxt;
3292         if (phba->msix_enabled)
3293                 eq_msix = 1;
3294         else
3295                 eq_msix = 0;
3296
3297         for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3298                 eq = &phwi_context->be_eq[i].q;
3299                 eqe = queue_tail_node(eq);
3300                 num_processed = 0;
3301                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3302                                         & EQE_VALID_MASK) {
3303                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3304                         queue_tail_inc(eq);
3305                         eqe = queue_tail_node(eq);
3306                         num_processed++;
3307                 }
3308
3309                 if (num_processed)
3310                         hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
3311         }
3312 }
3313
3314 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3315 {
3316         int mgmt_status;
3317
3318         mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3319         if (mgmt_status)
3320                 shost_printk(KERN_WARNING, phba->shost,
3321                              "mgmt_epfw_cleanup FAILED\n");
3322
3323         hwi_purge_eq(phba);
3324         hwi_cleanup(phba);
3325         kfree(phba->io_sgl_hndl_base);
3326         kfree(phba->eh_sgl_hndl_base);
3327         kfree(phba->cid_array);
3328         kfree(phba->ep_array);
3329 }
3330
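/**
 * beiscsi_offload_connection - push negotiated login params to the chip
 * @beiscsi_conn: driver connection being offloaded
 * @params: negotiated offload parameters from login
 *
 * Allocates a WRB handle for the connection and builds a context-update
 * WRB carrying the max burst, first burst, max send data segment
 * length, ERL, digest and R2T settings plus the pad buffer address,
 * then rings the TX doorbell to hand the connection to the firmware.
 */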
3331 void
3332 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3333                            struct beiscsi_offload_params *params)
3334 {
3335         struct wrb_handle *pwrb_handle;
3336         struct iscsi_target_context_update_wrb *pwrb = NULL;
3337         struct be_mem_descriptor *mem_descr;
3338         struct beiscsi_hba *phba = beiscsi_conn->phba;
3339         u32 doorbell = 0;
3340
3341         /*
3342          * We can always use 0 here because it is reserved by libiscsi for
3343          * login/startup related tasks.
3344          */
3345         pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3346                                        phba->fw_config.iscsi_cid_start));
3347         pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3348         memset(pwrb, 0, sizeof(*pwrb));
3349         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3350                       max_burst_length, pwrb, params->dw[offsetof
3351                       (struct amap_beiscsi_offload_params,
3352                       max_burst_length) / 32]);
3353         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3354                       max_send_data_segment_length, pwrb,
3355                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3356                       max_send_data_segment_length) / 32]);
3357         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3358                       first_burst_length,
3359                       pwrb,
3360                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3361                       first_burst_length) / 32]);
3362
3363         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3364                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3365                       erl) / 32] & OFFLD_PARAMS_ERL));
3366         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3367                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3368                       dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3369         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3370                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3371                       hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3372         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3373                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3374                       ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3375         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3376                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3377                        imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3378         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3379                       pwrb,
3380                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3381                       exp_statsn) / 32] + 1));
3382         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3383                       0x7);
3384         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3385                       pwrb, pwrb_handle->wrb_index);
3386         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3387                       pwrb, pwrb_handle->nxt_wrb_index);
3388         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3389                         session_state, pwrb, 0);
3390         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3391                       pwrb, 1);
3392         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3393                       pwrb, 0);
3394         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3395                       0);
3396
3397         mem_descr = phba->init_mem;
3398         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3399
3400         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3401                         pad_buffer_addr_hi, pwrb,
3402                       mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3403         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3404                         pad_buffer_addr_lo, pwrb,
3405                       mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3406
3407         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3408
3409         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3410         doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3411                              << DB_DEF_PDU_WRB_INDEX_SHIFT;
3412         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3413
3414         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3415 }
3416
3417 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3418                               int *index, int *age)
3419 {
3420         *index = (int)itt;
3421         if (age)
3422                 *age = conn->session->age;
3423 }
3424
3425 /**
3426  * beiscsi_alloc_pdu - allocates pdu and related resources
3427  * @task: libiscsi task
3428  * @opcode: opcode of pdu for task
3429  *
3430  * This is called with the session lock held. It will allocate
3431  * the wrb and sgl if needed for the command. And it will prep
3432  * the pdu's itt. beiscsi_parse_pdu will later translate
3433  * the pdu itt to the libiscsi task itt.
3434  */
3435 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3436 {
3437         struct beiscsi_io_task *io_task = task->dd_data;
3438         struct iscsi_conn *conn = task->conn;
3439         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3440         struct beiscsi_hba *phba = beiscsi_conn->phba;
3441         struct hwi_wrb_context *pwrb_context;
3442         struct hwi_controller *phwi_ctrlr;
3443         itt_t itt;
3444         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3445         dma_addr_t paddr;
3446
3447         io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3448                                           GFP_KERNEL, &paddr);
3449         if (!io_task->cmd_bhs)
3450                 return -ENOMEM;
3451         io_task->bhs_pa.u.a64.address = paddr;
3452         io_task->libiscsi_itt = (itt_t)task->itt;
3453         io_task->pwrb_handle = alloc_wrb_handle(phba,
3454                                                 beiscsi_conn->beiscsi_conn_cid -
3455                                                 phba->fw_config.iscsi_cid_start
3456                                                 );
3457         io_task->conn = beiscsi_conn;
3458
3459         task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3460         task->hdr_max = sizeof(struct be_cmd_bhs);
3461
3462         if (task->sc) {
3463                 spin_lock(&phba->io_sgl_lock);
3464                 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3465                 spin_unlock(&phba->io_sgl_lock);
3466                 if (!io_task->psgl_handle)
3467                         goto free_hndls;
3468         } else {
3469                 io_task->scsi_cmnd = NULL;
3470                 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3471                         if (!beiscsi_conn->login_in_progress) {
3472                                 spin_lock(&phba->mgmt_sgl_lock);
3473                                 io_task->psgl_handle = (struct sgl_handle *)
3474                                                 alloc_mgmt_sgl_handle(phba);
3475                                 spin_unlock(&phba->mgmt_sgl_lock);
3476                                 if (!io_task->psgl_handle)
3477                                         goto free_hndls;
3478
3479                                 beiscsi_conn->login_in_progress = 1;
3480                                 beiscsi_conn->plogin_sgl_handle =
3481                                                         io_task->psgl_handle;
3482                         } else {
3483                                 io_task->psgl_handle =
3484                                                 beiscsi_conn->plogin_sgl_handle;
3485                         }
3486                 } else {
3487                         spin_lock(&phba->mgmt_sgl_lock);
3488                         io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3489                         spin_unlock(&phba->mgmt_sgl_lock);
3490                         if (!io_task->psgl_handle)
3491                                 goto free_hndls;
3492                 }
3493         }
3494         itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3495                                  wrb_index << 16) | (unsigned int)
3496                                 (io_task->psgl_handle->sgl_index));
3497         io_task->pwrb_handle->pio_handle = task;
3498
3499         io_task->cmd_bhs->iscsi_hdr.itt = itt;
3500         return 0;
3501
3502 free_hndls:
3503         phwi_ctrlr = phba->phwi_ctrlr;
3504         pwrb_context = &phwi_ctrlr->wrb_context[
3505                         beiscsi_conn->beiscsi_conn_cid -
3506                         phba->fw_config.iscsi_cid_start];
3507         free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3508         io_task->pwrb_handle = NULL;
3509         pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3510                       io_task->bhs_pa.u.a64.address);
3511         SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
3512         return -ENOMEM;
3513 }
3514
3515 static void beiscsi_cleanup_task(struct iscsi_task *task)
3516 {
3517         struct beiscsi_io_task *io_task = task->dd_data;
3518         struct iscsi_conn *conn = task->conn;
3519         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3520         struct beiscsi_hba *phba = beiscsi_conn->phba;
3521         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3522         struct hwi_wrb_context *pwrb_context;
3523         struct hwi_controller *phwi_ctrlr;
3524
3525         phwi_ctrlr = phba->phwi_ctrlr;
3526         pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3527                         - phba->fw_config.iscsi_cid_start];
3528         if (io_task->pwrb_handle) {
3529                 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3530                 io_task->pwrb_handle = NULL;
3531         }
3532
3533         if (io_task->cmd_bhs) {
3534                 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3535                               io_task->bhs_pa.u.a64.address);
3536         }
3537
3538         if (task->sc) {
3539                 if (io_task->psgl_handle) {
3540                         spin_lock(&phba->io_sgl_lock);
3541                         free_io_sgl_handle(phba, io_task->psgl_handle);
3542                         spin_unlock(&phba->io_sgl_lock);
3543                         io_task->psgl_handle = NULL;
3544                 }
3545         } else {
3546                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3547                         return;
3548                 if (io_task->psgl_handle) {
3549                         spin_lock(&phba->mgmt_sgl_lock);
3550                         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3551                         spin_unlock(&phba->mgmt_sgl_lock);
3552                         io_task->psgl_handle = NULL;
3553                 }
3554         }
3555 }
3556
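/**
 * beiscsi_iotask - post a SCSI command WRB
 * @task: libiscsi task carrying the SCSI command
 * @sg: mapped scatterlist for the data buffer
 * @num_sg: number of scatterlist entries
 * @xferlen: total transfer length in bytes
 * @writedir: non-zero for a write (DMA_TO_DEVICE) command
 *
 * Fills the task's WRB as INI_WR_CMD (pre-building the DATA_OUT header
 * template) or INI_RD_CMD, writes the SGL, links to the next WRB and
 * rings the TX doorbell for the connection's CID.
 */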
3557 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3558                           unsigned int num_sg, unsigned int xferlen,
3559                           unsigned int writedir)
3560 {
3561
3562         struct beiscsi_io_task *io_task = task->dd_data;
3563         struct iscsi_conn *conn = task->conn;
3564         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3565         struct beiscsi_hba *phba = beiscsi_conn->phba;
3566         struct iscsi_wrb *pwrb = NULL;
3567         unsigned int doorbell = 0;
3568
3569         pwrb = io_task->pwrb_handle->pwrb;
3570         io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3571         io_task->bhs_len = sizeof(struct be_cmd_bhs);
3572
3573         if (writedir) {
3574                 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3575                 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3576                               &io_task->cmd_bhs->iscsi_data_pdu,
3577                               (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3578                 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3579                               &io_task->cmd_bhs->iscsi_data_pdu,
3580                               ISCSI_OPCODE_SCSI_DATA_OUT);
3581                 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3582                               &io_task->cmd_bhs->iscsi_data_pdu, 1);
3583                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3584                               INI_WR_CMD);
3585                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3586         } else {
3587                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3588                               INI_RD_CMD);
3589                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3590         }
3591         memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3592                dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3593                io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3594
3595         AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3596                       cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
3597                                   lun[0]));
3598         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3599         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3600                       io_task->pwrb_handle->wrb_index);
3601         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3602                       be32_to_cpu(task->cmdsn));
3603         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3604                       io_task->psgl_handle->sgl_index);
3605
3606         hwi_write_sgl(pwrb, sg, num_sg, io_task);
3607
3608         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3609                       io_task->pwrb_handle->nxt_wrb_index);
3610         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3611
3612         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3613         doorbell |= (io_task->pwrb_handle->wrb_index &
3614                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3615         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3616
3617         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3618         return 0;
3619 }
3620
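/**
 * beiscsi_mtask - post a management/immediate task WRB
 * @task: libiscsi task (login, nop-out, text, TMF or logout)
 *
 * Selects the WRB type from the iSCSI opcode (TGT_DM_CMD, INI_RD_CMD,
 * INI_TMF_CMD or HWH_TYPE_LOGOUT), copies the PDU into the WRB via
 * hwi_write_buffer() and rings the TX doorbell.  Unsupported opcodes
 * return -EINVAL.
 */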
3621 static int beiscsi_mtask(struct iscsi_task *task)
3622 {
3623         struct beiscsi_io_task *io_task = task->dd_data;
3624         struct iscsi_conn *conn = task->conn;
3625         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3626         struct beiscsi_hba *phba = beiscsi_conn->phba;
3627         struct iscsi_wrb *pwrb = NULL;
3628         unsigned int doorbell = 0;
3629         unsigned int cid;
3630
3631         cid = beiscsi_conn->beiscsi_conn_cid;
3632         pwrb = io_task->pwrb_handle->pwrb;
3633         memset(pwrb, 0, sizeof(*pwrb));
3634         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3635                       be32_to_cpu(task->cmdsn));
3636         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3637                       io_task->pwrb_handle->wrb_index);
3638         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3639                       io_task->psgl_handle->sgl_index);
3640
3641         switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3642         case ISCSI_OP_LOGIN:
3643                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3644                               TGT_DM_CMD);
3645                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3646                 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3647                 hwi_write_buffer(pwrb, task);
3648                 break;
3649         case ISCSI_OP_NOOP_OUT:
3650                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3651                               INI_RD_CMD);
3652                 if (task->hdr->ttt == ISCSI_RESERVED_TAG)
3653                         AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3654                 else
3655                         AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
3656                 hwi_write_buffer(pwrb, task);
3657                 break;
3658         case ISCSI_OP_TEXT:
3659                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3660                               TGT_DM_CMD);
3661                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3662                 hwi_write_buffer(pwrb, task);
3663                 break;
3664         case ISCSI_OP_SCSI_TMFUNC:
3665                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3666                               INI_TMF_CMD);
3667                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3668                 hwi_write_buffer(pwrb, task);
3669                 break;
3670         case ISCSI_OP_LOGOUT:
3671                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3672                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3673                               HWH_TYPE_LOGOUT);
3674                 hwi_write_buffer(pwrb, task);
3675                 break;
3676
3677         default:
3678                 SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported\n",
3679                          task->hdr->opcode & ISCSI_OPCODE_MASK);
3680                 return -EINVAL;
3681         }
3682
3683         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3684                       task->data_count);
3685         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3686                       io_task->pwrb_handle->nxt_wrb_index);
3687         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3688
3689         doorbell |= cid & DB_WRB_POST_CID_MASK;
3690         doorbell |= (io_task->pwrb_handle->wrb_index &
3691                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3692         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3693         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3694         return 0;
3695 }
3696
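/**
 * beiscsi_task_xmit - libiscsi transmit hook
 * @task: libiscsi task to transmit
 *
 * Management PDUs (no scsi_cmnd attached) go through beiscsi_mtask();
 * SCSI commands are DMA-mapped and handed to beiscsi_iotask() with the
 * scatterlist, transfer length and data direction.
 */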
3697 static int beiscsi_task_xmit(struct iscsi_task *task)
3698 {
3699         struct beiscsi_io_task *io_task = task->dd_data;
3700         struct scsi_cmnd *sc = task->sc;
3701         struct scatterlist *sg;
3702         int num_sg;
3703         unsigned int  writedir = 0, xferlen = 0;
3704
3705         if (!sc)
3706                 return beiscsi_mtask(task);
3707
3708         io_task->scsi_cmnd = sc;
3709         num_sg = scsi_dma_map(sc);
3710         if (num_sg < 0) {
3711                 SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n");
3712                 return num_sg;
3713         }
3714         SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3715                   (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3716         xferlen = scsi_bufflen(sc);
3717         sg = scsi_sglist(sc);
3718         if (sc->sc_data_direction == DMA_TO_DEVICE) {
3719                 writedir = 1;
3720                 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
3721                          task->imm_count);
3722         } else
3723                 writedir = 0;
3724         return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3725 }
3726
3727 static void beiscsi_remove(struct pci_dev *pcidev)
3728 {
3729         struct beiscsi_hba *phba = NULL;
3730         struct hwi_controller *phwi_ctrlr;
3731         struct hwi_context_memory *phwi_context;
3732         struct be_eq_obj *pbe_eq;
3733         unsigned int i, msix_vec;
3734
3735         phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3736         if (!phba) {
3737                 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
3738                 return;
3739         }
3740
3741         phwi_ctrlr = phba->phwi_ctrlr;
3742         phwi_context = phwi_ctrlr->phwi_ctxt;
3743         hwi_disable_intr(phba);
3744         if (phba->msix_enabled) {
3745                 for (i = 0; i <= phba->num_cpus; i++) {
3746                         msix_vec = phba->msix_entries[i].vector;
3747                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3748                 }
3749         } else
3750                 if (phba->pcidev->irq)
3751                         free_irq(phba->pcidev->irq, phba);
3752         pci_disable_msix(phba->pcidev);
3753         destroy_workqueue(phba->wq);
3754         if (blk_iopoll_enabled)
3755                 for (i = 0; i < phba->num_cpus; i++) {
3756                         pbe_eq = &phwi_context->be_eq[i];
3757                         blk_iopoll_disable(&pbe_eq->iopoll);
3758                 }
3759
3760         beiscsi_clean_port(phba);
3761         beiscsi_free_mem(phba);
3762         beiscsi_unmap_pci_function(phba);
3763         pci_free_consistent(phba->pcidev,
3764                             phba->ctrl.mbox_mem_alloced.size,
3765                             phba->ctrl.mbox_mem_alloced.va,
3766                             phba->ctrl.mbox_mem_alloced.dma);
3767         iscsi_host_remove(phba->shost);
3768         pci_dev_put(phba->pcidev);
3769         iscsi_host_free(phba->shost);
3770 }
3771
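     /*
      * beiscsi_msix_enable - request num_cpus + 1 MSI-X vectors, one for
      * each per-CPU event queue plus (it appears) one extra for the
      * default/MCC event queue.  msix_enabled is set only if
      * pci_enable_msix() grants the full set; otherwise the driver keeps
      * using legacy INTx.
      */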
3772 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3773 {
3774         int i, status;
3775
3776         for (i = 0; i <= phba->num_cpus; i++)
3777                 phba->msix_entries[i].entry = i;
3778
3779         status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3780                                  (phba->num_cpus + 1));
3781         if (!status)
3782                 phba->msix_enabled = true;
3783
3784         return;
3785 }
3786
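     /*
      * beiscsi_dev_probe - PCI probe callback.  Enables the device,
      * allocates the host structure, derives the ASIC generation from the
      * PCI device ID, optionally enables MSI-X, then brings up the control
      * path, firmware configuration, port resources, MCC tags, the
      * CQ-processing workqueue and blk_iopoll before requesting IRQs and
      * unmasking interrupts.  Every failure point unwinds through the
      * labels at the bottom of the function.
      */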
3787 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3788                                 const struct pci_device_id *id)
3789 {
3790         struct beiscsi_hba *phba = NULL;
3791         struct hwi_controller *phwi_ctrlr;
3792         struct hwi_context_memory *phwi_context;
3793         struct be_eq_obj *pbe_eq;
3794         int ret, num_cpus, i;
3795
3796         ret = beiscsi_enable_pci(pcidev);
3797         if (ret < 0) {
3798                 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3799                         " Failed to enable pci device\n");
3800                 return ret;
3801         }
3802
3803         phba = beiscsi_hba_alloc(pcidev);
3804         if (!phba) {
3805                 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3806                         " Failed in beiscsi_hba_alloc\n");
3807                 goto disable_pci;
3808         }
3809
3810         switch (pcidev->device) {
3811         case BE_DEVICE_ID1:
3812         case OC_DEVICE_ID1:
3813         case OC_DEVICE_ID2:
3814                 phba->generation = BE_GEN2;
3815                 break;
3816         case BE_DEVICE_ID2:
3817         case OC_DEVICE_ID3:
3818                 phba->generation = BE_GEN3;
3819                 break;
3820         default:
3821                 phba->generation = 0;
3822         }
3823
3824         if (enable_msix)
3825                 num_cpus = find_num_cpus();
3826         else
3827                 num_cpus = 1;
3828         phba->num_cpus = num_cpus;
3829         SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
3830
3831         if (enable_msix)
3832                 beiscsi_msix_enable(phba);
3833         ret = be_ctrl_init(phba, pcidev);
3834         if (ret) {
3835                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3836                                 "Failed in be_ctrl_init\n");
3837                 goto hba_free;
3838         }
3839
3840         spin_lock_init(&phba->io_sgl_lock);
3841         spin_lock_init(&phba->mgmt_sgl_lock);
3842         spin_lock_init(&phba->isr_lock);
3843         ret = mgmt_get_fw_config(&phba->ctrl, phba);
3844         if (ret != 0) {
3845                 shost_printk(KERN_ERR, phba->shost,
3846                              "Error getting fw config\n");
3847                 goto free_port;
3848         }
3849         phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3850         beiscsi_get_params(phba);
3851         phba->shost->can_queue = phba->params.ios_per_ctrl;
3852         ret = beiscsi_init_port(phba);
3853         if (ret < 0) {
3854                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3855                              "Failed in beiscsi_init_port\n");
3856                 goto free_port;
3857         }
3858
3859         for (i = 0; i < MAX_MCC_CMD ; i++) {
3860                 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
3861                 phba->ctrl.mcc_tag[i] = i + 1;
3862                 phba->ctrl.mcc_numtag[i + 1] = 0;
3863                 phba->ctrl.mcc_tag_available++;
3864         }
3865
3866         phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
3867
3868         snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3869                  phba->shost->host_no);
3870         phba->wq = create_workqueue(phba->wq_name);
3871         if (!phba->wq) {
3872                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3873                                 "Failed to allocate work queue\n");
3874                 goto free_twq;
3875         }
3876
3877         INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3878
3879         phwi_ctrlr = phba->phwi_ctrlr;
3880         phwi_context = phwi_ctrlr->phwi_ctxt;
3881         if (blk_iopoll_enabled) {
3882                 for (i = 0; i < phba->num_cpus; i++) {
3883                         pbe_eq = &phwi_context->be_eq[i];
3884                         blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3885                                         be_iopoll);
3886                         blk_iopoll_enable(&pbe_eq->iopoll);
3887                 }
3888         }
3889         ret = beiscsi_init_irqs(phba);
3890         if (ret < 0) {
3891                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3892                              "Failed in beiscsi_init_irqs\n");
3893                 goto free_blkenbld;
3894         }
3895         hwi_enable_intr(phba);
3896         SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
3897         return 0;
3898
3899 free_blkenbld:
3900         destroy_workqueue(phba->wq);
3901         if (blk_iopoll_enabled)
3902                 for (i = 0; i < phba->num_cpus; i++) {
3903                         pbe_eq = &phwi_context->be_eq[i];
3904                         blk_iopoll_disable(&pbe_eq->iopoll);
3905                 }
3906 free_twq:
3907         beiscsi_clean_port(phba);
3908         beiscsi_free_mem(phba);
3909 free_port:
3910         pci_free_consistent(phba->pcidev,
3911                             phba->ctrl.mbox_mem_alloced.size,
3912                             phba->ctrl.mbox_mem_alloced.va,
3913                             phba->ctrl.mbox_mem_alloced.dma);
3914         beiscsi_unmap_pci_function(phba);
3915 hba_free:
3916         if (phba->msix_enabled)
3917                 pci_disable_msix(phba->pcidev);
3918         iscsi_host_remove(phba->shost);
3919         pci_dev_put(phba->pcidev);
3920         iscsi_host_free(phba->shost);
3921 disable_pci:
3922         pci_disable_device(pcidev);
3923         return ret;
3924 }
3925
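     /*
      * iSCSI transport template registered with the iSCSI class.  It
      * advertises header/data digest, multiple R2T and data-path offload
      * capabilities, lists the session/connection parameters the driver
      * handles, and wires the class callbacks to the handlers above.
      */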
3926 struct iscsi_transport beiscsi_iscsi_transport = {
3927         .owner = THIS_MODULE,
3928         .name = DRV_NAME,
3929         .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
3930                 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
3931         .param_mask = ISCSI_MAX_RECV_DLENGTH |
3932                 ISCSI_MAX_XMIT_DLENGTH |
3933                 ISCSI_HDRDGST_EN |
3934                 ISCSI_DATADGST_EN |
3935                 ISCSI_INITIAL_R2T_EN |
3936                 ISCSI_MAX_R2T |
3937                 ISCSI_IMM_DATA_EN |
3938                 ISCSI_FIRST_BURST |
3939                 ISCSI_MAX_BURST |
3940                 ISCSI_PDU_INORDER_EN |
3941                 ISCSI_DATASEQ_INORDER_EN |
3942                 ISCSI_ERL |
3943                 ISCSI_CONN_PORT |
3944                 ISCSI_CONN_ADDRESS |
3945                 ISCSI_EXP_STATSN |
3946                 ISCSI_PERSISTENT_PORT |
3947                 ISCSI_PERSISTENT_ADDRESS |
3948                 ISCSI_TARGET_NAME | ISCSI_TPGT |
3949                 ISCSI_USERNAME | ISCSI_PASSWORD |
3950                 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
3951                 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
3952                 ISCSI_LU_RESET_TMO |
3953                 ISCSI_PING_TMO | ISCSI_RECV_TMO |
3954                 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
3955         .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
3956                                 ISCSI_HOST_INITIATOR_NAME,
3957         .create_session = beiscsi_session_create,
3958         .destroy_session = beiscsi_session_destroy,
3959         .create_conn = beiscsi_conn_create,
3960         .bind_conn = beiscsi_conn_bind,
3961         .destroy_conn = iscsi_conn_teardown,
3962         .set_param = beiscsi_set_param,
3963         .get_conn_param = beiscsi_conn_get_param,
3964         .get_session_param = iscsi_session_get_param,
3965         .get_host_param = beiscsi_get_host_param,
3966         .start_conn = beiscsi_conn_start,
3967         .stop_conn = iscsi_conn_stop,
3968         .send_pdu = iscsi_conn_send_pdu,
3969         .xmit_task = beiscsi_task_xmit,
3970         .cleanup_task = beiscsi_cleanup_task,
3971         .alloc_pdu = beiscsi_alloc_pdu,
3972         .parse_pdu_itt = beiscsi_parse_pdu,
3973         .get_stats = beiscsi_conn_get_stats,
3974         .ep_connect = beiscsi_ep_connect,
3975         .ep_poll = beiscsi_ep_poll,
3976         .ep_disconnect = beiscsi_ep_disconnect,
3977         .session_recovery_timedout = iscsi_session_recovery_timedout,
3978 };
3979
3980 static struct pci_driver beiscsi_pci_driver = {
3981         .name = DRV_NAME,
3982         .probe = beiscsi_dev_probe,
3983         .remove = beiscsi_remove,
3984         .id_table = beiscsi_pci_id_table
3985 };
3986
3987
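     /*
      * Module init/exit: register the iSCSI transport first, then the
      * PCI driver; unwind in the opposite order on failure and on exit.
      */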
3988 static int __init beiscsi_module_init(void)
3989 {
3990         int ret;
3991
3992         beiscsi_scsi_transport =
3993                         iscsi_register_transport(&beiscsi_iscsi_transport);
3994         if (!beiscsi_scsi_transport) {
3995                 SE_DEBUG(DBG_LVL_1,
3996                          "beiscsi_module_init - Unable to register beiscsi "
3997                          "transport.\n");
3998                 return -ENOMEM;
3999         }
4000         SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
4001                  &beiscsi_iscsi_transport);
4002
4003         ret = pci_register_driver(&beiscsi_pci_driver);
4004         if (ret) {
4005                 SE_DEBUG(DBG_LVL_1,
4006                          "beiscsi_module_init - Unable to register "
4007                          "beiscsi pci driver.\n");
4008                 goto unregister_iscsi_transport;
4009         }
4010         return 0;
4011
4012 unregister_iscsi_transport:
4013         iscsi_unregister_transport(&beiscsi_iscsi_transport);
4014         return ret;
4015 }
4016
4017 static void __exit beiscsi_module_exit(void)
4018 {
4019         pci_unregister_driver(&beiscsi_pci_driver);
4020         iscsi_unregister_transport(&beiscsi_iscsi_transport);
4021 }
4022
4023 module_init(beiscsi_module_init);
4024 module_exit(beiscsi_module_exit);