1 /**
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11  *
12  * Contact Information:
13  * linux-drivers@serverengines.com
14  *
15  *  ServerEngines
16  * 209 N. Fair Oaks Ave
17  * Sunnyvale, CA 94085
18  *
19  */
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/string.h>
27 #include <linux/kernel.h>
28 #include <linux/semaphore.h>
29
30 #include <scsi/libiscsi.h>
31 #include <scsi/scsi_transport_iscsi.h>
32 #include <scsi/scsi_transport.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi.h>
37 #include "be_main.h"
38 #include "be_iscsi.h"
39 #include "be_mgmt.h"
40
41 static unsigned int be_iopoll_budget = 10;
42 static unsigned int be_max_phys_size = 64;
43 static unsigned int enable_msix = 1;
44 static unsigned int gcrashmode = 0;
45 static unsigned int num_hba = 0;
46
47 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
48 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
49 MODULE_AUTHOR("ServerEngines Corporation");
50 MODULE_LICENSE("GPL");
51 module_param(be_iopoll_budget, int, 0);
52 module_param(enable_msix, int, 0);
53 module_param(be_max_phys_size, uint, S_IRUGO);
54 MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
55                                    " contiguous memory that can be allocated."
56                                    " Range is 16 - 128");
57
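/**
 * beiscsi_slave_configure - Limit the request queue segment size
 * @sdev: SCSI device being configured
 */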
58 static int beiscsi_slave_configure(struct scsi_device *sdev)
59 {
60         blk_queue_max_segment_size(sdev->request_queue, 65536);
61         return 0;
62 }
63
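/**
 * beiscsi_eh_abort - iSCSI abort handler
 * @sc: SCSI command to be aborted
 *
 * Invalidates the ICD for the command on the chip through an MCC
 * request, waits for its completion and then lets libiscsi finish
 * the abort via iscsi_eh_abort().
 */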
64 static int beiscsi_eh_abort(struct scsi_cmnd *sc)
65 {
66         struct iscsi_cls_session *cls_session;
67         struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
68         struct beiscsi_io_task *aborted_io_task;
69         struct iscsi_conn *conn;
70         struct beiscsi_conn *beiscsi_conn;
71         struct beiscsi_hba *phba;
72         struct iscsi_session *session;
73         struct invalidate_command_table *inv_tbl;
74         unsigned int cid, tag, num_invalidate;
75
76         cls_session = starget_to_session(scsi_target(sc->device));
77         session = cls_session->dd_data;
78
79         spin_lock_bh(&session->lock);
80         if (!aborted_task || !aborted_task->sc) {
81                 /* we raced */
82                 spin_unlock_bh(&session->lock);
83                 return SUCCESS;
84         }
85
86         aborted_io_task = aborted_task->dd_data;
87         if (!aborted_io_task->scsi_cmnd) {
88                 /* raced or invalid command */
89                 spin_unlock_bh(&session->lock);
90                 return SUCCESS;
91         }
92         spin_unlock_bh(&session->lock);
93         conn = aborted_task->conn;
94         beiscsi_conn = conn->dd_data;
95         phba = beiscsi_conn->phba;
96
97         /* invalidate iocb */
98         cid = beiscsi_conn->beiscsi_conn_cid;
99         inv_tbl = phba->inv_tbl;
100         memset(inv_tbl, 0x0, sizeof(*inv_tbl));
101         inv_tbl->cid = cid;
102         inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
103         num_invalidate = 1;
104         tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
105         if (!tag) {
106                 shost_printk(KERN_WARNING, phba->shost,
107                              "mgmt_invalidate_icds could not be"
108                              " submitted\n");
109                 return FAILED;
110         } else {
111                 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
112                                          phba->ctrl.mcc_numtag[tag]);
113                 free_mcc_tag(&phba->ctrl, tag);
114         }
115
116         return iscsi_eh_abort(sc);
117 }
118
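/**
 * beiscsi_eh_device_reset - iSCSI device reset handler
 * @sc: SCSI command that triggered the reset
 *
 * Walks the session command array, invalidates the ICDs of all
 * outstanding tasks for the LUN on the chip and then hands the
 * reset to iscsi_eh_device_reset().
 */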
119 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
120 {
121         struct iscsi_task *abrt_task;
122         struct beiscsi_io_task *abrt_io_task;
123         struct iscsi_conn *conn;
124         struct beiscsi_conn *beiscsi_conn;
125         struct beiscsi_hba *phba;
126         struct iscsi_session *session;
127         struct iscsi_cls_session *cls_session;
128         struct invalidate_command_table *inv_tbl;
129         unsigned int cid, tag, i, num_invalidate;
130         int rc = FAILED;
131
132         /* invalidate iocbs */
133         cls_session = starget_to_session(scsi_target(sc->device));
134         session = cls_session->dd_data;
135         spin_lock_bh(&session->lock);
136         if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
137                 goto unlock;
138
139         conn = session->leadconn;
140         beiscsi_conn = conn->dd_data;
141         phba = beiscsi_conn->phba;
142         cid = beiscsi_conn->beiscsi_conn_cid;
143         inv_tbl = phba->inv_tbl;
144         memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
145         num_invalidate = 0;
146         for (i = 0; i < conn->session->cmds_max; i++) {
147                 abrt_task = conn->session->cmds[i];
148                 abrt_io_task = abrt_task->dd_data;
149                 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
150                         continue;
151
152                 if (sc->device->lun != abrt_task->sc->device->lun)
153                         continue;
154
155                 inv_tbl->cid = cid;
156                 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
157                 num_invalidate++;
158                 inv_tbl++;
159         }
160         spin_unlock_bh(&session->lock);
161         inv_tbl = phba->inv_tbl;
162
163         tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
164         if (!tag) {
165                 shost_printk(KERN_WARNING, phba->shost,
166                              "mgmt_invalidate_icds could not be"
167                              " submitted\n");
168                 return FAILED;
169         } else {
170                 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
171                                          phba->ctrl.mcc_numtag[tag]);
172                 free_mcc_tag(&phba->ctrl, tag);
173         }
174
175         return iscsi_eh_device_reset(sc);
176 unlock:
177         spin_unlock_bh(&session->lock);
178         return rc;
179 }
180
181 /*------------------- PCI Driver operations and data ----------------- */
182 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
183         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
184         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
185         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
186         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
187         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
188         { 0 }
189 };
190 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
191
192 static struct scsi_host_template beiscsi_sht = {
193         .module = THIS_MODULE,
194         .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
195         .proc_name = DRV_NAME,
196         .queuecommand = iscsi_queuecommand,
197         .change_queue_depth = iscsi_change_queue_depth,
198         .slave_configure = beiscsi_slave_configure,
199         .target_alloc = iscsi_target_alloc,
200         .eh_abort_handler = beiscsi_eh_abort,
201         .eh_device_reset_handler = beiscsi_eh_device_reset,
202         .eh_target_reset_handler = iscsi_eh_session_reset,
203         .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
204         .can_queue = BE2_IO_DEPTH,
205         .this_id = -1,
206         .max_sectors = BEISCSI_MAX_SECTORS,
207         .cmd_per_lun = BEISCSI_CMD_PER_LUN,
208         .use_clustering = ENABLE_CLUSTERING,
209 };
210
211 static struct scsi_transport_template *beiscsi_scsi_transport;
212
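/**
 * beiscsi_hba_alloc - Allocate the SCSI host and driver private data
 * @pcidev: PCI device for this adapter
 *
 * Allocates a Scsi_Host through iscsi_host_alloc(), sets the host
 * limits, links the beiscsi_hba to the PCI device and registers the
 * host with the SCSI midlayer.
 */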
213 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
214 {
215         struct beiscsi_hba *phba;
216         struct Scsi_Host *shost;
217
218         shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
219         if (!shost) {
220                 dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
221                         " iscsi_host_alloc failed\n");
222                 return NULL;
223         }
224         shost->dma_boundary = pcidev->dma_mask;
225         shost->max_id = BE2_MAX_SESSIONS;
226         shost->max_channel = 0;
227         shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
228         shost->max_lun = BEISCSI_NUM_MAX_LUN;
229         shost->transportt = beiscsi_scsi_transport;
230         phba = iscsi_host_priv(shost);
231         memset(phba, 0, sizeof(*phba));
232         phba->shost = shost;
233         phba->pcidev = pci_dev_get(pcidev);
234         pci_set_drvdata(pcidev, phba);
235
236         if (iscsi_host_add(shost, &phba->pcidev->dev))
237                 goto free_devices;
238         return phba;
239
240 free_devices:
241         pci_dev_put(phba->pcidev);
242         iscsi_host_free(phba->shost);
243         return NULL;
244 }
245
246 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
247 {
248         if (phba->csr_va) {
249                 iounmap(phba->csr_va);
250                 phba->csr_va = NULL;
251         }
252         if (phba->db_va) {
253                 iounmap(phba->db_va);
254                 phba->db_va = NULL;
255         }
256         if (phba->pci_va) {
257                 iounmap(phba->pci_va);
258                 phba->pci_va = NULL;
259         }
260 }
261
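/**
 * beiscsi_map_pci_bars - ioremap the CSR, doorbell and PCI config BARs
 * @phba: The hba pointer
 * @pcidev: PCI device for this adapter
 *
 * Maps the CSR (BAR 2) and doorbell (BAR 4) regions; the PCI config
 * BAR index depends on the controller generation. Returns 0 on
 * success or -ENOMEM after unmapping on failure.
 */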
262 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
263                                 struct pci_dev *pcidev)
264 {
265         u8 __iomem *addr;
266         int pcicfg_reg;
267
268         addr = ioremap_nocache(pci_resource_start(pcidev, 2),
269                                pci_resource_len(pcidev, 2));
270         if (addr == NULL)
271                 return -ENOMEM;
272         phba->ctrl.csr = addr;
273         phba->csr_va = addr;
274         phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
275
276         addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
277         if (addr == NULL)
278                 goto pci_map_err;
279         phba->ctrl.db = addr;
280         phba->db_va = addr;
281         phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);
282
283         if (phba->generation == BE_GEN2)
284                 pcicfg_reg = 1;
285         else
286                 pcicfg_reg = 0;
287
288         addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
289                                pci_resource_len(pcidev, pcicfg_reg));
290
291         if (addr == NULL)
292                 goto pci_map_err;
293         phba->ctrl.pcicfg = addr;
294         phba->pci_va = addr;
295         phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
296         return 0;
297
298 pci_map_err:
299         beiscsi_unmap_pci_function(phba);
300         return -ENOMEM;
301 }
302
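/**
 * beiscsi_enable_pci - Enable the PCI device and set the DMA mask
 * @pcidev: PCI device for this adapter
 *
 * Enables the device, enables bus mastering and falls back to a
 * 32-bit consistent DMA mask when the 64-bit mask cannot be set.
 */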
303 static int beiscsi_enable_pci(struct pci_dev *pcidev)
304 {
305         int ret;
306
307         ret = pci_enable_device(pcidev);
308         if (ret) {
309                 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
310                         "failed\n");
311                 return ret;
312         }
313
314         pci_set_master(pcidev);
315         if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
316                 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
317                 if (ret) {
318                         dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
319                         pci_disable_device(pcidev);
320                         return ret;
321                 }
322         }
323         return 0;
324 }
325
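/**
 * be_ctrl_init - Set up the mailbox used to talk to the controller
 * @phba: The hba pointer
 * @pdev: PCI device for this adapter
 *
 * Maps the PCI BARs, allocates a 16-byte aligned MCC mailbox from
 * consistent DMA memory and initializes the mailbox and MCC locks.
 */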
326 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
327 {
328         struct be_ctrl_info *ctrl = &phba->ctrl;
329         struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
330         struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
331         int status = 0;
332
333         ctrl->pdev = pdev;
334         status = beiscsi_map_pci_bars(phba, pdev);
335         if (status)
336                 return status;
337         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
338         mbox_mem_alloc->va = pci_alloc_consistent(pdev,
339                                                   mbox_mem_alloc->size,
340                                                   &mbox_mem_alloc->dma);
341         if (!mbox_mem_alloc->va) {
342                 beiscsi_unmap_pci_function(phba);
343                 status = -ENOMEM;
344                 return status;
345         }
346
347         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
348         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
349         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
350         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
351         spin_lock_init(&ctrl->mbox_lock);
352         spin_lock_init(&phba->ctrl.mcc_lock);
353         spin_lock_init(&phba->ctrl.mcc_cq_lock);
354
355         return status;
356 }
357
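/**
 * beiscsi_get_params - Derive driver limits from the firmware config
 * @phba: The hba pointer
 *
 * Computes the per-controller IO, connection, ICD, async PDU and
 * event/completion queue entry counts from the values reported by
 * the firmware.
 */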
358 static void beiscsi_get_params(struct beiscsi_hba *phba)
359 {
360         phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
361                                     - (phba->fw_config.iscsi_cid_count
362                                     + BE2_TMFS
363                                     + BE2_NOPOUT_REQ));
364         phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
365         phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
366         phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
367         phba->params.num_sge_per_io = BE2_SGE;
368         phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
369         phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
370         phba->params.eq_timer = 64;
371         phba->params.num_eq_entries =
372             (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
373                                     + BE2_TMFS) / 512) + 1) * 512;
374         phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
375                                 ? 1024 : phba->params.num_eq_entries;
376         SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
377                              phba->params.num_eq_entries);
378         phba->params.num_cq_entries =
379             (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
380                                     + BE2_TMFS) / 512) + 1) * 512;
381         phba->params.wrbs_per_cxn = 256;
382 }
383
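/**
 * hwi_ring_eq_db - Ring the event queue doorbell
 * @phba: The hba pointer
 * @id: EQ id
 * @clr_interrupt: Clear the interrupt
 * @num_processed: Number of EQEs consumed
 * @rearm: Re-arm the EQ
 * @event: Set the event bit in the doorbell
 */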
384 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
385                            unsigned int id, unsigned int clr_interrupt,
386                            unsigned int num_processed,
387                            unsigned char rearm, unsigned char event)
388 {
389         u32 val = 0;
390         val |= id & DB_EQ_RING_ID_MASK;
391         if (rearm)
392                 val |= 1 << DB_EQ_REARM_SHIFT;
393         if (clr_interrupt)
394                 val |= 1 << DB_EQ_CLR_SHIFT;
395         if (event)
396                 val |= 1 << DB_EQ_EVNT_SHIFT;
397         val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
398         iowrite32(val, phba->db_va + DB_EQ_OFFSET);
399 }
400
401 /**
402  * be_isr_mcc - ISR for the MCC event queue (MSI-X mode)
403  * @irq: Not used
404  * @dev_id: Pointer to the be_eq_obj for the MCC event queue
405  */
406 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
407 {
408         struct beiscsi_hba *phba;
409         struct be_eq_entry *eqe = NULL;
410         struct be_queue_info *eq;
411         struct be_queue_info *mcc;
412         unsigned int num_eq_processed;
413         struct be_eq_obj *pbe_eq;
414         unsigned long flags;
415
416         pbe_eq = dev_id;
417         eq = &pbe_eq->q;
418         phba =  pbe_eq->phba;
419         mcc = &phba->ctrl.mcc_obj.cq;
420         eqe = queue_tail_node(eq);
421         if (!eqe)
422                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
423
424         num_eq_processed = 0;
425
426         while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
427                                 & EQE_VALID_MASK) {
428                 if (((eqe->dw[offsetof(struct amap_eq_entry,
429                      resource_id) / 32] &
430                      EQE_RESID_MASK) >> 16) == mcc->id) {
431                         spin_lock_irqsave(&phba->isr_lock, flags);
432                         phba->todo_mcc_cq = 1;
433                         spin_unlock_irqrestore(&phba->isr_lock, flags);
434                 }
435                 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
436                 queue_tail_inc(eq);
437                 eqe = queue_tail_node(eq);
438                 num_eq_processed++;
439         }
440         if (phba->todo_mcc_cq)
441                 queue_work(phba->wq, &phba->work_cqs);
442         if (num_eq_processed)
443                 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
444
445         return IRQ_HANDLED;
446 }
447
448 /**
449  * be_isr_msix - ISR for a per-CPU IO event queue (MSI-X mode)
450  * @irq: Not used
451  * @dev_id: Pointer to the be_eq_obj for this vector
452  */
453 static irqreturn_t be_isr_msix(int irq, void *dev_id)
454 {
455         struct beiscsi_hba *phba;
456         struct be_eq_entry *eqe = NULL;
457         struct be_queue_info *eq;
458         struct be_queue_info *cq;
459         unsigned int num_eq_processed;
460         struct be_eq_obj *pbe_eq;
461         unsigned long flags;
462
463         pbe_eq = dev_id;
464         eq = &pbe_eq->q;
465         cq = pbe_eq->cq;
466         eqe = queue_tail_node(eq);
467         if (!eqe)
468                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
469
470         phba = pbe_eq->phba;
471         num_eq_processed = 0;
472         if (blk_iopoll_enabled) {
473                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
474                                         & EQE_VALID_MASK) {
475                         if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
476                                 blk_iopoll_sched(&pbe_eq->iopoll);
477
478                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
479                         queue_tail_inc(eq);
480                         eqe = queue_tail_node(eq);
481                         num_eq_processed++;
482                 }
483                 if (num_eq_processed)
484                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
485
486                 return IRQ_HANDLED;
487         } else {
488                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
489                                                 & EQE_VALID_MASK) {
490                         spin_lock_irqsave(&phba->isr_lock, flags);
491                         phba->todo_cq = 1;
492                         spin_unlock_irqrestore(&phba->isr_lock, flags);
493                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
494                         queue_tail_inc(eq);
495                         eqe = queue_tail_node(eq);
496                         num_eq_processed++;
497                 }
498                 if (phba->todo_cq)
499                         queue_work(phba->wq, &phba->work_cqs);
500
501                 if (num_eq_processed)
502                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
503
504                 return IRQ_HANDLED;
505         }
506 }
507
508 /**
509  * be_isr - ISR for legacy INTx mode, covering MCC and IO event queues
510  * @irq: Not used
511  * @dev_id: Pointer to host adapter structure
512  */
513 static irqreturn_t be_isr(int irq, void *dev_id)
514 {
515         struct beiscsi_hba *phba;
516         struct hwi_controller *phwi_ctrlr;
517         struct hwi_context_memory *phwi_context;
518         struct be_eq_entry *eqe = NULL;
519         struct be_queue_info *eq;
520         struct be_queue_info *cq;
521         struct be_queue_info *mcc;
522         unsigned long flags, index;
523         unsigned int num_mcceq_processed, num_ioeq_processed;
524         struct be_ctrl_info *ctrl;
525         struct be_eq_obj *pbe_eq;
526         int isr;
527
528         phba = dev_id;
529         ctrl = &phba->ctrl;
530         isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
531                        (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
532         if (!isr)
533                 return IRQ_NONE;
534
535         phwi_ctrlr = phba->phwi_ctrlr;
536         phwi_context = phwi_ctrlr->phwi_ctxt;
537         pbe_eq = &phwi_context->be_eq[0];
538
539         eq = &phwi_context->be_eq[0].q;
540         mcc = &phba->ctrl.mcc_obj.cq;
541         index = 0;
542         eqe = queue_tail_node(eq);
543         if (!eqe)
544                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
545
546         num_ioeq_processed = 0;
547         num_mcceq_processed = 0;
548         if (blk_iopoll_enabled) {
549                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
550                                         & EQE_VALID_MASK) {
551                         if (((eqe->dw[offsetof(struct amap_eq_entry,
552                              resource_id) / 32] &
553                              EQE_RESID_MASK) >> 16) == mcc->id) {
554                                 spin_lock_irqsave(&phba->isr_lock, flags);
555                                 phba->todo_mcc_cq = 1;
556                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
557                                 num_mcceq_processed++;
558                         } else {
559                                 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
560                                         blk_iopoll_sched(&pbe_eq->iopoll);
561                                 num_ioeq_processed++;
562                         }
563                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
564                         queue_tail_inc(eq);
565                         eqe = queue_tail_node(eq);
566                 }
567                 if (num_ioeq_processed || num_mcceq_processed) {
568                         if (phba->todo_mcc_cq)
569                                 queue_work(phba->wq, &phba->work_cqs);
570
571                         if ((num_mcceq_processed) && (!num_ioeq_processed))
572                                 hwi_ring_eq_db(phba, eq->id, 0,
573                                               (num_ioeq_processed +
574                                                num_mcceq_processed) , 1, 1);
575                         else
576                                 hwi_ring_eq_db(phba, eq->id, 0,
577                                                (num_ioeq_processed +
578                                                 num_mcceq_processed), 0, 1);
579
580                         return IRQ_HANDLED;
581                 } else
582                         return IRQ_NONE;
583         } else {
584                 cq = &phwi_context->be_cq[0];
585                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
586                                                 & EQE_VALID_MASK) {
587
588                         if (((eqe->dw[offsetof(struct amap_eq_entry,
589                              resource_id) / 32] &
590                              EQE_RESID_MASK) >> 16) != cq->id) {
591                                 spin_lock_irqsave(&phba->isr_lock, flags);
592                                 phba->todo_mcc_cq = 1;
593                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
594                         } else {
595                                 spin_lock_irqsave(&phba->isr_lock, flags);
596                                 phba->todo_cq = 1;
597                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
598                         }
599                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
600                         queue_tail_inc(eq);
601                         eqe = queue_tail_node(eq);
602                         num_ioeq_processed++;
603                 }
604                 if (phba->todo_cq || phba->todo_mcc_cq)
605                         queue_work(phba->wq, &phba->work_cqs);
606
607                 if (num_ioeq_processed) {
608                         hwi_ring_eq_db(phba, eq->id, 0,
609                                        num_ioeq_processed, 1, 1);
610                         return IRQ_HANDLED;
611                 } else
612                         return IRQ_NONE;
613         }
614 }
615
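/**
 * beiscsi_init_irqs - Register interrupt handlers
 * @phba: The hba pointer
 *
 * With MSI-X enabled, one vector is requested per CPU for the IO EQs
 * plus one for the MCC EQ; otherwise a single shared INTx handler is
 * registered. Already-requested vectors are freed on failure.
 */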
616 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
617 {
618         struct pci_dev *pcidev = phba->pcidev;
619         struct hwi_controller *phwi_ctrlr;
620         struct hwi_context_memory *phwi_context;
621         int ret, msix_vec, i, j;
622         char desc[32];
623
624         phwi_ctrlr = phba->phwi_ctrlr;
625         phwi_context = phwi_ctrlr->phwi_ctxt;
626
627         if (phba->msix_enabled) {
628                 for (i = 0; i < phba->num_cpus; i++) {
629                         sprintf(desc, "beiscsi_msix_%04x", i);
630                         msix_vec = phba->msix_entries[i].vector;
631                         ret = request_irq(msix_vec, be_isr_msix, 0, desc,
632                                           &phwi_context->be_eq[i]);
633                         if (ret) {
634                                 shost_printk(KERN_ERR, phba->shost,
635                                              "beiscsi_init_irqs-Failed to"
636                                              " register msix for i = %d\n", i);
637                                 if (!i)
638                                         return ret;
639                                 goto free_msix_irqs;
640                         }
641                 }
642                 msix_vec = phba->msix_entries[i].vector;
643                 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
644                                   &phwi_context->be_eq[i]);
645                 if (ret) {
646                         shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
647                                      "Failed to register beiscsi_msix_mcc\n");
649                         goto free_msix_irqs;
650                 }
651
652         } else {
653                 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
654                                   "beiscsi", phba);
655                 if (ret) {
656                         shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
657                                      "Failed to register irq\n");
658                         return ret;
659                 }
660         }
661         return 0;
662 free_msix_irqs:
663         for (j = i - 1; j >= 0; j--)
664                 free_irq(phba->msix_entries[j].vector, &phwi_context->be_eq[j]);
665         return ret;
666 }
667
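/**
 * hwi_ring_cq_db - Ring the completion queue doorbell
 * @phba: The hba pointer
 * @id: CQ id
 * @num_processed: Number of CQEs consumed
 * @rearm: Re-arm the CQ for further interrupts
 * @event: Not used
 */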
668 static void hwi_ring_cq_db(struct beiscsi_hba *phba,
669                            unsigned int id, unsigned int num_processed,
670                            unsigned char rearm, unsigned char event)
671 {
672         u32 val = 0;
673         val |= id & DB_CQ_RING_ID_MASK;
674         if (rearm)
675                 val |= 1 << DB_CQ_REARM_SHIFT;
676         val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
677         iowrite32(val, phba->db_va + DB_CQ_OFFSET);
678 }
679
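/**
 * beiscsi_process_async_pdu - Hand an unsolicited PDU to libiscsi
 * @beiscsi_conn: The connection the PDU arrived on
 * @phba: The hba pointer
 * @cid: Connection id
 * @ppdu: The PDU header
 * @pdu_len: Length of the PDU header
 * @pbuffer: Data segment, if any
 * @buf_len: Length of the data segment
 *
 * Fixes up the ITT for login/text responses and completes the PDU
 * through __iscsi_complete_pdu() under the session lock. Returns 1
 * for an unrecognized opcode, 0 otherwise.
 */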
680 static unsigned int
681 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
682                           struct beiscsi_hba *phba,
683                           unsigned short cid,
684                           struct pdu_base *ppdu,
685                           unsigned long pdu_len,
686                           void *pbuffer, unsigned long buf_len)
687 {
688         struct iscsi_conn *conn = beiscsi_conn->conn;
689         struct iscsi_session *session = conn->session;
690         struct iscsi_task *task;
691         struct beiscsi_io_task *io_task;
692         struct iscsi_hdr *login_hdr;
693
694         switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
695                                                 PDUBASE_OPCODE_MASK) {
696         case ISCSI_OP_NOOP_IN:
697                 pbuffer = NULL;
698                 buf_len = 0;
699                 break;
700         case ISCSI_OP_ASYNC_EVENT:
701                 break;
702         case ISCSI_OP_REJECT:
703                 WARN_ON(!pbuffer);
704                 WARN_ON(buf_len != 48);
705                 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
706                 break;
707         case ISCSI_OP_LOGIN_RSP:
708         case ISCSI_OP_TEXT_RSP:
709                 task = conn->login_task;
710                 io_task = task->dd_data;
711                 login_hdr = (struct iscsi_hdr *)ppdu;
712                 login_hdr->itt = io_task->libiscsi_itt;
713                 break;
714         default:
715                 shost_printk(KERN_WARNING, phba->shost,
716                              "Unrecognized opcode 0x%x in async msg\n",
717                              (ppdu->
718                              dw[offsetof(struct amap_pdu_base, opcode) / 32]
719                                                 & PDUBASE_OPCODE_MASK));
720                 return 1;
721         }
722
723         spin_lock_bh(&session->lock);
724         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
725         spin_unlock_bh(&session->lock);
726         return 0;
727 }
728
729 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
730 {
731         struct sgl_handle *psgl_handle;
732
733         if (phba->io_sgl_hndl_avbl) {
734                 SE_DEBUG(DBG_LVL_8,
735                          "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
736                          phba->io_sgl_alloc_index);
737                 psgl_handle = phba->io_sgl_hndl_base[phba->
738                                                 io_sgl_alloc_index];
739                 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
740                 phba->io_sgl_hndl_avbl--;
741                 if (phba->io_sgl_alloc_index == (phba->params.
742                                                  ios_per_ctrl - 1))
743                         phba->io_sgl_alloc_index = 0;
744                 else
745                         phba->io_sgl_alloc_index++;
746         } else
747                 psgl_handle = NULL;
748         return psgl_handle;
749 }
750
751 static void
752 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
753 {
754         SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle,io_sgl_free_index=%d\n",
755                  phba->io_sgl_free_index);
756         if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
757                 /*
758                  * this can happen if clean_task is called on a task that
759                  * failed in xmit_task or alloc_pdu.
760                  */
761                  SE_DEBUG(DBG_LVL_8,
762                          "Double Free in IO SGL io_sgl_free_index=%d,"
763                          "value there=%p\n", phba->io_sgl_free_index,
764                          phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
765                 return;
766         }
767         phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
768         phba->io_sgl_hndl_avbl++;
769         if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
770                 phba->io_sgl_free_index = 0;
771         else
772                 phba->io_sgl_free_index++;
773 }
774
775 /**
776  * alloc_wrb_handle - To allocate a wrb handle
777  * @phba: The hba pointer
778  * @cid: The cid to use for allocation
779  *
780  * This happens under session_lock until submission to chip
781  */
782 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
783 {
784         struct hwi_wrb_context *pwrb_context;
785         struct hwi_controller *phwi_ctrlr;
786         struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
787
788         phwi_ctrlr = phba->phwi_ctrlr;
789         pwrb_context = &phwi_ctrlr->wrb_context[cid];
790         if (pwrb_context->wrb_handles_available >= 2) {
791                 pwrb_handle = pwrb_context->pwrb_handle_base[
792                                             pwrb_context->alloc_index];
793                 pwrb_context->wrb_handles_available--;
794                 if (pwrb_context->alloc_index ==
795                                                 (phba->params.wrbs_per_cxn - 1))
796                         pwrb_context->alloc_index = 0;
797                 else
798                         pwrb_context->alloc_index++;
799                 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
800                                                 pwrb_context->alloc_index];
801                 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
802         } else
803                 pwrb_handle = NULL;
804         return pwrb_handle;
805 }
806
807 /**
808  * free_wrb_handle - To free the wrb handle back to pool
809  * @phba: The hba pointer
810  * @pwrb_context: The context to free from
811  * @pwrb_handle: The wrb_handle to free
812  *
813  * This happens under session_lock until submission to chip
814  */
815 static void
816 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
817                 struct wrb_handle *pwrb_handle)
818 {
819         pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
820         pwrb_context->wrb_handles_available++;
821         if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
822                 pwrb_context->free_index = 0;
823         else
824                 pwrb_context->free_index++;
825
826         SE_DEBUG(DBG_LVL_8,
827                  "FREE WRB: pwrb_handle=%p free_index=0x%x"
828                  "wrb_handles_available=%d\n",
829                  pwrb_handle, pwrb_context->free_index,
830                  pwrb_context->wrb_handles_available);
831 }
832
833 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
834 {
835         struct sgl_handle *psgl_handle;
836
837         if (phba->eh_sgl_hndl_avbl) {
838                 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
839                 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
840                 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
841                          phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
842                 phba->eh_sgl_hndl_avbl--;
843                 if (phba->eh_sgl_alloc_index ==
844                     (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
845                      1))
846                         phba->eh_sgl_alloc_index = 0;
847                 else
848                         phba->eh_sgl_alloc_index++;
849         } else
850                 psgl_handle = NULL;
851         return psgl_handle;
852 }
853
854 void
855 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
856 {
857
858         SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
859                              phba->eh_sgl_free_index);
860         if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
861                 /*
862                  * this can happen if clean_task is called on a task that
863                  * failed in xmit_task or alloc_pdu.
864                  */
865                 SE_DEBUG(DBG_LVL_8,
866                          "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
867                          phba->eh_sgl_free_index);
868                 return;
869         }
870         phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
871         phba->eh_sgl_hndl_avbl++;
872         if (phba->eh_sgl_free_index ==
873             (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
874                 phba->eh_sgl_free_index = 0;
875         else
876                 phba->eh_sgl_free_index++;
877 }
878
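/**
 * be_complete_io - Complete a SCSI command from a solicited CQE
 * @beiscsi_conn: The connection the completion belongs to
 * @task: The iSCSI task being completed
 * @psol: The solicited CQE
 *
 * Extracts response, status, flags and residual counts from the CQE,
 * copies sense data on CHECK CONDITION, unmaps the DMA and completes
 * the command through iscsi_complete_scsi_task().
 */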
879 static void
880 be_complete_io(struct beiscsi_conn *beiscsi_conn,
881                struct iscsi_task *task, struct sol_cqe *psol)
882 {
883         struct beiscsi_io_task *io_task = task->dd_data;
884         struct be_status_bhs *sts_bhs =
885                                 (struct be_status_bhs *)io_task->cmd_bhs;
886         struct iscsi_conn *conn = beiscsi_conn->conn;
887         unsigned int sense_len;
888         unsigned char *sense;
889         u32 resid = 0, exp_cmdsn, max_cmdsn;
890         u8 rsp, status, flags;
891
892         exp_cmdsn = (psol->
893                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
894                         & SOL_EXP_CMD_SN_MASK);
895         max_cmdsn = ((psol->
896                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
897                         & SOL_EXP_CMD_SN_MASK) +
898                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
899                                 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
900         rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
901                                                 & SOL_RESP_MASK) >> 16);
902         status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
903                                                 & SOL_STS_MASK) >> 8);
904         flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
905                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
906
907         task->sc->result = (DID_OK << 16) | status;
908         if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
909                 task->sc->result = DID_ERROR << 16;
910                 goto unmap;
911         }
912
913         /* bidi not initially supported */
914         if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
915                 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
916                                 32] & SOL_RES_CNT_MASK);
917
918                 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
919                         task->sc->result = DID_ERROR << 16;
920
921                 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
922                         scsi_set_resid(task->sc, resid);
923                         if (!status && (scsi_bufflen(task->sc) - resid <
924                             task->sc->underflow))
925                                 task->sc->result = DID_ERROR << 16;
926                 }
927         }
928
929         if (status == SAM_STAT_CHECK_CONDITION) {
930                 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
931                 sense = sts_bhs->sense_info + sizeof(unsigned short);
932                 sense_len = be16_to_cpu(*slen);
933                 memcpy(task->sc->sense_buffer, sense,
934                        min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
935         }
936
937         if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
938                 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
939                                                         & SOL_RES_CNT_MASK)
940                          conn->rxdata_octets += (psol->
941                              dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
942                              & SOL_RES_CNT_MASK);
943         }
944 unmap:
945         scsi_dma_unmap(io_task->scsi_cmnd);
946         iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
947 }
948
949 static void
950 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
951                    struct iscsi_task *task, struct sol_cqe *psol)
952 {
953         struct iscsi_logout_rsp *hdr;
954         struct beiscsi_io_task *io_task = task->dd_data;
955         struct iscsi_conn *conn = beiscsi_conn->conn;
956
957         hdr = (struct iscsi_logout_rsp *)task->hdr;
958         hdr->opcode = ISCSI_OP_LOGOUT_RSP;
959         hdr->t2wait = 5;
960         hdr->t2retain = 0;
961         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
962                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
963         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
964                                         32] & SOL_RESP_MASK);
965         hdr->exp_cmdsn = cpu_to_be32(psol->
966                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
967                                         & SOL_EXP_CMD_SN_MASK);
968         hdr->max_cmdsn = be32_to_cpu((psol->
969                          dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
970                                         & SOL_EXP_CMD_SN_MASK) +
971                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
972                                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
973         hdr->dlength[0] = 0;
974         hdr->dlength[1] = 0;
975         hdr->dlength[2] = 0;
976         hdr->hlength = 0;
977         hdr->itt = io_task->libiscsi_itt;
978         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
979 }
980
981 static void
982 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
983                 struct iscsi_task *task, struct sol_cqe *psol)
984 {
985         struct iscsi_tm_rsp *hdr;
986         struct iscsi_conn *conn = beiscsi_conn->conn;
987         struct beiscsi_io_task *io_task = task->dd_data;
988
989         hdr = (struct iscsi_tm_rsp *)task->hdr;
990         hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
991         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
992                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
993         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
994                                         32] & SOL_RESP_MASK);
995         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
996                                     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
997         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
998                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
999                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1000                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1001         hdr->itt = io_task->libiscsi_itt;
1002         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1003 }
1004
1005 static void
1006 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1007                        struct beiscsi_hba *phba, struct sol_cqe *psol)
1008 {
1009         struct hwi_wrb_context *pwrb_context;
1010         struct wrb_handle *pwrb_handle = NULL;
1011         struct hwi_controller *phwi_ctrlr;
1012         struct iscsi_task *task;
1013         struct beiscsi_io_task *io_task;
1014         struct iscsi_conn *conn = beiscsi_conn->conn;
1015         struct iscsi_session *session = conn->session;
1016
1017         phwi_ctrlr = phba->phwi_ctrlr;
1018         pwrb_context = &phwi_ctrlr->wrb_context[((psol->
1019                                 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1020                                 SOL_CID_MASK) >> 6) -
1021                                 phba->fw_config.iscsi_cid_start];
1022         pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1023                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
1024                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
1025         task = pwrb_handle->pio_handle;
1026
1027         io_task = task->dd_data;
1028         spin_lock(&phba->mgmt_sgl_lock);
1029         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
1030         spin_unlock(&phba->mgmt_sgl_lock);
1031         spin_lock_bh(&session->lock);
1032         free_wrb_handle(phba, pwrb_context, pwrb_handle);
1033         spin_unlock_bh(&session->lock);
1034 }
1035
1036 static void
1037 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1038                        struct iscsi_task *task, struct sol_cqe *psol)
1039 {
1040         struct iscsi_nopin *hdr;
1041         struct iscsi_conn *conn = beiscsi_conn->conn;
1042         struct beiscsi_io_task *io_task = task->dd_data;
1043
1044         hdr = (struct iscsi_nopin *)task->hdr;
1045         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1046                         & SOL_FLAGS_MASK) >> 24) | 0x80;
1047         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
1048                                      i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
1049         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
1050                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
1051                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1052                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1053         hdr->opcode = ISCSI_OP_NOOP_IN;
1054         hdr->itt = io_task->libiscsi_itt;
1055         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1056 }
1057
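/**
 * hwi_complete_cmd - Complete a task for a solicited CQE
 * @beiscsi_conn: The connection the completion belongs to
 * @phba: The hba pointer
 * @psol: The solicited CQE
 *
 * Looks up the WRB handle from the CQE, determines the WRB type and
 * dispatches to the IO, logout, TMF or NOP-In completion helpers
 * under the session lock.
 */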
1058 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1059                              struct beiscsi_hba *phba, struct sol_cqe *psol)
1060 {
1061         struct hwi_wrb_context *pwrb_context;
1062         struct wrb_handle *pwrb_handle;
1063         struct iscsi_wrb *pwrb = NULL;
1064         struct hwi_controller *phwi_ctrlr;
1065         struct iscsi_task *task;
1066         unsigned int type;
1067         struct iscsi_conn *conn = beiscsi_conn->conn;
1068         struct iscsi_session *session = conn->session;
1069
1070         phwi_ctrlr = phba->phwi_ctrlr;
1071         pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
1072                                 (struct amap_sol_cqe, cid) / 32]
1073                                 & SOL_CID_MASK) >> 6) -
1074                                 phba->fw_config.iscsi_cid_start];
1075         pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1076                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
1077                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
1078         task = pwrb_handle->pio_handle;
1079         pwrb = pwrb_handle->pwrb;
1080         type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
1081                                  WRB_TYPE_MASK) >> 28;
1082
1083         spin_lock_bh(&session->lock);
1084         switch (type) {
1085         case HWH_TYPE_IO:
1086         case HWH_TYPE_IO_RD:
1087                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
1088                      ISCSI_OP_NOOP_OUT)
1089                         be_complete_nopin_resp(beiscsi_conn, task, psol);
1090                 else
1091                         be_complete_io(beiscsi_conn, task, psol);
1092                 break;
1093
1094         case HWH_TYPE_LOGOUT:
1095                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1096                         be_complete_logout(beiscsi_conn, task, psol);
1097                 else
1098                         be_complete_tmf(beiscsi_conn, task, psol);
1099
1100                 break;
1101
1102         case HWH_TYPE_LOGIN:
1103                 SE_DEBUG(DBG_LVL_1,
1104                          "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
1105                          "- Solicited path\n");
1106                 break;
1107
1108         case HWH_TYPE_NOP:
1109                 be_complete_nopin_resp(beiscsi_conn, task, psol);
1110                 break;
1111
1112         default:
1113                 shost_printk(KERN_WARNING, phba->shost,
1114                                 "In hwi_complete_cmd, unknown type = %d "
1115                                 "wrb_index 0x%x CID 0x%x\n", type,
1116                                 ((psol->dw[offsetof(struct amap_sol_cqe,
1117                                 wrb_index) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1118                                 ((psol->dw[offsetof(struct amap_sol_cqe,
1119                                 cid) / 32] & SOL_CID_MASK) >> 6));
1120                 break;
1121         }
1122
1123         spin_unlock_bh(&session->lock);
1124 }
1125
1126 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1127                                           *pasync_ctx, unsigned int is_header,
1128                                           unsigned int host_write_ptr)
1129 {
1130         if (is_header)
1131                 return &pasync_ctx->async_entry[host_write_ptr].
1132                     header_busy_list;
1133         else
1134                 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1135 }
1136
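/**
 * hwi_get_async_handle - Find the async PDU handle for an unsolicited CQE
 * @phba: The hba pointer
 * @beiscsi_conn: The connection the PDU arrived on
 * @pasync_ctx: The async PDU context
 * @pdpdu_cqe: The default PDU CQE
 * @pcq_index: Returns the CQ index taken from the CQE
 *
 * Recovers the buffer address from the CQE, locates the matching
 * handle on the header or data busy list and records its CRI and
 * data length. Returns NULL for an unexpected CQE code.
 */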
1137 static struct async_pdu_handle *
1138 hwi_get_async_handle(struct beiscsi_hba *phba,
1139                      struct beiscsi_conn *beiscsi_conn,
1140                      struct hwi_async_pdu_context *pasync_ctx,
1141                      struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1142 {
1143         struct be_bus_address phys_addr;
1144         struct list_head *pbusy_list;
1145         struct async_pdu_handle *pasync_handle = NULL;
1146         int buffer_len = 0;
1147         unsigned char buffer_index = -1;
1148         unsigned char is_header = 0;
1149
1150         phys_addr.u.a32.address_lo =
1151             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1152             ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1153                                                 & PDUCQE_DPL_MASK) >> 16);
1154         phys_addr.u.a32.address_hi =
1155             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1156
1157         phys_addr.u.a64.address =
1158                         *((unsigned long long *)(&phys_addr.u.a64.address));
1159
1160         switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1161                         & PDUCQE_CODE_MASK) {
1162         case UNSOL_HDR_NOTIFY:
1163                 is_header = 1;
1164
1165                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1166                         (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1167                         index) / 32] & PDUCQE_INDEX_MASK));
1168
1169                 buffer_len = (unsigned int)(phys_addr.u.a64.address -
1170                                 pasync_ctx->async_header.pa_base.u.a64.address);
1171
1172                 buffer_index = buffer_len /
1173                                 pasync_ctx->async_header.buffer_size;
1174
1175                 break;
1176         case UNSOL_DATA_NOTIFY:
1177                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1178                                         dw[offsetof(struct amap_i_t_dpdu_cqe,
1179                                         index) / 32] & PDUCQE_INDEX_MASK));
1180                 buffer_len = (unsigned long)(phys_addr.u.a64.address -
1181                                         pasync_ctx->async_data.pa_base.u.
1182                                         a64.address);
1183                 buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
1184                 break;
1185         default:
1186                 pbusy_list = NULL;
1187                 shost_printk(KERN_WARNING, phba->shost,
1188                         "Unexpected code=%d\n",
1189                          pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1190                                         code) / 32] & PDUCQE_CODE_MASK);
1191                 return NULL;
1192         }
1193
1194         WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
1195         WARN_ON(list_empty(pbusy_list));
1196         list_for_each_entry(pasync_handle, pbusy_list, link) {
1197                 WARN_ON(pasync_handle->consumed);
1198                 if (pasync_handle->index == buffer_index)
1199                         break;
1200         }
1201
1202         WARN_ON(!pasync_handle);
1203
1204         pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1205                                              phba->fw_config.iscsi_cid_start;
1206         pasync_handle->is_header = is_header;
1207         pasync_handle->buffer_len = ((pdpdu_cqe->
1208                         dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1209                         & PDUCQE_DPL_MASK) >> 16);
1210
1211         *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1212                         index) / 32] & PDUCQE_INDEX_MASK);
1213         return pasync_handle;
1214 }
1215
1216 static unsigned int
1217 hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1218                            unsigned int is_header, unsigned int cq_index)
1219 {
1220         struct list_head *pbusy_list;
1221         struct async_pdu_handle *pasync_handle;
1222         unsigned int num_entries, writables = 0;
1223         unsigned int *pep_read_ptr, *pwritables;
1224
1225
1226         if (is_header) {
1227                 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1228                 pwritables = &pasync_ctx->async_header.writables;
1229                 num_entries = pasync_ctx->async_header.num_entries;
1230         } else {
1231                 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1232                 pwritables = &pasync_ctx->async_data.writables;
1233                 num_entries = pasync_ctx->async_data.num_entries;
1234         }
1235
1236         while ((*pep_read_ptr) != cq_index) {
1237                 (*pep_read_ptr)++;
1238                 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1239
1240                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1241                                                      *pep_read_ptr);
1242                 if (writables == 0)
1243                         WARN_ON(list_empty(pbusy_list));
1244
1245                 if (!list_empty(pbusy_list)) {
1246                         pasync_handle = list_entry(pbusy_list->next,
1247                                                    struct async_pdu_handle,
1248                                                    link);
1249                         WARN_ON(!pasync_handle);
1250                         pasync_handle->consumed = 1;
1251                 }
1252
1253                 writables++;
1254         }
1255
1256         if (!writables) {
1257                 SE_DEBUG(DBG_LVL_1,
1258                          "Duplicate notification received - index 0x%x!!\n",
1259                          cq_index);
1260                 WARN_ON(1);
1261         }
1262
1263         *pwritables = *pwritables + writables;
1264         return 0;
1265 }
1266
1267 static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1268                                        unsigned int cri)
1269 {
1270         struct hwi_controller *phwi_ctrlr;
1271         struct hwi_async_pdu_context *pasync_ctx;
1272         struct async_pdu_handle *pasync_handle, *tmp_handle;
1273         struct list_head *plist;
1274         unsigned int i = 0;
1275
1276         phwi_ctrlr = phba->phwi_ctrlr;
1277         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1278
1279         plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1280
1281         list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1282                 list_del(&pasync_handle->link);
1283
1284                 if (i == 0) {
1285                         list_add_tail(&pasync_handle->link,
1286                                       &pasync_ctx->async_header.free_list);
1287                         pasync_ctx->async_header.free_entries++;
1288                         i++;
1289                 } else {
1290                         list_add_tail(&pasync_handle->link,
1291                                       &pasync_ctx->async_data.free_list);
1292                         pasync_ctx->async_data.free_entries++;
1293                         i++;
1294                 }
1295         }
1296
1297         INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1298         pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1299         pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1300         return 0;
1301 }
1302
1303 static struct phys_addr *
1304 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1305                      unsigned int is_header, unsigned int host_write_ptr)
1306 {
1307         struct phys_addr *pasync_sge = NULL;
1308
1309         if (is_header)
1310                 pasync_sge = pasync_ctx->async_header.ring_base;
1311         else
1312                 pasync_sge = pasync_ctx->async_data.ring_base;
1313
1314         return pasync_sge + host_write_ptr;
1315 }
1316
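     /*
      * Replenish the default PDU header or data ring: take free async
      * handles (in multiples of 8), program their physical addresses into
      * the ring SGEs, move them to the busy lists, update the context
      * accounting and ring the RXULP doorbell with the number posted.
      */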
1317 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1318                                    unsigned int is_header)
1319 {
1320         struct hwi_controller *phwi_ctrlr;
1321         struct hwi_async_pdu_context *pasync_ctx;
1322         struct async_pdu_handle *pasync_handle;
1323         struct list_head *pfree_link, *pbusy_list;
1324         struct phys_addr *pasync_sge;
1325         unsigned int ring_id, num_entries;
1326         unsigned int host_write_num;
1327         unsigned int writables;
1328         unsigned int i = 0;
1329         u32 doorbell = 0;
1330
1331         phwi_ctrlr = phba->phwi_ctrlr;
1332         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1333
1334         if (is_header) {
1335                 num_entries = pasync_ctx->async_header.num_entries;
1336                 writables = min(pasync_ctx->async_header.writables,
1337                                 pasync_ctx->async_header.free_entries);
1338                 pfree_link = pasync_ctx->async_header.free_list.next;
1339                 host_write_num = pasync_ctx->async_header.host_write_ptr;
1340                 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1341         } else {
1342                 num_entries = pasync_ctx->async_data.num_entries;
1343                 writables = min(pasync_ctx->async_data.writables,
1344                                 pasync_ctx->async_data.free_entries);
1345                 pfree_link = pasync_ctx->async_data.free_list.next;
1346                 host_write_num = pasync_ctx->async_data.host_write_ptr;
1347                 ring_id = phwi_ctrlr->default_pdu_data.id;
1348         }
1349
1350         writables = (writables / 8) * 8;
1351         if (writables) {
1352                 for (i = 0; i < writables; i++) {
1353                         pbusy_list =
1354                             hwi_get_async_busy_list(pasync_ctx, is_header,
1355                                                     host_write_num);
1356                         pasync_handle =
1357                             list_entry(pfree_link, struct async_pdu_handle,
1358                                                                 link);
1359                         WARN_ON(!pasync_handle);
1360                         pasync_handle->consumed = 0;
1361
1362                         pfree_link = pfree_link->next;
1363
1364                         pasync_sge = hwi_get_ring_address(pasync_ctx,
1365                                                 is_header, host_write_num);
1366
1367                         pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1368                         pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1369
1370                         list_move(&pasync_handle->link, pbusy_list);
1371
1372                         host_write_num++;
1373                         host_write_num = host_write_num % num_entries;
1374                 }
1375
1376                 if (is_header) {
1377                         pasync_ctx->async_header.host_write_ptr =
1378                                                         host_write_num;
1379                         pasync_ctx->async_header.free_entries -= writables;
1380                         pasync_ctx->async_header.writables -= writables;
1381                         pasync_ctx->async_header.busy_entries += writables;
1382                 } else {
1383                         pasync_ctx->async_data.host_write_ptr = host_write_num;
1384                         pasync_ctx->async_data.free_entries -= writables;
1385                         pasync_ctx->async_data.writables -= writables;
1386                         pasync_ctx->async_data.busy_entries += writables;
1387                 }
1388
1389                 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1390                 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1391                 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1392                 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1393                                         << DB_DEF_PDU_CQPROC_SHIFT;
1394
1395                 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1396         }
1397 }
1398
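     /*
      * Drop a default PDU data buffer reported by a CQE (e.g. on digest
      * error): update the endpoint read pointer if the entry was not yet
      * consumed, free the handles queued for that CRI and repost buffers
      * to the ring.
      */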
1399 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1400                                          struct beiscsi_conn *beiscsi_conn,
1401                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1402 {
1403         struct hwi_controller *phwi_ctrlr;
1404         struct hwi_async_pdu_context *pasync_ctx;
1405         struct async_pdu_handle *pasync_handle = NULL;
1406         unsigned int cq_index = -1;
1407
1408         phwi_ctrlr = phba->phwi_ctrlr;
1409         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1410
1411         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1412                                              pdpdu_cqe, &cq_index);
1413         BUG_ON(pasync_handle->is_header != 0);
1414         if (pasync_handle->consumed == 0)
1415                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1416                                            cq_index);
1417
1418         hwi_free_async_msg(phba, pasync_handle->cri);
1419         hwi_post_async_buffers(phba, pasync_handle->is_header);
1420 }
1421
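     /*
      * Assemble the unsolicited PDU queued on @cri: the first handle holds
      * the header, the remaining handles are gathered into the first data
      * buffer, then the PDU is handed to beiscsi_process_async_pdu() and
      * the handles are released on success.
      */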
1422 static unsigned int
1423 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1424                   struct beiscsi_hba *phba,
1425                   struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1426 {
1427         struct list_head *plist;
1428         struct async_pdu_handle *pasync_handle;
1429         void *phdr = NULL;
1430         unsigned int hdr_len = 0, buf_len = 0;
1431         unsigned int status, index = 0, offset = 0;
1432         void *pfirst_buffer = NULL;
1433         unsigned int num_buf = 0;
1434
1435         plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1436
1437         list_for_each_entry(pasync_handle, plist, link) {
1438                 if (index == 0) {
1439                         phdr = pasync_handle->pbuffer;
1440                         hdr_len = pasync_handle->buffer_len;
1441                 } else {
1442                         buf_len = pasync_handle->buffer_len;
1443                         if (!num_buf) {
1444                                 pfirst_buffer = pasync_handle->pbuffer;
1445                                 num_buf++;
1446                         }
1447                         memcpy(pfirst_buffer + offset,
1448                                pasync_handle->pbuffer, buf_len);
1449                         offset += buf_len;
1450                 }
1451                 index++;
1452         }
1453
1454         status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1455                                            (beiscsi_conn->beiscsi_conn_cid -
1456                                             phba->fw_config.iscsi_cid_start),
1457                                             phdr, hdr_len, pfirst_buffer,
1458                                             buf_len);
1459
1460         if (status == 0)
1461                 hwi_free_async_msg(phba, cri);
1462         return 0;
1463 }
1464
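     /*
      * Queue a newly received async header or data handle on the per-CRI
      * wait queue.  A header records how many payload bytes are expected;
      * once all expected data has arrived the PDU is forwarded via
      * hwi_fwd_async_msg().
      */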
1465 static unsigned int
1466 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1467                      struct beiscsi_hba *phba,
1468                      struct async_pdu_handle *pasync_handle)
1469 {
1470         struct hwi_async_pdu_context *pasync_ctx;
1471         struct hwi_controller *phwi_ctrlr;
1472         unsigned int bytes_needed = 0, status = 0;
1473         unsigned short cri = pasync_handle->cri;
1474         struct pdu_base *ppdu;
1475
1476         phwi_ctrlr = phba->phwi_ctrlr;
1477         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1478
1479         list_del(&pasync_handle->link);
1480         if (pasync_handle->is_header) {
1481                 pasync_ctx->async_header.busy_entries--;
1482                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1483                         hwi_free_async_msg(phba, cri);
1484                         BUG();
1485                 }
1486
1487                 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1488                 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1489                 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1490                                 (unsigned short)pasync_handle->buffer_len;
1491                 list_add_tail(&pasync_handle->link,
1492                               &pasync_ctx->async_entry[cri].wait_queue.list);
1493
1494                 ppdu = pasync_handle->pbuffer;
1495                 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1496                         data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1497                         0xFFFF0000) | ((be16_to_cpu((ppdu->
1498                         dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1499                         & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1500
1501                 if (status == 0) {
1502                         pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1503                             bytes_needed;
1504
1505                         if (bytes_needed == 0)
1506                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1507                                                            pasync_ctx, cri);
1508                 }
1509         } else {
1510                 pasync_ctx->async_data.busy_entries--;
1511                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1512                         list_add_tail(&pasync_handle->link,
1513                                       &pasync_ctx->async_entry[cri].wait_queue.
1514                                       list);
1515                         pasync_ctx->async_entry[cri].wait_queue.
1516                                 bytes_received +=
1517                                 (unsigned short)pasync_handle->buffer_len;
1518
1519                         if (pasync_ctx->async_entry[cri].wait_queue.
1520                             bytes_received >=
1521                             pasync_ctx->async_entry[cri].wait_queue.
1522                             bytes_needed)
1523                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1524                                                            pasync_ctx, cri);
1525                 }
1526         }
1527         return status;
1528 }
1529
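     /*
      * CQE handler for UNSOL_HDR/UNSOL_DATA notifications: look up the
      * async handle for the completion, update the ring read pointer if
      * the entry was not yet consumed, gather the PDU and repost buffers
      * to the default PDU ring.
      */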
1530 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1531                                          struct beiscsi_hba *phba,
1532                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1533 {
1534         struct hwi_controller *phwi_ctrlr;
1535         struct hwi_async_pdu_context *pasync_ctx;
1536         struct async_pdu_handle *pasync_handle = NULL;
1537         unsigned int cq_index = -1;
1538
1539         phwi_ctrlr = phba->phwi_ctrlr;
1540         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1541         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1542                                              pdpdu_cqe, &cq_index);
1543
1544         if (pasync_handle->consumed == 0)
1545                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1546                                            cq_index);
1547         hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1548         hwi_post_async_buffers(phba, pasync_handle->is_header);
1549 }
1550
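     /*
      * Drain the MCC completion queue: process async link-state events and
      * MCC command completions, ringing the CQ doorbell every 32 entries
      * and arming it once the queue has been drained.
      */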
1551 static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1552 {
1553         struct be_queue_info *mcc_cq;
1554         struct  be_mcc_compl *mcc_compl;
1555         unsigned int num_processed = 0;
1556
1557         mcc_cq = &phba->ctrl.mcc_obj.cq;
1558         mcc_compl = queue_tail_node(mcc_cq);
1559         mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1560         while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1561
1562                 if (num_processed >= 32) {
1563                         hwi_ring_cq_db(phba, mcc_cq->id,
1564                                         num_processed, 0, 0);
1565                         num_processed = 0;
1566                 }
1567                 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1568                         /* Interpret flags as an async trailer */
1569                         if (is_link_state_evt(mcc_compl->flags))
1570                                 /* Interpret compl as a async link evt */
1571                                 beiscsi_async_link_state_process(phba,
1572                                 (struct be_async_event_link_state *) mcc_compl);
1573                         else
1574                                 SE_DEBUG(DBG_LVL_1,
1575                                         " Unsupported Async Event, flags"
1576                                         " = 0x%08x\n", mcc_compl->flags);
1577                 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1578                         be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1579                         atomic_dec(&phba->ctrl.mcc_obj.q.used);
1580                 }
1581
1582                 mcc_compl->flags = 0;
1583                 queue_tail_inc(mcc_cq);
1584                 mcc_compl = queue_tail_node(mcc_cq);
1585                 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1586                 num_processed++;
1587         }
1588
1589         if (num_processed > 0)
1590                 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1591
1592 }
1593
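     /*
      * Main I/O completion path: walk the solicited CQ, decode the CQE
      * code and dispatch to command completion, driver-message handling,
      * the default PDU ring handlers or connection-error recovery.
      * Returns the total number of CQEs processed so be_iopoll() can
      * honour its budget.
      */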
1594 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1595 {
1596         struct be_queue_info *cq;
1597         struct sol_cqe *sol;
1598         struct dmsg_cqe *dmsg;
1599         unsigned int num_processed = 0;
1600         unsigned int tot_nump = 0;
1601         struct beiscsi_conn *beiscsi_conn;
1602         struct beiscsi_endpoint *beiscsi_ep;
1603         struct iscsi_endpoint *ep;
1604         struct beiscsi_hba *phba;
1605
1606         cq = pbe_eq->cq;
1607         sol = queue_tail_node(cq);
1608         phba = pbe_eq->phba;
1609
1610         while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1611                CQE_VALID_MASK) {
1612                 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1613
1614                 ep = phba->ep_array[(u32) ((sol->
1615                                    dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1616                                    SOL_CID_MASK) >> 6) -
1617                                    phba->fw_config.iscsi_cid_start];
1618
1619                 beiscsi_ep = ep->dd_data;
1620                 beiscsi_conn = beiscsi_ep->conn;
1621
1622                 if (num_processed >= 32) {
1623                         hwi_ring_cq_db(phba, cq->id,
1624                                         num_processed, 0, 0);
1625                         tot_nump += num_processed;
1626                         num_processed = 0;
1627                 }
1628
1629                 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1630                         32] & CQE_CODE_MASK) {
1631                 case SOL_CMD_COMPLETE:
1632                         hwi_complete_cmd(beiscsi_conn, phba, sol);
1633                         break;
1634                 case DRIVERMSG_NOTIFY:
1635                         SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
1636                         dmsg = (struct dmsg_cqe *)sol;
1637                         hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1638                         break;
1639                 case UNSOL_HDR_NOTIFY:
1640                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
1641                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1642                                              (struct i_t_dpdu_cqe *)sol);
1643                         break;
1644                 case UNSOL_DATA_NOTIFY:
1645                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1646                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1647                                              (struct i_t_dpdu_cqe *)sol);
1648                         break;
1649                 case CXN_INVALIDATE_INDEX_NOTIFY:
1650                 case CMD_INVALIDATED_NOTIFY:
1651                 case CXN_INVALIDATE_NOTIFY:
1652                         SE_DEBUG(DBG_LVL_1,
1653                                  "Ignoring CQ Error notification for cmd/cxn"
1654                                  " invalidate\n");
1655                         break;
1656                 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1657                 case CMD_KILLED_INVALID_STATSN_RCVD:
1658                 case CMD_KILLED_INVALID_R2T_RCVD:
1659                 case CMD_CXN_KILLED_LUN_INVALID:
1660                 case CMD_CXN_KILLED_ICD_INVALID:
1661                 case CMD_CXN_KILLED_ITT_INVALID:
1662                 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1663                 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1664                         SE_DEBUG(DBG_LVL_1,
1665                                  "CQ Error notification for cmd, "
1666                                  "code %d cid 0x%x\n",
1667                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1668                                  32] & CQE_CODE_MASK,
1669                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1670                                  32] & SOL_CID_MASK));
1671                         break;
1672                 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1673                         SE_DEBUG(DBG_LVL_1,
1674                                  "Digest error on def pdu ring, dropping..\n");
1675                         hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1676                                              (struct i_t_dpdu_cqe *) sol);
1677                         break;
1678                 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1679                 case CXN_KILLED_BURST_LEN_MISMATCH:
1680                 case CXN_KILLED_AHS_RCVD:
1681                 case CXN_KILLED_HDR_DIGEST_ERR:
1682                 case CXN_KILLED_UNKNOWN_HDR:
1683                 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1684                 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1685                 case CXN_KILLED_TIMED_OUT:
1686                 case CXN_KILLED_FIN_RCVD:
1687                 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1688                 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1689                 case CXN_KILLED_OVER_RUN_RESIDUAL:
1690                 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1691                 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1692                         SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1693                                  "0x%x...\n",
1694                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1695                                  32] & CQE_CODE_MASK,
1696                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1697                                  32] & CQE_CID_MASK));
1698                         iscsi_conn_failure(beiscsi_conn->conn,
1699                                            ISCSI_ERR_CONN_FAILED);
1700                         break;
1701                 case CXN_KILLED_RST_SENT:
1702                 case CXN_KILLED_RST_RCVD:
1703                         SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1704                                 "received/sent on CID 0x%x...\n",
1705                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1706                                  32] & CQE_CODE_MASK,
1707                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1708                                  32] & CQE_CID_MASK));
1709                         iscsi_conn_failure(beiscsi_conn->conn,
1710                                            ISCSI_ERR_CONN_FAILED);
1711                         break;
1712                 default:
1713                         SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
1714                                  "received on CID 0x%x...\n",
1715                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1716                                  32] & CQE_CODE_MASK,
1717                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1718                                  32] & CQE_CID_MASK));
1719                         break;
1720                 }
1721
1722                 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1723                 queue_tail_inc(cq);
1724                 sol = queue_tail_node(cq);
1725                 num_processed++;
1726         }
1727
1728         if (num_processed > 0) {
1729                 tot_nump += num_processed;
1730                 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1731         }
1732         return tot_nump;
1733 }
1734
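     /*
      * Work item for deferred completion processing: pick the MCC event
      * queue (the extra EQ when MSI-X is enabled, EQ 0 otherwise) and
      * handle any MCC or I/O completions flagged via todo_mcc_cq/todo_cq.
      */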
1735 void beiscsi_process_all_cqs(struct work_struct *work)
1736 {
1737         unsigned long flags;
1738         struct hwi_controller *phwi_ctrlr;
1739         struct hwi_context_memory *phwi_context;
1740         struct be_eq_obj *pbe_eq;
1741         struct beiscsi_hba *phba =
1742             container_of(work, struct beiscsi_hba, work_cqs);
1743
1744         phwi_ctrlr = phba->phwi_ctrlr;
1745         phwi_context = phwi_ctrlr->phwi_ctxt;
1746         if (phba->msix_enabled)
1747                 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1748         else
1749                 pbe_eq = &phwi_context->be_eq[0];
1750
1751         if (phba->todo_mcc_cq) {
1752                 spin_lock_irqsave(&phba->isr_lock, flags);
1753                 phba->todo_mcc_cq = 0;
1754                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1755                 beiscsi_process_mcc_isr(phba);
1756         }
1757
1758         if (phba->todo_cq) {
1759                 spin_lock_irqsave(&phba->isr_lock, flags);
1760                 phba->todo_cq = 0;
1761                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1762                 beiscsi_process_cq(pbe_eq);
1763         }
1764 }
1765
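     /*
      * blk_iopoll handler: process completions on this EQ's CQ and, if the
      * budget was not exhausted, complete the poll and re-arm the event
      * queue doorbell.
      */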
1766 static int be_iopoll(struct blk_iopoll *iop, int budget)
1767 {
1768         unsigned int ret;
1769         struct beiscsi_hba *phba;
1770         struct be_eq_obj *pbe_eq;
1771
1772         pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1773         ret = beiscsi_process_cq(pbe_eq);
1774         if (ret < budget) {
1775                 phba = pbe_eq->phba;
1776                 blk_iopoll_complete(iop);
1777                 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1778                 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1779         }
1780         return ret;
1781 }
1782
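     /*
      * Build the WRB and SGL for a SCSI I/O task: the first two
      * scatterlist elements are written inline into the WRB, the full
      * scatterlist is written to the task's SGL fragment after the BHS
      * entry, and the last SGE is flagged accordingly.
      */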
1783 static void
1784 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1785               unsigned int num_sg, struct beiscsi_io_task *io_task)
1786 {
1787         struct iscsi_sge *psgl;
1788         unsigned short sg_len, index;
1789         unsigned int sge_len = 0;
1790         unsigned long long addr;
1791         struct scatterlist *l_sg;
1792         unsigned int offset;
1793
1794         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1795                                       io_task->bhs_pa.u.a32.address_lo);
1796         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1797                                       io_task->bhs_pa.u.a32.address_hi);
1798
1799         l_sg = sg;
1800         for (index = 0; (index < num_sg) && (index < 2); index++,
1801                                                          sg = sg_next(sg)) {
1802                 if (index == 0) {
1803                         sg_len = sg_dma_len(sg);
1804                         addr = (u64) sg_dma_address(sg);
1805                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1806                                                 ((u32)(addr & 0xFFFFFFFF)));
1807                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1808                                                         ((u32)(addr >> 32)));
1809                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1810                                                         sg_len);
1811                         sge_len = sg_len;
1812                 } else {
1813                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1814                                                         pwrb, sge_len);
1815                         sg_len = sg_dma_len(sg);
1816                         addr = (u64) sg_dma_address(sg);
1817                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1818                                                 ((u32)(addr & 0xFFFFFFFF)));
1819                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1820                                                         ((u32)(addr >> 32)));
1821                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1822                                                         sg_len);
1823                 }
1824         }
1825         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1826         memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1827
1828         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1829
1830         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1831                         io_task->bhs_pa.u.a32.address_hi);
1832         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1833                         io_task->bhs_pa.u.a32.address_lo);
1834
1835         if (num_sg == 1) {
1836                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1837                                                                 1);
1838                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1839                                                                 0);
1840         } else if (num_sg == 2) {
1841                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1842                                                                 0);
1843                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1844                                                                 1);
1845         } else {
1846                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1847                                                                 0);
1848                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1849                                                                 0);
1850         }
1851         sg = l_sg;
1852         psgl++;
1853         psgl++;
1854         offset = 0;
1855         for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1856                 sg_len = sg_dma_len(sg);
1857                 addr = (u64) sg_dma_address(sg);
1858                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1859                                                 (addr & 0xFFFFFFFF));
1860                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1861                                                 (addr >> 32));
1862                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1863                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1864                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1865                 offset += sg_len;
1866         }
1867         psgl--;
1868         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1869 }
1870
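     /*
      * Build the WRB and SGL for a non-I/O (management) task.  Immediate
      * data, if any, is DMA-mapped and described by SGE0; the SGL always
      * starts with the BHS entry for the task.
      */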
1871 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1872 {
1873         struct iscsi_sge *psgl;
1874         unsigned long long addr;
1875         struct beiscsi_io_task *io_task = task->dd_data;
1876         struct beiscsi_conn *beiscsi_conn = io_task->conn;
1877         struct beiscsi_hba *phba = beiscsi_conn->phba;
1878
1879         io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1880         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1881                                 io_task->bhs_pa.u.a32.address_lo);
1882         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1883                                 io_task->bhs_pa.u.a32.address_hi);
1884
1885         if (task->data) {
1886                 if (task->data_count) {
1887                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1888                         addr = (u64) pci_map_single(phba->pcidev,
1889                                                     task->data,
1890                                                     task->data_count, PCI_DMA_TODEVICE);
1891                 } else {
1892                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1893                         addr = 0;
1894                 }
1895                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1896                                                 ((u32)(addr & 0xFFFFFFFF)));
1897                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1898                                                 ((u32)(addr >> 32)));
1899                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1900                                                 task->data_count);
1901
1902                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1903         } else {
1904                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1905                 addr = 0;
1906         }
1907
1908         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1909
1910         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1911
1912         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1913                       io_task->bhs_pa.u.a32.address_hi);
1914         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1915                       io_task->bhs_pa.u.a32.address_lo);
1916         if (task->data) {
1917                 psgl++;
1918                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1919                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1920                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1921                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1922                 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1923                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1924
1925                 psgl++;
1926                 if (task->data) {
1927                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1928                                                 ((u32)(addr & 0xFFFFFFFF)));
1929                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1930                                                 ((u32)(addr >> 32)));
1931                 }
1932                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1933         }
1934         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1935 }
1936
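     /*
      * Compute the size of every memory region the driver needs (WRBs,
      * SGLs, default PDU buffers and rings, async PDU context, ...) and
      * record the totals in phba->mem_req[] for beiscsi_alloc_mem().
      */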
1937 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1938 {
1939         unsigned int num_cq_pages, num_async_pdu_buf_pages;
1940         unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1941         unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1942
1943         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1944                                       sizeof(struct sol_cqe));
1945         num_async_pdu_buf_pages =
1946                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1947                                        phba->params.defpdu_hdr_sz);
1948         num_async_pdu_buf_sgl_pages =
1949                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1950                                        sizeof(struct phys_addr));
1951         num_async_pdu_data_pages =
1952                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1953                                        phba->params.defpdu_data_sz);
1954         num_async_pdu_data_sgl_pages =
1955                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1956                                        sizeof(struct phys_addr));
1957
1958         phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1959
1960         phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1961                                                  BE_ISCSI_PDU_HEADER_SIZE;
1962         phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1963                                             sizeof(struct hwi_context_memory);
1964
1965
1966         phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1967             * (phba->params.wrbs_per_cxn)
1968             * phba->params.cxns_per_ctrl;
1969         wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
1970                                  (phba->params.wrbs_per_cxn);
1971         phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1972                                 phba->params.cxns_per_ctrl);
1973
1974         phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1975                 phba->params.icds_per_ctrl;
1976         phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1977                 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1978
1979         phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1980                 num_async_pdu_buf_pages * PAGE_SIZE;
1981         phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1982                 num_async_pdu_data_pages * PAGE_SIZE;
1983         phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1984                 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1985         phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1986                 num_async_pdu_data_sgl_pages * PAGE_SIZE;
1987         phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1988                 phba->params.asyncpdus_per_ctrl *
1989                 sizeof(struct async_pdu_handle);
1990         phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1991                 phba->params.asyncpdus_per_ctrl *
1992                 sizeof(struct async_pdu_handle);
1993         phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1994                 sizeof(struct hwi_async_pdu_context) +
1995                 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1996 }
1997
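     /*
      * Allocate each region sized by beiscsi_find_mem_req().  Regions
      * larger than be_max_phys_size are built from multiple physically
      * contiguous fragments; on allocation failure the fragment size is
      * reduced (down to BE_MIN_MEM_SIZE) before giving up and unwinding
      * everything allocated so far.
      */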
1998 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1999 {
2000         struct be_mem_descriptor *mem_descr;
2001         dma_addr_t bus_add;
2002         struct mem_array *mem_arr, *mem_arr_orig;
2003         unsigned int i, j, alloc_size, curr_alloc_size;
2004
2005         phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2006         if (!phba->phwi_ctrlr)
2007                 return -ENOMEM;
2008
2009         phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2010                                  GFP_KERNEL);
2011         if (!phba->init_mem) {
2012                 kfree(phba->phwi_ctrlr);
2013                 return -ENOMEM;
2014         }
2015
2016         mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2017                                GFP_KERNEL);
2018         if (!mem_arr_orig) {
2019                 kfree(phba->init_mem);
2020                 kfree(phba->phwi_ctrlr);
2021                 return -ENOMEM;
2022         }
2023
2024         mem_descr = phba->init_mem;
2025         for (i = 0; i < SE_MEM_MAX; i++) {
2026                 j = 0;
2027                 mem_arr = mem_arr_orig;
2028                 alloc_size = phba->mem_req[i];
2029                 memset(mem_arr, 0, sizeof(struct mem_array) *
2030                        BEISCSI_MAX_FRAGS_INIT);
2031                 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2032                 do {
2033                         mem_arr->virtual_address = pci_alloc_consistent(
2034                                                         phba->pcidev,
2035                                                         curr_alloc_size,
2036                                                         &bus_add);
2037                         if (!mem_arr->virtual_address) {
2038                                 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2039                                         goto free_mem;
2040                                 if (curr_alloc_size -
2041                                         rounddown_pow_of_two(curr_alloc_size))
2042                                         curr_alloc_size = rounddown_pow_of_two
2043                                                              (curr_alloc_size);
2044                                 else
2045                                         curr_alloc_size = curr_alloc_size / 2;
2046                         } else {
2047                                 mem_arr->bus_address.u.
2048                                     a64.address = (__u64) bus_add;
2049                                 mem_arr->size = curr_alloc_size;
2050                                 alloc_size -= curr_alloc_size;
2051                                 curr_alloc_size = min(be_max_phys_size *
2052                                                       1024, alloc_size);
2053                                 j++;
2054                                 mem_arr++;
2055                         }
2056                 } while (alloc_size);
2057                 mem_descr->num_elements = j;
2058                 mem_descr->size_in_bytes = phba->mem_req[i];
2059                 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2060                                                GFP_KERNEL);
2061                 if (!mem_descr->mem_array)
2062                         goto free_mem;
2063
2064                 memcpy(mem_descr->mem_array, mem_arr_orig,
2065                        sizeof(struct mem_array) * j);
2066                 mem_descr++;
2067         }
2068         kfree(mem_arr_orig);
2069         return 0;
2070 free_mem:
2071         mem_descr->num_elements = j;
2072         while ((i) || (j)) {
2073                 for (j = mem_descr->num_elements; j > 0; j--) {
2074                         pci_free_consistent(phba->pcidev,
2075                                             mem_descr->mem_array[j - 1].size,
2076                                             mem_descr->mem_array[j - 1].
2077                                             virtual_address,
2078                                             (unsigned long)mem_descr->
2079                                             mem_array[j - 1].
2080                                             bus_address.u.a64.address);
2081                 }
2082                 if (i) {
2083                         i--;
2084                         kfree(mem_descr->mem_array);
2085                         mem_descr--;
2086                 }
2087         }
2088         kfree(mem_arr_orig);
2089         kfree(phba->init_mem);
2090         kfree(phba->phwi_ctrlr);
2091         return -ENOMEM;
2092 }
2093
2094 static int beiscsi_get_memory(struct beiscsi_hba *phba)
2095 {
2096         beiscsi_find_mem_req(phba);
2097         return beiscsi_alloc_mem(phba);
2098 }
2099
2100 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2101 {
2102         struct pdu_data_out *pdata_out;
2103         struct pdu_nop_out *pnop_out;
2104         struct be_mem_descriptor *mem_descr;
2105
2106         mem_descr = phba->init_mem;
2107         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2108         pdata_out =
2109             (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2110         memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2111
2112         AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2113                       IIOC_SCSI_DATA);
2114
2115         pnop_out =
2116             (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2117                                    virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2118
2119         memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2120         AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2121         AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2122         AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2123 }
2124
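     /*
      * Carve the preallocated HWI_MEM_WRBH and HWI_MEM_WRB regions into
      * per-connection WRB handle arrays and WRBs, and link each handle to
      * its WRB in the per-connection wrb_context.
      */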
2125 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2126 {
2127         struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2128         struct wrb_handle *pwrb_handle;
2129         struct hwi_controller *phwi_ctrlr;
2130         struct hwi_wrb_context *pwrb_context;
2131         struct iscsi_wrb *pwrb;
2132         unsigned int num_cxn_wrbh;
2133         unsigned int num_cxn_wrb, j, idx, index;
2134
2135         mem_descr_wrbh = phba->init_mem;
2136         mem_descr_wrbh += HWI_MEM_WRBH;
2137
2138         mem_descr_wrb = phba->init_mem;
2139         mem_descr_wrb += HWI_MEM_WRB;
2140
2141         idx = 0;
2142         pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
2143         num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2144                         ((sizeof(struct wrb_handle)) *
2145                          phba->params.wrbs_per_cxn));
2146         phwi_ctrlr = phba->phwi_ctrlr;
2147
2148         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2149                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2150                 pwrb_context->pwrb_handle_base =
2151                                 kzalloc(sizeof(struct wrb_handle *) *
2152                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2153                 pwrb_context->pwrb_handle_basestd =
2154                                 kzalloc(sizeof(struct wrb_handle *) *
2155                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2156                 if (num_cxn_wrbh) {
2157                         pwrb_context->alloc_index = 0;
2158                         pwrb_context->wrb_handles_available = 0;
2159                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2160                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2161                                 pwrb_context->pwrb_handle_basestd[j] =
2162                                                                 pwrb_handle;
2163                                 pwrb_context->wrb_handles_available++;
2164                                 pwrb_handle->wrb_index = j;
2165                                 pwrb_handle++;
2166                         }
2167                         pwrb_context->free_index = 0;
2168                         num_cxn_wrbh--;
2169                 } else {
2170                         idx++;
2171                         pwrb_handle =
2172                             mem_descr_wrbh->mem_array[idx].virtual_address;
2173                         num_cxn_wrbh =
2174                             ((mem_descr_wrbh->mem_array[idx].size) /
2175                              ((sizeof(struct wrb_handle)) *
2176                               phba->params.wrbs_per_cxn));
2177                         pwrb_context->alloc_index = 0;
2178                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2179                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2180                                 pwrb_context->pwrb_handle_basestd[j] =
2181                                     pwrb_handle;
2182                                 pwrb_context->wrb_handles_available++;
2183                                 pwrb_handle->wrb_index = j;
2184                                 pwrb_handle++;
2185                         }
2186                         pwrb_context->free_index = 0;
2187                         num_cxn_wrbh--;
2188                 }
2189         }
2190         idx = 0;
2191         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2192         num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2193                       ((sizeof(struct iscsi_wrb) *
2194                         phba->params.wrbs_per_cxn));
2195         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2196                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2197                 if (num_cxn_wrb) {
2198                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2199                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2200                                 pwrb_handle->pwrb = pwrb;
2201                                 pwrb++;
2202                         }
2203                         num_cxn_wrb--;
2204                 } else {
2205                         idx++;
2206                         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2207                         num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2208                                       ((sizeof(struct iscsi_wrb) *
2209                                         phba->params.wrbs_per_cxn));
2210                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2211                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2212                                 pwrb_handle->pwrb = pwrb;
2213                                 pwrb++;
2214                         }
2215                         num_cxn_wrb--;
2216                 }
2217         }
2218 }
2219
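     /*
      * Initialise the async (default) PDU context from the preallocated
      * regions: set up the header and data buffer pools, rings and handle
      * arrays, put every handle on its free list and reset the ring
      * read/write pointers.
      */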
2220 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2221 {
2222         struct hwi_controller *phwi_ctrlr;
2223         struct hba_parameters *p = &phba->params;
2224         struct hwi_async_pdu_context *pasync_ctx;
2225         struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2226         unsigned int index;
2227         struct be_mem_descriptor *mem_descr;
2228
2229         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2230         mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2231
2232         phwi_ctrlr = phba->phwi_ctrlr;
2233         phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2234                                 mem_descr->mem_array[0].virtual_address;
2235         pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2236         memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2237
2238         pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2239         pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2240         pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2241         pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2242
2243         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2244         mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2245         if (mem_descr->mem_array[0].virtual_address) {
2246                 SE_DEBUG(DBG_LVL_8,
2247                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
2248                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2249         } else
2250                 shost_printk(KERN_WARNING, phba->shost,
2251                              "No Virtual address\n");
2252
2253         pasync_ctx->async_header.va_base =
2254                         mem_descr->mem_array[0].virtual_address;
2255
2256         pasync_ctx->async_header.pa_base.u.a64.address =
2257                         mem_descr->mem_array[0].bus_address.u.a64.address;
2258
2259         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2260         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2261         if (mem_descr->mem_array[0].virtual_address) {
2262                 SE_DEBUG(DBG_LVL_8,
2263                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
2264                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2265         } else
2266                 shost_printk(KERN_WARNING, phba->shost,
2267                             "No Virtual address\n");
2268         pasync_ctx->async_header.ring_base =
2269                         mem_descr->mem_array[0].virtual_address;
2270
2271         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2272         mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2273         if (mem_descr->mem_array[0].virtual_address) {
2274                 SE_DEBUG(DBG_LVL_8,
2275                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
2276                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2277         } else
2278                 shost_printk(KERN_WARNING, phba->shost,
2279                             "No Virtual address\n");
2280
2281         pasync_ctx->async_header.handle_base =
2282                         mem_descr->mem_array[0].virtual_address;
2283         pasync_ctx->async_header.writables = 0;
2284         INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2285
2286         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2287         mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2288         if (mem_descr->mem_array[0].virtual_address) {
2289                 SE_DEBUG(DBG_LVL_8,
2290                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
2291                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2292         } else
2293                 shost_printk(KERN_WARNING, phba->shost,
2294                             "No Virtual address\n");
2295         pasync_ctx->async_data.va_base =
2296                         mem_descr->mem_array[0].virtual_address;
2297         pasync_ctx->async_data.pa_base.u.a64.address =
2298                         mem_descr->mem_array[0].bus_address.u.a64.address;
2299
2300         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2301         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2302         if (mem_descr->mem_array[0].virtual_address) {
2303                 SE_DEBUG(DBG_LVL_8,
2304                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
2305                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2306         } else
2307                 shost_printk(KERN_WARNING, phba->shost,
2308                              "No Virtual address\n");
2309
2310         pasync_ctx->async_data.ring_base =
2311                         mem_descr->mem_array[0].virtual_address;
2312
2313         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2314         mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2315         if (!mem_descr->mem_array[0].virtual_address)
2316                 shost_printk(KERN_WARNING, phba->shost,
2317                             "No Virtual address\n");
2318
2319         pasync_ctx->async_data.handle_base =
2320                         mem_descr->mem_array[0].virtual_address;
2321         pasync_ctx->async_data.writables = 0;
2322         INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2323
2324         pasync_header_h =
2325                 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2326         pasync_data_h =
2327                 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2328
2329         for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2330                 pasync_header_h->cri = -1;
2331                 pasync_header_h->index = (char)index;
2332                 INIT_LIST_HEAD(&pasync_header_h->link);
2333                 pasync_header_h->pbuffer =
2334                         (void *)((unsigned long)
2335                         (pasync_ctx->async_header.va_base) +
2336                         (p->defpdu_hdr_sz * index));
2337
2338                 pasync_header_h->pa.u.a64.address =
2339                         pasync_ctx->async_header.pa_base.u.a64.address +
2340                         (p->defpdu_hdr_sz * index);
2341
2342                 list_add_tail(&pasync_header_h->link,
2343                                 &pasync_ctx->async_header.free_list);
2344                 pasync_header_h++;
2345                 pasync_ctx->async_header.free_entries++;
2346                 pasync_ctx->async_header.writables++;
2347
2348                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2349                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2350                                header_busy_list);
2351                 pasync_data_h->cri = -1;
2352                 pasync_data_h->index = (char)index;
2353                 INIT_LIST_HEAD(&pasync_data_h->link);
2354                 pasync_data_h->pbuffer =
2355                         (void *)((unsigned long)
2356                         (pasync_ctx->async_data.va_base) +
2357                         (p->defpdu_data_sz * index));
2358
2359                 pasync_data_h->pa.u.a64.address =
2360                     pasync_ctx->async_data.pa_base.u.a64.address +
2361                     (p->defpdu_data_sz * index);
2362
2363                 list_add_tail(&pasync_data_h->link,
2364                               &pasync_ctx->async_data.free_list);
2365                 pasync_data_h++;
2366                 pasync_ctx->async_data.free_entries++;
2367                 pasync_ctx->async_data.writables++;
2368
2369                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2370         }
2371
2372         pasync_ctx->async_header.host_write_ptr = 0;
2373         pasync_ctx->async_header.ep_read_ptr = -1;
2374         pasync_ctx->async_data.host_write_ptr = 0;
2375         pasync_ctx->async_data.ep_read_ptr = -1;
2376 }
2377
2378 static int
2379 be_sgl_create_contiguous(void *virtual_address,
2380                          u64 physical_address, u32 length,
2381                          struct be_dma_mem *sgl)
2382 {
2383         WARN_ON(!virtual_address);
2384         WARN_ON(!physical_address);
2385         WARN_ON(length == 0);
2386         WARN_ON(!sgl);
2387
2388         sgl->va = virtual_address;
2389         sgl->dma = (unsigned long)physical_address;
2390         sgl->size = length;
2391
2392         return 0;
2393 }
2394
2395 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2396 {
2397         memset(sgl, 0, sizeof(*sgl));
2398 }
2399
2400 static void
2401 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2402                      struct mem_array *pmem, struct be_dma_mem *sgl)
2403 {
2404         if (sgl->va)
2405                 be_sgl_destroy_contiguous(sgl);
2406
2407         be_sgl_create_contiguous(pmem->virtual_address,
2408                                  pmem->bus_address.u.a64.address,
2409                                  pmem->size, sgl);
2410 }
2411
2412 static void
2413 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2414                            struct mem_array *pmem, struct be_dma_mem *sgl)
2415 {
2416         if (sgl->va)
2417                 be_sgl_destroy_contiguous(sgl);
2418
2419         be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2420                                  pmem->bus_address.u.a64.address,
2421                                  pmem->size, sgl);
2422 }
2423
2424 static int be_fill_queue(struct be_queue_info *q,
2425                 u16 len, u16 entry_size, void *vaddress)
2426 {
2427         struct be_dma_mem *mem = &q->dma_mem;
2428
2429         memset(q, 0, sizeof(*q));
2430         q->len = len;
2431         q->entry_size = entry_size;
2432         mem->size = len * entry_size;
2433         mem->va = vaddress;
2434         if (!mem->va)
2435                 return -ENOMEM;
2436         memset(mem->va, 0, mem->size);
2437         return 0;
2438 }
2439
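     /*
      * Allocate and create one event queue per CPU (plus one for the MCC
      * when MSI-X is enabled).  On failure, DMA memory already allocated
      * for the EQs is freed.
      */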
2440 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2441                              struct hwi_context_memory *phwi_context)
2442 {
2443         unsigned int i, num_eq_pages;
2444         int ret, eq_for_mcc;
2445         struct be_queue_info *eq;
2446         struct be_dma_mem *mem;
2447         void *eq_vaddress;
2448         dma_addr_t paddr;
2449
2450         num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2451                                       sizeof(struct be_eq_entry));
2452
2453         if (phba->msix_enabled)
2454                 eq_for_mcc = 1;
2455         else
2456                 eq_for_mcc = 0;
2457         for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2458                 eq = &phwi_context->be_eq[i].q;
2459                 mem = &eq->dma_mem;
2460                 phwi_context->be_eq[i].phba = phba;
2461                 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2462                                                      num_eq_pages * PAGE_SIZE,
2463                                                      &paddr);
2464                 if (!eq_vaddress)
2465                         goto create_eq_error;
2466
2467                 mem->va = eq_vaddress;
2468                 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2469                                     sizeof(struct be_eq_entry), eq_vaddress);
2470                 if (ret) {
2471                         shost_printk(KERN_ERR, phba->shost,
2472                                      "be_fill_queue Failed for EQ\n");
2473                         goto create_eq_error;
2474                 }
2475
2476                 mem->dma = paddr;
2477                 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2478                                             phwi_context->cur_eqd);
2479                 if (ret) {
2480                         shost_printk(KERN_ERR, phba->shost,
2481                                      "beiscsi_cmd_eq_create "
2482                                      "Failed for EQ\n");
2483                         goto create_eq_error;
2484                 }
2485                 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2486         }
2487         return 0;
2488 create_eq_error:
2489         for (i = 0; i < (phba->num_cpus + 1); i++) {
2490                 eq = &phwi_context->be_eq[i].q;
2491                 mem = &eq->dma_mem;
2492                 if (mem->va)
2493                         pci_free_consistent(phba->pcidev, num_eq_pages
2494                                             * PAGE_SIZE,
2495                                             mem->va, mem->dma);
2496         }
2497         return ret;
2498 }
2499
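/*
 * Editor's note: beiscsi_create_cqs() creates one iSCSI completion
 * queue per CPU, binds each CQ to the matching per-CPU EQ, and frees
 * all CQ memory if any creation step fails.
 */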
2500 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2501                              struct hwi_context_memory *phwi_context)
2502 {
2503         unsigned int i, num_cq_pages;
2504         int ret;
2505         struct be_queue_info *cq, *eq;
2506         struct be_dma_mem *mem;
2507         struct be_eq_obj *pbe_eq;
2508         void *cq_vaddress;
2509         dma_addr_t paddr;
2510
2511         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
2512                                       sizeof(struct sol_cqe));
2513
2514         for (i = 0; i < phba->num_cpus; i++) {
2515                 cq = &phwi_context->be_cq[i];
2516                 eq = &phwi_context->be_eq[i].q;
2517                 pbe_eq = &phwi_context->be_eq[i];
2518                 pbe_eq->cq = cq;
2519                 pbe_eq->phba = phba;
2520                 mem = &cq->dma_mem;
2521                 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2522                                                      num_cq_pages * PAGE_SIZE,
2523                                                      &paddr);
2524                 if (!cq_vaddress)
2525                         goto create_cq_error;
2526                 ret = be_fill_queue(cq, phba->params.num_cq_entries,
2527                                     sizeof(struct sol_cqe), cq_vaddress);
2528                 if (ret) {
2529                         shost_printk(KERN_ERR, phba->shost,
2530                                      "be_fill_queue Failed for ISCSI CQ\n");
2531                         goto create_cq_error;
2532                 }
2533
2534                 mem->dma = paddr;
2535                 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2536                                             false, 0);
2537                 if (ret) {
2538                         shost_printk(KERN_ERR, phba->shost,
2539                                      "beiscsi_cmd_cq_create "
2540                                      "Failed for ISCSI CQ\n");
2541                         goto create_cq_error;
2542                 }
2543                 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2544                                                  cq->id, eq->id);
2545                 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2546         }
2547         return 0;
2548
2549 create_cq_error:
2550         for (i = 0; i < phba->num_cpus; i++) {
2551                 cq = &phwi_context->be_cq[i];
2552                 mem = &cq->dma_mem;
2553                 if (mem->va)
2554                         pci_free_consistent(phba->pcidev, num_cq_pages
2555                                             * PAGE_SIZE,
2556                                             mem->va, mem->dma);
2557         }
2558         return ret;
2559
2560 }
2561
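/*
 * Editor's note: beiscsi_create_def_hdr() builds the default PDU header
 * ring from the HWI_MEM_ASYNC_HEADER_RING region, registers it with the
 * firmware and posts the initial set of async header buffers.
 */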
2562 static int
2563 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2564                        struct hwi_context_memory *phwi_context,
2565                        struct hwi_controller *phwi_ctrlr,
2566                        unsigned int def_pdu_ring_sz)
2567 {
2568         unsigned int idx;
2569         int ret;
2570         struct be_queue_info *dq, *cq;
2571         struct be_dma_mem *mem;
2572         struct be_mem_descriptor *mem_descr;
2573         void *dq_vaddress;
2574
2575         idx = 0;
2576         dq = &phwi_context->be_def_hdrq;
2577         cq = &phwi_context->be_cq[0];
2578         mem = &dq->dma_mem;
2579         mem_descr = phba->init_mem;
2580         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2581         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2582         ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2583                             sizeof(struct phys_addr),
2584                             sizeof(struct phys_addr), dq_vaddress);
2585         if (ret) {
2586                 shost_printk(KERN_ERR, phba->shost,
2587                              "be_fill_queue Failed for DEF PDU HDR\n");
2588                 return ret;
2589         }
2590         mem->dma = (unsigned long)mem_descr->mem_array[idx].
2591                                   bus_address.u.a64.address;
2592         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2593                                               def_pdu_ring_sz,
2594                                               phba->params.defpdu_hdr_sz);
2595         if (ret) {
2596                 shost_printk(KERN_ERR, phba->shost,
2597                              "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2598                 return ret;
2599         }
2600         phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2601         SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2602                  phwi_context->be_def_hdrq.id);
2603         hwi_post_async_buffers(phba, 1);
2604         return 0;
2605 }
2606
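/*
 * Editor's note: beiscsi_create_def_data() does the same for the
 * default PDU data ring, using the HWI_MEM_ASYNC_DATA_RING region and
 * the defpdu_data_sz driver parameter, then posts the async data
 * buffers.
 */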
2607 static int
2608 beiscsi_create_def_data(struct beiscsi_hba *phba,
2609                         struct hwi_context_memory *phwi_context,
2610                         struct hwi_controller *phwi_ctrlr,
2611                         unsigned int def_pdu_ring_sz)
2612 {
2613         unsigned int idx;
2614         int ret;
2615         struct be_queue_info *dataq, *cq;
2616         struct be_dma_mem *mem;
2617         struct be_mem_descriptor *mem_descr;
2618         void *dq_vaddress;
2619
2620         idx = 0;
2621         dataq = &phwi_context->be_def_dataq;
2622         cq = &phwi_context->be_cq[0];
2623         mem = &dataq->dma_mem;
2624         mem_descr = phba->init_mem;
2625         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2626         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2627         ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2628                             sizeof(struct phys_addr),
2629                             sizeof(struct phys_addr), dq_vaddress);
2630         if (ret) {
2631                 shost_printk(KERN_ERR, phba->shost,
2632                              "be_fill_queue Failed for DEF PDU DATA\n");
2633                 return ret;
2634         }
2635         mem->dma = (unsigned long)mem_descr->mem_array[idx].
2636                                   bus_address.u.a64.address;
2637         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2638                                               def_pdu_ring_sz,
2639                                               phba->params.defpdu_data_sz);
2640         if (ret) {
2641                 shost_printk(KERN_ERR, phba->shost,
2642                              "be_cmd_create_default_pdu_queue Failed"
2643                              " for DEF PDU DATA\n");
2644                 return ret;
2645         }
2646         phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2647         SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2648                  phwi_context->be_def_dataq.id);
2649         hwi_post_async_buffers(phba, 0);
2650         SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
2651         return 0;
2652 }
2653
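/*
 * Editor's note: beiscsi_post_pages() walks the HWI_MEM_SGE memory
 * arrays and posts each chunk to the firmware as SGL pages, starting
 * at the page offset derived from phba->fw_config.iscsi_icd_start.
 */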
2654 static int
2655 beiscsi_post_pages(struct beiscsi_hba *phba)
2656 {
2657         struct be_mem_descriptor *mem_descr;
2658         struct mem_array *pm_arr;
2659         unsigned int page_offset, i;
2660         struct be_dma_mem sgl;
2661         int status;
2662
2663         mem_descr = phba->init_mem;
2664         mem_descr += HWI_MEM_SGE;
2665         pm_arr = mem_descr->mem_array;
2666
2667         page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2668                         phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2669         for (i = 0; i < mem_descr->num_elements; i++) {
2670                 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2671                 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2672                                                 page_offset,
2673                                                 (pm_arr->size / PAGE_SIZE));
2674                 page_offset += pm_arr->size / PAGE_SIZE;
2675                 if (status != 0) {
2676                         shost_printk(KERN_ERR, phba->shost,
2677                                      "post sgl failed.\n");
2678                         return status;
2679                 }
2680                 pm_arr++;
2681         }
2682         SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
2683         return 0;
2684 }
2685
2686 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2687 {
2688         struct be_dma_mem *mem = &q->dma_mem;
2689         if (mem->va)
2690                 pci_free_consistent(phba->pcidev, mem->size,
2691                         mem->va, mem->dma);
2692 }
2693
2694 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2695                 u16 len, u16 entry_size)
2696 {
2697         struct be_dma_mem *mem = &q->dma_mem;
2698
2699         memset(q, 0, sizeof(*q));
2700         q->len = len;
2701         q->entry_size = entry_size;
2702         mem->size = len * entry_size;
2703         mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2704         if (!mem->va)
2705                 return -ENOMEM;
2706         memset(mem->va, 0, mem->size);
2707         return 0;
2708 }
2709
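/*
 * Editor's note: beiscsi_create_wrb_rings() carves the HWI_MEM_WRB
 * region into one WRB ring per connection, issues a WRBQ-create command
 * for each ring and records the returned queue id as the connection's
 * CID in the wrb_context table.
 */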
2710 static int
2711 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2712                          struct hwi_context_memory *phwi_context,
2713                          struct hwi_controller *phwi_ctrlr)
2714 {
2715         unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2716         u64 pa_addr_lo;
2717         unsigned int idx, num, i;
2718         struct mem_array *pwrb_arr;
2719         void *wrb_vaddr;
2720         struct be_dma_mem sgl;
2721         struct be_mem_descriptor *mem_descr;
2722         int status;
2723
2724         idx = 0;
2725         mem_descr = phba->init_mem;
2726         mem_descr += HWI_MEM_WRB;
2727         pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2728                            GFP_KERNEL);
2729         if (!pwrb_arr) {
2730                 shost_printk(KERN_ERR, phba->shost,
2731                              "Memory alloc failed in create wrb ring.\n");
2732                 return -ENOMEM;
2733         }
2734         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2735         pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2736         num_wrb_rings = mem_descr->mem_array[idx].size /
2737                 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2738
2739         for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2740                 if (num_wrb_rings) {
2741                         pwrb_arr[num].virtual_address = wrb_vaddr;
2742                         pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2743                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2744                                             sizeof(struct iscsi_wrb);
2745                         wrb_vaddr += pwrb_arr[num].size;
2746                         pa_addr_lo += pwrb_arr[num].size;
2747                         num_wrb_rings--;
2748                 } else {
2749                         idx++;
2750                         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2751                         pa_addr_lo = mem_descr->mem_array[idx].\
2752                         pa_addr_lo = mem_descr->mem_array[idx].
2753                                         bus_address.u.a64.address;
2754                         num_wrb_rings = mem_descr->mem_array[idx].size /
2755                                         (phba->params.wrbs_per_cxn *
2756                                         sizeof(struct iscsi_wrb));
2757                         pwrb_arr[num].virtual_address = wrb_vaddr;
2758                         pwrb_arr[num].bus_address.u.a64.address =
2759                                                 pa_addr_lo;
2760                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2761                                              sizeof(struct iscsi_wrb);
2762                         wrb_vaddr += pwrb_arr[num].size;
2763                         pa_addr_lo += pwrb_arr[num].size;
2764                 }
2765         }
2766         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2767                 wrb_mem_index = 0;
2768                 offset = 0;
2769                 size = 0;
2770
2771                 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2772                 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2773                                             &phwi_context->be_wrbq[i]);
2774                 if (status != 0) {
2775                         shost_printk(KERN_ERR, phba->shost,
2776                                      "wrbq create failed.\n");
2777                         kfree(pwrb_arr);
2778                         return status;
2779                 }
2780                 phwi_ctrlr->wrb_context[i * 2].cid =
2781                                         phwi_context->be_wrbq[i].id;
2782         }
2783         kfree(pwrb_arr);
2784         return 0;
2785 }
2786
2787 static void free_wrb_handles(struct beiscsi_hba *phba)
2788 {
2789         unsigned int index;
2790         struct hwi_controller *phwi_ctrlr;
2791         struct hwi_wrb_context *pwrb_context;
2792
2793         phwi_ctrlr = phba->phwi_ctrlr;
2794         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2795                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2796                 kfree(pwrb_context->pwrb_handle_base);
2797                 kfree(pwrb_context->pwrb_handle_basestd);
2798         }
2799 }
2800
2801 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2802 {
2803         struct be_queue_info *q;
2804         struct be_ctrl_info *ctrl = &phba->ctrl;
2805
2806         q = &phba->ctrl.mcc_obj.q;
2807         if (q->created)
2808                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2809         be_queue_free(phba, q);
2810
2811         q = &phba->ctrl.mcc_obj.cq;
2812         if (q->created)
2813                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2814         be_queue_free(phba, q);
2815 }
2816
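/*
 * Editor's note: hwi_cleanup() tears down all queues created during
 * port bring-up: the per-connection WRB queues and handles, the
 * default PDU header/data rings, the posted SGLs, the per-CPU CQs and
 * EQs, and finally the MCC queues.
 */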
2817 static void hwi_cleanup(struct beiscsi_hba *phba)
2818 {
2819         struct be_queue_info *q;
2820         struct be_ctrl_info *ctrl = &phba->ctrl;
2821         struct hwi_controller *phwi_ctrlr;
2822         struct hwi_context_memory *phwi_context;
2823         int i, eq_num;
2824
2825         phwi_ctrlr = phba->phwi_ctrlr;
2826         phwi_context = phwi_ctrlr->phwi_ctxt;
2827         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2828                 q = &phwi_context->be_wrbq[i];
2829                 if (q->created)
2830                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2831         }
2832         free_wrb_handles(phba);
2833
2834         q = &phwi_context->be_def_hdrq;
2835         if (q->created)
2836                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2837
2838         q = &phwi_context->be_def_dataq;
2839         if (q->created)
2840                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2841
2842         beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2843
2844         for (i = 0; i < (phba->num_cpus); i++) {
2845                 q = &phwi_context->be_cq[i];
2846                 if (q->created)
2847                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2848         }
2849         if (phba->msix_enabled)
2850                 eq_num = 1;
2851         else
2852                 eq_num = 0;
2853         for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2854                 q = &phwi_context->be_eq[i].q;
2855                 if (q->created)
2856                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2857         }
2858         be_mcc_queues_destroy(phba);
2859 }
2860
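/*
 * Editor's note: be_mcc_queues_create() sets up the management command
 * channel: a completion queue attached either to the dedicated MCC EQ
 * (MSI-X) or to EQ 0 (INTx), followed by the MCC work queue itself.
 */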
2861 static int be_mcc_queues_create(struct beiscsi_hba *phba,
2862                                 struct hwi_context_memory *phwi_context)
2863 {
2864         struct be_queue_info *q, *cq;
2865         struct be_ctrl_info *ctrl = &phba->ctrl;
2866
2867         /* Alloc MCC compl queue */
2868         cq = &phba->ctrl.mcc_obj.cq;
2869         if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2870                         sizeof(struct be_mcc_compl)))
2871                 goto err;
2872         /* Ask BE to create MCC compl queue */
2873         if (phba->msix_enabled) {
2874                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2875                                          [phba->num_cpus].q, false, true, 0))
2876                         goto mcc_cq_free;
2877         } else {
2878                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2879                                           false, true, 0))
2880                         goto mcc_cq_free;
2881         }
2882
2883         /* Alloc MCC queue */
2884         q = &phba->ctrl.mcc_obj.q;
2885         if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2886                 goto mcc_cq_destroy;
2887
2888         /* Ask BE to create MCC queue */
2889         if (beiscsi_cmd_mccq_create(phba, q, cq))
2890                 goto mcc_q_free;
2891
2892         return 0;
2893
2894 mcc_q_free:
2895         be_queue_free(phba, q);
2896 mcc_cq_destroy:
2897         beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2898 mcc_cq_free:
2899         be_queue_free(phba, cq);
2900 err:
2901         return -ENOMEM;
2902 }
2903
2904 static int find_num_cpus(void)
2905 {
2906         int  num_cpus = 0;
2907
2908         num_cpus = num_online_cpus();
2909         if (num_cpus >= MAX_CPUS)
2910                 num_cpus = MAX_CPUS - 1;
2911
2912         SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
2913         return num_cpus;
2914 }
2915
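/*
 * Editor's note: hwi_init_port() drives the whole queue bring-up
 * sequence against the adapter: firmware init, EQs, MCC queues,
 * firmware version check, CQs, default PDU header/data rings, SGL page
 * posting and WRB rings. Any failure rolls everything back through
 * hwi_cleanup().
 */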
2916 static int hwi_init_port(struct beiscsi_hba *phba)
2917 {
2918         struct hwi_controller *phwi_ctrlr;
2919         struct hwi_context_memory *phwi_context;
2920         unsigned int def_pdu_ring_sz;
2921         struct be_ctrl_info *ctrl = &phba->ctrl;
2922         int status;
2923
2924         def_pdu_ring_sz =
2925                 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2926         phwi_ctrlr = phba->phwi_ctrlr;
2927         phwi_context = phwi_ctrlr->phwi_ctxt;
2928         phwi_context->max_eqd = 0;
2929         phwi_context->min_eqd = 0;
2930         phwi_context->cur_eqd = 64;
2931         be_cmd_fw_initialize(&phba->ctrl);
2932
2933         status = beiscsi_create_eqs(phba, phwi_context);
2934         if (status != 0) {
2935                 shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
2936                 goto error;
2937         }
2938
2939         status = be_mcc_queues_create(phba, phwi_context);
2940         if (status != 0)
2941                 goto error;
2942
2943         status = mgmt_check_supported_fw(ctrl, phba);
2944         if (status != 0) {
2945                 shost_printk(KERN_ERR, phba->shost,
2946                              "Unsupported fw version\n");
2947                 goto error;
2948         }
2949
2950         status = beiscsi_create_cqs(phba, phwi_context);
2951         if (status != 0) {
2952                 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2953                 goto error;
2954         }
2955
2956         status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2957                                         def_pdu_ring_sz);
2958         if (status != 0) {
2959                 shost_printk(KERN_ERR, phba->shost,
2960                              "Default Header not created\n");
2961                 goto error;
2962         }
2963
2964         status = beiscsi_create_def_data(phba, phwi_context,
2965                                          phwi_ctrlr, def_pdu_ring_sz);
2966         if (status != 0) {
2967                 shost_printk(KERN_ERR, phba->shost,
2968                              "Default Data not created\n");
2969                 goto error;
2970         }
2971
2972         status = beiscsi_post_pages(phba);
2973         if (status != 0) {
2974                 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2975                 goto error;
2976         }
2977
2978         status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
2979         if (status != 0) {
2980                 shost_printk(KERN_ERR, phba->shost,
2981                              "WRB Rings not created\n");
2982                 goto error;
2983         }
2984
2985         SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2986         return 0;
2987
2988 error:
2989         shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
2990         hwi_cleanup(phba);
2991         return -ENOMEM;
2992 }
2993
2994 static int hwi_init_controller(struct beiscsi_hba *phba)
2995 {
2996         struct hwi_controller *phwi_ctrlr;
2997
2998         phwi_ctrlr = phba->phwi_ctrlr;
2999         if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3000                 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3001                     init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3002                 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
3003                          phwi_ctrlr->phwi_ctxt);
3004         } else {
3005                 shost_printk(KERN_ERR, phba->shost,
3006                              "HWI_MEM_ADDN_CONTEXT is more than one element. "
3007                              "Failing to load\n");
3008                 return -ENOMEM;
3009         }
3010
3011         iscsi_init_global_templates(phba);
3012         beiscsi_init_wrb_handle(phba);
3013         hwi_init_async_pdu_ctx(phba);
3014         if (hwi_init_port(phba) != 0) {
3015                 shost_printk(KERN_ERR, phba->shost,
3016                              "hwi_init_controller failed\n");
3017                 return -ENOMEM;
3018         }
3019         return 0;
3020 }
3021
3022 static void beiscsi_free_mem(struct beiscsi_hba *phba)
3023 {
3024         struct be_mem_descriptor *mem_descr;
3025         int i, j;
3026
3027         mem_descr = phba->init_mem;
3028         i = 0;
3029         j = 0;
3030         for (i = 0; i < SE_MEM_MAX; i++) {
3031                 for (j = mem_descr->num_elements; j > 0; j--) {
3032                         pci_free_consistent(phba->pcidev,
3033                           mem_descr->mem_array[j - 1].size,
3034                           mem_descr->mem_array[j - 1].virtual_address,
3035                           (unsigned long)mem_descr->mem_array[j - 1].
3036                           bus_address.u.a64.address);
3037                 }
3038                 kfree(mem_descr->mem_array);
3039                 mem_descr++;
3040         }
3041         kfree(phba->init_mem);
3042         kfree(phba->phwi_ctrlr);
3043 }
3044
3045 static int beiscsi_init_controller(struct beiscsi_hba *phba)
3046 {
3047         int ret = -ENOMEM;
3048
3049         ret = beiscsi_get_memory(phba);
3050         if (ret < 0) {
3051                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3052                              "Failed in beiscsi_get_memory\n");
3053                 return ret;
3054         }
3055
3056         ret = hwi_init_controller(phba);
3057         if (ret)
3058                 goto free_init;
3059         SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller\n");
3060         return 0;
3061
3062 free_init:
3063         beiscsi_free_mem(phba);
3064         return -ENOMEM;
3065 }
3066
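/*
 * Editor's note: beiscsi_init_sgl_handle() splits the SGL handles from
 * HWI_MEM_SGLH into an I/O pool (ios_per_ctrl entries) and an extra
 * pool for management/error-handling tasks, then points each handle at
 * its slice of the HWI_MEM_SGE fragment area and assigns its ICD index
 * starting from fw_config.iscsi_icd_start.
 */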
3067 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3068 {
3069         struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3070         struct sgl_handle *psgl_handle;
3071         struct iscsi_sge *pfrag;
3072         unsigned int arr_index, i, idx;
3073
3074         phba->io_sgl_hndl_avbl = 0;
3075         phba->eh_sgl_hndl_avbl = 0;
3076
3077         mem_descr_sglh = phba->init_mem;
3078         mem_descr_sglh += HWI_MEM_SGLH;
3079         if (1 == mem_descr_sglh->num_elements) {
3080                 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3081                                                  phba->params.ios_per_ctrl,
3082                                                  GFP_KERNEL);
3083                 if (!phba->io_sgl_hndl_base) {
3084                         shost_printk(KERN_ERR, phba->shost,
3085                                      "Mem Alloc Failed. Failing to load\n");
3086                         return -ENOMEM;
3087                 }
3088                 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3089                                                  (phba->params.icds_per_ctrl -
3090                                                  phba->params.ios_per_ctrl),
3091                                                  GFP_KERNEL);
3092                 if (!phba->eh_sgl_hndl_base) {
3093                         kfree(phba->io_sgl_hndl_base);
3094                         shost_printk(KERN_ERR, phba->shost,
3095                                      "Mem Alloc Failed. Failing to load\n");
3096                         return -ENOMEM;
3097                 }
3098         } else {
3099                 shost_printk(KERN_ERR, phba->shost,
3100                              "HWI_MEM_SGLH is more than one element. "
3101                              "Failing to load\n");
3102                 return -ENOMEM;
3103         }
3104
3105         arr_index = 0;
3106         idx = 0;
3107         while (idx < mem_descr_sglh->num_elements) {
3108                 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3109
3110                 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3111                       sizeof(struct sgl_handle)); i++) {
3112                         if (arr_index < phba->params.ios_per_ctrl) {
3113                                 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3114                                 phba->io_sgl_hndl_avbl++;
3115                                 arr_index++;
3116                         } else {
3117                                 phba->eh_sgl_hndl_base[arr_index -
3118                                         phba->params.ios_per_ctrl] =
3119                                                                 psgl_handle;
3120                                 arr_index++;
3121                                 phba->eh_sgl_hndl_avbl++;
3122                         }
3123                         psgl_handle++;
3124                 }
3125                 idx++;
3126         }
3127         SE_DEBUG(DBG_LVL_8,
3128                  "phba->io_sgl_hndl_avbl=%d "
3129                  "phba->eh_sgl_hndl_avbl=%d\n",
3130                  phba->io_sgl_hndl_avbl,
3131                  phba->eh_sgl_hndl_avbl);
3132         mem_descr_sg = phba->init_mem;
3133         mem_descr_sg += HWI_MEM_SGE;
3134         SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n",
3135                  mem_descr_sg->num_elements);
3136         arr_index = 0;
3137         idx = 0;
3138         while (idx < mem_descr_sg->num_elements) {
3139                 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3140
3141                 for (i = 0;
3142                      i < (mem_descr_sg->mem_array[idx].size) /
3143                      (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3144                      i++) {
3145                         if (arr_index < phba->params.ios_per_ctrl)
3146                                 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3147                         else
3148                                 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3149                                                 phba->params.ios_per_ctrl];
3150                         psgl_handle->pfrag = pfrag;
3151                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3152                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3153                         pfrag += phba->params.num_sge_per_io;
3154                         psgl_handle->sgl_index =
3155                                 phba->fw_config.iscsi_icd_start + arr_index++;
3156                 }
3157                 idx++;
3158         }
3159         phba->io_sgl_free_index = 0;
3160         phba->io_sgl_alloc_index = 0;
3161         phba->eh_sgl_free_index = 0;
3162         phba->eh_sgl_alloc_index = 0;
3163         return 0;
3164 }
3165
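/*
 * Editor's note: hba_setup_cid_tbls() allocates the CID and endpoint
 * lookup tables and pre-populates the CID array with every other
 * connection id starting from the firmware's iscsi_cid_start.
 */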
3166 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3167 {
3168         int i, new_cid;
3169
3170         phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3171                                   GFP_KERNEL);
3172         if (!phba->cid_array) {
3173                 shost_printk(KERN_ERR, phba->shost,
3174                              "Failed to allocate memory in "
3175                              "hba_setup_cid_tbls\n");
3176                 return -ENOMEM;
3177         }
3178         phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3179                                  phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3180         if (!phba->ep_array) {
3181                 shost_printk(KERN_ERR, phba->shost,
3182                              "Failed to allocate memory in "
3183                              "hba_setup_cid_tbls\n");
3184                 kfree(phba->cid_array);
3185                 return -ENOMEM;
3186         }
3187         new_cid = phba->fw_config.iscsi_cid_start;
3188         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3189                 phba->cid_array[i] = new_cid;
3190                 new_cid += 2;
3191         }
3192         phba->avlbl_cids = phba->params.cxns_per_ctrl;
3193         return 0;
3194 }
3195
3196 static void hwi_enable_intr(struct beiscsi_hba *phba)
3197 {
3198         struct be_ctrl_info *ctrl = &phba->ctrl;
3199         struct hwi_controller *phwi_ctrlr;
3200         struct hwi_context_memory *phwi_context;
3201         struct be_queue_info *eq;
3202         u8 __iomem *addr;
3203         u32 reg, i;
3204         u32 enabled;
3205
3206         phwi_ctrlr = phba->phwi_ctrlr;
3207         phwi_context = phwi_ctrlr->phwi_ctxt;
3208
3209         addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3210                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3211         reg = ioread32(addr);
3212         SE_DEBUG(DBG_LVL_8, "reg =x%08x\n", reg);
3213
3214         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3215         if (!enabled) {
3216                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3217                 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr);
3218                 iowrite32(reg, addr);
3219                 if (!phba->msix_enabled) {
3220                         eq = &phwi_context->be_eq[0].q;
3221                         SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3222                         hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3223                 } else {
3224                         for (i = 0; i <= phba->num_cpus; i++) {
3225                                 eq = &phwi_context->be_eq[i].q;
3226                                 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3227                                 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3228                         }
3229                 }
3230         }
3231 }
3232
3233 static void hwi_disable_intr(struct beiscsi_hba *phba)
3234 {
3235         struct be_ctrl_info *ctrl = &phba->ctrl;
3236
3237         u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3238         u32 reg = ioread32(addr);
3239
3240         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3241         if (enabled) {
3242                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3243                 iowrite32(reg, addr);
3244         } else
3245                 shost_printk(KERN_WARNING, phba->shost,
3246                              "In hwi_disable_intr, interrupts already disabled\n");
3247 }
3248
3249 static int beiscsi_init_port(struct beiscsi_hba *phba)
3250 {
3251         int ret;
3252
3253         ret = beiscsi_init_controller(phba);
3254         if (ret < 0) {
3255                 shost_printk(KERN_ERR, phba->shost,
3256                              "beiscsi_dev_probe - Failed in "
3257                              "beiscsi_init_controller\n");
3258                 return ret;
3259         }
3260         ret = beiscsi_init_sgl_handle(phba);
3261         if (ret < 0) {
3262                 shost_printk(KERN_ERR, phba->shost,
3263                              "beiscsi_dev_probe - Failed in "
3264                              "beiscsi_init_sgl_handle\n");
3265                 goto do_cleanup_ctrlr;
3266         }
3267
3268         if (hba_setup_cid_tbls(phba)) {
3269                 shost_printk(KERN_ERR, phba->shost,
3270                              "Failed in hba_setup_cid_tbls\n");
3271                 kfree(phba->io_sgl_hndl_base);
3272                 kfree(phba->eh_sgl_hndl_base);
3273                 goto do_cleanup_ctrlr;
3274         }
3275
3276         return ret;
3277
3278 do_cleanup_ctrlr:
3279         hwi_cleanup(phba);
3280         return ret;
3281 }
3282
3283 static void hwi_purge_eq(struct beiscsi_hba *phba)
3284 {
3285         struct hwi_controller *phwi_ctrlr;
3286         struct hwi_context_memory *phwi_context;
3287         struct be_queue_info *eq;
3288         struct be_eq_entry *eqe = NULL;
3289         int i, eq_msix;
3290         unsigned int num_processed;
3291
3292         phwi_ctrlr = phba->phwi_ctrlr;
3293         phwi_context = phwi_ctrlr->phwi_ctxt;
3294         if (phba->msix_enabled)
3295                 eq_msix = 1;
3296         else
3297                 eq_msix = 0;
3298
3299         for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3300                 eq = &phwi_context->be_eq[i].q;
3301                 eqe = queue_tail_node(eq);
3302                 num_processed = 0;
3303                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3304                                         & EQE_VALID_MASK) {
3305                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3306                         queue_tail_inc(eq);
3307                         eqe = queue_tail_node(eq);
3308                         num_processed++;
3309                 }
3310
3311                 if (num_processed)
3312                         hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
3313         }
3314 }
3315
3316 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3317 {
3318         int mgmt_status;
3319
3320         mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3321         if (mgmt_status)
3322                 shost_printk(KERN_WARNING, phba->shost,
3323                              "mgmt_epfw_cleanup FAILED\n");
3324
3325         hwi_purge_eq(phba);
3326         hwi_cleanup(phba);
3327         kfree(phba->io_sgl_hndl_base);
3328         kfree(phba->eh_sgl_hndl_base);
3329         kfree(phba->cid_array);
3330         kfree(phba->ep_array);
3331 }
3332
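/*
 * Editor's note: beiscsi_offload_connection() builds a
 * target-context-update WRB from the negotiated login parameters
 * (burst lengths, ERL, header/data digests, R2T and immediate-data
 * flags, ExpStatSN) and rings the TX doorbell so the adapter can take
 * over the connection.
 */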
3333 void
3334 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3335                            struct beiscsi_offload_params *params)
3336 {
3337         struct wrb_handle *pwrb_handle;
3338         struct iscsi_target_context_update_wrb *pwrb = NULL;
3339         struct be_mem_descriptor *mem_descr;
3340         struct beiscsi_hba *phba = beiscsi_conn->phba;
3341         u32 doorbell = 0;
3342
3343         /*
3344          * We can always use 0 here because it is reserved by libiscsi for
3345          * login/startup related tasks.
3346          */
3347         pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3348                                        phba->fw_config.iscsi_cid_start));
3349         pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3350         memset(pwrb, 0, sizeof(*pwrb));
3351         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3352                       max_burst_length, pwrb, params->dw[offsetof
3353                       (struct amap_beiscsi_offload_params,
3354                       max_burst_length) / 32]);
3355         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3356                       max_send_data_segment_length, pwrb,
3357                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3358                       max_send_data_segment_length) / 32]);
3359         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3360                       first_burst_length,
3361                       pwrb,
3362                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3363                       first_burst_length) / 32]);
3364
3365         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3366                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3367                       erl) / 32] & OFFLD_PARAMS_ERL));
3368         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3369                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3370                       dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3371         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3372                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3373                       hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3374         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3375                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3376                       ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3377         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3378                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3379                        imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3380         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3381                       pwrb,
3382                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3383                       exp_statsn) / 32] + 1));
3384         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3385                       0x7);
3386         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3387                       pwrb, pwrb_handle->wrb_index);
3388         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3389                       pwrb, pwrb_handle->nxt_wrb_index);
3390         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3391                         session_state, pwrb, 0);
3392         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3393                       pwrb, 1);
3394         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3395                       pwrb, 0);
3396         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3397                       0);
3398
3399         mem_descr = phba->init_mem;
3400         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3401
3402         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3403                         pad_buffer_addr_hi, pwrb,
3404                       mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3405         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3406                         pad_buffer_addr_lo, pwrb,
3407                       mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3408
3409         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3410
3411         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3412         doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3413                              << DB_DEF_PDU_WRB_INDEX_SHIFT;
3414         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3415
3416         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3417 }
3418
3419 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3420                               int *index, int *age)
3421 {
3422         *index = (int)itt;
3423         if (age)
3424                 *age = conn->session->age;
3425 }
3426
3427 /**
3428  * beiscsi_alloc_pdu - allocates pdu and related resources
3429  * @task: libiscsi task
3430  * @opcode: opcode of pdu for task
3431  *
3432  * This is called with the session lock held. It allocates
3433  * the wrb, and the sgl if needed for the command, and preps
3434  * the pdu's itt. beiscsi_parse_pdu will later translate
3435  * the pdu itt to the libiscsi task itt.
3436  */
3437 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3438 {
3439         struct beiscsi_io_task *io_task = task->dd_data;
3440         struct iscsi_conn *conn = task->conn;
3441         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3442         struct beiscsi_hba *phba = beiscsi_conn->phba;
3443         struct hwi_wrb_context *pwrb_context;
3444         struct hwi_controller *phwi_ctrlr;
3445         itt_t itt;
3446         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3447         dma_addr_t paddr;
3448
3449         io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3450                                           GFP_KERNEL, &paddr);
3451         if (!io_task->cmd_bhs)
3452                 return -ENOMEM;
3453         io_task->bhs_pa.u.a64.address = paddr;
3454         io_task->libiscsi_itt = (itt_t)task->itt;
3455         io_task->pwrb_handle = alloc_wrb_handle(phba,
3456                                                 beiscsi_conn->beiscsi_conn_cid -
3457                                                 phba->fw_config.iscsi_cid_start
3458                                                 );
3459         io_task->conn = beiscsi_conn;
3460
3461         task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3462         task->hdr_max = sizeof(struct be_cmd_bhs);
3463
3464         if (task->sc) {
3465                 spin_lock(&phba->io_sgl_lock);
3466                 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3467                 spin_unlock(&phba->io_sgl_lock);
3468                 if (!io_task->psgl_handle)
3469                         goto free_hndls;
3470         } else {
3471                 io_task->scsi_cmnd = NULL;
3472                 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3473                         if (!beiscsi_conn->login_in_progress) {
3474                                 spin_lock(&phba->mgmt_sgl_lock);
3475                                 io_task->psgl_handle = (struct sgl_handle *)
3476                                                 alloc_mgmt_sgl_handle(phba);
3477                                 spin_unlock(&phba->mgmt_sgl_lock);
3478                                 if (!io_task->psgl_handle)
3479                                         goto free_hndls;
3480
3481                                 beiscsi_conn->login_in_progress = 1;
3482                                 beiscsi_conn->plogin_sgl_handle =
3483                                                         io_task->psgl_handle;
3484                         } else {
3485                                 io_task->psgl_handle =
3486                                                 beiscsi_conn->plogin_sgl_handle;
3487                         }
3488                 } else {
3489                         spin_lock(&phba->mgmt_sgl_lock);
3490                         io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3491                         spin_unlock(&phba->mgmt_sgl_lock);
3492                         if (!io_task->psgl_handle)
3493                                 goto free_hndls;
3494                 }
3495         }
3496         itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3497                                  wrb_index << 16) | (unsigned int)
3498                                 (io_task->psgl_handle->sgl_index));
3499         io_task->pwrb_handle->pio_handle = task;
3500
3501         io_task->cmd_bhs->iscsi_hdr.itt = itt;
3502         return 0;
3503
3504 free_hndls:
3505         phwi_ctrlr = phba->phwi_ctrlr;
3506         pwrb_context = &phwi_ctrlr->wrb_context[
3507                         beiscsi_conn->beiscsi_conn_cid -
3508                         phba->fw_config.iscsi_cid_start];
3509         free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3510         io_task->pwrb_handle = NULL;
3511         pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3512                       io_task->bhs_pa.u.a64.address);
3513         SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
3514         return -ENOMEM;
3515 }
3516
3517 static void beiscsi_cleanup_task(struct iscsi_task *task)
3518 {
3519         struct beiscsi_io_task *io_task = task->dd_data;
3520         struct iscsi_conn *conn = task->conn;
3521         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3522         struct beiscsi_hba *phba = beiscsi_conn->phba;
3523         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3524         struct hwi_wrb_context *pwrb_context;
3525         struct hwi_controller *phwi_ctrlr;
3526
3527         phwi_ctrlr = phba->phwi_ctrlr;
3528         pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3529                         - phba->fw_config.iscsi_cid_start];
3530         if (io_task->pwrb_handle) {
3531                 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3532                 io_task->pwrb_handle = NULL;
3533         }
3534
3535         if (io_task->cmd_bhs) {
3536                 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3537                               io_task->bhs_pa.u.a64.address);
3538         }
3539
3540         if (task->sc) {
3541                 if (io_task->psgl_handle) {
3542                         spin_lock(&phba->io_sgl_lock);
3543                         free_io_sgl_handle(phba, io_task->psgl_handle);
3544                         spin_unlock(&phba->io_sgl_lock);
3545                         io_task->psgl_handle = NULL;
3546                 }
3547         } else {
3548                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3549                         return;
3550                 if (io_task->psgl_handle) {
3551                         spin_lock(&phba->mgmt_sgl_lock);
3552                         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3553                         spin_unlock(&phba->mgmt_sgl_lock);
3554                         io_task->psgl_handle = NULL;
3555                 }
3556         }
3557 }
3558
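/*
 * Editor's note: beiscsi_iotask() turns a SCSI command into an iSCSI
 * WRB: for writes it also prepares the immediate Data-Out header, then
 * it fills in the LUN, transfer length, CmdSN, SGL index and
 * scatterlist, converts the WRB to little-endian and rings the TX
 * doorbell.
 */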
3559 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3560                           unsigned int num_sg, unsigned int xferlen,
3561                           unsigned int writedir)
3562 {
3563
3564         struct beiscsi_io_task *io_task = task->dd_data;
3565         struct iscsi_conn *conn = task->conn;
3566         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3567         struct beiscsi_hba *phba = beiscsi_conn->phba;
3568         struct iscsi_wrb *pwrb = NULL;
3569         unsigned int doorbell = 0;
3570
3571         pwrb = io_task->pwrb_handle->pwrb;
3572         io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3573         io_task->bhs_len = sizeof(struct be_cmd_bhs);
3574
3575         if (writedir) {
3576                 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3577                 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3578                               &io_task->cmd_bhs->iscsi_data_pdu,
3579                               (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3580                 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3581                               &io_task->cmd_bhs->iscsi_data_pdu,
3582                               ISCSI_OPCODE_SCSI_DATA_OUT);
3583                 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3584                               &io_task->cmd_bhs->iscsi_data_pdu, 1);
3585                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3586                               INI_WR_CMD);
3587                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3588         } else {
3589                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3590                               INI_RD_CMD);
3591                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3592         }
3593         memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3594                dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3595                io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3596
3597         AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3598                       cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
3599                                   lun[0]));
3600         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3601         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3602                       io_task->pwrb_handle->wrb_index);
3603         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3604                       be32_to_cpu(task->cmdsn));
3605         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3606                       io_task->psgl_handle->sgl_index);
3607
3608         hwi_write_sgl(pwrb, sg, num_sg, io_task);
3609
3610         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3611                       io_task->pwrb_handle->nxt_wrb_index);
3612         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3613
3614         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3615         doorbell |= (io_task->pwrb_handle->wrb_index &
3616                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3617         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3618
3619         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3620         return 0;
3621 }
3622
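/*
 * Editor's note: beiscsi_mtask() handles non-SCSI PDUs (login,
 * nop-out, text, TMF, logout): it picks the WRB type per opcode,
 * copies the PDU into the WRB buffer and posts it through the same TX
 * doorbell path.
 */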
3623 static int beiscsi_mtask(struct iscsi_task *task)
3624 {
3625         struct beiscsi_io_task *io_task = task->dd_data;
3626         struct iscsi_conn *conn = task->conn;
3627         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3628         struct beiscsi_hba *phba = beiscsi_conn->phba;
3629         struct iscsi_wrb *pwrb = NULL;
3630         unsigned int doorbell = 0;
3631         unsigned int cid;
3632
3633         cid = beiscsi_conn->beiscsi_conn_cid;
3634         pwrb = io_task->pwrb_handle->pwrb;
3635         memset(pwrb, 0, sizeof(*pwrb));
3636         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3637                       be32_to_cpu(task->cmdsn));
3638         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3639                       io_task->pwrb_handle->wrb_index);
3640         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3641                       io_task->psgl_handle->sgl_index);
3642
3643         switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3644         case ISCSI_OP_LOGIN:
3645                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3646                               TGT_DM_CMD);
3647                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3648                 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3649                 hwi_write_buffer(pwrb, task);
3650                 break;
3651         case ISCSI_OP_NOOP_OUT:
3652                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3653                               INI_RD_CMD);
3654                 if (task->hdr->ttt == ISCSI_RESERVED_TAG)
3655                         AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3656                 else
3657                         AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
3658                 hwi_write_buffer(pwrb, task);
3659                 break;
3660         case ISCSI_OP_TEXT:
3661                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3662                               TGT_DM_CMD);
3663                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3664                 hwi_write_buffer(pwrb, task);
3665                 break;
3666         case ISCSI_OP_SCSI_TMFUNC:
3667                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3668                               INI_TMF_CMD);
3669                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3670                 hwi_write_buffer(pwrb, task);
3671                 break;
3672         case ISCSI_OP_LOGOUT:
3673                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3674                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3675                               HWH_TYPE_LOGOUT);
3676                 hwi_write_buffer(pwrb, task);
3677                 break;
3678
3679         default:
3680                 SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported\n",
3681                          task->hdr->opcode & ISCSI_OPCODE_MASK);
3682                 return -EINVAL;
3683         }
3684
3685         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3686                       task->data_count);
3687         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3688                       io_task->pwrb_handle->nxt_wrb_index);
3689         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3690
3691         doorbell |= cid & DB_WRB_POST_CID_MASK;
3692         doorbell |= (io_task->pwrb_handle->wrb_index &
3693                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3694         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3695         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3696         return 0;
3697 }
3698
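/*
 * Editor's note: beiscsi_task_xmit() is the libiscsi xmit hook:
 * management tasks go straight to beiscsi_mtask(); SCSI commands are
 * DMA-mapped and handed to beiscsi_iotask() together with the transfer
 * length and data direction.
 */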
3699 static int beiscsi_task_xmit(struct iscsi_task *task)
3700 {
3701         struct beiscsi_io_task *io_task = task->dd_data;
3702         struct scsi_cmnd *sc = task->sc;
3703         struct scatterlist *sg;
3704         int num_sg;
3705         unsigned int  writedir = 0, xferlen = 0;
3706
3707         if (!sc)
3708                 return beiscsi_mtask(task);
3709
3710         io_task->scsi_cmnd = sc;
3711         num_sg = scsi_dma_map(sc);
3712         if (num_sg < 0) {
3713                 SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n");
3714                 return num_sg;
3715         }
3716         SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3717                   (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3718         xferlen = scsi_bufflen(sc);
3719         sg = scsi_sglist(sc);
3720         if (sc->sc_data_direction == DMA_TO_DEVICE) {
3721                 writedir = 1;
3722                 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
3723                          task->imm_count);
3724         } else
3725                 writedir = 0;
3726         return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3727 }
3728
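/*
 * Editor's note: beiscsi_remove() undoes beiscsi_dev_probe():
 * interrupts and MSI-X vectors are released, iopoll is disabled, the
 * port and its memory are cleaned up, the flag bit in the
 * MPU_EP_SEMAPHORE register is cleared, the PCI BARs and mailbox DMA
 * memory are freed, and the SCSI host is unregistered and freed.
 */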
3729 static void beiscsi_remove(struct pci_dev *pcidev)
3730 {
3731         struct beiscsi_hba *phba = NULL;
3732         struct hwi_controller *phwi_ctrlr;
3733         struct hwi_context_memory *phwi_context;
3734         struct be_eq_obj *pbe_eq;
3735         unsigned int i, msix_vec;
3736         u8 *real_offset = 0;
3737         u32 value = 0;
3738
3739         phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3740         if (!phba) {
3741                 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
3742                 return;
3743         }
3744
3745         phwi_ctrlr = phba->phwi_ctrlr;
3746         phwi_context = phwi_ctrlr->phwi_ctxt;
3747         hwi_disable_intr(phba);
3748         if (phba->msix_enabled) {
3749                 for (i = 0; i <= phba->num_cpus; i++) {
3750                         msix_vec = phba->msix_entries[i].vector;
3751                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3752                 }
3753         } else
3754                 if (phba->pcidev->irq)
3755                         free_irq(phba->pcidev->irq, phba);
3756         pci_disable_msix(phba->pcidev);
3757         destroy_workqueue(phba->wq);
3758         if (blk_iopoll_enabled)
3759                 for (i = 0; i < phba->num_cpus; i++) {
3760                         pbe_eq = &phwi_context->be_eq[i];
3761                         blk_iopoll_disable(&pbe_eq->iopoll);
3762                 }
3763
3764         beiscsi_clean_port(phba);
3765         beiscsi_free_mem(phba);
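        /*
         * Clear the "driver loaded" bit (bit 16) in the MPU endpoint
         * semaphore so the next probe does not start in crashdump mode.
         */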
3766         real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
3767
3768         value = readl((void *)real_offset);
3769
3770         if (value & 0x00010000) {
3771                 value &= 0xfffeffff;
3772                 writel(value, (void *)real_offset);
3773         }
3774         beiscsi_unmap_pci_function(phba);
3775         pci_free_consistent(phba->pcidev,
3776                             phba->ctrl.mbox_mem_alloced.size,
3777                             phba->ctrl.mbox_mem_alloced.va,
3778                             phba->ctrl.mbox_mem_alloced.dma);
3779         iscsi_host_remove(phba->shost);
3780         pci_dev_put(phba->pcidev);
3781         iscsi_host_free(phba->shost);
3782 }
3783
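/*
 * beiscsi_msix_enable - request num_cpus + 1 MSI-X vectors (one per CPU
 * for the I/O event queues plus one extra).  If pci_enable_msix() fails,
 * msix_enabled stays false and the driver uses the legacy INTx interrupt.
 */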
3784 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3785 {
3786         int i, status;
3787
3788         for (i = 0; i <= phba->num_cpus; i++)
3789                 phba->msix_entries[i].entry = i;
3790
3791         status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3792                                  (phba->num_cpus + 1));
3793         if (!status)
3794                 phba->msix_enabled = true;
3795
3796         return;
3797 }
3798
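/*
 * beiscsi_dev_probe - PCI probe callback.
 *
 * Enables the PCI function, allocates the HBA, selects the ASIC generation,
 * optionally enables MSI-X and initialises the controller.  The first HBA
 * probed also checks the crashdump semaphore.  The port, the MCC tag pool,
 * the per-CPU blk_iopoll handlers and the IRQs are then brought up.
 */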
3799 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3800                                 const struct pci_device_id *id)
3801 {
3802         struct beiscsi_hba *phba = NULL;
3803         struct hwi_controller *phwi_ctrlr;
3804         struct hwi_context_memory *phwi_context;
3805         struct be_eq_obj *pbe_eq;
3806         int ret, num_cpus, i;
3807         u8 *real_offset = NULL;
3808         u32 value = 0;
3809
3810         ret = beiscsi_enable_pci(pcidev);
3811         if (ret < 0) {
3812                 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3813                         " Failed to enable pci device\n");
3814                 return ret;
3815         }
3816
3817         phba = beiscsi_hba_alloc(pcidev);
3818         if (!phba) {
3819                 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3820                         " Failed in beiscsi_hba_alloc\n");
3821                 goto disable_pci;
3822         }
3823
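        /* Select the ASIC generation from the PCI device ID. */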
3824         switch (pcidev->device) {
3825         case BE_DEVICE_ID1:
3826         case OC_DEVICE_ID1:
3827         case OC_DEVICE_ID2:
3828                 phba->generation = BE_GEN2;
3829                 break;
3830         case BE_DEVICE_ID2:
3831         case OC_DEVICE_ID3:
3832                 phba->generation = BE_GEN3;
3833                 break;
3834         default:
3835                 phba->generation = 0;
3836         }
3837
3838         if (enable_msix)
3839                 num_cpus = find_num_cpus();
3840         else
3841                 num_cpus = 1;
3842         phba->num_cpus = num_cpus;
3843         SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
3844
3845         if (enable_msix)
3846                 beiscsi_msix_enable(phba);
3847         ret = be_ctrl_init(phba, pcidev);
3848         if (ret) {
3849                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3850                                 " Failed in be_ctrl_init\n");
3851                 goto hba_free;
3852         }
3853
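        /*
         * Crashdump detection: only the first HBA probed checks the MPU
         * endpoint semaphore.  If bit 16 is already set, a previous driver
         * instance never cleared it (e.g. the kernel crashed), so load in
         * crashdump mode and soft-reset the function.  Otherwise set the
         * bit to mark the driver as loaded.
         */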
3854         if (!num_hba) {
3855                 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
3856                 value = readl((void *)real_offset);
3857                 if (value & 0x00010000) {
3858                         gcrashmode++;
3859                         shost_printk(KERN_ERR, phba->shost,
3860                                 "Loading Driver in crashdump mode\n");
3861                         ret = beiscsi_pci_soft_reset(phba);
3862                         if (ret) {
3863                                 shost_printk(KERN_ERR, phba->shost,
3864                                         "Reset Failed. Aborting Crashdump\n");
3865                                 goto hba_free;
3866                         }
3867                         ret = be_chk_reset_complete(phba);
3868                         if (ret) {
3869                                 shost_printk(KERN_ERR, phba->shost,
3870                                         "Failed to get out of reset. "
3871                                         "Aborting Crashdump\n");
3872                                 goto hba_free;
3873                         }
3874                 } else {
3875                         value |= 0x00010000;
3876                         writel(value, (void *)real_offset);
3877                         num_hba++;
3878                 }
3879         }
3880
3881         spin_lock_init(&phba->io_sgl_lock);
3882         spin_lock_init(&phba->mgmt_sgl_lock);
3883         spin_lock_init(&phba->isr_lock);
3884         ret = mgmt_get_fw_config(&phba->ctrl, phba);
3885         if (ret != 0) {
3886                 shost_printk(KERN_ERR, phba->shost,
3887                              "Error getting fw config\n");
3888                 goto free_port;
3889         }
3890         phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3891         beiscsi_get_params(phba);
3892         phba->shost->can_queue = phba->params.ios_per_ctrl;
3893         ret = beiscsi_init_port(phba);
3894         if (ret < 0) {
3895                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3896                              " Failed in beiscsi_init_port\n");
3897                 goto free_port;
3898         }
3899
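        /*
         * Initialise the MCC tag pool and the per-tag wait queues used to
         * track asynchronous mailbox (MCC) commands.
         */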
3900         for (i = 0; i < MAX_MCC_CMD ; i++) {
3901                 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
3902                 phba->ctrl.mcc_tag[i] = i + 1;
3903                 phba->ctrl.mcc_numtag[i + 1] = 0;
3904                 phba->ctrl.mcc_tag_available++;
3905         }
3906
3907         phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
3908
3909         snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3910                  phba->shost->host_no);
3911         phba->wq = create_workqueue(phba->wq_name);
3912         if (!phba->wq) {
3913                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3914                                 " Failed to allocate work queue\n");
3915                 goto free_twq;
3916         }
3917
3918         INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3919
3920         phwi_ctrlr = phba->phwi_ctrlr;
3921         phwi_context = phwi_ctrlr->phwi_ctxt;
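        /*
         * When blk_iopoll is available, register one iopoll handler per CPU
         * so completion-queue processing runs with a budget of
         * be_iopoll_budget entries per invocation.
         */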
3922         if (blk_iopoll_enabled) {
3923                 for (i = 0; i < phba->num_cpus; i++) {
3924                         pbe_eq = &phwi_context->be_eq[i];
3925                         blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3926                                         be_iopoll);
3927                         blk_iopoll_enable(&pbe_eq->iopoll);
3928                 }
3929         }
3930         ret = beiscsi_init_irqs(phba);
3931         if (ret < 0) {
3932                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3933                              " Failed in beiscsi_init_irqs\n");
3934                 goto free_blkenbld;
3935         }
3936         hwi_enable_intr(phba);
3937         SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
3938         return 0;
3939
3940 free_blkenbld:
3941         destroy_workqueue(phba->wq);
3942         if (blk_iopoll_enabled)
3943                 for (i = 0; i < phba->num_cpus; i++) {
3944                         pbe_eq = &phwi_context->be_eq[i];
3945                         blk_iopoll_disable(&pbe_eq->iopoll);
3946                 }
3947 free_twq:
3948         beiscsi_clean_port(phba);
3949         beiscsi_free_mem(phba);
3950 free_port:
3951         real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
3952
3953         value = readl((void *)real_offset);
3954
3955         if (value & 0x00010000) {
3956                 value &= 0xfffeffff;
3957                 writel(value, (void *)real_offset);
3958         }
3959
3960         pci_free_consistent(phba->pcidev,
3961                             phba->ctrl.mbox_mem_alloced.size,
3962                             phba->ctrl.mbox_mem_alloced.va,
3963                             phba->ctrl.mbox_mem_alloced.dma);
3964         beiscsi_unmap_pci_function(phba);
3965 hba_free:
3966         if (phba->msix_enabled)
3967                 pci_disable_msix(phba->pcidev);
3968         iscsi_host_remove(phba->shost);
3969         pci_dev_put(phba->pcidev);
3970         iscsi_host_free(phba->shost);
3971 disable_pci:
3972         pci_disable_device(pcidev);
3973         return ret;
3974 }
3975
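/*
 * iSCSI transport template registered with the iSCSI transport class.
 * CAP_DATA_PATH_OFFLOAD advertises that the data path is offloaded to the
 * adapter; session, connection and endpoint operations are mapped to the
 * beiscsi_* handlers below.
 */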
3976 struct iscsi_transport beiscsi_iscsi_transport = {
3977         .owner = THIS_MODULE,
3978         .name = DRV_NAME,
3979         .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
3980                 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
3981         .param_mask = ISCSI_MAX_RECV_DLENGTH |
3982                 ISCSI_MAX_XMIT_DLENGTH |
3983                 ISCSI_HDRDGST_EN |
3984                 ISCSI_DATADGST_EN |
3985                 ISCSI_INITIAL_R2T_EN |
3986                 ISCSI_MAX_R2T |
3987                 ISCSI_IMM_DATA_EN |
3988                 ISCSI_FIRST_BURST |
3989                 ISCSI_MAX_BURST |
3990                 ISCSI_PDU_INORDER_EN |
3991                 ISCSI_DATASEQ_INORDER_EN |
3992                 ISCSI_ERL |
3993                 ISCSI_CONN_PORT |
3994                 ISCSI_CONN_ADDRESS |
3995                 ISCSI_EXP_STATSN |
3996                 ISCSI_PERSISTENT_PORT |
3997                 ISCSI_PERSISTENT_ADDRESS |
3998                 ISCSI_TARGET_NAME | ISCSI_TPGT |
3999                 ISCSI_USERNAME | ISCSI_PASSWORD |
4000                 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
4001                 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
4002                 ISCSI_LU_RESET_TMO |
4003                 ISCSI_PING_TMO | ISCSI_RECV_TMO |
4004                 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
4005         .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
4006                                 ISCSI_HOST_INITIATOR_NAME,
4007         .create_session = beiscsi_session_create,
4008         .destroy_session = beiscsi_session_destroy,
4009         .create_conn = beiscsi_conn_create,
4010         .bind_conn = beiscsi_conn_bind,
4011         .destroy_conn = iscsi_conn_teardown,
4012         .set_param = beiscsi_set_param,
4013         .get_conn_param = beiscsi_conn_get_param,
4014         .get_session_param = iscsi_session_get_param,
4015         .get_host_param = beiscsi_get_host_param,
4016         .start_conn = beiscsi_conn_start,
4017         .stop_conn = iscsi_conn_stop,
4018         .send_pdu = iscsi_conn_send_pdu,
4019         .xmit_task = beiscsi_task_xmit,
4020         .cleanup_task = beiscsi_cleanup_task,
4021         .alloc_pdu = beiscsi_alloc_pdu,
4022         .parse_pdu_itt = beiscsi_parse_pdu,
4023         .get_stats = beiscsi_conn_get_stats,
4024         .ep_connect = beiscsi_ep_connect,
4025         .ep_poll = beiscsi_ep_poll,
4026         .ep_disconnect = beiscsi_ep_disconnect,
4027         .session_recovery_timedout = iscsi_session_recovery_timedout,
4028 };
4029
4030 static struct pci_driver beiscsi_pci_driver = {
4031         .name = DRV_NAME,
4032         .probe = beiscsi_dev_probe,
4033         .remove = beiscsi_remove,
4034         .id_table = beiscsi_pci_id_table
4035 };
4036
4037
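/*
 * Module init/exit: register the iSCSI transport first, then the PCI
 * driver; unwind in the reverse order on failure and on module unload.
 */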
4038 static int __init beiscsi_module_init(void)
4039 {
4040         int ret;
4041
4042         beiscsi_scsi_transport =
4043                         iscsi_register_transport(&beiscsi_iscsi_transport);
4044         if (!beiscsi_scsi_transport) {
4045                 SE_DEBUG(DBG_LVL_1,
4046                          "beiscsi_module_init - Unable to register beiscsi "
4047                          "transport.\n");
4048                 return -ENOMEM;
4049         }
4050         SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
4051                  &beiscsi_iscsi_transport);
4052
4053         ret = pci_register_driver(&beiscsi_pci_driver);
4054         if (ret) {
4055                 SE_DEBUG(DBG_LVL_1,
4056                          "beiscsi_module_init - Unable to register "
4057                          "beiscsi pci driver.\n");
4058                 goto unregister_iscsi_transport;
4059         }
4060         return 0;
4061
4062 unregister_iscsi_transport:
4063         iscsi_unregister_transport(&beiscsi_iscsi_transport);
4064         return ret;
4065 }
4066
4067 static void __exit beiscsi_module_exit(void)
4068 {
4069         pci_unregister_driver(&beiscsi_pci_driver);
4070         iscsi_unregister_transport(&beiscsi_iscsi_transport);
4071 }
4072
4073 module_init(beiscsi_module_init);
4074 module_exit(beiscsi_module_exit);