/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>

#include "vxge-traffic.h"
#include "vxge-config.h"

/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
struct __vxge_hw_channel*
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
                           enum __vxge_hw_channel_type type,
        u32 length, u32 per_dtr_space, void *userdata)
{
        struct __vxge_hw_channel *channel;
        struct __vxge_hw_device *hldev;
        int size = 0;
        u32 vp_id;

        hldev = vph->vpath->hldev;
        vp_id = vph->vpath->vp_id;

        switch (type) {
        case VXGE_HW_CHANNEL_TYPE_FIFO:
                size = sizeof(struct __vxge_hw_fifo);
                break;
        case VXGE_HW_CHANNEL_TYPE_RING:
                size = sizeof(struct __vxge_hw_ring);
                break;
        default:
                break;
        }

        channel = kzalloc(size, GFP_KERNEL);
        if (channel == NULL)
                goto exit0;
        INIT_LIST_HEAD(&channel->item);

        channel->common_reg = hldev->common_reg;
        channel->first_vp_id = hldev->first_vp_id;
        channel->type = type;
        channel->devh = hldev;
        channel->vph = vph;
        channel->userdata = userdata;
        channel->per_dtr_space = per_dtr_space;
        channel->length = length;
        channel->vp_id = vp_id;

        channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->work_arr == NULL)
                goto exit1;

        channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->free_arr == NULL)
                goto exit1;
        channel->free_ptr = length;

        channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->reserve_arr == NULL)
                goto exit1;
        channel->reserve_ptr = length;
        channel->reserve_top = 0;

        channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->orig_arr == NULL)
                goto exit1;

        return channel;
exit1:
        __vxge_hw_channel_free(channel);

exit0:
        return NULL;
}

/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
        kfree(channel->work_arr);
        kfree(channel->free_arr);
        kfree(channel->reserve_arr);
        kfree(channel->orig_arr);
        kfree(channel);
}

/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
        u32 i;
        struct __vxge_hw_virtualpath *vpath;

        vpath = channel->vph->vpath;

        if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
                for (i = 0; i < channel->length; i++)
                        channel->orig_arr[i] = channel->reserve_arr[i];
        }

        switch (channel->type) {
        case VXGE_HW_CHANNEL_TYPE_FIFO:
                vpath->fifoh = (struct __vxge_hw_fifo *)channel;
                channel->stats = &((struct __vxge_hw_fifo *)
                                channel)->stats->common_stats;
                break;
        case VXGE_HW_CHANNEL_TYPE_RING:
                vpath->ringh = (struct __vxge_hw_ring *)channel;
                channel->stats = &((struct __vxge_hw_ring *)
                                channel)->stats->common_stats;
                break;
        default:
                break;
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
        u32 i;

        for (i = 0; i < channel->length; i++) {
                if (channel->reserve_arr != NULL)
                        channel->reserve_arr[i] = channel->orig_arr[i];
                if (channel->free_arr != NULL)
                        channel->free_arr[i] = NULL;
                if (channel->work_arr != NULL)
                        channel->work_arr[i] = NULL;
        }
        channel->free_ptr = channel->length;
        channel->reserve_ptr = channel->length;
        channel->reserve_top = 0;
        channel->post_index = 0;
        channel->compl_index = 0;

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
        u16 cmd = 0;

        /* Set the PErr Response bit and SERR in the PCI command register. */
        pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
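        /* 0x140 == PCI_COMMAND_PARITY (0x40) | PCI_COMMAND_SERR (0x100) */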
        cmd |= 0x140;
        pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

        pci_save_state(hldev->pdev);

        return;
}

/*
 * __vxge_hw_device_register_poll
 * Polls the given register for up to the specified amount of time,
 * returning success as soon as the masked bits read back as zero.
 */
enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
        u64 val64;
        u32 i = 0;
        enum vxge_hw_status ret = VXGE_HW_FAIL;

        udelay(10);

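        /*
         * Two-phase poll: busy-wait in 100 us steps for up to ~1 ms first,
         * then fall back to 1 ms delays for up to max_millis iterations.
         */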
        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                udelay(100);
        } while (++i <= 9);

        i = 0;
        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                mdelay(1);
        } while (++i <= max_millis);

        return ret;
}

/*
 * __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * is in progress
 * This routine polls until the vpath reset-in-progress register reads zero
 */
enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
        enum vxge_hw_status status;
        status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
                        VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
                        VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        return status;
}

/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper and reads the toc pointer and returns the
 * memory mapped address of the toc
 */
struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc = NULL;
        enum vxge_hw_status status;

        struct vxge_hw_legacy_reg __iomem *legacy_reg =
                (struct vxge_hw_legacy_reg __iomem *)bar0;

        status = __vxge_hw_legacy_swapper_set(legacy_reg);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&legacy_reg->toc_first_pointer);
        toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
exit:
        return toc;
}

/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper and reads the toc pointer and initializes the
 * register location pointers in the device object. It then waits until the
 * vpath reset-in-progress bits clear, i.e. until the adapter has finished
 * initializing its registers.
 */
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;
        enum vxge_hw_status status = VXGE_HW_OK;

        hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

        hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
        if (hldev->toc_reg == NULL) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        val64 = readq(&hldev->toc_reg->toc_common_pointer);
        hldev->common_reg =
        (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

        val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
        hldev->mrpcim_reg =
                (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

        for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
                hldev->srpcim_reg[i] =
                        (struct vxge_hw_srpcim_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
                hldev->vpmgmt_reg[i] =
                (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
        }

        for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
                hldev->vpath_reg[i] =
                        (struct vxge_hw_vpath_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        val64 = readq(&hldev->toc_reg->toc_kdfc);

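        /* Only a KDFC region located behind BAR0 (BIR 0) is mapped here. */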
        switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
        case 0:
                hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
                        VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
                break;
        default:
                break;
        }

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                        (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
        return status;
}

/*
 * __vxge_hw_device_id_get
 * This routine reads the device id and revision numbers into the device
 * structure
 */
void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
{
        u64 val64;

        val64 = readq(&hldev->common_reg->titan_asic_id);
        hldev->device_id =
                (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);

        hldev->major_revision =
                (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);

        hldev->minor_revision =
                (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);

        return;
}

/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
        u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

        switch (host_type) {
        case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
                if (func_id == 0) {
                        access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                        VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                }
                break;
        case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
        case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
        case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
                break;
        case VXGE_HW_SR_VH_FUNCTION0:
        case VXGE_HW_VH_NORMAL_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        }

        return access_rights;
}
/*
 * __vxge_hw_device_is_privilaged
 * This routine checks whether the device function is privileged or not
 */

enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
{
        if (__vxge_hw_device_access_rights_get(host_type,
                func_id) &
                VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
                return VXGE_HW_OK;
        else
                return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}

/*
 * __vxge_hw_device_host_info_get
 * This routine reads the host type assignments, function id and access
 * rights into the device structure
 */
void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;

        val64 = readq(&hldev->common_reg->host_type_assignments);

        hldev->host_type =
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;

                hldev->func_id =
                        __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);

                hldev->access_rights = __vxge_hw_device_access_rights_get(
                        hldev->host_type, hldev->func_id);

                hldev->first_vp_id = i;
                break;
        }

        return;
}

/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
        int exp_cap;
        u16 lnk;

        /* Get the negotiated link width and speed from PCI config space */
        exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
        pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);

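        /* A Link Speed field value of 1 corresponds to a 2.5 GT/s link. */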
        if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
                return VXGE_HW_ERR_INVALID_PCI_INFO;

        switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
        case PCIE_LNK_WIDTH_RESRV:
        case PCIE_LNK_X1:
        case PCIE_LNK_X2:
        case PCIE_LNK_X4:
        case PCIE_LNK_X8:
                break;
        default:
                return VXGE_HW_ERR_INVALID_PCI_INFO;
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
                                hldev->func_id)) {
                /* Validate the pci-e link width and speed */
                status = __vxge_hw_verify_pci_e_info(hldev);
                if (status != VXGE_HW_OK)
                        goto exit;
        }

exit:
        return status;
}

/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information and the first mac address for
 * each vpath
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
                           struct vxge_hw_device_hw_info *hw_info)
{
        u32 i;
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc;
        struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
        struct vxge_hw_common_reg __iomem *common_reg;
        struct vxge_hw_vpath_reg __iomem *vpath_reg;
        struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
        enum vxge_hw_status status;

        memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

        toc = __vxge_hw_device_toc_get(bar0);
        if (toc == NULL) {
                status = VXGE_HW_ERR_CRITICAL;
                goto exit;
        }

        val64 = readq(&toc->toc_common_pointer);
        common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                (u64 __iomem *)&common_reg->vpath_rst_in_prog);
        if (status != VXGE_HW_OK)
                goto exit;

        hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

        val64 = readq(&common_reg->host_type_assignments);

        hw_info->host_type =
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpmgmt_pointer[i]);

                vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
                                (bar0 + val64);

                hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
                if (__vxge_hw_device_access_rights_get(hw_info->host_type,
                        hw_info->func_id) &
                        VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

                        val64 = readq(&toc->toc_mrpcim_pointer);

                        mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
                                        (bar0 + val64);

                        writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
                        wmb();
                }

                val64 = readq(&toc->toc_vpath_pointer[i]);

                vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

                hw_info->function_mode =
                        __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);

                status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                break;
        }

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpath_pointer[i]);
                vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

                status = __vxge_hw_vpath_addr_get(i, vpath_reg,
                                hw_info->mac_addrs[i],
                                hw_info->mac_addr_masks[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}

/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Initialize Titan device. All the arguments of this public API are 'IN'
 * except @devh, which is set on success. The driver cooperates with the
 * OS to find a new Titan device and locate its PCI and memory spaces.
 *
 * When done, the HW layer allocates sizeof(struct __vxge_hw_device) bytes
 * and uses the resulting device object to perform Titan hardware
 * initialization.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
        struct __vxge_hw_device **devh,
        struct vxge_hw_device_attr *attr,
        struct vxge_hw_device_config *device_config)
{
        u32 i;
        u32 nblocks = 0;
        struct __vxge_hw_device *hldev = NULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        status = __vxge_hw_device_config_check(device_config);
        if (status != VXGE_HW_OK)
                goto exit;

        hldev = (struct __vxge_hw_device *)
                        vmalloc(sizeof(struct __vxge_hw_device));
        if (hldev == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        memset(hldev, 0, sizeof(struct __vxge_hw_device));
        hldev->magic = VXGE_HW_DEVICE_MAGIC;

        vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

        /* apply config */
        memcpy(&hldev->config, device_config,
                sizeof(struct vxge_hw_device_config));

        hldev->bar0 = attr->bar0;
        hldev->pdev = attr->pdev;

        hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
        hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
        hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

        __vxge_hw_device_pci_e_init(hldev);

        status = __vxge_hw_device_reg_addr_get(hldev);
        if (status != VXGE_HW_OK) {
                vfree(hldev);
                goto exit;
        }
        __vxge_hw_device_id_get(hldev);

        __vxge_hw_device_host_info_get(hldev);

        /* Incrementing for stats blocks */
        nblocks++;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;

                if (device_config->vp_config[i].ring.enable ==
                        VXGE_HW_RING_ENABLE)
                        nblocks += device_config->vp_config[i].ring.ring_blocks;

                if (device_config->vp_config[i].fifo.enable ==
                        VXGE_HW_FIFO_ENABLE)
                        nblocks += device_config->vp_config[i].fifo.fifo_blocks;
                nblocks++;
        }

        if (__vxge_hw_blockpool_create(hldev,
                &hldev->block_pool,
                device_config->dma_blockpool_initial + nblocks,
                device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

                vxge_hw_device_terminate(hldev);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        status = __vxge_hw_device_initialize(hldev);

        if (status != VXGE_HW_OK) {
                vxge_hw_device_terminate(hldev);
                goto exit;
        }

        *devh = hldev;
exit:
        return status;
}
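
/*
 * Illustrative sketch (not part of the driver): a typical probe-time call
 * sequence, assuming the caller has mapped BAR0 and filled in a
 * struct vxge_hw_device_attr named attr and a device_config:
 *
 *      struct vxge_hw_device_hw_info hw_info;
 *      struct __vxge_hw_device *devh;
 *
 *      status = vxge_hw_device_hw_info_get(attr.bar0, &hw_info);
 *      if (status == VXGE_HW_OK)
 *              status = vxge_hw_device_initialize(&devh, &attr,
 *                                                 device_config);
 *      ...
 *      vxge_hw_device_terminate(devh);
 */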

/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
        vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

        hldev->magic = VXGE_HW_DEVICE_DEAD;
        __vxge_hw_blockpool_destroy(&hldev->block_pool);
        vfree(hldev);
}

/*
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
                        struct vxge_hw_device_stats_hw_info *hw_stats)
{
        u32 i;
        enum vxge_hw_status status = VXGE_HW_OK;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
                        (hldev->virtual_paths[i].vp_open ==
                                VXGE_HW_VP_NOT_OPEN))
                        continue;

                memcpy(hldev->virtual_paths[i].hw_stats_sav,
                                hldev->virtual_paths[i].hw_stats,
                                sizeof(struct vxge_hw_vpath_stats_hw_info));

                status = __vxge_hw_vpath_stats_get(
                        &hldev->virtual_paths[i],
                        hldev->virtual_paths[i].hw_stats);
        }

        memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
                        sizeof(struct vxge_hw_device_stats_hw_info));

        return status;
}

/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
                        struct __vxge_hw_device *hldev,
                        struct vxge_hw_device_stats_sw_info *sw_stats)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
                sizeof(struct vxge_hw_device_stats_sw_info));

        return status;
}

/*
 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
 *                           and offset and perform an operation
 * Get the statistics from the given location and offset.
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
                            u32 operation, u32 location, u32 offset, u64 *stat)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
                VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
                VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
                VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

        status = __vxge_hw_pio_mem_write64(val64,
                                &hldev->mrpcim_reg->xmac_stats_sys_cmd,
                                VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
                                hldev->config.device_poll_millis);

        if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
                *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
        else
                *stat = 0;
exit:
        return status;
}

/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Get the Statistics on aggregate port
 */
enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
                                   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
        u64 *val64;
        int i;
        u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
        enum vxge_hw_status status = VXGE_HW_OK;

        val64 = (u64 *)aggr_stats;

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

        for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
                status = vxge_hw_mrpcim_stats_access(hldev,
                                        VXGE_HW_STATS_OP_READ,
                                        VXGE_HW_STATS_LOC_AGGR,
                                        ((offset + (104 * port)) >> 3), val64);
                if (status != VXGE_HW_OK)
                        goto exit;

                offset += 8;
                val64++;
        }
exit:
        return status;
}

/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Get the Statistics on port
 */
enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
                                   struct vxge_hw_xmac_port_stats *port_stats)
{
        u64 *val64;
        enum vxge_hw_status status = VXGE_HW_OK;
        int i;
        u32 offset = 0x0;
        val64 = (u64 *) port_stats;

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

        for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
                status = vxge_hw_mrpcim_stats_access(hldev,
                                        VXGE_HW_STATS_OP_READ,
                                        VXGE_HW_STATS_LOC_AGGR,
                                        ((offset + (608 * port)) >> 3), val64);
                if (status != VXGE_HW_OK)
                        goto exit;

                offset += 8;
                val64++;
        }

exit:
        return status;
}

/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Get the XMAC Statistics
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
                              struct vxge_hw_xmac_stats *xmac_stats)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        u32 i;

        status = vxge_hw_device_xmac_aggr_stats_get(hldev,
                                        0, &xmac_stats->aggr_stats[0]);

        if (status != VXGE_HW_OK)
                goto exit;

        status = vxge_hw_device_xmac_aggr_stats_get(hldev,
                                1, &xmac_stats->aggr_stats[1]);
        if (status != VXGE_HW_OK)
                goto exit;

        for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

                status = vxge_hw_device_xmac_port_stats_get(hldev,
                                        i, &xmac_stats->port_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                status = __vxge_hw_vpath_xmac_tx_stats_get(
                                        &hldev->virtual_paths[i],
                                        &xmac_stats->vpath_tx_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_xmac_rx_stats_get(
                                        &hldev->virtual_paths[i],
                                        &xmac_stats->vpath_rx_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}

/*
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
 * This routine is used to dynamically change the debug output
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
                              enum vxge_debug_level level, u32 mask)
{
        if (hldev == NULL)
                return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
        defined(VXGE_DEBUG_ERR_MASK)
        hldev->debug_module_mask = mask;
        hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
        hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
        hldev->level_trace = level & VXGE_TRACE;
#endif
}

/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
        if (hldev == NULL)
                return VXGE_ERR;
        else
                return hldev->level_err;
#else
        return 0;
#endif
}

/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
        if (hldev == NULL)
                return VXGE_TRACE;
        else
                return hldev->level_trace;
#else
        return 0;
#endif
}
/*
 * vxge_hw_device_debug_mask_get - Get the debug mask
 * This routine returns the current debug mask set
 */
u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
        if (hldev == NULL)
                return 0;
        return hldev->debug_module_mask;
#else
        return 0;
#endif
}

/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the Pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
                                                 u32 port, u32 *tx, u32 *rx)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }

        if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
                status = VXGE_HW_ERR_INVALID_PORT;
                goto exit;
        }

        if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
                status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
                goto exit;
        }

        val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
        if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
                *tx = 1;
        if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
                *rx = 1;
exit:
        return status;
}

/*
 * vxge_hw_device_setpause_data - set/reset pause frame generation.
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 */

enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
                                                 u32 port, u32 tx, u32 rx)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }

        if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
                status = VXGE_HW_ERR_INVALID_PORT;
                goto exit;
        }

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
        if (tx)
                val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
        else
                val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
        if (rx)
                val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
        else
                val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

        writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
        return status;
}

u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
        int link_width, exp_cap;
        u16 lnk;

        exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
        pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
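        /* Negotiated link width occupies bits 9:4 of the Link Status word. */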
        link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
        return link_width;
}

/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of memory block
 */
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
        return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}

/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets index to a memory block
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
        *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}

/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * in RxD block
 * Sets the next block pointer in RxD block
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
        *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}

/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 *             first block
 * Returns the dma address of the first RxD block
 */
u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
        struct vxge_hw_mempool_dma *dma_object;

        dma_object = ring->mempool->memblocks_dma_arr;
        vxge_assert(dma_object != NULL);

        return dma_object->addr;
}

/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
                                               void *item)
{
        u32 memblock_idx;
        void *memblock;
        struct vxge_hw_mempool_dma *memblock_dma_object;
        ptrdiff_t dma_item_offset;

        /* get owner memblock index */
        memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

        /* get owner memblock by memblock index */
        memblock = mempoolh->memblocks_arr[memblock_idx];

        /* get memblock DMA object by memblock index */
        memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

        /* calculate offset in the memblock of this item */
        dma_item_offset = (u8 *)item - (u8 *)memblock;

        return memblock_dma_object->addr + dma_item_offset;
}

/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links an RxD block's next pointer to another RxD block
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
                                         struct __vxge_hw_ring *ring, u32 from,
                                         u32 to)
{
        u8 *to_item, *from_item;
        dma_addr_t to_dma;

        /* get "from" RxD block */
        from_item = mempoolh->items_arr[from];
        vxge_assert(from_item);

        /* get "to" RxD block */
        to_item = mempoolh->items_arr[to];
        vxge_assert(to_item);

        /* return address of the beginning of previous RxD block */
        to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

        /* set next pointer for this RxD block to point on
         * previous item's DMA start address */
        __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}

/*
 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
 * block callback
 * This function is the callback passed to __vxge_hw_mempool_create to create
 * the memory pool for RxD blocks
 */
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
                                  u32 memblock_index,
                                  struct vxge_hw_mempool_dma *dma_object,
                                  u32 index, u32 is_last)
{
        u32 i;
        void *item = mempoolh->items_arr[index];
        struct __vxge_hw_ring *ring =
                (struct __vxge_hw_ring *)mempoolh->userdata;

        /* format rxds array */
        for (i = 0; i < ring->rxds_per_block; i++) {
                void *rxdblock_priv;
                void *uld_priv;
                struct vxge_hw_ring_rxd_1 *rxdp;

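                /* reserve_arr is filled from the top down: block 0, RxD 0
                 * ends up at index reserve_ptr - 1 */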
                u32 reserve_index = ring->channel.reserve_ptr -
                                (index * ring->rxds_per_block + i + 1);
                u32 memblock_item_idx;

                ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
                                                i * ring->rxd_size;

                /* Note: memblock_item_idx is index of the item within
                 *       the memblock. For instance, in case of three RxD-blocks
                 *       per memblock this value can be 0, 1 or 2. */
                rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
                                        memblock_index, item,
                                        &memblock_item_idx);

                rxdp = (struct vxge_hw_ring_rxd_1 *)
                                ring->channel.reserve_arr[reserve_index];

                uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);

                /* pre-format Host_Control */
                rxdp->host_control = (u64)(size_t)uld_priv;
        }

        __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

        if (is_last) {
                /* link last one with first one */
                __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
        }

        if (index > 0) {
                /* link this RxD block with previous one */
                __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
        }

        return;
}

/*
 * vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from the reserve array to the work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
        void *rxd;
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &ring->channel;

        while (vxge_hw_channel_dtr_count(channel) > 0) {

                status = vxge_hw_ring_rxd_reserve(ring, &rxd);

                vxge_assert(status == VXGE_HW_OK);

                if (ring->rxd_init) {
                        status = ring->rxd_init(rxd, channel->userdata);
                        if (status != VXGE_HW_OK) {
                                vxge_hw_ring_rxd_free(ring, rxd);
                                goto exit;
                        }
                }

                vxge_hw_ring_rxd_post(ring, rxd);
        }
        status = VXGE_HW_OK;
exit:
        return status;
}

/*
 * __vxge_hw_ring_create - Create a Ring
 * This function creates a Ring and initializes it.
 */
enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
                      struct vxge_hw_ring_attr *attr)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_ring *ring;
        u32 ring_length;
        struct vxge_hw_ring_config *config;
        struct __vxge_hw_device *hldev;
        u32 vp_id;
        struct vxge_hw_mempool_cbs ring_mp_callback;

        if ((vp == NULL) || (attr == NULL)) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        hldev = vp->vpath->hldev;
        vp_id = vp->vpath->vp_id;

        config = &hldev->config.vp_config[vp_id].ring;

        ring_length = config->ring_blocks *
                        vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

        ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
                                                VXGE_HW_CHANNEL_TYPE_RING,
                                                ring_length,
                                                attr->per_rxd_space,
                                                attr->userdata);

        if (ring == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        vp->vpath->ringh = ring;
        ring->vp_id = vp_id;
        ring->vp_reg = vp->vpath->vp_reg;
        ring->common_reg = hldev->common_reg;
        ring->stats = &vp->vpath->sw_stats->ring_stats;
        ring->config = config;
        ring->callback = attr->callback;
        ring->rxd_init = attr->rxd_init;
        ring->rxd_term = attr->rxd_term;
        ring->buffer_mode = config->buffer_mode;
        ring->rxds_limit = config->rxds_limit;

        ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
        ring->rxd_priv_size =
                sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
        ring->per_rxd_space = attr->per_rxd_space;

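        /* round the per-RxD private area up to a whole number of
         * VXGE_CACHE_LINE_SIZE bytes */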
        ring->rxd_priv_size =
                ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
                VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

        /* how many RxDs can fit into one block. Depends on configured
         * buffer_mode. */
        ring->rxds_per_block =
                vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

        /* calculate actual RxD block private size */
        ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
        ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
        ring->mempool = __vxge_hw_mempool_create(hldev,
                                VXGE_HW_BLOCK_SIZE,
                                VXGE_HW_BLOCK_SIZE,
                                ring->rxdblock_priv_size,
                                ring->config->ring_blocks,
                                ring->config->ring_blocks,
                                &ring_mp_callback,
                                ring);

        if (ring->mempool == NULL) {
                __vxge_hw_ring_delete(vp);
                return VXGE_HW_ERR_OUT_OF_MEMORY;
        }

        status = __vxge_hw_channel_initialize(&ring->channel);
        if (status != VXGE_HW_OK) {
                __vxge_hw_ring_delete(vp);
                goto exit;
        }

        /* Note:
         * Specifying rxd_init callback means two things:
         * 1) rxds need to be initialized by driver at channel-open time;
         * 2) rxds need to be posted at channel-open time
         *    (that's what the initial_replenish() below does)
         * Currently we don't have a case when the 1) is done without the 2).
         */
        if (ring->rxd_init) {
                status = vxge_hw_ring_replenish(ring);
                if (status != VXGE_HW_OK) {
                        __vxge_hw_ring_delete(vp);
                        goto exit;
                }
        }

        /* initial replenish will increment the counter in its post() routine,
         * we have to reset it */
        ring->stats->common_stats.usage_cnt = 0;
exit:
        return status;
}

/*
 * __vxge_hw_ring_abort - Abort the ring
 * This function terminates all outstanding RxDs of the ring
 */
enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
        void *rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        for (;;) {
                vxge_hw_channel_dtr_try_complete(channel, &rxdh);

                if (rxdh == NULL)
                        break;

                vxge_hw_channel_dtr_complete(channel);

                if (ring->rxd_term)
                        ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
                                channel->userdata);

                vxge_hw_channel_dtr_free(channel, rxdh);
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_ring_reset - Resets the ring
 * This function resets the ring during vpath reset operation
 */
enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        __vxge_hw_ring_abort(ring);

        status = __vxge_hw_channel_reset(channel);

        if (status != VXGE_HW_OK)
                goto exit;

        if (ring->rxd_init) {
                status = vxge_hw_ring_replenish(ring);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}

/*
 * __vxge_hw_ring_delete - Removes the ring
 * This function frees up the memory pool and removes the ring
 */
enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
        struct __vxge_hw_ring *ring = vp->vpath->ringh;

        __vxge_hw_ring_abort(ring);

        if (ring->mempool)
                __vxge_hw_mempool_destroy(ring->mempool);

        vp->vpath->ringh = NULL;
        __vxge_hw_channel_free(&ring->channel);

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_mempool_grow
 * Grows the mempool by up to %num_allocate memory blocks, reporting how
 * many were actually added in %num_allocated.
 */
enum vxge_hw_status
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
                       u32 *num_allocated)
{
        u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
        u32 n_items = mempool->items_per_memblock;
        u32 start_block_idx = mempool->memblocks_allocated;
        u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
        enum vxge_hw_status status = VXGE_HW_OK;

        *num_allocated = 0;

        if (end_block_idx > mempool->memblocks_max) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        for (i = start_block_idx; i < end_block_idx; i++) {
                u32 j;
                u32 is_last = ((end_block_idx - 1) == i);
                struct vxge_hw_mempool_dma *dma_object =
                        mempool->memblocks_dma_arr + i;
                void *the_memblock;

                /* allocate memblock's private part. Each DMA memblock
                 * has a space allocated for item's private usage upon
                 * mempool's user request. Each time mempool grows, it will
                 * allocate new memblock and its private part at once.
                 * This helps to minimize memory usage a lot. */
                mempool->memblocks_priv_arr[i] =
                                vmalloc(mempool->items_priv_size * n_items);
                if (mempool->memblocks_priv_arr[i] == NULL) {
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
                        goto exit;
                }

                memset(mempool->memblocks_priv_arr[i], 0,
                             mempool->items_priv_size * n_items);

                /* allocate DMA-capable memblock */
                mempool->memblocks_arr[i] =
                        __vxge_hw_blockpool_malloc(mempool->devh,
                                mempool->memblock_size, dma_object);
                if (mempool->memblocks_arr[i] == NULL) {
                        vfree(mempool->memblocks_priv_arr[i]);
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
                        goto exit;
                }

                (*num_allocated)++;
                mempool->memblocks_allocated++;

                memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);

                the_memblock = mempool->memblocks_arr[i];

                /* fill the items hash array */
                for (j = 0; j < n_items; j++) {
                        u32 index = i * n_items + j;

                        if (first_time && index >= mempool->items_initial)
                                break;

                        mempool->items_arr[index] =
                                ((char *)the_memblock + j*mempool->item_size);

                        /* let caller to do more job on each item */
                        if (mempool->item_func_alloc != NULL)
                                mempool->item_func_alloc(mempool, i,
                                        dma_object, index, is_last);

                        mempool->items_current = index + 1;
                }

                if (first_time && mempool->items_current ==
                                        mempool->items_initial)
                        break;
        }
exit:
        return status;
}

/*
 * __vxge_hw_mempool_create
 * This function will create a memory pool object. The pool may grow but will
 * never shrink. The pool consists of a number of dynamically allocated blocks
 * with size enough to hold %items_initial number of items. Memory is
 * DMA-able but the client must map/unmap before interoperating with the
 * device.
 */
struct vxge_hw_mempool*
__vxge_hw_mempool_create(
        struct __vxge_hw_device *devh,
        u32 memblock_size,
        u32 item_size,
        u32 items_priv_size,
        u32 items_initial,
        u32 items_max,
        struct vxge_hw_mempool_cbs *mp_callback,
        void *userdata)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        u32 memblocks_to_allocate;
        struct vxge_hw_mempool *mempool = NULL;
        u32 allocated;

        if (memblock_size < item_size) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        mempool = (struct vxge_hw_mempool *)
                        vmalloc(sizeof(struct vxge_hw_mempool));
        if (mempool == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }
        memset(mempool, 0, sizeof(struct vxge_hw_mempool));

        mempool->devh                   = devh;
        mempool->memblock_size          = memblock_size;
        mempool->items_max              = items_max;
        mempool->items_initial          = items_initial;
        mempool->item_size              = item_size;
        mempool->items_priv_size        = items_priv_size;
        mempool->item_func_alloc        = mp_callback->item_func_alloc;
        mempool->userdata               = userdata;

        mempool->memblocks_allocated = 0;

        mempool->items_per_memblock = memblock_size / item_size;

        mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
                                        mempool->items_per_memblock;

        /* allocate array of memblocks */
        mempool->memblocks_arr =
                (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
        if (mempool->memblocks_arr == NULL) {
                __vxge_hw_mempool_destroy(mempool);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                mempool = NULL;
                goto exit;
        }
        memset(mempool->memblocks_arr, 0,
                sizeof(void *) * mempool->memblocks_max);

        /* allocate array of private parts of items per memblocks */
        mempool->memblocks_priv_arr =
                (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
        if (mempool->memblocks_priv_arr == NULL) {
                __vxge_hw_mempool_destroy(mempool);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                mempool = NULL;
                goto exit;
        }
        memset(mempool->memblocks_priv_arr, 0,
                    sizeof(void *) * mempool->memblocks_max);

        /* allocate array of memblocks DMA objects */
        mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
                vmalloc(sizeof(struct vxge_hw_mempool_dma) *
                        mempool->memblocks_max);

        if (mempool->memblocks_dma_arr == NULL) {
                __vxge_hw_mempool_destroy(mempool);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                mempool = NULL;
                goto exit;
        }
        memset(mempool->memblocks_dma_arr, 0,
                        sizeof(struct vxge_hw_mempool_dma) *
                        mempool->memblocks_max);

        /* allocate hash array of items */
        mempool->items_arr =
                (void **) vmalloc(sizeof(void *) * mempool->items_max);
        if (mempool->items_arr == NULL) {
                __vxge_hw_mempool_destroy(mempool);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
1627                 mempool = NULL;
1628                 goto exit;
1629         }
1630         memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
1631
1632         /* calculate initial number of memblocks */
1633         memblocks_to_allocate = (mempool->items_initial +
1634                                  mempool->items_per_memblock - 1) /
1635                                                 mempool->items_per_memblock;
1636
1637         /* pre-allocate the mempool */
1638         status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
1639                                         &allocated);
1640         if (status != VXGE_HW_OK) {
1641                 __vxge_hw_mempool_destroy(mempool);
1642                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1643                 mempool = NULL;
1644                 goto exit;
1645         }
1646
1647 exit:
1648         return mempool;
1649 }
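
/*
 * Usage sketch (illustrative only; the callback name and sizes below are
 * hypothetical, and an initialized struct __vxge_hw_device *devh is
 * assumed). A caller supplies an item-alloc callback plus matching
 * geometry:
 *
 *	static void my_item_alloc(struct vxge_hw_mempool *mempoolh,
 *			u32 memblock_index,
 *			struct vxge_hw_mempool_dma *dma_object,
 *			u32 index, u32 is_last)
 *	{
 *		... format the item at mempoolh->items_arr[index] ...
 *	}
 *
 *	struct vxge_hw_mempool_cbs cbs = { .item_func_alloc = my_item_alloc };
 *	struct vxge_hw_mempool *mp =
 *		__vxge_hw_mempool_create(devh, 4096, 256, 0, 16, 64,
 *					 &cbs, NULL);
 *	if (mp == NULL)
 *		... out of memory ...
 *	...
 *	__vxge_hw_mempool_destroy(mp);
 */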
1650
1651 /*
1652  * vxge_hw_mempool_destroy
1653  */
1654 void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1655 {
1656         u32 i, j;
1657         struct __vxge_hw_device *devh = mempool->devh;
1658
1659         for (i = 0; i < mempool->memblocks_allocated; i++) {
1660                 struct vxge_hw_mempool_dma *dma_object;
1661
1662                 vxge_assert(mempool->memblocks_arr[i]);
1663                 vxge_assert(mempool->memblocks_dma_arr + i);
1664
1665                 dma_object = mempool->memblocks_dma_arr + i;
1666
1667                 for (j = 0; j < mempool->items_per_memblock; j++) {
1668                         u32 index = i * mempool->items_per_memblock + j;
1669
1670                         /* skip the last, partially filled (if any) memblock */
1671                         if (index >= mempool->items_current)
1672                                 break;
1673                 }
1674
1675                 vfree(mempool->memblocks_priv_arr[i]);
1676
1677                 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
1678                                 mempool->memblock_size, dma_object);
1679         }
1680
1681         vfree(mempool->items_arr);
1682
1683         vfree(mempool->memblocks_dma_arr);
1684
1685         vfree(mempool->memblocks_priv_arr);
1686
1687         vfree(mempool->memblocks_arr);
1688
1689         vfree(mempool);
1690 }
1691
1692 /*
1693  * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1694  * Check the fifo configuration
1695  */
1696 enum vxge_hw_status
1697 __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1698 {
1699         if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1700              (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1701                 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1702
1703         return VXGE_HW_OK;
1704 }
1705
1706 /*
1707  * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1708  * Check the vpath configuration
1709  */
1710 enum vxge_hw_status
1711 __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1712 {
1713         enum vxge_hw_status status;
1714
1715         if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1716                 (vp_config->min_bandwidth >
1717                                         VXGE_HW_VPATH_BANDWIDTH_MAX))
1718                 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1719
1720         status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1721         if (status != VXGE_HW_OK)
1722                 return status;
1723
1724         if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1725                 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1726                 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1727                 return VXGE_HW_BADCFG_VPATH_MTU;
1728
1729         if ((vp_config->rpa_strip_vlan_tag !=
1730                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1731                 (vp_config->rpa_strip_vlan_tag !=
1732                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1733                 (vp_config->rpa_strip_vlan_tag !=
1734                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1735                 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1736
1737         return VXGE_HW_OK;
1738 }
1739
1740 /*
1741  * __vxge_hw_device_config_check - Check device configuration.
1742  * Check the device configuration
1743  */
1744 enum vxge_hw_status
1745 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1746 {
1747         u32 i;
1748         enum vxge_hw_status status;
1749
1750         if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1751            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1752            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1753            (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1754                 return VXGE_HW_BADCFG_INTR_MODE;
1755
1756         if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1757            (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1758                 return VXGE_HW_BADCFG_RTS_MAC_EN;
1759
1760         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1761                 status = __vxge_hw_device_vpath_config_check(
1762                                 &new_config->vp_config[i]);
1763                 if (status != VXGE_HW_OK)
1764                         return status;
1765         }
1766
1767         return VXGE_HW_OK;
1768 }
1769
1770 /*
1771  * vxge_hw_device_config_default_get - Initialize device config with defaults.
1772  * Initialize Titan device config with default values.
1773  */
1774 enum vxge_hw_status __devinit
1775 vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
1776 {
1777         u32 i;
1778
1779         device_config->dma_blockpool_initial =
1780                                         VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
1781         device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
1782         device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
1783         device_config->rth_en = VXGE_HW_RTH_DEFAULT;
1784         device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
1785         device_config->device_poll_millis =  VXGE_HW_DEF_DEVICE_POLL_MILLIS;
1786         device_config->rts_mac_en =  VXGE_HW_RTS_MAC_DEFAULT;
1787
1788         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1789
1790                 device_config->vp_config[i].vp_id = i;
1791
1792                 device_config->vp_config[i].min_bandwidth =
1793                                 VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
1794
1795                 device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
1796
1797                 device_config->vp_config[i].ring.ring_blocks =
1798                                 VXGE_HW_DEF_RING_BLOCKS;
1799
1800                 device_config->vp_config[i].ring.buffer_mode =
1801                                 VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
1802
1803                 device_config->vp_config[i].ring.scatter_mode =
1804                                 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
1805
1806                 device_config->vp_config[i].ring.rxds_limit =
1807                                 VXGE_HW_DEF_RING_RXDS_LIMIT;
1808
1809                 device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
1810
1811                 device_config->vp_config[i].fifo.fifo_blocks =
1812                                 VXGE_HW_MIN_FIFO_BLOCKS;
1813
1814                 device_config->vp_config[i].fifo.max_frags =
1815                                 VXGE_HW_MAX_FIFO_FRAGS;
1816
1817                 device_config->vp_config[i].fifo.memblock_size =
1818                                 VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
1819
1820                 device_config->vp_config[i].fifo.alignment_size =
1821                                 VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
1822
1823                 device_config->vp_config[i].fifo.intr =
1824                                 VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
1825
1826                 device_config->vp_config[i].fifo.no_snoop_bits =
1827                                 VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
1828                 device_config->vp_config[i].tti.intr_enable =
1829                                 VXGE_HW_TIM_INTR_DEFAULT;
1830
1831                 device_config->vp_config[i].tti.btimer_val =
1832                                 VXGE_HW_USE_FLASH_DEFAULT;
1833
1834                 device_config->vp_config[i].tti.timer_ac_en =
1835                                 VXGE_HW_USE_FLASH_DEFAULT;
1836
1837                 device_config->vp_config[i].tti.timer_ci_en =
1838                                 VXGE_HW_USE_FLASH_DEFAULT;
1839
1840                 device_config->vp_config[i].tti.timer_ri_en =
1841                                 VXGE_HW_USE_FLASH_DEFAULT;
1842
1843                 device_config->vp_config[i].tti.rtimer_val =
1844                                 VXGE_HW_USE_FLASH_DEFAULT;
1845
1846                 device_config->vp_config[i].tti.util_sel =
1847                                 VXGE_HW_USE_FLASH_DEFAULT;
1848
1849                 device_config->vp_config[i].tti.ltimer_val =
1850                                 VXGE_HW_USE_FLASH_DEFAULT;
1851
1852                 device_config->vp_config[i].tti.urange_a =
1853                                 VXGE_HW_USE_FLASH_DEFAULT;
1854
1855                 device_config->vp_config[i].tti.uec_a =
1856                                 VXGE_HW_USE_FLASH_DEFAULT;
1857
1858                 device_config->vp_config[i].tti.urange_b =
1859                                 VXGE_HW_USE_FLASH_DEFAULT;
1860
1861                 device_config->vp_config[i].tti.uec_b =
1862                                 VXGE_HW_USE_FLASH_DEFAULT;
1863
1864                 device_config->vp_config[i].tti.urange_c =
1865                                 VXGE_HW_USE_FLASH_DEFAULT;
1866
1867                 device_config->vp_config[i].tti.uec_c =
1868                                 VXGE_HW_USE_FLASH_DEFAULT;
1869
1870                 device_config->vp_config[i].tti.uec_d =
1871                                 VXGE_HW_USE_FLASH_DEFAULT;
1872
1873                 device_config->vp_config[i].rti.intr_enable =
1874                                 VXGE_HW_TIM_INTR_DEFAULT;
1875
1876                 device_config->vp_config[i].rti.btimer_val =
1877                                 VXGE_HW_USE_FLASH_DEFAULT;
1878
1879                 device_config->vp_config[i].rti.timer_ac_en =
1880                                 VXGE_HW_USE_FLASH_DEFAULT;
1881
1882                 device_config->vp_config[i].rti.timer_ci_en =
1883                                 VXGE_HW_USE_FLASH_DEFAULT;
1884
1885                 device_config->vp_config[i].rti.timer_ri_en =
1886                                 VXGE_HW_USE_FLASH_DEFAULT;
1887
1888                 device_config->vp_config[i].rti.rtimer_val =
1889                                 VXGE_HW_USE_FLASH_DEFAULT;
1890
1891                 device_config->vp_config[i].rti.util_sel =
1892                                 VXGE_HW_USE_FLASH_DEFAULT;
1893
1894                 device_config->vp_config[i].rti.ltimer_val =
1895                                 VXGE_HW_USE_FLASH_DEFAULT;
1896
1897                 device_config->vp_config[i].rti.urange_a =
1898                                 VXGE_HW_USE_FLASH_DEFAULT;
1899
1900                 device_config->vp_config[i].rti.uec_a =
1901                                 VXGE_HW_USE_FLASH_DEFAULT;
1902
1903                 device_config->vp_config[i].rti.urange_b =
1904                                 VXGE_HW_USE_FLASH_DEFAULT;
1905
1906                 device_config->vp_config[i].rti.uec_b =
1907                                 VXGE_HW_USE_FLASH_DEFAULT;
1908
1909                 device_config->vp_config[i].rti.urange_c =
1910                                 VXGE_HW_USE_FLASH_DEFAULT;
1911
1912                 device_config->vp_config[i].rti.uec_c =
1913                                 VXGE_HW_USE_FLASH_DEFAULT;
1914
1915                 device_config->vp_config[i].rti.uec_d =
1916                                 VXGE_HW_USE_FLASH_DEFAULT;
1917
1918                 device_config->vp_config[i].mtu =
1919                                 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
1920
1921                 device_config->vp_config[i].rpa_strip_vlan_tag =
1922                         VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
1923         }
1924
1925         return VXGE_HW_OK;
1926 }
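
/*
 * Usage sketch (illustrative only): the usual default-then-validate
 * pattern a caller might follow before device initialization:
 *
 *	struct vxge_hw_device_config config;
 *
 *	vxge_hw_device_config_default_get(&config);
 *	config.intr_mode = VXGE_HW_INTR_MODE_MSIX;
 *	if (__vxge_hw_device_config_check(&config) != VXGE_HW_OK)
 *		... reject the configuration ...
 */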
1927
1928 /*
1929  * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
1930  * Set the swapper bits appropriately for the legacy section.
1931  */
1932 enum vxge_hw_status
1933 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
1934 {
1935         u64 val64;
1936         enum vxge_hw_status status = VXGE_HW_OK;
1937
1938         val64 = readq(&legacy_reg->toc_swapper_fb);
1939
1940         wmb();
1941
1942         switch (val64) {
1943
1944         case VXGE_HW_SWAPPER_INITIAL_VALUE:
1945                 return status;
1946
1947         case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
1948                 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
1949                         &legacy_reg->pifm_rd_swap_en);
1950                 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
1951                         &legacy_reg->pifm_rd_flip_en);
1952                 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
1953                         &legacy_reg->pifm_wr_swap_en);
1954                 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
1955                         &legacy_reg->pifm_wr_flip_en);
1956                 break;
1957
1958         case VXGE_HW_SWAPPER_BYTE_SWAPPED:
1959                 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
1960                         &legacy_reg->pifm_rd_swap_en);
1961                 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
1962                         &legacy_reg->pifm_wr_swap_en);
1963                 break;
1964
1965         case VXGE_HW_SWAPPER_BIT_FLIPPED:
1966                 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
1967                         &legacy_reg->pifm_rd_flip_en);
1968                 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
1969                         &legacy_reg->pifm_wr_flip_en);
1970                 break;
1971         }
1972
1973         wmb();
1974
1975         val64 = readq(&legacy_reg->toc_swapper_fb);
1976
1977         if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
1978                 status = VXGE_HW_ERR_SWAPPER_CTRL;
1979
1980         return status;
1981 }
1982
1983 /*
1984  * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
1985  * Set the swapper bits appropriately for the vpath.
1986  */
1987 enum vxge_hw_status
1988 __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
1989 {
1990 #ifndef __BIG_ENDIAN
1991         u64 val64;
1992
1993         val64 = readq(&vpath_reg->vpath_general_cfg1);
1994         wmb();
1995         val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
1996         writeq(val64, &vpath_reg->vpath_general_cfg1);
1997         wmb();
1998 #endif
1999         return VXGE_HW_OK;
2000 }
2001
2002 /*
2003  * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
2004  * Set the swapper bits appropriately for the vpath.
2005  */
2006 enum vxge_hw_status
2007 __vxge_hw_kdfc_swapper_set(
2008         struct vxge_hw_legacy_reg __iomem *legacy_reg,
2009         struct vxge_hw_vpath_reg __iomem *vpath_reg)
2010 {
2011         u64 val64;
2012
2013         val64 = readq(&legacy_reg->pifm_wr_swap_en);
2014
2015         if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
2016                 val64 = readq(&vpath_reg->kdfcctl_cfg0);
2017                 wmb();
2018
2019                 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
2020                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1  |
2021                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
2022
2023                 writeq(val64, &vpath_reg->kdfcctl_cfg0);
2024                 wmb();
2025         }
2026
2027         return VXGE_HW_OK;
2028 }
2029
2030 /*
2031  * vxge_hw_mgmt_device_config - Retrieve device configuration.
2032  * Get device configuration. Allows retrieving, at run time, the configuration
2033  * values that were used to initialize and configure the device.
2034  */
2035 enum vxge_hw_status
2036 vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
2037                            struct vxge_hw_device_config *dev_config, int size)
2038 {
2040         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
2041                 return VXGE_HW_ERR_INVALID_DEVICE;
2042
2043         if (size != sizeof(struct vxge_hw_device_config))
2044                 return VXGE_HW_ERR_VERSION_CONFLICT;
2045
2046         memcpy(dev_config, &hldev->config,
2047                 sizeof(struct vxge_hw_device_config));
2048
2049         return VXGE_HW_OK;
2050 }
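
/*
 * Usage sketch (illustrative only, assuming a valid hldev): the size
 * argument doubles as a version check, so callers pass the sizeof their
 * own config structure:
 *
 *	struct vxge_hw_device_config cfg;
 *	enum vxge_hw_status st;
 *
 *	st = vxge_hw_mgmt_device_config(hldev, &cfg, sizeof(cfg));
 *	if (st == VXGE_HW_ERR_VERSION_CONFLICT)
 *		... caller was built against a different config layout ...
 */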
2051
2052 /*
2053  * vxge_hw_mgmt_reg_read - Read Titan register.
2054  */
2055 enum vxge_hw_status
2056 vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
2057                       enum vxge_hw_mgmt_reg_type type,
2058                       u32 index, u32 offset, u64 *value)
2059 {
2060         enum vxge_hw_status status = VXGE_HW_OK;
2061
2062         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2063                 status = VXGE_HW_ERR_INVALID_DEVICE;
2064                 goto exit;
2065         }
2066
2067         switch (type) {
2068         case vxge_hw_mgmt_reg_type_legacy:
2069                 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2070                         status = VXGE_HW_ERR_INVALID_OFFSET;
2071                         break;
2072                 }
2073                 *value = readq((void __iomem *)hldev->legacy_reg + offset);
2074                 break;
2075         case vxge_hw_mgmt_reg_type_toc:
2076                 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2077                         status = VXGE_HW_ERR_INVALID_OFFSET;
2078                         break;
2079                 }
2080                 *value = readq((void __iomem *)hldev->toc_reg + offset);
2081                 break;
2082         case vxge_hw_mgmt_reg_type_common:
2083                 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2084                         status = VXGE_HW_ERR_INVALID_OFFSET;
2085                         break;
2086                 }
2087                 *value = readq((void __iomem *)hldev->common_reg + offset);
2088                 break;
2089         case vxge_hw_mgmt_reg_type_mrpcim:
2090                 if (!(hldev->access_rights &
2091                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2092                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2093                         break;
2094                 }
2095                 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2096                         status = VXGE_HW_ERR_INVALID_OFFSET;
2097                         break;
2098                 }
2099                 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
2100                 break;
2101         case vxge_hw_mgmt_reg_type_srpcim:
2102                 if (!(hldev->access_rights &
2103                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2104                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2105                         break;
2106                 }
2107                 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2108                         status = VXGE_HW_ERR_INVALID_INDEX;
2109                         break;
2110                 }
2111                 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2112                         status = VXGE_HW_ERR_INVALID_OFFSET;
2113                         break;
2114                 }
2115                 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
2116                                 offset);
2117                 break;
2118         case vxge_hw_mgmt_reg_type_vpmgmt:
2119                 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2120                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2121                         status = VXGE_HW_ERR_INVALID_INDEX;
2122                         break;
2123                 }
2124                 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2125                         status = VXGE_HW_ERR_INVALID_OFFSET;
2126                         break;
2127                 }
2128                 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
2129                                 offset);
2130                 break;
2131         case vxge_hw_mgmt_reg_type_vpath:
2132                 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
2133                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2134                         status = VXGE_HW_ERR_INVALID_INDEX;
2135                         break;
2136                 }
2141                 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2142                         status = VXGE_HW_ERR_INVALID_OFFSET;
2143                         break;
2144                 }
2145                 *value = readq((void __iomem *)hldev->vpath_reg[index] +
2146                                 offset);
2147                 break;
2148         default:
2149                 status = VXGE_HW_ERR_INVALID_TYPE;
2150                 break;
2151         }
2152
2153 exit:
2154         return status;
2155 }
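
/*
 * Usage sketch (illustrative only; the offset chosen here is
 * hypothetical): reading the first 64-bit word of the common register
 * space:
 *
 *	u64 val;
 *
 *	if (vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
 *				  0, 0, &val) != VXGE_HW_OK)
 *		... invalid device, offset, index or privilege ...
 */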
2156
2157 /*
2158  * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
2159  */
2160 enum vxge_hw_status
2161 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
2162 {
2163         struct vxge_hw_vpmgmt_reg       __iomem *vpmgmt_reg;
2164         enum vxge_hw_status status = VXGE_HW_OK;
2165         int i = 0, j = 0;
2166
2167         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2168                 if (!((vpath_mask) & vxge_mBIT(i)))
2169                         continue;
2170                 vpmgmt_reg = hldev->vpmgmt_reg[i];
2171                 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
2172                         if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
2173                         & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
2174                                 return VXGE_HW_FAIL;
2175                 }
2176         }
2177         return status;
2178 }
2179 /*
2180  * vxge_hw_mgmt_reg_write - Write Titan register.
2181  */
2182 enum vxge_hw_status
2183 vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
2184                       enum vxge_hw_mgmt_reg_type type,
2185                       u32 index, u32 offset, u64 value)
2186 {
2187         enum vxge_hw_status status = VXGE_HW_OK;
2188
2189         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2190                 status = VXGE_HW_ERR_INVALID_DEVICE;
2191                 goto exit;
2192         }
2193
2194         switch (type) {
2195         case vxge_hw_mgmt_reg_type_legacy:
2196                 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2197                         status = VXGE_HW_ERR_INVALID_OFFSET;
2198                         break;
2199                 }
2200                 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
2201                 break;
2202         case vxge_hw_mgmt_reg_type_toc:
2203                 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2204                         status = VXGE_HW_ERR_INVALID_OFFSET;
2205                         break;
2206                 }
2207                 writeq(value, (void __iomem *)hldev->toc_reg + offset);
2208                 break;
2209         case vxge_hw_mgmt_reg_type_common:
2210                 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2211                         status = VXGE_HW_ERR_INVALID_OFFSET;
2212                         break;
2213                 }
2214                 writeq(value, (void __iomem *)hldev->common_reg + offset);
2215                 break;
2216         case vxge_hw_mgmt_reg_type_mrpcim:
2217                 if (!(hldev->access_rights &
2218                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2219                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2220                         break;
2221                 }
2222                 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2223                         status = VXGE_HW_ERR_INVALID_OFFSET;
2224                         break;
2225                 }
2226                 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
2227                 break;
2228         case vxge_hw_mgmt_reg_type_srpcim:
2229                 if (!(hldev->access_rights &
2230                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2231                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2232                         break;
2233                 }
2234                 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2235                         status = VXGE_HW_ERR_INVALID_INDEX;
2236                         break;
2237                 }
2238                 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2239                         status = VXGE_HW_ERR_INVALID_OFFSET;
2240                         break;
2241                 }
2242                 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
2243                         offset);
2245                 break;
2246         case vxge_hw_mgmt_reg_type_vpmgmt:
2247                 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2248                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2249                         status = VXGE_HW_ERR_INVALID_INDEX;
2250                         break;
2251                 }
2252                 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2253                         status = VXGE_HW_ERR_INVALID_OFFSET;
2254                         break;
2255                 }
2256                 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
2257                         offset);
2258                 break;
2259         case vxge_hw_mgmt_reg_type_vpath:
2260                 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
2261                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2262                         status = VXGE_HW_ERR_INVALID_INDEX;
2263                         break;
2264                 }
2265                 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2266                         status = VXGE_HW_ERR_INVALID_OFFSET;
2267                         break;
2268                 }
2269                 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
2270                         offset);
2271                 break;
2272         default:
2273                 status = VXGE_HW_ERR_INVALID_TYPE;
2274                 break;
2275         }
2276 exit:
2277         return status;
2278 }
2279
2280 /*
2281  * __vxge_hw_fifo_mempool_item_alloc - Allocate list blocks for the TxD
2282  * list (mempool callback)
2283  * This function is the callback passed to __vxge_hw_mempool_create to create
2284  * the memory pool for TxD lists
2285  */
2286 static void
2287 __vxge_hw_fifo_mempool_item_alloc(
2288         struct vxge_hw_mempool *mempoolh,
2289         u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
2290         u32 index, u32 is_last)
2291 {
2292         u32 memblock_item_idx;
2293         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
2294         struct vxge_hw_fifo_txd *txdp =
2295                 (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
2296         struct __vxge_hw_fifo *fifo =
2297                         (struct __vxge_hw_fifo *)mempoolh->userdata;
2298         void *memblock = mempoolh->memblocks_arr[memblock_index];
2299
2300         vxge_assert(txdp);
2301
2302         txdp->host_control = (u64) (size_t)
2303                 __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
2304                                             &memblock_item_idx);
2305
2306         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
2307
2308         vxge_assert(txdl_priv);
2309
2310         fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
2311
2312         /* pre-format HW's TxDL's private */
2313         txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
2314         txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
2315         txdl_priv->dma_handle = dma_object->handle;
2316         txdl_priv->memblock   = memblock;
2317         txdl_priv->first_txdp = txdp;
2318         txdl_priv->next_txdl_priv = NULL;
2319         txdl_priv->alloc_frags = 0;
2322 }
2323
2324 /*
2325  * __vxge_hw_fifo_create - Create a FIFO
2326  * This function creates a FIFO and initializes it.
2327  */
2328 enum vxge_hw_status
2329 __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2330                       struct vxge_hw_fifo_attr *attr)
2331 {
2332         enum vxge_hw_status status = VXGE_HW_OK;
2333         struct __vxge_hw_fifo *fifo;
2334         struct vxge_hw_fifo_config *config;
2335         u32 txdl_size, txdl_per_memblock;
2336         struct vxge_hw_mempool_cbs fifo_mp_callback;
2337         struct __vxge_hw_virtualpath *vpath;
2338
2339         if ((vp == NULL) || (attr == NULL)) {
2340                 status = VXGE_HW_ERR_INVALID_HANDLE;
2341                 goto exit;
2342         }
2343         vpath = vp->vpath;
2344         config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
2345
2346         txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
2347
2348         txdl_per_memblock = config->memblock_size / txdl_size;
2349
2350         fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
2351                                         VXGE_HW_CHANNEL_TYPE_FIFO,
2352                                         config->fifo_blocks * txdl_per_memblock,
2353                                         attr->per_txdl_space, attr->userdata);
2354
2355         if (fifo == NULL) {
2356                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2357                 goto exit;
2358         }
2359
2360         vpath->fifoh = fifo;
2361         fifo->nofl_db = vpath->nofl_db;
2362
2363         fifo->vp_id = vpath->vp_id;
2364         fifo->vp_reg = vpath->vp_reg;
2365         fifo->stats = &vpath->sw_stats->fifo_stats;
2366
2367         fifo->config = config;
2368
2369         /* apply "interrupts per txdl" attribute */
2370         fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
2371
2372         if (fifo->config->intr)
2373                 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
2374
2375         fifo->no_snoop_bits = config->no_snoop_bits;
2376
2377         /*
2378          * FIFO memory management strategy:
2379          *
2380          * TxDL split into three independent parts:
2381          *      - set of TxD's
2382          *      - TxD HW private part
2383          *      - driver private part
2384          *
2385          * Adaptive memory allocation is used, i.e. memory is allocated on
2386          * demand, in chunks sized to fit into one memory block.
2387          * One memory block may contain more than one TxDL.
2388          *
2389          * During "reserve" operations more memory can be allocated on demand
2390          * for example due to FIFO full condition.
2391          *
2392          * The pool of memblocks never shrinks except in __vxge_hw_fifo_delete,
2393          * which essentially stops the channel and frees its resources.
2394          */
2395
2396         /* TxDL common private size == TxDL private  +  driver private */
2397         fifo->priv_size =
2398                 sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
2399         fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
2400                         VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
2401
2402         fifo->per_txdl_space = attr->per_txdl_space;
2403
2404         /* store txdl size and txdls per memblock as computed above */
2405         fifo->txdl_size = txdl_size;
2406         fifo->txdl_per_memblock = txdl_per_memblock;
2407
2408         fifo->txdl_term = attr->txdl_term;
2409         fifo->callback = attr->callback;
2410
2411         if (fifo->txdl_per_memblock == 0) {
2412                 __vxge_hw_fifo_delete(vp);
2413                 status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
2414                 goto exit;
2415         }
2416
2417         fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
2418
2419         fifo->mempool =
2420                 __vxge_hw_mempool_create(vpath->hldev,
2421                         fifo->config->memblock_size,
2422                         fifo->txdl_size,
2423                         fifo->priv_size,
2424                         (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2425                         (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2426                         &fifo_mp_callback,
2427                         fifo);
2428
2429         if (fifo->mempool == NULL) {
2430                 __vxge_hw_fifo_delete(vp);
2431                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2432                 goto exit;
2433         }
2434
2435         status = __vxge_hw_channel_initialize(&fifo->channel);
2436         if (status != VXGE_HW_OK) {
2437                 __vxge_hw_fifo_delete(vp);
2438                 goto exit;
2439         }
2440
2441         vxge_assert(fifo->channel.reserve_ptr);
2442 exit:
2443         return status;
2444 }
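
/*
 * Note on the priv_size rounding above (illustrative numbers, assuming a
 * hypothetical VXGE_CACHE_LINE_SIZE of 128): a raw private size of 200
 * bytes rounds up as
 *
 *	((200 + 128 - 1) / 128) * 128 == 256
 *
 * so each TxDL's private area starts on a cache-line boundary.
 */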
2445
2446 /*
2447  * __vxge_hw_fifo_abort - Reclaim outstanding TxDs
2448  * This function terminates all posted TxDs of the fifo
2449  */
2450 enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2451 {
2452         void *txdlh;
2453
2454         for (;;) {
2455                 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
2456
2457                 if (txdlh == NULL)
2458                         break;
2459
2460                 vxge_hw_channel_dtr_complete(&fifo->channel);
2461
2462                 if (fifo->txdl_term) {
2463                         fifo->txdl_term(txdlh,
2464                         VXGE_HW_TXDL_STATE_POSTED,
2465                         fifo->channel.userdata);
2466                 }
2467
2468                 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
2469         }
2470
2471         return VXGE_HW_OK;
2472 }
2473
2474 /*
2475  * __vxge_hw_fifo_reset - Resets the fifo
2476  * This function resets the fifo during vpath reset operation
2477  */
2478 enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2479 {
2480         enum vxge_hw_status status = VXGE_HW_OK;
2481
2482         __vxge_hw_fifo_abort(fifo);
2483         status = __vxge_hw_channel_reset(&fifo->channel);
2484
2485         return status;
2486 }
2487
2488 /*
2489  * __vxge_hw_fifo_delete - Removes the FIFO
2490  * This function frees up the memory pool and removes the FIFO
2491  */
2492 enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2493 {
2494         struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2495
2496         __vxge_hw_fifo_abort(fifo);
2497
2498         if (fifo->mempool)
2499                 __vxge_hw_mempool_destroy(fifo->mempool);
2500
2501         vp->vpath->fifoh = NULL;
2502
2503         __vxge_hw_channel_free(&fifo->channel);
2504
2505         return VXGE_HW_OK;
2506 }
2507
2508 /*
2509  * __vxge_hw_vpath_pci_read - Read the content of given address
2510  *                          in pci config space.
2511  * Read from the vpath pci config space.
2512  */
2513 enum vxge_hw_status
2514 __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
2515                          u32 phy_func_0, u32 offset, u32 *val)
2516 {
2517         u64 val64;
2518         enum vxge_hw_status status = VXGE_HW_OK;
2519         struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2520
2521         val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
2522
2523         if (phy_func_0)
2524                 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
2525
2526         writeq(val64, &vp_reg->pci_config_access_cfg1);
2527         wmb();
2528         writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
2529                         &vp_reg->pci_config_access_cfg2);
2530         wmb();
2531
2532         status = __vxge_hw_device_register_poll(
2533                         &vp_reg->pci_config_access_cfg2,
2534                         VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2535
2536         if (status != VXGE_HW_OK)
2537                 goto exit;
2538
2539         val64 = readq(&vp_reg->pci_config_access_status);
2540
2541         if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
2542                 status = VXGE_HW_FAIL;
2543                 *val = 0;
2544         } else
2545                 *val = (u32)vxge_bVALn(val64, 32, 32);
2546 exit:
2547         return status;
2548 }
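
/*
 * Usage sketch (illustrative only): reading the first PCI config dword
 * (vendor/device ID) of physical function 0 through the vpath window:
 *
 *	u32 id;
 *
 *	if (__vxge_hw_vpath_pci_read(vpath, 1, 0, &id) == VXGE_HW_OK)
 *		... id holds the 32-bit config word at offset 0 ...
 */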
2549
2550 /*
2551  * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2552  * Returns the function number of the vpath.
2553  */
2554 u32
2555 __vxge_hw_vpath_func_id_get(u32 vp_id,
2556         struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2557 {
2558         u64 val64;
2559
2560         val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
2561
2562         return
2563          (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
2564 }
2565
2566 /*
2567  * __vxge_hw_read_rts_ds - Program RTS steering criteria
2568  */
2569 static inline void
2570 __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2571                       u64 dta_struct_sel)
2572 {
2573         writeq(0, &vpath_reg->rts_access_steer_ctrl);
2574         wmb();
2575         writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2576         writeq(0, &vpath_reg->rts_access_steer_data1);
2577         wmb();
2579 }
2580
2582 /*
2583  * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2584  * part number and product description.
2585  */
2586 enum vxge_hw_status
2587 __vxge_hw_vpath_card_info_get(
2588         u32 vp_id,
2589         struct vxge_hw_vpath_reg __iomem *vpath_reg,
2590         struct vxge_hw_device_hw_info *hw_info)
2591 {
2592         u32 i, j;
2593         u64 val64;
2594         u64 data1 = 0ULL;
2595         u64 data2 = 0ULL;
2596         enum vxge_hw_status status = VXGE_HW_OK;
2597         u8 *serial_number = hw_info->serial_number;
2598         u8 *part_number = hw_info->part_number;
2599         u8 *product_desc = hw_info->product_desc;
2600
2601         __vxge_hw_read_rts_ds(vpath_reg,
2602                 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
2603
2604         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2605                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2606                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2607                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2608                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2609                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2610
2611         status = __vxge_hw_pio_mem_write64(val64,
2612                                 &vpath_reg->rts_access_steer_ctrl,
2613                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2614                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2615
2616         if (status != VXGE_HW_OK)
2617                 return status;
2618
2619         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2620
2621         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2622                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2623                 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
2624
2625                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2626                 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
2627                 status = VXGE_HW_OK;
2628         } else
2629                 *serial_number = 0;
2630
2631         __vxge_hw_read_rts_ds(vpath_reg,
2632                         VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
2633
2634         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2635                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2636                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2637                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2638                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2639                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2640
2641         status = __vxge_hw_pio_mem_write64(val64,
2642                                 &vpath_reg->rts_access_steer_ctrl,
2643                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2644                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2645
2646         if (status != VXGE_HW_OK)
2647                 return status;
2648
2649         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2650
2651         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2652
2653                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2654                 ((u64 *)part_number)[0] = be64_to_cpu(data1);
2655
2656                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2657                 ((u64 *)part_number)[1] = be64_to_cpu(data2);
2658
2659                 status = VXGE_HW_OK;
2660
2661         } else
2662                 *part_number = 0;
2663
2664         j = 0;
2665
2666         for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
2667              i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
2668
2669                 __vxge_hw_read_rts_ds(vpath_reg, i);
2670
2671                 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2672                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2673                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2674                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2675                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2676                         VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2677
2678                 status = __vxge_hw_pio_mem_write64(val64,
2679                                 &vpath_reg->rts_access_steer_ctrl,
2680                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2681                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2682
2683                 if (status != VXGE_HW_OK)
2684                         return status;
2685
2686                 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2687
2688                 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2689
2690                         data1 = readq(&vpath_reg->rts_access_steer_data0);
2691                         ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
2692
2693                         data2 = readq(&vpath_reg->rts_access_steer_data1);
2694                         ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
2695
2696                         status = VXGE_HW_OK;
2697                 } else
2698                         *product_desc = 0;
2699         }
2700
2701         return status;
2702 }
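
/*
 * Usage sketch (illustrative only, assuming the firmware NUL-pads the
 * memo strings): serial_number and part_number are each filled as two
 * big-endian u64 chunks, product_desc as up to eight:
 *
 *	struct vxge_hw_device_hw_info hw_info;
 *
 *	memset(&hw_info, 0, sizeof(hw_info));
 *	if (__vxge_hw_vpath_card_info_get(vp_id, vpath_reg, &hw_info) ==
 *			VXGE_HW_OK)
 *		printk(KERN_INFO "S/N %s, P/N %s\n",
 *			hw_info.serial_number, hw_info.part_number);
 */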
2703
2704 /*
2705  * __vxge_hw_vpath_fw_ver_get - Get the fw version
2706  * Returns FW Version
2707  */
2708 enum vxge_hw_status
2709 __vxge_hw_vpath_fw_ver_get(
2710         u32 vp_id,
2711         struct vxge_hw_vpath_reg __iomem *vpath_reg,
2712         struct vxge_hw_device_hw_info *hw_info)
2713 {
2714         u64 val64;
2715         u64 data1 = 0ULL;
2716         u64 data2 = 0ULL;
2717         struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
2718         struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2719         struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2720         struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2721         enum vxge_hw_status status = VXGE_HW_OK;
2722
2723         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2724                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
2725                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2726                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2727                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2728                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2729
2730         status = __vxge_hw_pio_mem_write64(val64,
2731                                 &vpath_reg->rts_access_steer_ctrl,
2732                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2733                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2734
2735         if (status != VXGE_HW_OK)
2736                 goto exit;
2737
2738         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2739
2740         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2741
2742                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2743                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2744
2745                 fw_date->day =
2746                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2747                                                 data1);
2748                 fw_date->month =
2749                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2750                                                 data1);
2751                 fw_date->year =
2752                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2753                                                 data1);
2754
2755                 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
2756                         fw_date->month, fw_date->day, fw_date->year);
2757
2758                 fw_version->major =
2759                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
2760                 fw_version->minor =
2761                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
2762                 fw_version->build =
2763                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
2764
2765                 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2766                     fw_version->major, fw_version->minor, fw_version->build);
2767
2768                 flash_date->day =
2769                   (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
2770                 flash_date->month =
2771                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
2772                 flash_date->year =
2773                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
2774
2775                 snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
2776                         "%2.2d/%2.2d/%4.4d",
2777                         flash_date->month, flash_date->day, flash_date->year);
2778
2779                 flash_version->major =
2780                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
2781                 flash_version->minor =
2782                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
2783                 flash_version->build =
2784                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
2785
2786                 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2787                         flash_version->major, flash_version->minor,
2788                         flash_version->build);
2789
2790                 status = VXGE_HW_OK;
2791
2792         } else
2793                 status = VXGE_HW_FAIL;
2794 exit:
2795         return status;
2796 }
2797
2798 /*
2799  * __vxge_hw_vpath_pci_func_mode_get - Get the PCI function mode
2800  * Returns the PCI function mode
2801  */
2802 u64
2803 __vxge_hw_vpath_pci_func_mode_get(
2804         u32  vp_id,
2805         struct vxge_hw_vpath_reg __iomem *vpath_reg)
2806 {
2807         u64 val64;
2808         u64 data1 = 0ULL;
2809         enum vxge_hw_status status = VXGE_HW_OK;
2810
2811         __vxge_hw_read_rts_ds(vpath_reg,
2812                 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
2813
2814         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2815                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2816                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2817                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2818                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2819                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2820
2821         status = __vxge_hw_pio_mem_write64(val64,
2822                                 &vpath_reg->rts_access_steer_ctrl,
2823                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2824                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2825
2826         if (status != VXGE_HW_OK)
2827                 goto exit;
2828
2829         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2830
2831         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2832                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2833                 status = VXGE_HW_OK;
2834         } else {
2835                 data1 = 0;
2836                 status = VXGE_HW_FAIL;
2837         }
2838 exit:
2839         return data1;
2840 }
2841
2842 /**
2843  * vxge_hw_device_flick_link_led - Flick (blink) link LED.
2844  * @hldev: HW device.
2845  * @on_off: TRUE to turn flickering on, FALSE to turn it off
2846  *
2847  * Flicker the link LED.
2848  */
2849 enum vxge_hw_status
2850 vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
2851                                u64 on_off)
2852 {
2853         u64 val64;
2854         enum vxge_hw_status status = VXGE_HW_OK;
2855         struct vxge_hw_vpath_reg __iomem *vp_reg;
2856
2857         if (hldev == NULL) {
2858                 status = VXGE_HW_ERR_INVALID_DEVICE;
2859                 goto exit;
2860         }
2861
2862         vp_reg = hldev->vpath_reg[hldev->first_vp_id];
2863
2864         writeq(0, &vp_reg->rts_access_steer_ctrl);
2865         wmb();
2866         writeq(on_off, &vp_reg->rts_access_steer_data0);
2867         writeq(0, &vp_reg->rts_access_steer_data1);
2868         wmb();
2869
2870         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2871                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
2872                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2873                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2874                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2875                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2876
2877         status = __vxge_hw_pio_mem_write64(val64,
2878                                 &vp_reg->rts_access_steer_ctrl,
2879                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2880                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2881 exit:
2882         return status;
2883 }
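
/*
 * Usage sketch (illustrative only): blink the adapter LED to identify a
 * port, then restore it:
 *
 *	vxge_hw_device_flick_link_led(hldev, 1);
 *	... LED is flickering ...
 *	vxge_hw_device_flick_link_led(hldev, 0);
 */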
2884
2885 /*
2886  * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
2887  */
2888 enum vxge_hw_status
2889 __vxge_hw_vpath_rts_table_get(
2890         struct __vxge_hw_vpath_handle *vp,
2891         u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
2892 {
2893         u64 val64;
2894         struct __vxge_hw_virtualpath *vpath;
2895         struct vxge_hw_vpath_reg __iomem *vp_reg;
2896
2897         enum vxge_hw_status status = VXGE_HW_OK;
2898
2899         if (vp == NULL) {
2900                 status = VXGE_HW_ERR_INVALID_HANDLE;
2901                 goto exit;
2902         }
2903
2904         vpath = vp->vpath;
2905         vp_reg = vpath->vp_reg;
2906
2907         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
2908                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
2909                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2910                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
2911
2912         if ((rts_table ==
2913                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
2914             (rts_table ==
2915                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
2916             (rts_table ==
2917                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
2918             (rts_table ==
2919                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
2920                 val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
2921         }
2922
2923         status = __vxge_hw_pio_mem_write64(val64,
2924                                 &vp_reg->rts_access_steer_ctrl,
2925                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2926                                 vpath->hldev->config.device_poll_millis);
2927
2928         if (status != VXGE_HW_OK)
2929                 goto exit;
2930
2931         val64 = readq(&vp_reg->rts_access_steer_ctrl);
2932
2933         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2934
2935                 *data0 = readq(&vp_reg->rts_access_steer_data0);
2936
2937                 if ((rts_table ==
2938                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
2939                 (rts_table ==
2940                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
2941                         *data1 = readq(&vp_reg->rts_access_steer_data1);
2942                 }
2943                 status = VXGE_HW_OK;
2944         } else
2945                 status = VXGE_HW_FAIL;
2946 exit:
2947         return status;
2948 }
2949
2950 /*
2951  * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
2952  */
2953 enum vxge_hw_status
2954 __vxge_hw_vpath_rts_table_set(
2955         struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
2956         u32 offset, u64 data1, u64 data2)
2957 {
2958         u64 val64;
2959         struct __vxge_hw_virtualpath *vpath;
2960         enum vxge_hw_status status = VXGE_HW_OK;
2961         struct vxge_hw_vpath_reg __iomem *vp_reg;
2962
2963         if (vp == NULL) {
2964                 status = VXGE_HW_ERR_INVALID_HANDLE;
2965                 goto exit;
2966         }
2967
2968         vpath = vp->vpath;
2969         vp_reg = vpath->vp_reg;
2970
2971         writeq(data1, &vp_reg->rts_access_steer_data0);
2972         wmb();
2973
2974         if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
2975             (rts_table ==
2976                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
2977                 writeq(data2, &vp_reg->rts_access_steer_data1);
2978                 wmb();
2979         }
2980
2981         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
2982                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
2983                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2984                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
2985
2986         status = __vxge_hw_pio_mem_write64(val64,
2987                                 &vp_reg->rts_access_steer_ctrl,
2988                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2989                                 vpath->hldev->config.device_poll_millis);
2990
2991         if (status != VXGE_HW_OK)
2992                 goto exit;
2993
2994         val64 = readq(&vp_reg->rts_access_steer_ctrl);
2995
2996         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
2997                 status = VXGE_HW_OK;
2998         else
2999                 status = VXGE_HW_FAIL;
3000 exit:
3001         return status;
3002 }
3003
3004 /*
3005  * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
3006  *               from MAC address table.
3007  */
3008 enum vxge_hw_status
3009 __vxge_hw_vpath_addr_get(
3010         u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3011         u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
3012 {
3013         u32 i;
3014         u64 val64;
3015         u64 data1 = 0ULL;
3016         u64 data2 = 0ULL;
3017         enum vxge_hw_status status = VXGE_HW_OK;
3018
3019         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3020                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
3021                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3022                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
3023                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3024                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3025
3026         status = __vxge_hw_pio_mem_write64(val64,
3027                                 &vpath_reg->rts_access_steer_ctrl,
3028                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3029                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3030
3031         if (status != VXGE_HW_OK)
3032                 goto exit;
3033
3034         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3035
3036         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3037
3038                 data1 = readq(&vpath_reg->rts_access_steer_data0);
3039                 data2 = readq(&vpath_reg->rts_access_steer_data1);
3040
3041                 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
3042                 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3043                                                         data2);
3044
3045                 for (i = ETH_ALEN; i > 0; i--) {
3046                         macaddr[i-1] = (u8)(data1 & 0xFF);
3047                         data1 >>= 8;
3048
3049                         macaddr_mask[i-1] = (u8)(data2 & 0xFF);
3050                         data2 >>= 8;
3051                 }
3052                 status = VXGE_HW_OK;
3053         } else
3054                 status = VXGE_HW_FAIL;
3055 exit:
3056         return status;
3057 }
3058
3059 /*
3060  * vxge_hw_vpath_rts_rth_set - Set/configure RTH (receive traffic hashing).
3061  */
3062 enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3063                         struct __vxge_hw_vpath_handle *vp,
3064                         enum vxge_hw_rth_algoritms algorithm,
3065                         struct vxge_hw_rth_hash_types *hash_type,
3066                         u16 bucket_size)
3067 {
3068         u64 data0, data1;
3069         enum vxge_hw_status status = VXGE_HW_OK;
3070
3071         if (vp == NULL) {
3072                 status = VXGE_HW_ERR_INVALID_HANDLE;
3073                 goto exit;
3074         }
3075
3076         status = __vxge_hw_vpath_rts_table_get(vp,
3077                      VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3078                      VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3079                         0, &data0, &data1);
             if (status != VXGE_HW_OK)
                     goto exit;
3080
3081         data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3082                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3083
3084         data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3085         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3086         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3087
3088         if (hash_type->hash_type_tcpipv4_en)
3089                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3090
3091         if (hash_type->hash_type_ipv4_en)
3092                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3093
3094         if (hash_type->hash_type_tcpipv6_en)
3095                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3096
3097         if (hash_type->hash_type_ipv6_en)
3098                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3099
3100         if (hash_type->hash_type_tcpipv6ex_en)
3101                 data0 |=
3102                 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3103
3104         if (hash_type->hash_type_ipv6ex_en)
3105                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3106
3107         if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3108                 data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3109         else
3110                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3111
3112         status = __vxge_hw_vpath_rts_table_set(vp,
3113                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3114                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3115                 0, data0, 0);
3116 exit:
3117         return status;
3118 }
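
/*
 * Usage sketch (editorial): enable RTH with Jenkins hashing over TCP/IPv4
 * frames and a 2^8-entry bucket space. "my_vp" is a hypothetical handle
 * returned by vxge_hw_vpath_open(); RTH_ALG_JENKINS is one of
 * enum vxge_hw_rth_algoritms:
 *
 *        struct vxge_hw_rth_hash_types ht = {0};
 *
 *        ht.hash_type_tcpipv4_en = 1;
 *        status = vxge_hw_vpath_rts_rth_set(my_vp, RTH_ALG_JENKINS, &ht, 8);
 */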
3119
3120 static void
3121 vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3122                                 u16 flag, u8 *itable)
3123 {
3124         switch (flag) {
3125         case 1:
3126                 *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3127                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3128                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3129                         itable[j]);
                     break;
3130         case 2:
3131                 *data0 |=
3132                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3133                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3134                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3135                         itable[j]);
                     break;
3136         case 3:
3137                 *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3138                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3139                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3140                         itable[j]);
                     break;
3141         case 4:
3142                 *data1 |=
3143                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3144                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3145                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3146                         itable[j]);
                     break;
3147         default:
3148                 return;
3149         }
3150 }
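
/*
 * Note (editorial): each 64-bit steering word above holds two
 * indirection-table entries (ITEM0 and ITEM1), so one strobe of data0
 * plus data1 can program up to four buckets. The caller below therefore
 * gathers matching buckets with flag values 1..4 before issuing a
 * single write. The missing break statements in the switch above were a
 * fall-through bug: each flag must fill exactly one item.
 */
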
3151 /*
3152  * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3153  */
3154 enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3155                         struct __vxge_hw_vpath_handle **vpath_handles,
3156                         u32 vpath_count,
3157                         u8 *mtable,
3158                         u8 *itable,
3159                         u32 itable_size)
3160 {
3161         u32 i, j, action, rts_table;
3162         u64 data0;
3163         u64 data1;
3164         u32 max_entries;
3165         enum vxge_hw_status status = VXGE_HW_OK;
3166         struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3167
3168         if (vp == NULL) {
3169                 status = VXGE_HW_ERR_INVALID_HANDLE;
3170                 goto exit;
3171         }
3172
3173         max_entries = (((u32)1) << itable_size);
3174
3175         if (vp->vpath->hldev->config.rth_it_type
3176                                 == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3177                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3178                 rts_table =
3179                         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3180
3181                 for (j = 0; j < max_entries; j++) {
3182
3183                         data1 = 0;
3184
3185                         data0 =
3186                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3187                                 itable[j]);
3188
3189                         status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3190                                 action, rts_table, j, data0, data1);
3191
3192                         if (status != VXGE_HW_OK)
3193                                 goto exit;
3194                 }
3195
3196                 for (j = 0; j < max_entries; j++) {
3197
3198                         data1 = 0;
3199
3200                         data0 =
3201                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3202                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3203                                 itable[j]);
3204
3205                         status = __vxge_hw_vpath_rts_table_set(
3206                                 vpath_handles[mtable[itable[j]]], action,
3207                                 rts_table, j, data0, data1);
3208
3209                         if (status != VXGE_HW_OK)
3210                                 goto exit;
3211                 }
3212         } else {
3213                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3214                 rts_table =
3215                         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3216                 for (i = 0; i < vpath_count; i++) {
3217
3218                         for (j = 0; j < max_entries;) {
3219
3220                                 data0 = 0;
3221                                 data1 = 0;
3222
3223                                 while (j < max_entries) {
3224                                         if (mtable[itable[j]] != i) {
3225                                                 j++;
3226                                                 continue;
3227                                         }
3228                                         vxge_hw_rts_rth_data0_data1_get(j,
3229                                                 &data0, &data1, 1, itable);
3230                                         j++;
3231                                         break;
3232                                 }
3233
3234                                 while (j < max_entries) {
3235                                         if (mtable[itable[j]] != i) {
3236                                                 j++;
3237                                                 continue;
3238                                         }
3239                                         vxge_hw_rts_rth_data0_data1_get(j,
3240                                                 &data0, &data1, 2, itable);
3241                                         j++;
3242                                         break;
3243                                 }
3244
3245                                 while (j < max_entries) {
3246                                         if (mtable[itable[j]] != i) {
3247                                                 j++;
3248                                                 continue;
3249                                         }
3250                                         vxge_hw_rts_rth_data0_data1_get(j,
3251                                                 &data0, &data1, 3, itable);
3252                                         j++;
3253                                         break;
3254                                 }
3255
3256                                 while (j < max_entries) {
3257                                         if (mtable[itable[j]] != i) {
3258                                                 j++;
3259                                                 continue;
3260                                         }
3261                                         vxge_hw_rts_rth_data0_data1_get(j,
3262                                                 &data0, &data1, 4, itable);
3263                                         j++;
3264                                         break;
3265                                 }
3266
3267                                 if (data0 != 0) {
3268                                         status = __vxge_hw_vpath_rts_table_set(
3269                                                         vpath_handles[i],
3270                                                         action, rts_table,
3271                                                         0, data0, data1);
3272
3273                                         if (status != VXGE_HW_OK)
3274                                                 goto exit;
3275                                 }
3276                         }
3277                 }
3278         }
3279 exit:
3280         return status;
3281 }
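
/*
 * Usage sketch (editorial): spread a 2^8-entry indirection table over the
 * open vpaths. "vp_handles", "n_vpaths", "mtable" and "itable" are
 * hypothetical caller-owned arrays; mtable[] maps a bucket value to a
 * vpath index and itable[] lists the bucket values:
 *
 *        status = vxge_hw_vpath_rts_rth_itable_set(vp_handles, n_vpaths,
 *                                                  mtable, itable, 8);
 */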
3282
3283 /**
3284  * vxge_hw_vpath_check_leak - Check for memory leak
3285  * @ring: Handle to the ring object used for receive
3286  *
3287  * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
3288  * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
3289  * Returns: VXGE_HW_FAIL if a leak has occurred, VXGE_HW_OK otherwise.
3290  *
3291  */
3292 enum vxge_hw_status
3293 vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3294 {
3295         enum vxge_hw_status status = VXGE_HW_OK;
3296         u64 rxd_new_count, rxd_spat;
3297
3298         if (ring == NULL)
3299                 return status;
3300
3301         rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3302         rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3303         rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3304
3305         if (rxd_new_count >= rxd_spat)
3306                 status = VXGE_HW_FAIL;
3307
3308         return status;
3309 }
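
/*
 * Usage sketch (editorial): a receive poll routine might test the leak
 * condition and schedule a vpath reset when it trips; "my_ring" is a
 * hypothetical ring handle and the recovery hook is pseudo-code:
 *
 *        if (vxge_hw_vpath_check_leak(my_ring) == VXGE_HW_FAIL)
 *                request a vpath reset / recovery here
 */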
3310
3311 /*
3312  * __vxge_hw_vpath_mgmt_read
3313  * This routine reads the vpath_mgmt registers
3314  */
3315 static enum vxge_hw_status
3316 __vxge_hw_vpath_mgmt_read(
3317         struct __vxge_hw_device *hldev,
3318         struct __vxge_hw_virtualpath *vpath)
3319 {
3320         u32 i, mtu = 0, max_pyld = 0;
3321         u64 val64;
3322         enum vxge_hw_status status = VXGE_HW_OK;
3323
3324         for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3325
3326                 val64 = readq(&vpath->vpmgmt_reg->
3327                                 rxmac_cfg0_port_vpmgmt_clone[i]);
3328                 max_pyld =
3329                         (u32)
3330                         VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3331                         (val64);
3332                 if (mtu < max_pyld)
3333                         mtu = max_pyld;
3334         }
3335
3336         vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3337
3338         val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
3339
3340         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3341                 if (val64 & vxge_mBIT(i))
3342                         vpath->vsport_number = i;
3343         }
3344
3345         val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
3346
3347         if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
3348                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
3349         else
3350                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
3351
3352         return status;
3353 }
3354
3355 /*
3356  * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
3357  * This routine checks the vpath_rst_in_prog register to see if
3358  * the adapter has completed the reset process for the vpath
3359  */
3360 enum vxge_hw_status
3361 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3362 {
3363         enum vxge_hw_status status;
3364
3365         status = __vxge_hw_device_register_poll(
3366                         &vpath->hldev->common_reg->vpath_rst_in_prog,
3367                         VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
3368                                 1 << (16 - vpath->vp_id)),
3369                         vpath->hldev->config.device_poll_millis);
3370
3371         return status;
3372 }
3373
3374 /*
3375  * __vxge_hw_vpath_reset
3376  * This routine resets the vpath on the device
3377  */
3378 enum vxge_hw_status
3379 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3380 {
3381         u64 val64;
3382         enum vxge_hw_status status = VXGE_HW_OK;
3383
3384         val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
3385
3386         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
3387                                 &hldev->common_reg->cmn_rsthdlr_cfg0);
3388
3389         return status;
3390 }
3391
3392 /*
3393  * __vxge_hw_vpath_sw_reset
3394  * This routine resets the vpath structures
3395  */
3396 enum vxge_hw_status
3397 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3398 {
3399         enum vxge_hw_status status = VXGE_HW_OK;
3400         struct __vxge_hw_virtualpath *vpath;
3401
3402         vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
3403
3404         if (vpath->ringh) {
3405                 status = __vxge_hw_ring_reset(vpath->ringh);
3406                 if (status != VXGE_HW_OK)
3407                         goto exit;
3408         }
3409
3410         if (vpath->fifoh)
3411                 status = __vxge_hw_fifo_reset(vpath->fifoh);
3412 exit:
3413         return status;
3414 }
3415
3416 /*
3417  * __vxge_hw_vpath_prc_configure
3418  * This routine configures the prc registers of virtual path using the config
3419  * passed
3420  */
3421 void
3422 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3423 {
3424         u64 val64;
3425         struct __vxge_hw_virtualpath *vpath;
3426         struct vxge_hw_vp_config *vp_config;
3427         struct vxge_hw_vpath_reg __iomem *vp_reg;
3428
3429         vpath = &hldev->virtual_paths[vp_id];
3430         vp_reg = vpath->vp_reg;
3431         vp_config = vpath->vp_config;
3432
3433         if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
3434                 return;
3435
3436         val64 = readq(&vp_reg->prc_cfg1);
3437         val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
3438         writeq(val64, &vp_reg->prc_cfg1);
3439
3440         val64 = readq(&vpath->vp_reg->prc_cfg6);
3441         val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
3442         writeq(val64, &vpath->vp_reg->prc_cfg6);
3443
3444         val64 = readq(&vp_reg->prc_cfg7);
3445
3446         if (vpath->vp_config->ring.scatter_mode !=
3447                 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
3448
3449                 val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
3450
3451                 switch (vpath->vp_config->ring.scatter_mode) {
3452                 case VXGE_HW_RING_SCATTER_MODE_A:
3453                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3454                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
3455                         break;
3456                 case VXGE_HW_RING_SCATTER_MODE_B:
3457                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3458                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
3459                         break;
3460                 case VXGE_HW_RING_SCATTER_MODE_C:
3461                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3462                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
3463                         break;
3464                 }
3465         }
3466
3467         writeq(val64, &vp_reg->prc_cfg7);
3468
3469         writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
3470                                 __vxge_hw_ring_first_block_address_get(
3471                                         vpath->ringh) >> 3), &vp_reg->prc_cfg5);
3472
3473         val64 = readq(&vp_reg->prc_cfg4);
3474         val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
3475         val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
3476
3477         val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
3478                         VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
3479
3480         if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
3481                 val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
3482         else
3483                 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
3484
3485         writeq(val64, &vp_reg->prc_cfg4);
3486         return;
3487 }
3488
3489 /*
3490  * __vxge_hw_vpath_kdfc_configure
3491  * This routine configures the kdfc registers of virtual path using the
3492  * config passed
3493  */
3494 enum vxge_hw_status
3495 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3496 {
3497         u64 val64;
3498         u64 vpath_stride;
3499         enum vxge_hw_status status = VXGE_HW_OK;
3500         struct __vxge_hw_virtualpath *vpath;
3501         struct vxge_hw_vpath_reg __iomem *vp_reg;
3502
3503         vpath = &hldev->virtual_paths[vp_id];
3504         vp_reg = vpath->vp_reg;
3505         status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
3506
3507         if (status != VXGE_HW_OK)
3508                 goto exit;
3509
3510         val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
3511
3512         vpath->max_kdfc_db =
3513                 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
3514                         val64+1)/2;
3515
3516         if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3517
3518                 vpath->max_nofl_db = vpath->max_kdfc_db;
3519
3520                 if (vpath->max_nofl_db <
3521                         ((vpath->vp_config->fifo.memblock_size /
3522                         (vpath->vp_config->fifo.max_frags *
3523                         sizeof(struct vxge_hw_fifo_txd))) *
3524                         vpath->vp_config->fifo.fifo_blocks)) {
3525
3526                         return VXGE_HW_BADCFG_FIFO_BLOCKS;
3527                 }
3528                 val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
3529                                 (vpath->max_nofl_db*2)-1);
3530         }
3531
3532         writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
3533
3534         writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
3535                 &vp_reg->kdfc_fifo_trpl_ctrl);
3536
3537         val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
3538
3539         val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
3540                    VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
3541
3542         val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
3543                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
3544 #ifndef __BIG_ENDIAN
3545                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
3546 #endif
3547                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
3548
3549         writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
3550         writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
3551         wmb();
3552         vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
3553
3554         vpath->nofl_db =
3555                 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
3556                 (hldev->kdfc + (vp_id *
3557                 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
3558                                         vpath_stride)));
3559 exit:
3560         return status;
3561 }
3562
3563 /*
3564  * __vxge_hw_vpath_mac_configure
3565  * This routine configures the mac of virtual path using the config passed
3566  */
3567 enum vxge_hw_status
3568 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3569 {
3570         u64 val64;
3571         enum vxge_hw_status status = VXGE_HW_OK;
3572         struct __vxge_hw_virtualpath *vpath;
3573         struct vxge_hw_vp_config *vp_config;
3574         struct vxge_hw_vpath_reg __iomem *vp_reg;
3575
3576         vpath = &hldev->virtual_paths[vp_id];
3577         vp_reg = vpath->vp_reg;
3578         vp_config = vpath->vp_config;
3579
3580         writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
3581                         vpath->vsport_number), &vp_reg->xmac_vsport_choice);
3582
3583         if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
3584
3585                 val64 = readq(&vp_reg->xmac_rpa_vcfg);
3586
3587                 if (vp_config->rpa_strip_vlan_tag !=
3588                         VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
3589                         if (vp_config->rpa_strip_vlan_tag)
3590                                 val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3591                         else
3592                                 val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3593                 }
3594
3595                 writeq(val64, &vp_reg->xmac_rpa_vcfg);
3596                 val64 = readq(&vp_reg->rxmac_vcfg0);
3597
3598                 if (vp_config->mtu !=
3599                                 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
3600                         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
3601                         if ((vp_config->mtu  +
3602                                 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
3603                                 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3604                                         vp_config->mtu  +
3605                                         VXGE_HW_MAC_HEADER_MAX_SIZE);
3606                         else
3607                                 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3608                                         vpath->max_mtu);
3609                 }
3610
3611                 writeq(val64, &vp_reg->rxmac_vcfg0);
3612
3613                 val64 = readq(&vp_reg->rxmac_vcfg1);
3614
3615                 val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
3616                         VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
3617
3618                 if (hldev->config.rth_it_type ==
3619                                 VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
3620                         val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
3621                                 0x2) |
3622                                 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
3623                 }
3624
3625                 writeq(val64, &vp_reg->rxmac_vcfg1);
3626         }
3627         return status;
3628 }
3629
3630 /*
3631  * __vxge_hw_vpath_tim_configure
3632  * This routine configures the tim registers of virtual path using the config
3633  * passed
3634  */
3635 enum vxge_hw_status
3636 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3637 {
3638         u64 val64;
3639         enum vxge_hw_status status = VXGE_HW_OK;
3640         struct __vxge_hw_virtualpath *vpath;
3641         struct vxge_hw_vpath_reg __iomem *vp_reg;
3642         struct vxge_hw_vp_config *config;
3643
3644         vpath = &hldev->virtual_paths[vp_id];
3645         vp_reg = vpath->vp_reg;
3646         config = vpath->vp_config;
3647
3648         writeq((u64)0, &vp_reg->tim_dest_addr);
3649         writeq((u64)0, &vp_reg->tim_vpath_map);
3650         writeq((u64)0, &vp_reg->tim_bitmap);
3651         writeq((u64)0, &vp_reg->tim_remap);
3652
3653         if (config->ring.enable == VXGE_HW_RING_ENABLE)
3654                 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
3655                         (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
3656                         VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
3657
3658         val64 = readq(&vp_reg->tim_pci_cfg);
3659         val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
3660         writeq(val64, &vp_reg->tim_pci_cfg);
3661
3662         if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3663
3664                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3665
3666                 if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3667                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3668                                 0x3ffffff);
3669                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3670                                         config->tti.btimer_val);
3671                 }
3672
3673                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3674
3675                 if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3676                         if (config->tti.timer_ac_en)
3677                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3678                         else
3679                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3680                 }
3681
3682                 if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3683                         if (config->tti.timer_ci_en)
3684                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3685                         else
3686                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3687                 }
3688
3689                 if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3690                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3691                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3692                                         config->tti.urange_a);
3693                 }
3694
3695                 if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3696                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3697                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3698                                         config->tti.urange_b);
3699                 }
3700
3701                 if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3702                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3703                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3704                                         config->tti.urange_c);
3705                 }
3706
3707                 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3708                 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3709
3710                 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3711                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3712                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3713                                                 config->tti.uec_a);
3714                 }
3715
3716                 if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3717                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3718                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3719                                                 config->tti.uec_b);
3720                 }
3721
3722                 if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3723                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3724                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3725                                                 config->tti.uec_c);
3726                 }
3727
3728                 if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3729                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3730                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3731                                                 config->tti.uec_d);
3732                 }
3733
3734                 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3735                 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3736
3737                 if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3738                         if (config->tti.timer_ri_en)
3739                                 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3740                         else
3741                                 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3742                 }
3743
3744                 if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3745                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3746                                         0x3ffffff);
3747                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3748                                         config->tti.rtimer_val);
3749                 }
3750
3751                 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3752                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3753                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3754                                         config->tti.util_sel);
3755                 }
3756
3757                 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3758                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3759                                         0x3ffffff);
3760                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3761                                         config->tti.ltimer_val);
3762                 }
3763
3764                 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3765         }
3766
3767         if (config->ring.enable == VXGE_HW_RING_ENABLE) {
3768
3769                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3770
3771                 if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3772                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3773                                         0x3ffffff);
3774                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3775                                         config->rti.btimer_val);
3776                 }
3777
3778                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3779
3780                 if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3781                         if (config->rti.timer_ac_en)
3782                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3783                         else
3784                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3785                 }
3786
3787                 if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3788                         if (config->rti.timer_ci_en)
3789                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3790                         else
3791                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3792                 }
3793
3794                 if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3795                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3796                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3797                                         config->rti.urange_a);
3798                 }
3799
3800                 if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3801                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3802                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3803                                         config->rti.urange_b);
3804                 }
3805
3806                 if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3807                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3808                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3809                                         config->rti.urange_c);
3810                 }
3811
3812                 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3813                 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3814
3815                 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3816                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3817                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3818                                                 config->rti.uec_a);
3819                 }
3820
3821                 if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3822                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3823                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3824                                                 config->rti.uec_b);
3825                 }
3826
3827                 if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3828                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3829                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3830                                                 config->rti.uec_c);
3831                 }
3832
3833                 if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3834                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3835                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3836                                                 config->rti.uec_d);
3837                 }
3838
3839                 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3840                 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
3841
3842                 if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3843                         if (config->rti.timer_ri_en)
3844                                 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3845                         else
3846                                 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3847                 }
3848
3849                 if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3850                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3851                                         0x3ffffff);
3852                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3853                                         config->rti.rtimer_val);
3854                 }
3855
3856                 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3857                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3858                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3859                                         config->rti.util_sel);
3860                 }
3861
3862                 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3863                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3864                                         0x3ffffff);
3865                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3866                                         config->rti.ltimer_val);
3867                 }
3868
3869                 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
3870         }
3871
3872         val64 = 0;
3873         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3874         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3875         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3876         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3877         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3878         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3879
3880         return status;
3881 }
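
/*
 * Note (editorial): the TIM values programmed above come from the
 * per-vpath vxge_hw_vp_config, normally tuned before the vpath is
 * opened. A caller might set, for example:
 *
 *        config->vp_config[id].tti.btimer_val = 250;
 *        config->vp_config[id].tti.timer_ac_en = VXGE_HW_TIM_TIMER_AC_ENABLE;
 *
 * where "config" is a hypothetical device configuration; any field left
 * at VXGE_HW_USE_FLASH_DEFAULT keeps the value stored in flash.
 */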
3882
3883 void
3884 vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
3885 {
3886         struct __vxge_hw_virtualpath *vpath;
3887         struct vxge_hw_vpath_reg __iomem *vp_reg;
3888         struct vxge_hw_vp_config *config;
3889         u64 val64;
3890
3891         vpath = &hldev->virtual_paths[vp_id];
3892         vp_reg = vpath->vp_reg;
3893         config = vpath->vp_config;
3894
3895         if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3896                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3897
3898                 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
3899                         config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
3900                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3901                         writeq(val64,
3902                         &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3903                 }
3904         }
3905         return;
3906 }
3907 /*
3908  * __vxge_hw_vpath_initialize
3909  * This routine is the final phase of init which initializes the
3910  * registers of the vpath using the configuration passed.
3911  */
3912 enum vxge_hw_status
3913 __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
3914 {
3915         u64 val64;
3916         u32 val32;
3917         enum vxge_hw_status status = VXGE_HW_OK;
3918         struct __vxge_hw_virtualpath *vpath;
3919         struct vxge_hw_vpath_reg __iomem *vp_reg;
3920
3921         vpath = &hldev->virtual_paths[vp_id];
3922
3923         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
3924                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
3925                 goto exit;
3926         }
3927         vp_reg = vpath->vp_reg;
3928
3929         status =  __vxge_hw_vpath_swapper_set(vpath->vp_reg);
3930
3931         if (status != VXGE_HW_OK)
3932                 goto exit;
3933
3934         status =  __vxge_hw_vpath_mac_configure(hldev, vp_id);
3935
3936         if (status != VXGE_HW_OK)
3937                 goto exit;
3938
3939         status =  __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
3940
3941         if (status != VXGE_HW_OK)
3942                 goto exit;
3943
3944         status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
3945
3946         if (status != VXGE_HW_OK)
3947                 goto exit;
3948
3949         val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
3950
3951         /* Get MRRS value from device control */
3952         status  = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
3953
3954         if (status == VXGE_HW_OK) {
3955                 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
3956                 val64 &=
3957                     ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
3958                 val64 |=
3959                     VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
3960
3961                 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
3962         }
3963
3964         val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
3965         val64 |=
3966             VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
3967                     VXGE_HW_MAX_PAYLOAD_SIZE_512);
3968
3969         val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
3970         writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
3971
3972 exit:
3973         return status;
3974 }
3975
3976 /*
3977  * __vxge_hw_vp_initialize - Initialize Virtual Path structure
3978  * This routine is the initial phase of init which resets the vpath and
3979  * initializes the software support structures.
3980  */
3981 enum vxge_hw_status
3982 __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
3983                         struct vxge_hw_vp_config *config)
3984 {
3985         struct __vxge_hw_virtualpath *vpath;
3986         enum vxge_hw_status status = VXGE_HW_OK;
3987
3988         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
3989                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
3990                 goto exit;
3991         }
3992
3993         vpath = &hldev->virtual_paths[vp_id];
3994
3995         vpath->vp_id = vp_id;
3996         vpath->vp_open = VXGE_HW_VP_OPEN;
3997         vpath->hldev = hldev;
3998         vpath->vp_config = config;
3999         vpath->vp_reg = hldev->vpath_reg[vp_id];
4000         vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4001
4002         __vxge_hw_vpath_reset(hldev, vp_id);
4003
4004         status = __vxge_hw_vpath_reset_check(vpath);
4005
4006         if (status != VXGE_HW_OK) {
4007                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4008                 goto exit;
4009         }
4010
4011         status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4012
4013         if (status != VXGE_HW_OK) {
4014                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4015                 goto exit;
4016         }
4017
4018         INIT_LIST_HEAD(&vpath->vpath_handles);
4019
4020         vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4021
4022         VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4023                 hldev->tim_int_mask1, vp_id);
4024
4025         status = __vxge_hw_vpath_initialize(hldev, vp_id);
4026
4027         if (status != VXGE_HW_OK)
4028                 __vxge_hw_vp_terminate(hldev, vp_id);
4029 exit:
4030         return status;
4031 }
4032
4033 /*
4034  * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4035  * This routine closes all channels it opened and frees up memory
4036  */
4037 void
4038 __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4039 {
4040         struct __vxge_hw_virtualpath *vpath;
4041
4042         vpath = &hldev->virtual_paths[vp_id];
4043
4044         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4045                 goto exit;
4046
4047         VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4048                 vpath->hldev->tim_int_mask1, vpath->vp_id);
4049         hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4050
4051         memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4052 exit:
4053         return;
4054 }
4055
4056 /*
4057  * vxge_hw_vpath_mtu_set - Set MTU.
4058  * Set a new MTU value. For example, to use jumbo frames:
4059  * vxge_hw_vpath_mtu_set(my_vpath_handle, 9600);
4060  */
4061 enum vxge_hw_status
4062 vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4063 {
4064         u64 val64;
4065         enum vxge_hw_status status = VXGE_HW_OK;
4066         struct __vxge_hw_virtualpath *vpath;
4067
4068         if (vp == NULL) {
4069                 status = VXGE_HW_ERR_INVALID_HANDLE;
4070                 goto exit;
4071         }
4072         vpath = vp->vpath;
4073
4074         new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4075
4076         if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
4077                 status = VXGE_HW_ERR_INVALID_MTU_SIZE;
                     goto exit;
             }
4078
4079         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4080
4081         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4082         val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4083
4084         writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4085
4086         vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4087
4088 exit:
4089         return status;
4090 }
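
/*
 * Usage sketch (editorial): the return code should be checked, since an
 * out-of-range MTU is rejected; "my_vp" is a hypothetical handle:
 *
 *        if (vxge_hw_vpath_mtu_set(my_vp, 9000) != VXGE_HW_OK)
 *                keep the previous MTU (error handling elided)
 */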
4091
4092 /*
4093  * vxge_hw_vpath_open - Open a virtual path on a given adapter
4094  * This function is used to open access to a virtual path of an
4095  * adapter for offload and GRO operations. This function returns
4096  * synchronously.
4097  */
4098 enum vxge_hw_status
4099 vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4100                    struct vxge_hw_vpath_attr *attr,
4101                    struct __vxge_hw_vpath_handle **vpath_handle)
4102 {
4103         struct __vxge_hw_virtualpath *vpath;
4104         struct __vxge_hw_vpath_handle *vp;
4105         enum vxge_hw_status status;
4106
4107         vpath = &hldev->virtual_paths[attr->vp_id];
4108
4109         if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4110                 status = VXGE_HW_ERR_INVALID_STATE;
4111                 goto vpath_open_exit1;
4112         }
4113
4114         status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4115                         &hldev->config.vp_config[attr->vp_id]);
4116
4117         if (status != VXGE_HW_OK)
4118                 goto vpath_open_exit1;
4119
4120         vp = (struct __vxge_hw_vpath_handle *)
4121                 vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4122         if (vp == NULL) {
4123                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4124                 goto vpath_open_exit2;
4125         }
4126
4127         memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4128
4129         vp->vpath = vpath;
4130
4131         if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4132                 status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4133                 if (status != VXGE_HW_OK)
4134                         goto vpath_open_exit6;
4135         }
4136
4137         if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4138                 status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4139                 if (status != VXGE_HW_OK)
4140                         goto vpath_open_exit7;
4141
4142                 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4143         }
4144
4145         if (vpath->fifoh)
                     vpath->fifoh->tx_intr_num =
4146                 (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP)  +
4147                         VXGE_HW_VPATH_INTR_TX;
4148
4149         vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4150                                 VXGE_HW_BLOCK_SIZE);
4151
4152         if (vpath->stats_block == NULL) {
4153                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4154                 goto vpath_open_exit8;
4155         }
4156
4157         vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
4158                         stats_block->memblock;
4159         memset(vpath->hw_stats, 0,
4160                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4161
4162         hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4163                                                 vpath->hw_stats;
4164
4165         vpath->hw_stats_sav =
4166                 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4167         memset(vpath->hw_stats_sav, 0,
4168                         sizeof(struct vxge_hw_vpath_stats_hw_info));
4169
4170         writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4171
4172         status = vxge_hw_vpath_stats_enable(vp);
4173         if (status != VXGE_HW_OK)
4174                 goto vpath_open_exit8;
4175
4176         list_add(&vp->item, &vpath->vpath_handles);
4177
4178         hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4179
4180         *vpath_handle = vp;
4181
4182         attr->fifo_attr.userdata = vpath->fifoh;
4183         attr->ring_attr.userdata = vpath->ringh;
4184
4185         return VXGE_HW_OK;
4186
4187 vpath_open_exit8:
             if (vpath->stats_block != NULL)
                     __vxge_hw_blockpool_block_free(hldev, vpath->stats_block);
4188         if (vpath->ringh != NULL)
4189                 __vxge_hw_ring_delete(vp);
4190 vpath_open_exit7:
4191         if (vpath->fifoh != NULL)
4192                 __vxge_hw_fifo_delete(vp);
4193 vpath_open_exit6:
4194         vfree(vp);
4195 vpath_open_exit2:
4196         __vxge_hw_vp_terminate(hldev, attr->vp_id);
4197 vpath_open_exit1:
4198
4199         return status;
4200 }
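
/*
 * Usage sketch (editorial): fill a vxge_hw_vpath_attr with the fifo and
 * ring callbacks, open the vpath, and close it when done. "my_hldev",
 * "my_tx_cb" and "my_rx_cb" are hypothetical:
 *
 *        struct vxge_hw_vpath_attr attr = {0};
 *        struct __vxge_hw_vpath_handle *my_vp;
 *
 *        attr.vp_id = 0;
 *        attr.fifo_attr.callback = my_tx_cb;
 *        attr.ring_attr.callback = my_rx_cb;
 *        status = vxge_hw_vpath_open(my_hldev, &attr, &my_vp);
 *        if (status == VXGE_HW_OK)
 *                use the vpath, then vxge_hw_vpath_close(my_vp);
 */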
4201
4202 /**
4203  * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD count to the
4204  * receive doorbell
4205  * @vp: Handle got from previous vpath open
4206  *
4207  * This function posts the current RxD memory size to the receive
4208  * doorbell and derives the ring's minimum RxD replenish limit from it.
4209  */
4210 void
4211 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4212 {
4213         struct __vxge_hw_virtualpath *vpath = NULL;
4214         u64 new_count, val64, val164;
4215         struct __vxge_hw_ring *ring;
4216
4217         vpath = vp->vpath;
4218         ring = vpath->ringh;
4219
4220         new_count = readq(&vpath->vp_reg->rxdmem_size);
4221         new_count &= 0x1fff;
4222         val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4223
4224         writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4225                 &vpath->vp_reg->prc_rxd_doorbell);
4226         readl(&vpath->vp_reg->prc_rxd_doorbell);
4227
4228         val164 /= 2;
4229         val64 = readq(&vpath->vp_reg->prc_cfg6);
4230         val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4231         val64 &= 0x1ff;
4232
4233         /*
4234          * Each RxD is of 4 qwords
4235          */
4236         new_count -= (val64 + 1);
4237         val64 = min(val164, new_count) / 4;
4238
4239         ring->rxds_limit = min(ring->rxds_limit, val64);
4240         if (ring->rxds_limit < 4)
4241                 ring->rxds_limit = 4;
4242 }
4243
4244 /*
4245  * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
4246  * This function is used to close access to a virtual path opened
4247  * earlier.
4248  */
4249 enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4250 {
4251         struct __vxge_hw_virtualpath *vpath = NULL;
4252         struct __vxge_hw_device *devh = NULL;
4253         u32 vp_id = vp->vpath->vp_id;
4254         u32 is_empty = TRUE;
4255         enum vxge_hw_status status = VXGE_HW_OK;
4256
4257         vpath = vp->vpath;
4258         devh = vpath->hldev;
4259
4260         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4261                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4262                 goto vpath_close_exit;
4263         }
4264
4265         list_del(&vp->item);
4266
4267         if (!list_empty(&vpath->vpath_handles)) {
4268                 list_add(&vp->item, &vpath->vpath_handles);
4269                 is_empty = FALSE;
4270         }
4271
4272         if (!is_empty) {
4273                 status = VXGE_HW_FAIL;
4274                 goto vpath_close_exit;
4275         }
4276
4277         devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
4278
4279         if (vpath->ringh != NULL)
4280                 __vxge_hw_ring_delete(vp);
4281
4282         if (vpath->fifoh != NULL)
4283                 __vxge_hw_fifo_delete(vp);
4284
4285         if (vpath->stats_block != NULL)
4286                 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
4287
4288         vfree(vp);
4289
4290         __vxge_hw_vp_terminate(devh, vp_id);
4291
4292         vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4293
4294 vpath_close_exit:
4295         return status;
4296 }
4297
4298 /*
4299  * vxge_hw_vpath_reset - Reset the vpath
4300  * This function is used to request a reset of the vpath
4301  */
4302 enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
4303 {
4304         enum vxge_hw_status status;
4305         u32 vp_id;
4306         struct __vxge_hw_virtualpath *vpath = vp->vpath;
4307
4308         vp_id = vpath->vp_id;
4309
4310         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4311                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4312                 goto exit;
4313         }
4314
4315         status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
4316         if (status == VXGE_HW_OK)
4317                 vpath->sw_stats->soft_reset_cnt++;
4318 exit:
4319         return status;
4320 }
4321
4322 /*
4323  * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
4324  * This function polls for the vpath reset completion and re-initializes
4325  * the vpath.
4326  */
4327 enum vxge_hw_status
4328 vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
4329 {
4330         struct __vxge_hw_virtualpath *vpath = NULL;
4331         enum vxge_hw_status status;
4332         struct __vxge_hw_device *hldev;
4333         u32 vp_id;
4334
4335         vp_id = vp->vpath->vp_id;
4336         vpath = vp->vpath;
4337         hldev = vpath->hldev;
4338
4339         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4340                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4341                 goto exit;
4342         }
4343
4344         status = __vxge_hw_vpath_reset_check(vpath);
4345         if (status != VXGE_HW_OK)
4346                 goto exit;
4347
4348         status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
4349         if (status != VXGE_HW_OK)
4350                 goto exit;
4351
4352         status = __vxge_hw_vpath_initialize(hldev, vp_id);
4353         if (status != VXGE_HW_OK)
4354                 goto exit;
4355
4356         if (vpath->ringh != NULL)
4357                 __vxge_hw_vpath_prc_configure(hldev, vp_id);
4358
4359         memset(vpath->hw_stats, 0,
4360                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4361
4362         memset(vpath->hw_stats_sav, 0,
4363                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4364
4365         writeq(vpath->stats_block->dma_addr,
4366                 &vpath->vp_reg->stats_cfg);
4367
4368         status = vxge_hw_vpath_stats_enable(vp);
4369
4370 exit:
4371         return status;
4372 }
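
/*
 * Typical reset sequence using the helpers above (illustrative sketch
 * only; "vp" is assumed to be a handle obtained from a successful
 * vxge_hw_vpath_open()):
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_reset(vp);
 *	if (status == VXGE_HW_OK)
 *		status = vxge_hw_vpath_recover_from_reset(vp);
 *	if (status == VXGE_HW_OK)
 *		vxge_hw_vpath_enable(vp);
 *
 * vxge_hw_vpath_enable() below clears the reset bit so the vpath can
 * resume passing traffic.
 */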
4373
4374 /*
4375  * vxge_hw_vpath_enable - Enable vpath.
4376  * This routine clears the vpath reset thereby enabling a vpath
4377  * to start forwarding frames and generating interrupts.
4378  */
4379 void
4380 vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4381 {
4382         struct __vxge_hw_device *hldev;
4383         u64 val64;
4384
4385         hldev = vp->vpath->hldev;
4386
4387         val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
4388                 1 << (16 - vp->vpath->vp_id));
4389
4390         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4391                 &hldev->common_reg->cmn_rsthdlr_cfg1);
4392 }
4393
4394 /*
4395  * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4396  * Enable the DMA vpath statistics. Call this function to re-enable the
4397  * adapter to update stats into the host memory.
4398  */
4399 enum vxge_hw_status
4400 vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4401 {
4402         enum vxge_hw_status status = VXGE_HW_OK;
4403         struct __vxge_hw_virtualpath *vpath;
4404
4405         vpath = vp->vpath;
4406
4407         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4408                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4409                 goto exit;
4410         }
4411
4412         memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4413                         sizeof(struct vxge_hw_vpath_stats_hw_info));
4414
4415         status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4416 exit:
4417         return status;
4418 }
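
/*
 * Because the call above snapshots the previous counters into
 * hw_stats_sav before refreshing hw_stats, a caller can derive deltas
 * between two polls.  Illustrative sketch only ("vp" is an open vpath
 * handle and "mwr_delta" a local example variable):
 *
 *	struct __vxge_hw_virtualpath *vpath = vp->vpath;
 *	u32 mwr_delta;
 *
 *	if (vxge_hw_vpath_stats_enable(vp) == VXGE_HW_OK)
 *		mwr_delta = vpath->hw_stats->ini_num_mwr_sent -
 *			    vpath->hw_stats_sav->ini_num_mwr_sent;
 */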
4419
4420 /*
4421  * __vxge_hw_vpath_stats_access - Perform the given statistics operation
4422  *                           (e.g. read) at the given offset
4423  */
4424 enum vxge_hw_status
4425 __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4426                              u32 operation, u32 offset, u64 *stat)
4427 {
4428         u64 val64;
4429         enum vxge_hw_status status = VXGE_HW_OK;
4430         struct vxge_hw_vpath_reg __iomem *vp_reg;
4431
4432         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4433                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4434                 goto vpath_stats_access_exit;
4435         }
4436
4437         vp_reg = vpath->vp_reg;
4438
4439         val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4440                  VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4441                  VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4442
4443         status = __vxge_hw_pio_mem_write64(val64,
4444                                 &vp_reg->xmac_stats_access_cmd,
4445                                 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4446                                 vpath->hldev->config.device_poll_millis);
4447
4448         if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4449                 *stat = readq(&vp_reg->xmac_stats_access_data);
4450         else
4451                 *stat = 0;
4452
4453 vpath_stats_access_exit:
4454         return status;
4455 }
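
/*
 * Example call (illustrative only): read one statistics qword.  The
 * offset units depend on the statistics block being accessed - see the
 * TX/RX helpers below.
 *
 *	u64 stat;
 *	enum vxge_hw_status status;
 *
 *	status = __vxge_hw_vpath_stats_access(vpath,
 *				VXGE_HW_STATS_OP_READ,
 *				VXGE_HW_STATS_VPATH_TX_OFFSET, &stat);
 */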
4456
4457 /*
4458  * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4459  */
4460 enum vxge_hw_status
4461 __vxge_hw_vpath_xmac_tx_stats_get(
4462         struct __vxge_hw_virtualpath *vpath,
4463         struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4464 {
4465         u64 *val64;
4466         int i;
4467         u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4468         enum vxge_hw_status status = VXGE_HW_OK;
4469
4470         val64 = (u64 *) vpath_tx_stats;
4471
4472         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4473                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4474                 goto exit;
4475         }
4476
4477         for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4478                 status = __vxge_hw_vpath_stats_access(vpath,
4479                                         VXGE_HW_STATS_OP_READ,
4480                                         offset, val64);
4481                 if (status != VXGE_HW_OK)
4482                         goto exit;
4483                 offset++;
4484                 val64++;
4485         }
4486 exit:
4487         return status;
4488 }
4489
4490 /*
4491  * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4492  */
4493 enum vxge_hw_status
4494 __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4495                         struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4496 {
4497         u64 *val64;
4498         enum vxge_hw_status status = VXGE_HW_OK;
4499         int i;
4500         u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
4501         val64 = (u64 *) vpath_rx_stats;
4502
4503         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4504                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4505                 goto exit;
4506         }
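        /*
         * RX statistics offsets are byte based, unlike the qword based
         * TX offsets above, hence the ">> 3" in the PIO read below.
         */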
4507         for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4508                 status = __vxge_hw_vpath_stats_access(vpath,
4509                                         VXGE_HW_STATS_OP_READ,
4510                                         offset >> 3, val64);
4511                 if (status != VXGE_HW_OK)
4512                         goto exit;
4513
4514                 offset += 8;
4515                 val64++;
4516         }
4517 exit:
4518         return status;
4519 }
4520
4521 /*
4522  * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4523  */
4524 enum vxge_hw_status __vxge_hw_vpath_stats_get(
4525                         struct __vxge_hw_virtualpath *vpath,
4526                         struct vxge_hw_vpath_stats_hw_info *hw_stats)
4527 {
4528         u64 val64;
4529         enum vxge_hw_status status = VXGE_HW_OK;
4530         struct vxge_hw_vpath_reg __iomem *vp_reg;
4531
4532         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4533                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4534                 goto exit;
4535         }
4536         vp_reg = vpath->vp_reg;
4537
4538         val64 = readq(&vp_reg->vpath_debug_stats0);
4539         hw_stats->ini_num_mwr_sent =
4540                 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4541
4542         val64 = readq(&vp_reg->vpath_debug_stats1);
4543         hw_stats->ini_num_mrd_sent =
4544                 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4545
4546         val64 = readq(&vp_reg->vpath_debug_stats2);
4547         hw_stats->ini_num_cpl_rcvd =
4548                 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4549
4550         val64 = readq(&vp_reg->vpath_debug_stats3);
4551         hw_stats->ini_num_mwr_byte_sent =
4552                 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4553
4554         val64 = readq(&vp_reg->vpath_debug_stats4);
4555         hw_stats->ini_num_cpl_byte_rcvd =
4556                 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4557
4558         val64 = readq(&vp_reg->vpath_debug_stats5);
4559         hw_stats->wrcrdtarb_xoff =
4560                 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4561
4562         val64 = readq(&vp_reg->vpath_debug_stats6);
4563         hw_stats->rdcrdtarb_xoff =
4564                 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4565
4566         val64 = readq(&vp_reg->vpath_genstats_count01);
4567         hw_stats->vpath_genstats_count0 =
4568         (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4569                 val64);
4570
4571         val64 = readq(&vp_reg->vpath_genstats_count01);
4572         hw_stats->vpath_genstats_count1 =
4573         (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4574                 val64);
4575
4576         val64 = readq(&vp_reg->vpath_genstats_count23);
4577         hw_stats->vpath_genstats_count2 =
4578         (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4579                 val64);
4580
4581         val64 = readq(&vp_reg->vpath_genstats_count23);
4582         hw_stats->vpath_genstats_count3 =
4583         (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4584                 val64);
4585
4586         val64 = readq(&vp_reg->vpath_genstats_count4);
4587         hw_stats->vpath_genstats_count4 =
4588         (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4589                 val64);
4590
4591         val64 = readq(&vp_reg->vpath_genstats_count5);
4592         hw_stats->vpath_genstats_count5 =
4593         (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4594                 val64);
4595
4596         status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4597         if (status != VXGE_HW_OK)
4598                 goto exit;
4599
4600         status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4601         if (status != VXGE_HW_OK)
4602                 goto exit;
4603
4604         VXGE_HW_VPATH_STATS_PIO_READ(
4605                 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4606
4607         hw_stats->prog_event_vnum0 =
4608                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4609
4610         hw_stats->prog_event_vnum1 =
4611                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4612
4613         VXGE_HW_VPATH_STATS_PIO_READ(
4614                 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4615
4616         hw_stats->prog_event_vnum2 =
4617                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4618
4619         hw_stats->prog_event_vnum3 =
4620                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4621
4622         val64 = readq(&vp_reg->rx_multi_cast_stats);
4623         hw_stats->rx_multi_cast_frame_discard =
4624                 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4625
4626         val64 = readq(&vp_reg->rx_frm_transferred);
4627         hw_stats->rx_frm_transferred =
4628                 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4629
4630         val64 = readq(&vp_reg->rxd_returned);
4631         hw_stats->rxd_returned =
4632                 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4633
4634         val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4635         hw_stats->rx_mpa_len_fail_frms =
4636                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4637         hw_stats->rx_mpa_mrk_fail_frms =
4638                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4639         hw_stats->rx_mpa_crc_fail_frms =
4640                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4641
4642         val64 = readq(&vp_reg->dbg_stats_rx_fau);
4643         hw_stats->rx_permitted_frms =
4644                 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4645         hw_stats->rx_vp_reset_discarded_frms =
4646         (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4647         hw_stats->rx_wol_frms =
4648                 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4649
4650         val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4651         hw_stats->tx_vp_reset_discarded_frms =
4652         (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4653                 val64);
4654 exit:
4655         return status;
4656 }
4657
4658 /*
4659  * __vxge_hw_blockpool_create - Create block pool
4660  */
4662 enum vxge_hw_status
4663 __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4664                            struct __vxge_hw_blockpool *blockpool,
4665                            u32 pool_size,
4666                            u32 pool_max)
4667 {
4668         u32 i;
4669         struct __vxge_hw_blockpool_entry *entry = NULL;
4670         void *memblock;
4671         dma_addr_t dma_addr;
4672         struct pci_dev *dma_handle;
4673         struct pci_dev *acc_handle;
4674         enum vxge_hw_status status = VXGE_HW_OK;
4675
4676         if (blockpool == NULL) {
4677                 status = VXGE_HW_FAIL;
4678                 goto blockpool_create_exit;
4679         }
4680
4681         blockpool->hldev = hldev;
4682         blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4683         blockpool->pool_size = 0;
4684         blockpool->pool_max = pool_max;
4685         blockpool->req_out = 0;
4686
4687         INIT_LIST_HEAD(&blockpool->free_block_list);
4688         INIT_LIST_HEAD(&blockpool->free_entry_list);
4689
4690         for (i = 0; i < pool_size + pool_max; i++) {
4691                 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4692                                 GFP_KERNEL);
4693                 if (entry == NULL) {
4694                         __vxge_hw_blockpool_destroy(blockpool);
4695                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4696                         goto blockpool_create_exit;
4697                 }
4698                 list_add(&entry->item, &blockpool->free_entry_list);
4699         }
4700
4701         for (i = 0; i < pool_size; i++) {
4702
4703                 memblock = vxge_os_dma_malloc(
4704                                 hldev->pdev,
4705                                 VXGE_HW_BLOCK_SIZE,
4706                                 &dma_handle,
4707                                 &acc_handle);
4708
4709                 if (memblock == NULL) {
4710                         __vxge_hw_blockpool_destroy(blockpool);
4711                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4712                         goto blockpool_create_exit;
4713                 }
4714
4715                 dma_addr = pci_map_single(hldev->pdev, memblock,
4716                                 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4717
4718                 if (unlikely(pci_dma_mapping_error(hldev->pdev,
4719                                 dma_addr))) {
4720
4721                         vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4722                         __vxge_hw_blockpool_destroy(blockpool);
4723                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4724                         goto blockpool_create_exit;
4725                 }
4726
4727                 if (!list_empty(&blockpool->free_entry_list)) {
4728                         entry = list_first_entry(
4729                                         &blockpool->free_entry_list,
4730                                         struct __vxge_hw_blockpool_entry,
4731                                         item);
4732                         list_del(&entry->item);
4733                 } else {
4734                         entry = kzalloc(
4735                                 sizeof(struct __vxge_hw_blockpool_entry),
4736                                 GFP_KERNEL);
4737                 }
4738                 if (entry != NULL) {
4739                         entry->length = VXGE_HW_BLOCK_SIZE;
4740                         entry->memblock = memblock;
4741                         entry->dma_addr = dma_addr;
4742                         entry->acc_handle = acc_handle;
4743                         entry->dma_handle = dma_handle;
4744                         list_add(&entry->item,
4745                                           &blockpool->free_block_list);
4746                         blockpool->pool_size++;
4747                 } else {
4748                         /* free the not-yet-pooled block to avoid a leak */
4749                         pci_unmap_single(hldev->pdev, dma_addr,
4750                                 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4751                         vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4752                         __vxge_hw_blockpool_destroy(blockpool);
4753                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4754                         goto blockpool_create_exit;
4755                 }
4752         }
4753
4754 blockpool_create_exit:
4755         return status;
4756 }
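
/*
 * Illustrative creation sketch (not a call site in this file; the sizes
 * are examples only).  On failure the pool has already been torn down
 * internally, so a caller only propagates the status:
 *
 *	status = __vxge_hw_blockpool_create(hldev, &hldev->block_pool,
 *				pool_size, pool_max);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */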
4757
4758 /*
4759  * __vxge_hw_blockpool_destroy - Deallocates the block pool
4760  */
4762 void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
4763 {
4764         struct __vxge_hw_device *hldev;
4765         struct list_head *p, *n;
4768
4769         if (blockpool == NULL)
4770                 return;
4773
4774         hldev = blockpool->hldev;
4775
4776         list_for_each_safe(p, n, &blockpool->free_block_list) {
4777
4778                 pci_unmap_single(hldev->pdev,
4779                         ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4780                         ((struct __vxge_hw_blockpool_entry *)p)->length,
4781                         PCI_DMA_BIDIRECTIONAL);
4782
4783                 vxge_os_dma_free(hldev->pdev,
4784                         ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4785                         &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
4786
4787                 list_del(
4788                         &((struct __vxge_hw_blockpool_entry *)p)->item);
4789                 kfree(p);
4790                 blockpool->pool_size--;
4791         }
4792
4793         list_for_each_safe(p, n, &blockpool->free_entry_list) {
4794                 list_del(
4795                         &((struct __vxge_hw_blockpool_entry *)p)->item);
4796                 kfree(p);
4797         }
4801 }
4802
4803 /*
4804  * __vxge_hw_blockpool_blocks_add - Request additional blocks
4805  */
4806 static
4807 void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
4808 {
4809         u32 nreq = 0, i;
4810
4811         if ((blockpool->pool_size  +  blockpool->req_out) <
4812                 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
4813                 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
4814                 blockpool->req_out += nreq;
4815         }
4816
4817         for (i = 0; i < nreq; i++)
4818                 vxge_os_dma_malloc_async(
4819                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4820                         blockpool->hldev, VXGE_HW_BLOCK_SIZE);
4821 }
4822
4823 /*
4824  * __vxge_hw_blockpool_blocks_remove - Free additional blocks
4825  */
4826 static
4827 void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4828 {
4829         struct list_head *p, *n;
4830
4831         list_for_each_safe(p, n, &blockpool->free_block_list) {
4832
4833                 if (blockpool->pool_size < blockpool->pool_max)
4834                         break;
4835
4836                 pci_unmap_single(
4837                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4838                         ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4839                         ((struct __vxge_hw_blockpool_entry *)p)->length,
4840                         PCI_DMA_BIDIRECTIONAL);
4841
4842                 vxge_os_dma_free(
4843                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4844                         ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4845                         &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
4846
4847                 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
4848
4849                 list_add(p, &blockpool->free_entry_list);
4850
4851                 blockpool->pool_size--;
4852
4853         }
4854 }
4855
4856 /*
4857  * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
4858  * Adds a block to the block pool
4859  */
4860 void vxge_hw_blockpool_block_add(
4861                         struct __vxge_hw_device *devh,
4862                         void *block_addr,
4863                         u32 length,
4864                         struct pci_dev *dma_h,
4865                         struct pci_dev *acc_handle)
4866 {
4867         struct __vxge_hw_blockpool  *blockpool;
4868         struct __vxge_hw_blockpool_entry  *entry = NULL;
4869         dma_addr_t dma_addr;
4872
4873         blockpool = &devh->block_pool;
4874
4875         if (block_addr == NULL) {
4876                 blockpool->req_out--;
4878                 goto exit;
4879         }
4880
4881         dma_addr = pci_map_single(devh->pdev, block_addr, length,
4882                                 PCI_DMA_BIDIRECTIONAL);
4883
4884         if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
4885
4886                 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
4887                 blockpool->req_out--;
4889                 goto exit;
4890         }
4891
4892
4893         if (!list_empty(&blockpool->free_entry_list))
4894                 entry = (struct __vxge_hw_blockpool_entry *)
4895                         list_first_entry(&blockpool->free_entry_list,
4896                                 struct __vxge_hw_blockpool_entry,
4897                                 item);
4898
4899         if (entry == NULL)
4900                 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4901                                 GFP_KERNEL); /* kfree()d in destroy */
4902         else
4903                 list_del(&entry->item);
4904
4905         if (entry != NULL) {
4906                 entry->length = length;
4907                 entry->memblock = block_addr;
4908                 entry->dma_addr = dma_addr;
4909                 entry->acc_handle = acc_handle;
4910                 entry->dma_handle = dma_h;
4911                 list_add(&entry->item, &blockpool->free_block_list);
4912                 blockpool->pool_size++;
4913         } else {
4914                 /* no entry - drop the block or its mapping would leak */
4915                 pci_unmap_single(devh->pdev, dma_addr, length,
4916                                 PCI_DMA_BIDIRECTIONAL);
4917                 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
4918         }
4919         blockpool->req_out--;
4920 exit:
4921         return;
4922 }
4923
4924 /*
4925  * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
4926  * Allocates a block of memory of the given size, either from the block
4927  * pool or by calling vxge_os_dma_malloc()
4928  */
4929 void *
4930 __vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
4931                                 struct vxge_hw_mempool_dma *dma_object)
4932 {
4933         struct __vxge_hw_blockpool_entry *entry = NULL;
4934         struct __vxge_hw_blockpool  *blockpool;
4935         void *memblock = NULL;
4937
4938         blockpool = &devh->block_pool;
4939
4940         if (size != blockpool->block_size) {
4941
4942                 memblock = vxge_os_dma_malloc(devh->pdev, size,
4943                                                 &dma_object->handle,
4944                                                 &dma_object->acc_handle);
4945
4946                 if (memblock == NULL)
4947                         goto exit;
4950
4951                 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
4952                                         PCI_DMA_BIDIRECTIONAL);
4953
4954                 if (unlikely(pci_dma_mapping_error(devh->pdev,
4955                                 dma_object->addr))) {
4956                         vxge_os_dma_free(devh->pdev, memblock,
4957                                 &dma_object->acc_handle);
4958                         memblock = NULL;
4959                         goto exit;
4960                 }
4961
4962         } else {
4963
4964                 if (!list_empty(&blockpool->free_block_list))
4965                         entry = (struct __vxge_hw_blockpool_entry *)
4966                                 list_first_entry(&blockpool->free_block_list,
4967                                         struct __vxge_hw_blockpool_entry,
4968                                         item);
4969
4970                 if (entry != NULL) {
4971                         list_del(&entry->item);
4972                         dma_object->addr = entry->dma_addr;
4973                         dma_object->handle = entry->dma_handle;
4974                         dma_object->acc_handle = entry->acc_handle;
4975                         memblock = entry->memblock;
4976
4977                         list_add(&entry->item,
4978                                 &blockpool->free_entry_list);
4979                         blockpool->pool_size--;
4980                 }
4981
4982                 if (memblock != NULL)
4983                         __vxge_hw_blockpool_blocks_add(blockpool);
4984         }
4985 exit:
4986         return memblock;
4987 }
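
/*
 * Illustrative pairing with __vxge_hw_blockpool_free() below (sketch
 * only; "devh" and "size" are assumed from the caller's context): every
 * successful allocation must be released with the same size and
 * dma_object so the block is unmapped or returned to the pool.
 *
 *	struct vxge_hw_mempool_dma dma_object;
 *	void *block;
 *
 *	block = __vxge_hw_blockpool_malloc(devh, size, &dma_object);
 *	if (block != NULL) {
 *		... use block; the device DMAs via dma_object.addr ...
 *		__vxge_hw_blockpool_free(devh, block, size, &dma_object);
 *	}
 */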
4988
4989 /*
4990  * __vxge_hw_blockpool_free - Frees the memory allocated with
4991  *                            __vxge_hw_blockpool_malloc
4992  */
4993 void
4994 __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
4995                         void *memblock, u32 size,
4996                         struct vxge_hw_mempool_dma *dma_object)
4997 {
4998         struct __vxge_hw_blockpool_entry *entry = NULL;
4999         struct __vxge_hw_blockpool  *blockpool;
5000         enum vxge_hw_status status = VXGE_HW_OK;
5001
5002         blockpool = &devh->block_pool;
5003
5004         if (size != blockpool->block_size) {
5005                 pci_unmap_single(devh->pdev, dma_object->addr, size,
5006                         PCI_DMA_BIDIRECTIONAL);
5007                 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
5008         } else {
5009
5010                 if (!list_empty(&blockpool->free_entry_list))
5011                         entry = (struct __vxge_hw_blockpool_entry *)
5012                                 list_first_entry(&blockpool->free_entry_list,
5013                                         struct __vxge_hw_blockpool_entry,
5014                                         item);
5015
5016                 if (entry == NULL)
5017                         entry = kzalloc(sizeof(
5018                                 struct __vxge_hw_blockpool_entry),
5019                                 GFP_KERNEL); /* kfree()d in destroy */
5020                 else
5021                         list_del(&entry->item);
5022
5023                 if (entry != NULL) {
5024                         entry->length = size;
5025                         entry->memblock = memblock;
5026                         entry->dma_addr = dma_object->addr;
5027                         entry->acc_handle = dma_object->acc_handle;
5028                         entry->dma_handle = dma_object->handle;
5029                         list_add(&entry->item,
5030                                         &blockpool->free_block_list);
5031                         blockpool->pool_size++;
5032                         status = VXGE_HW_OK;
5033                 } else
5034                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5035
5036                 if (status == VXGE_HW_OK)
5037                         __vxge_hw_blockpool_blocks_remove(blockpool);
5038         }
5039
5040         return;
5041 }
5042
5043 /*
5044  * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
5045  * This function allocates a block from the block pool or from the system
5046  */
5047 struct __vxge_hw_blockpool_entry *
5048 __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5049 {
5050         struct __vxge_hw_blockpool_entry *entry = NULL;
5051         struct __vxge_hw_blockpool  *blockpool;
5052
5053         blockpool = &devh->block_pool;
5054
5055         if (size == blockpool->block_size) {
5056
5057                 if (!list_empty(&blockpool->free_block_list))
5058                         entry = (struct __vxge_hw_blockpool_entry *)
5059                                 list_first_entry(&blockpool->free_block_list,
5060                                         struct __vxge_hw_blockpool_entry,
5061                                         item);
5062
5063                 if (entry != NULL) {
5064                         list_del(&entry->item);
5065                         blockpool->pool_size--;
5066                 }
5067         }
5068
5069         if (entry != NULL)
5070                 __vxge_hw_blockpool_blocks_add(blockpool);
5071
5072         return entry;
5073 }
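
/*
 * Illustrative pairing with __vxge_hw_blockpool_block_free() below
 * (sketch only; "devh" is assumed from the caller's context): entries
 * keep their DMA mapping while in use and are simply returned to the
 * free list afterwards.
 *
 *	struct __vxge_hw_blockpool_entry *entry;
 *
 *	entry = __vxge_hw_blockpool_block_allocate(devh,
 *				VXGE_HW_BLOCK_SIZE);
 *	if (entry != NULL) {
 *		... use entry->memblock and entry->dma_addr ...
 *		__vxge_hw_blockpool_block_free(devh, entry);
 *	}
 */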
5074
5075 /*
5076  * __vxge_hw_blockpool_block_free - Frees a block back to the block pool
5077  * @devh: Hal device
5078  * @entry: Entry of block to be freed
5079  *
5080  * This function returns a block to the block pool
5081  */
5082 void
5083 __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5084                         struct __vxge_hw_blockpool_entry *entry)
5085 {
5086         struct __vxge_hw_blockpool  *blockpool;
5087
5088         blockpool = &devh->block_pool;
5089
5090         if (entry->length == blockpool->block_size) {
5091                 list_add(&entry->item, &blockpool->free_block_list);
5092                 blockpool->pool_size++;
5093         }
5094
5095         __vxge_hw_blockpool_blocks_remove(blockpool);
5096
5097         return;
5098 }