/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"
#include "bfi.h"

/* IB */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}
/* RXF */

#define bna_rxf_vlan_cfg_soft_reset(rxf)				\
do {									\
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
	(rxf)->vlan_strip_pending = true;				\
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf)					\
do {									\
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
				BNA_RSS_F_CFG_PENDING |			\
				BNA_RSS_F_STATUS_PENDING);		\
} while (0)
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
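
/*
 * RXF state machine (handlers below): the Rx filter block starts in
 * "stopped"; RXF_E_START moves it to "paused" or "cfg_wait" depending on
 * BNA_RXF_F_PAUSED. "cfg_wait" posts one firmware request at a time and
 * re-applies the pending config on every RXF_E_FW_RESP until nothing is
 * left, then rests in "started". "fltr_clr_wait" drains CAM entries on
 * pause; "last_resp_wait" absorbs the final response on stop.
 */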
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
			enum bna_rxf_event);
static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		if (rxf->flags & BNA_RXF_F_PAUSED) {
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
			call_rxf_start_cbfn(rxf);
		} else
			bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_STOP:
		call_rxf_stop_cbfn(rxf);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_pause_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		call_rxf_resume_cbfn(rxf);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
	call_rxf_pause_cbfn(rxf);
}

static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}

static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
		break;

	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_start_cbfn(rxf);
		call_rxf_cam_fltr_cbfn(rxf);
		call_rxf_resume_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		/* No-op */
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_start_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_cfg_apply(rxf)) {
			/* No more pending config updates */
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf);
	call_rxf_cam_fltr_cbfn(rxf);
	call_rxf_resume_cbfn(rxf);
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		if (!bna_rxf_fltr_clear(rxf))
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		else
			bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_pause_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_fltr_clear(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_FW_RESP:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bfi_enet_h2i_msgs req_type)
{
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
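
/*
 * The bna_bfi_*_req() helpers below all follow the pattern above: build
 * a BFI message in the rxf's bfi_enet_cmd scratch union and queue it via
 * bfa_msgq_cmd_post(). Only one command per rxf is in flight at a time
 * (tracked in rxf->msgq_cmd); the completion comes back as RXF_E_FW_RESP
 * through the rsp handlers further down.
 */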
static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
	int i;
	int j;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
	req->block_idx = block_idx;
	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
		j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
	}
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
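
/*
 * The VLAN filter table is pushed to firmware one block at a time
 * (BFI_ENET_VLAN_BLOCK_SIZE bits per request, as above); when filtering
 * is disabled every word is sent as all-ones so that all VLANs pass.
 */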
static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->vlan_strip_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
	req->size = htons(rxf->rit_size);
	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rit_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
	int i;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
	req->cfg.type = rxf->rss_cfg.hash_type;
	req->cfg.mask = rxf->rss_cfg.hash_mask;
	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
		req->cfg.key[i] =
			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->rss_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
	struct bna_mac *mac;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	list_for_each(qe, &rxf->mcast_pending_del_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	return NULL;
}

static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
	struct bna_mcam_handle *mchandle;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_handle_q) {
		mchandle = (struct bna_mcam_handle *)qe;
		if (mchandle->handle == handle)
			return mchandle;
	}

	return NULL;
}
static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
	struct bna_mac *mcmac;
	struct bna_mcam_handle *mchandle;

	mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
	mchandle = bna_rxf_mchandle_get(rxf, handle);
	if (mchandle == NULL) {
		mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
		mchandle->handle = handle;
		mchandle->refcnt = 0;
		list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
	}
	mchandle->refcnt++;
	mcmac->handle = mchandle;
}
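
/*
 * Multicast CAM handles are reference counted: several multicast MACs
 * may share one firmware handle, so the delete below only posts
 * bna_bfi_mcast_del_req() (and returns 1) once the last reference goes
 * away and the cleanup type is BNA_HARD_CLEANUP.
 */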
static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bna_cleanup_type cleanup)
{
	struct bna_mcam_handle *mchandle;
	int ret = 0;

	mchandle = mac->handle;
	if (mchandle == NULL)
		return ret;

	mchandle->refcnt--;
	if (mchandle->refcnt == 0) {
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_del_req(rxf, mchandle->handle);
			ret = 1;
		}
		list_del(&mchandle->qe);
		bfa_q_qe_init(&mchandle->qe);
		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
	}
	mac->handle = NULL;

	return ret;
}
static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;
	int ret;

	/* Delete multicast entries previously added */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		if (ret)
			return ret;
	}

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		bna_bfi_mcast_add_req(rxf, mac);
		return 1;
	}

	return 0;
}
static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
	u8 vlan_pending_bitmask;
	int block_idx = 0;

	if (rxf->vlan_pending_bitmask) {
		vlan_pending_bitmask = rxf->vlan_pending_bitmask;
		while (!(vlan_pending_bitmask & 0x1)) {
			block_idx++;
			vlan_pending_bitmask >>= 1;
		}
		rxf->vlan_pending_bitmask &= ~(1 << block_idx);
		bna_bfi_rx_vlan_filter_set(rxf, block_idx);
		return 1;
	}

	return 0;
}
static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;
	int ret;

	/* Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		if (ret)
			return ret;
	}

	/* Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
		mac = (struct bna_mac *)qe;
		if (bna_rxf_mcast_del(rxf, mac, cleanup))
			return 1;
	}

	return 0;
}
static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->rss_pending) {
		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
			bna_bfi_rit_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
			bna_bfi_rss_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
			bna_bfi_rss_enable(rxf);
			return 1;
		}
	}

	return 0;
}
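
/*
 * bna_rxf_cfg_apply() walks the pending configuration in a fixed order
 * and posts at most one firmware command per invocation, returning 1
 * while work remains; the cfg_wait state calls it again on each
 * RXF_E_FW_RESP until it returns 0.
 */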
static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_mcast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_promisc_cfg_apply(rxf))
		return 1;

	if (bna_rxf_allmulti_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_strip_cfg_apply(rxf))
		return 1;

	if (bna_rxf_rss_cfg_apply(rxf))
		return 1;

	return 0;
}
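
/*
 * Note on cleanup types: BNA_SOFT_CLEANUP only rolls driver state back
 * from active to pending (firmware state is assumed gone, e.g. after an
 * IOC failure), while BNA_HARD_CLEANUP also posts delete/disable
 * requests so the CAM is actually cleared.
 */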
/* Only software reset */
static int
bna_rxf_fltr_clear(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	return 0;
}
static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
	bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_vlan_cfg_soft_reset(rxf);
	bna_rxf_rss_cfg_soft_reset(rxf);
}
static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
	struct bna_rx *rx = rxf->rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	int offset = 0;

	rxf->rit_size = rit_size;
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxf->rit[offset] = rxp->cq.ccb->id;
		offset++;
	}
}
void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rsp *rsp =
		(struct bfi_enet_rsp *)msghdr;

	if (rsp->error) {
		/* Clear ucast from cache */
		rxf->ucast_active_set = 0;
	}

	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;
	struct bfi_enet_mcast_add_rsp *rsp =
		(struct bfi_enet_mcast_add_rsp *)msghdr;

	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
		ntohs(rsp->handle));
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}
static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config,
		struct bna_res_info *res_info)
{
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_pending_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);
	INIT_LIST_HEAD(&rxf->mcast_handle_q);

	if (q_config->paused)
		rxf->flags |= BNA_RXF_F_PAUSED;

	rxf->rit = (u8 *)
		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
	bna_rit_init(rxf, q_config->num_paths);

	rxf->rss_status = q_config->rss_status;
	if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->rss_cfg = q_config->rss_config;
		rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
		rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
		rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
	rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

	rxf->vlan_strip_status = q_config->vlan_strip_status;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}
static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna_mac *mac;

	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}

	if (rxf->ucast_pending_mac) {
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
			rxf->ucast_pending_mac);
		rxf->ucast_pending_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	rxf->rxmode_pending = 0;
	rxf->rxmode_pending_bitmask = 0;
	if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
		rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
	if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
		rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

	rxf->rss_pending = 0;
	rxf->vlan_strip_pending = false;

	rxf->flags = 0;

	rxf->rx = NULL;
}
static void
bna_rx_cb_rxf_started(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_pending_mac == NULL) {
		rxf->ucast_pending_mac =
			bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
		if (rxf->ucast_pending_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
	}

	memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
	rxf->ucast_pending_set = 1;
	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct bna_mac *mac;

	/* Check if already added or pending addition */
	if (bna_mac_find(&rxf->mcast_active_q, addr) ||
		bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
		if (cbfn)
			cbfn(rx->bna->bnad, rx);
		return BNA_CB_SUCCESS;
	}

	mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac;
	int i;

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);

		mcaddr += ETH_ALEN;
	}

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;
}
void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		return 1;
	}

	/* Set default unicast MAC */
	if (rxf->ucast_pending_set) {
		rxf->ucast_pending_set = 0;
		memcpy(rxf->ucast_active_mac.addr,
			rxf->ucast_pending_mac->addr, ETH_ALEN);
		rxf->ucast_active_set = 1;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
			BFI_ENET_H2I_MAC_UCAST_SET_REQ);
		return 1;
	}

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
		return 1;
	}

	return 0;
}
static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		if (cleanup == BNA_SOFT_CLEANUP)
			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		else {
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
			return 1;
		}
	}

	/* Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
		if (cleanup == BNA_HARD_CLEANUP) {
			mac = (struct bna_mac *)qe;
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			return 1;
		}
	}

	if (rxf->ucast_active_set) {
		rxf->ucast_pending_set = 1;
		rxf->ucast_active_set = 0;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
				BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
			return 1;
		}
	}

	return 0;
}
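
/*
 * The rx-mode helpers used below (is_promisc_enable(), promisc_enable()
 * and friends) test and flip a pending-bitmask/active-flag pair:
 * "apply" moves a pending change to active and posts the firmware
 * request, "reset" rolls the active state back to pending.
 */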
static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}
static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna *bna = rxf->rx->bna;

	/* Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	/* Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	return 0;
}
static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	}

	return 0;
}
static int
bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	/* Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	/* Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	return 0;
}
static int
bna_rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		promisc_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		bna->promisc_rid = rxf->rx->rid;
		ret = 1;
	}

	return ret;
}
static int
bna_rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		promisc_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		bna->promisc_rid = BFI_INVALID_RID;
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Schedule disable */
		promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
static int
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		allmulti_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
static int
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		allmulti_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* Schedule disable */
		allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
static int
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->vlan_strip_pending) {
		rxf->vlan_strip_pending = false;
		bna_bfi_vlan_strip_enable(rxf);
		return 1;
	}

	return 0;
}
/* RX */

#define	BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))
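
/*
 * A BNA_RXP_SINGLE path carries one RxQ; SLR (small/large) and HDS
 * (header/data) paths carry two, hence num_paths * 2 above.
 */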
#define	SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
#define call_rx_stop_cbfn(rx)						\
do {									\
	if ((rx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_rx *);			\
		void *cbarg;						\
		cbfn = (rx)->stop_cbfn;					\
		cbarg = (rx)->stop_cbarg;				\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
		cbfn(cbarg, rx);					\
	}								\
} while (0)

#define call_rx_stall_cbfn(rx)						\
do {									\
	if ((rx)->rx_stall_cbfn)					\
		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));		\
} while (0)
#define bfi_enet_datapath_q_init(bfi_q, bna_qpt)			\
do {									\
	struct bna_dma_addr cur_q_addr =				\
		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));	\
	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;	\
	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;	\
	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;		\
	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;		\
	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count);		\
	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);		\
} while (0)
static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
	struct bna_rx, enum bna_rx_event);
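
/*
 * RX state machine (handlers below): the normal start path is stopped ->
 * start_wait -> rxf_start_wait -> started; stop goes through
 * rxf_stop_wait -> stop_wait -> cleanup_wait. "start_stop_wait" covers a
 * stop that arrives while the start request is still outstanding, and
 * "failed" / "quiesce_wait" handle restarts after an IOC failure.
 */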
static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
	call_rx_stop_cbfn(rx);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	case RX_E_STOP:
		call_rx_stop_cbfn(rx);
		break;

	case RX_E_FAIL:
		/* no-op */
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
	bna_bfi_rx_enet_start(rx);
}

static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void bna_rx_sm_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	rx->rx_post_cbfn(rx->bna->bnad, rx);
	bna_rxf_start(&rx->rxf);
}
static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		call_rx_stall_cbfn(rx);
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void
bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;
	int is_regular = (rx->type == BNA_RX_T_REGULAR);

	/* Start IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
	}

	bna_ethport_cb_rx_started(&rx->bna->ethport);
}

static void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_started);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_failed_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
		break;

	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
	case RX_E_RXF_STARTED:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void
bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
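
/*
 * bna_bfi_rx_enet_start() flattens the entire Rx configuration (per-path
 * queue page tables, IB/interrupt settings, HDS and VLAN-strip options)
 * into a single BFI_ENET_H2I_RX_CFG_SET_REQ message for the firmware.
 */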
static void
bna_bfi_rx_enet_start(struct bna_rx *rx)
{
	struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));

	cfg_req->num_queue_sets = rx->num_paths;
	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;

		GET_RXQS(rxp, q0, q1);
		switch (rxp->type) {
		case BNA_RXP_SLR:
		case BNA_RXP_HDS:
			/* Small RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
						&q1->qpt);
			cfg_req->q_cfg[i].qs.rx_buffer_size =
				htons((u16)q1->buffer_size);
			/* Fall through */

		case BNA_RXP_SINGLE:
			/* Large/Single RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
						&q0->qpt);
			q0->buffer_size =
				bna_enet_mtu_get(&rx->bna->enet);
			cfg_req->q_cfg[i].ql.rx_buffer_size =
				htons((u16)q0->buffer_size);
			break;

		default:
			break;
		}

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
					&rxp->cq.qpt);

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			rxp->cq.ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			rxp->cq.ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)rxp->cq.ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED :
				BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)rxp->cq.ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)rxp->cq.ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;

	switch (rxp->type) {
	case BNA_RXP_SLR:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
		break;

	case BNA_RXP_HDS:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
		cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
		cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
		cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
		break;

	case BNA_RXP_SINGLE:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
		break;

	default:
		break;
	}
	cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;

	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}
static void
bna_bfi_rx_enet_stop(struct bna_rx *rx)
{
	struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}

static void
bna_rx_enet_stop(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	/* Stop IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_stop(rx->bna, &rxp->cq.ib);
	}

	bna_bfi_rx_enet_stop(rx);
}
static int
bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
{
	if ((rx_mod->rx_free_count == 0) ||
		(rx_mod->rxp_free_count == 0) ||
		(rx_mod->rxq_free_count == 0))
		return 0;

	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < rx_cfg->num_paths))
			return 0;
	} else {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
			return 0;
	}

	return 1;
}
static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
	struct bna_rxq *rxq = NULL;
	struct list_head *qe = NULL;

	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
	rx_mod->rxq_free_count--;
	rxq = (struct bna_rxq *)qe;
	bfa_q_qe_init(&rxq->qe);

	return rxq;
}

static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
	bfa_q_qe_init(&rxq->qe);
	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
	rx_mod->rxq_free_count++;
}

static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe = NULL;
	struct bna_rxp *rxp = NULL;

	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
	rx_mod->rxp_free_count--;
	rxp = (struct bna_rxp *)qe;
	bfa_q_qe_init(&rxp->qe);

	return rxp;
}

static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
	bfa_q_qe_init(&rxp->qe);
	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
	rx_mod->rxp_free_count++;
}
static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct list_head *qe = NULL;
	struct bna_rx *rx = NULL;

	if (type == BNA_RX_T_REGULAR)
		bfa_q_deq(&rx_mod->rx_free_q, &qe);
	else
		bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);

	rx_mod->rx_free_count--;
	rx = (struct bna_rx *)qe;
	bfa_q_qe_init(&rx->qe);
	list_add_tail(&rx->qe, &rx_mod->rx_active_q);
	rx->type = type;

	return rx;
}
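
/*
 * Regular Rx objects are taken from the head of rx_free_q and loopback
 * ones from the tail, keeping the two types in disjoint rid ranges;
 * bna_rx_put() below re-inserts entries sorted by rid.
 */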
static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
	struct list_head *prev_qe = NULL;
	struct list_head *qe;

	bfa_q_qe_init(&rx->qe);

	list_for_each(qe, &rx_mod->rx_free_q) {
		if (((struct bna_rx *)qe)->rid < rx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
	} else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
		/* This is the last entry */
		list_add_tail(&rx->qe, &rx_mod->rx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&rx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &rx->qe;
		bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
	}

	rx_mod->rx_free_count++;
}
static void
bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
		struct bna_rxq *q1)
{
	switch (rxp->type) {
	case BNA_RXP_SINGLE:
		rxp->rxq.single.only = q0;
		rxp->rxq.single.reserved = NULL;
		break;
	case BNA_RXP_SLR:
		rxp->rxq.slr.large = q0;
		rxp->rxq.slr.small = q1;
		break;
	case BNA_RXP_HDS:
		rxp->rxq.hds.data = q0;
		rxp->rxq.hds.hdr = q1;
		break;
	default:
		break;
	}
}
static void
bna_rxq_qpt_setup(struct bna_rxq *rxq,
		struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
	rxq->qpt.page_count = page_count;
	rxq->qpt.page_size = page_size;

	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
	rxq->rcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxq->qpt.page_count; i++) {
		rxq->rcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
static void
bna_rxp_cqpt_setup(struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
	rxp->cq.qpt.page_count = page_count;
	rxp->cq.qpt.page_size = page_size;

	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
	rxp->cq.ccb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
		rxp->cq.ccb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
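
/*
 * Both QPT setup routines above fill one page-table entry (the page's
 * DMA address split into lsb/msb) plus one shadow kva pointer per queue
 * page, so hardware and driver walk the same ring pages.
 */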
static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	bfa_wc_down(&rx_mod->rx_stop_wc);
}

static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	if (rx_mod->stop_cbfn)
		rx_mod->stop_cbfn(&rx_mod->bna->enet);
	rx_mod->stop_cbfn = NULL;
}
static void
bna_rx_start(struct bna_rx *rx)
{
	rx->rx_flags |= BNA_RX_F_ENET_STARTED;
	if (rx->rx_flags & BNA_RX_F_ENABLED)
		bfa_fsm_send_event(rx, RX_E_START);
}

static void
bna_rx_stop(struct bna_rx *rx)
{
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
	else {
		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
		rx->stop_cbarg = &rx->bna->rx_mod;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

static void
bna_rx_fail(struct bna_rx *rx)
{
	/* Indicate Enet is not enabled, and failed */
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	bfa_fsm_send_event(rx, RX_E_FAIL);
}
void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
	if (type == BNA_RX_T_LOOPBACK)
		rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_start(rx);
	}
}

void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;

	bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type) {
			bfa_wc_up(&rx_mod->rx_stop_wc);
			bna_rx_stop(rx);
		}
	}

	bfa_wc_wait(&rx_mod->rx_stop_wc);
}

void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		bna_rx_fail(rx);
	}
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
			struct bna_res_info *res_info)
{
	int index;
	struct bna_rx *rx_ptr;
	struct bna_rxp *rxp_ptr;
	struct bna_rxq *rxq_ptr;

	rx_mod->bna = bna;
	rx_mod->flags = 0;

	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxq = (struct bna_rxq *)
		res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	/* Initialize the queues */
	INIT_LIST_HEAD(&rx_mod->rx_free_q);
	rx_mod->rx_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
	rx_mod->rxq_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
	rx_mod->rxp_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rx_active_q);

	/* Build RX queues */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rx_ptr = &rx_mod->rx[index];

		bfa_q_qe_init(&rx_ptr->qe);
		INIT_LIST_HEAD(&rx_ptr->rxp_q);
		rx_ptr->bna = NULL;
		rx_ptr->rid = index;
		rx_ptr->stop_cbfn = NULL;
		rx_ptr->stop_cbarg = NULL;

		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
		rx_mod->rx_free_count++;
	}

	/* build RX-path queue */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rxp_ptr = &rx_mod->rxp[index];
		bfa_q_qe_init(&rxp_ptr->qe);
		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
		rx_mod->rxp_free_count++;
	}

	/* build RXQ queue */
	for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
		rxq_ptr = &rx_mod->rxq[index];
		bfa_q_qe_init(&rxq_ptr->qe);
		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
		rx_mod->rxq_free_count++;
	}
}
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &rx_mod->rx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxp_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxq_free_q)
		i++;

	rx_mod->bna = NULL;
}
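
/*
 * The start response below carries the firmware-assigned queue ids and
 * doorbell offsets; doorbell addresses are formed by adding those
 * offsets to the mapped PCI BAR before RX_E_STARTED is raised.
 */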
void
bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_rx_cfg_rsp));

	rx->hw_id = cfg_rsp->hw_id;

	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;
		GET_RXQS(rxp, q0, q1);

		/* Setup doorbells */
		rxp->cq.ccb->i_dbell->doorbell_addr =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
		q0->rcb->q_dbell =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].ql_dbell);
		q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
		if (q1) {
			q1->rcb->q_dbell =
				rx->bna->pcidev.pci_bar_kva
				+ ntohl(cfg_rsp->q_handles[i].qs_dbell);
			q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
		}

		/* Initialize producer/consumer indexes */
		(*rxp->cq.ccb->hw_producer_index) = 0;
		rxp->cq.ccb->producer_index = 0;
		q0->rcb->producer_index = q0->rcb->consumer_index = 0;
		if (q1)
			q1->rcb->producer_index = q1->rcb->consumer_index = 0;
	}

	bfa_fsm_send_event(rx, RX_E_STARTED);
}
void
bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rx, RX_E_STOPPED);
}
void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
	u32 cq_size, hq_size, dq_size;
	u32 cpage_count, hpage_count, dpage_count;
	struct bna_mem_info *mem_info;
	u32 cq_depth;
	u32 hq_depth;
	u32 dq_depth;

	dq_depth = q_cfg->q_depth;
	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
	cq_depth = dq_depth + hq_depth;

	BNA_TO_POWER_OF_2_HIGH(cq_depth);
	cq_size = cq_depth * BFI_CQ_WI_SIZE;
	cq_size = ALIGN(cq_size, PAGE_SIZE);
	cpage_count = SIZE_TO_PAGES(cq_size);

	BNA_TO_POWER_OF_2_HIGH(dq_depth);
	dq_size = dq_depth * BFI_RXQ_WI_SIZE;
	dq_size = ALIGN(dq_size, PAGE_SIZE);
	dpage_count = SIZE_TO_PAGES(dq_size);

	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
		BNA_TO_POWER_OF_2_HIGH(hq_depth);
		hq_size = hq_depth * BFI_RXQ_WI_SIZE;
		hq_size = ALIGN(hq_size, PAGE_SIZE);
		hpage_count = SIZE_TO_PAGES(hq_size);
	} else
		hpage_count = 0;

	res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_ccb);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_rcb);
	mem_info->num = BNA_GET_RXQS(q_cfg);

	res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = cpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * cpage_count;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = dpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * dpage_count;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = hpage_count * sizeof(void *);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * hpage_count;
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = BFI_ENET_RSS_RIT_MAX;
	mem_info->num = 1;

	res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
		struct bna_rx_config *rx_cfg,
		const struct bna_rx_event_cbfn *rx_cbfn,
		struct bna_res_info *res_info,
		void *priv)
{
	struct bna_rx_mod *rx_mod = &bna->rx_mod;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *q0;
	struct bna_rxq *q1;
	struct bna_intr_info *intr_info;
	u32 page_count;
	struct bna_mem_descr *ccb_mem;
	struct bna_mem_descr *rcb_mem;
	struct bna_mem_descr *unmapq_mem;
	struct bna_mem_descr *cqpt_mem;
	struct bna_mem_descr *cswqpt_mem;
	struct bna_mem_descr *cpage_mem;
	struct bna_mem_descr *hqpt_mem;
	struct bna_mem_descr *dqpt_mem;
	struct bna_mem_descr *hsqpt_mem;
	struct bna_mem_descr *dsqpt_mem;
	struct bna_mem_descr *hpage_mem;
	struct bna_mem_descr *dpage_mem;
	int i;
	int dpage_count, hpage_count, rcb_idx;

	if (!bna_rx_res_check(rx_mod, rx_cfg))
		return NULL;

	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
	unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
			PAGE_SIZE;

	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
			PAGE_SIZE;

	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
			PAGE_SIZE;

	rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
	rx->bna = bna;
	rx->rx_flags = 0;
	INIT_LIST_HEAD(&rx->rxp_q);
	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
	rx->priv = priv;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_ENET_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		}
	}

	rx->num_paths = rx_cfg->num_paths;
	for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
		rxp = bna_rxp_get(rx_mod);
		list_add_tail(&rxp->qe, &rx->rxp_q);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;
		rxp->cq.rx = rx;

		q0 = bna_rxq_get(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = bna_rxq_get(rx_mod);

		if (1 == intr_info->num)
			rxp->vector = intr_info->idl[0].vector;
		else
			rxp->vector = intr_info->idl[i].vector;

		/* Setup IB */

		rxp->cq.ib.ib_seg_host_addr.lsb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		rxp->cq.ib.ib_seg_host_addr.msb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		rxp->cq.ib.ib_seg_host_addr_kva =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		rxp->cq.ib.intr_type = intr_info->intr_type;
		if (intr_info->intr_type == BNA_INTR_T_MSIX)
			rxp->cq.ib.intr_vector = rxp->vector;
		else
			rxp->cq.ib.intr_vector = (1 << rxp->vector);
		rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
		rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
		rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;

		bna_rxp_add_rxqs(rxp, q0, q1);

		/* Setup large Q */

		q0->rx = rx;
		q0->rxp = rxp;

		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
		rcb_idx++;
		q0->rcb->q_depth = rx_cfg->q_depth;
		q0->rcb->rxq = q0;
		q0->rcb->bnad = bna->bnad;
		q0->rcb->id = 0;
		q0->rx_packets = q0->rx_bytes = 0;
		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;

		bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);

		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		/* Setup small Q */

		if (q1) {
			q1->rx = rx;
			q1->rxp = rxp;

			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
			rcb_idx++;
			q1->rcb->q_depth = rx_cfg->q_depth;
			q1->rcb->rxq = q1;
			q1->rcb->bnad = bna->bnad;
			q1->rcb->id = 1;
			q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
					rx_cfg->hds_config.forced_offset
					: rx_cfg->small_buff_size;
			q1->rx_packets = q1->rx_bytes = 0;
			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;

			bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
					&hqpt_mem[i], &hsqpt_mem[i],
					&hpage_mem[i]);

			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}

		/* Setup CQ */

		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		rxp->cq.ccb->q_depth = rx_cfg->q_depth +
			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
			0 : rx_cfg->q_depth);
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		q0->rcb->ccb = rxp->cq.ccb;
		if (q1) {
			rxp->cq.ccb->rcb[1] = q1->rcb;
			q1->rcb->ccb = rxp->cq.ccb;
		}
		rxp->cq.ccb->hw_producer_index =
			(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
		rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
		rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
		rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib.coalescing_timeo;
		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->id = i;

		bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);

		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
	}

	rx->hds_cfg = rx_cfg->hds_config;

	bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	rx_mod->rid_mask |= (1 << rx->rid);

	return rx;
}

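/*
 * Usage sketch (added commentary; hypothetical caller loosely mirroring
 * bnad.c -- error handling and the actual allocation of each res_info[]
 * entry are elided):
 *
 *	bna_rx_res_req(&rx_cfg, res_info);
 *	... allocate every resource described in res_info[] ...
 *	rx = bna_rx_create(bna, bnad, &rx_cfg, &rx_cbfn, res_info, priv);
 *	bna_rx_enable(rx);
 */
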
void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		q0->rcb = NULL;
		q0->rxp = NULL;
		q0->rx = NULL;
		bna_rxq_put(rx_mod, q0);

		if (q1) {
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			q1->rcb = NULL;
			q1->rxp = NULL;
			q1->rx = NULL;
			bna_rxq_put(rx_mod, q1);
		}
		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;

		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		rxp->cq.ccb = NULL;
		rxp->rx = NULL;
		bna_rxp_put(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	rx_mod->rid_mask &= ~(1 << rx->rid);

	rx->bna = NULL;
	rx->priv = NULL;
	bna_rx_put(rx_mod, rx);
}

void
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
		bfa_fsm_send_event(rx, RX_E_START);
}

void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat as already stopped. */
		(*cbfn)(rx->bna->bnad, rx);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;

		rx->rx_flags &= ~BNA_RX_F_ENABLED;

		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

void
bna_rx_cleanup_complete(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}

enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
			(rx->bna->promisc_rid != rxf->rx->rid))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->default_mode_rid != BFI_INVALID_RID)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
			(rx->bna->default_mode_rid != rxf->rx->rid))
			goto err_return;

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->promisc_rid != BFI_INVALID_RID)
			goto err_return;
	}

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		if (bna_rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (bna_rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}

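/*
 * Illustrative call (added commentary; the callback name is hypothetical):
 * enable promiscuous mode and leave the other mode bits untouched.  The
 * 'bitmask' argument selects which bits of 'new_mode' are acted upon; a
 * bit set in 'bitmask' but clear in 'new_mode' requests a disable.
 *
 *	err = bna_rx_mode_set(rx, BNA_RXMODE_PROMISC, BNA_RXMODE_PROMISC,
 *			mode_set_done_cb);
 */
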
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
	}
}

void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}

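/*
 * Usage sketch (added commentary): bnad typically installs the default
 * table exported further below, e.g.
 *
 *	bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
 *
 * after which bna_rx_dim_update() picks its timeouts out of
 * bna->rx_mod.dim_vector.
 */
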
void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = 0;
	else
		bias = 1;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
}

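/*
 * Worked example (added commentary): at pkt_rt = 25000 packets per sample
 * interval the ladder above selects load = BNA_LOAD_T_LOW_2; a mix that is
 * not dominated by small packets takes the bias = 1 column.  The new
 * timeout is then bna->rx_mod.dim_vector[BNA_LOAD_T_LOW_2][1], cached in
 * the CCB and programmed into the IB doorbell-ack value via
 * bna_ib_coalescing_timeo_set().
 */
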
const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	{12, 12},
	{6, 10},
	{5, 10},
	{4, 8},
	{3, 6},
	{3, 6},
	{2, 4},
	{1, 2},
};

/**
 * TX
 */

#define call_tx_stop_cbfn(tx)						\
do {									\
	if ((tx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_tx *);			\
		void *cbarg;						\
		cbfn = (tx)->stop_cbfn;					\
		cbarg = (tx)->stop_cbarg;				\
		(tx)->stop_cbfn = NULL;					\
		(tx)->stop_cbarg = NULL;				\
		cbfn(cbarg, (tx));					\
	}								\
} while (0)

#define call_tx_prio_change_cbfn(tx)					\
do {									\
	if ((tx)->prio_change_cbfn) {					\
		void (*cbfn)(struct bnad *, struct bna_tx *);		\
		cbfn = (tx)->prio_change_cbfn;				\
		(tx)->prio_change_cbfn = NULL;				\
		cbfn((tx)->bna->bnad, (tx));				\
	}								\
} while (0)

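/*
 * Note (added commentary): both macros latch the callback into a local,
 * clear the stored pointer, and only then invoke it.  This makes the
 * callbacks one-shot and lets the callee safely re-arm tx->stop_cbfn or
 * tx->prio_change_cbfn from within the callback without the new value
 * being wiped afterwards.
 */
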
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);

enum bna_tx_event {
	TX_E_START = 1,
	TX_E_STOP = 2,
	TX_E_FAIL = 3,
	TX_E_STARTED = 4,
	TX_E_STOPPED = 5,
	TX_E_PRIO_CHANGE = 6,
	TX_E_CLEANUP_DONE = 7,
	TX_E_BW_UPDATE = 8,
};

bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
			enum bna_tx_event);

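/*
 * Added commentary: the Tx FSM mirrors the Rx/Rxf pattern used earlier in
 * this file.  Roughly: stopped -> start_wait (firmware TX_CFG_SET posted)
 * -> started; a stop request passes through stop_wait and cleanup_wait; a
 * priority or bandwidth change while running detours through
 * prio_stop_wait -> prio_cleanup_wait -> start_wait to re-create the
 * queues; an IOC failure drops any state into failed, and a restart from
 * failed passes through quiesce_wait.
 */
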
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	call_tx_stop_cbfn(tx);
}

static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_STOP:
		call_tx_stop_cbfn(tx);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_PRIO_CHANGE:
		call_tx_prio_change_cbfn(tx);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_start_wait_entry(struct bna_tx *tx)
{
	bna_bfi_tx_enet_start(tx);
}

static void
bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_STARTED:
		if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
			tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
				BNA_TX_F_BW_UPDATED);
			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		} else
			bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_PRIO_CHANGE:
		tx->flags |= BNA_TX_F_PRIO_CHANGED;
		break;

	case TX_E_BW_UPDATE:
		tx->flags |= BNA_TX_F_BW_UPDATED;
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;
	int is_regular = (tx->type == BNA_TX_T_REGULAR);

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb->priority = txq->priority;
		/* Start IB */
		bna_ib_start(tx->bna, &txq->ib, is_regular);
	}
	tx->tx_resume_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		bna_tx_enet_stop(tx);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STARTED:
		/*
		 * We are here due to start_wait -> stop_wait transition on
		 * TX_E_STOP event
		 */
		bna_tx_enet_stop(tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	tx->tx_stall_cbfn(tx->bna->bnad, tx);
	bna_tx_enet_stop(tx);
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		call_tx_prio_change_cbfn(tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
	call_tx_prio_change_cbfn(tx);
	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_failed_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
		break;

	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_bfi_tx_enet_start(struct bna_tx *tx)
{
	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));

	cfg_req->num_queues = tx->num_txq;
	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq;
		i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
		cfg_req->q_cfg[i].q.priority = txq->priority;

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			txq->ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			txq->ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)txq->ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)txq->ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)txq->ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;

	cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
	cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}

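/*
 * Added commentary: this is the standard msgq command pattern in this
 * driver -- build the bfi_enet request inside the tx object, point the
 * command at it with bfa_msgq_cmd_set(), then post it to the message queue
 * with bfa_msgq_cmd_post().  The firmware's reply arrives asynchronously
 * and is routed to bna_bfi_tx_enet_start_rsp() below, which fires
 * TX_E_STARTED into the Tx FSM.
 */
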
static void
bna_bfi_tx_enet_stop(struct bna_tx *tx)
{
	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}

static void
bna_tx_enet_stop(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	/* Stop IB */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_stop(tx->bna, &txq->ib);
	}

	bna_bfi_tx_enet_stop(tx);
}

static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
	txq->tcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}

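/*
 * Worked example (added commentary, illustrative numbers; BFI_TXQ_WI_SIZE
 * is assumed to be 64 bytes here): a 2048-entry TxQ occupies 2048 * 64 =
 * 128 KiB, i.e. page_count = 32 pages of 4 KiB.  The loop above then fills
 * 32 software QPT slots (kernel virtual addresses, used by the driver) and
 * 32 hardware QPT entries (DMA addresses, walked by the device), both
 * describing the same contiguous page_mem block.
 */
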
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct list_head *qe = NULL;
	struct bna_tx *tx = NULL;

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	if (type == BNA_TX_T_REGULAR) {
		bfa_q_deq(&tx_mod->tx_free_q, &qe);
	} else {
		bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
	}
	tx = (struct bna_tx *)qe;
	bfa_q_qe_init(&tx->qe);
	tx->type = type;

	return tx;
}

static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct list_head *prev_qe;
	struct list_head *qe;

	while (!list_empty(&tx->txq_q)) {
		bfa_q_deq(&tx->txq_q, &txq);
		bfa_q_qe_init(&txq->qe);
		txq->tcb = NULL;
		txq->tx = NULL;
		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			bfa_q_qe_init(&tx->qe);
			break;
		}
	}

	tx->bna = NULL;
	tx->priv = NULL;

	prev_qe = NULL;
	list_for_each(qe, &tx_mod->tx_free_q) {
		if (((struct bna_tx *)qe)->rid < tx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
	} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
		/* This is the last entry */
		list_add_tail(&tx->qe, &tx_mod->tx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&tx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &tx->qe;
		bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
	}
}

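/*
 * Added commentary: tx_free_q is kept sorted by rid so that bna_tx_get()
 * can dequeue the lowest rid from the head for BNA_TX_T_REGULAR and the
 * highest from the tail for BNA_TX_T_LOOPBACK.  The walk above finds the
 * last element with a smaller rid and splices the freed tx back in after
 * it, e.g. freeing rid 2 into {0, 1, 3} yields {0, 1, 2, 3}.
 */
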
static void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_ENET_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

static void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

static void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}

void
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_tx_cfg_rsp));

	tx->hw_id = cfg_rsp->hw_id;

	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		/* Setup doorbells */
		txq->tcb->i_dbell->doorbell_addr =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		txq->tcb->q_dbell =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
		txq->hw_id = cfg_rsp->q_handles[i].hw_qid;

		/* Initialize producer/consumer indexes */
		(*txq->tcb->hw_consumer_index) = 0;
		txq->tcb->producer_index = txq->tcb->consumer_index = 0;
	}

	bfa_fsm_send_event(tx, TX_E_STARTED);
}

void
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(tx, TX_E_STOPPED);
}

void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
	}
}

void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * page_count;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}

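/*
 * Worked example (added commentary, illustrative numbers; BFI_TXQ_WI_SIZE
 * is assumed to be 64 bytes here): for num_txq = 4 and txq_depth = 2048,
 * q_size = 2048 * 64 = 128 KiB, which is already page aligned, so
 * page_count = 32 with 4 KiB pages.  That requests, per TxQ: one bna_tcb,
 * a 32-entry DMA QPT, a 32-pointer software QPT, 128 KiB of queue pages,
 * one IB index segment, and one MSI-X vector.
 */
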
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		const struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	int page_count;
	int i;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
					PAGE_SIZE;

	/*
	 * Get resources
	 */

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
	if (!tx)
		return NULL;
	tx->bna = bna;
	tx->priv = priv;

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->tx = tx;
	}

	/*
	 * Initialize
	 */

	/* Tx */

	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->num_txq = tx_cfg->num_txq;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_ENET_LOOPBACK))
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		}
	}

	/* TxQ */

	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */
		txq->ib.ib_seg_host_addr.lsb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		txq->ib.ib_seg_host_addr.msb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		txq->ib.ib_seg_host_addr_kva =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		txq->ib.intr_type = intr_info->intr_type;
		txq->ib.intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		if (intr_info->intr_type == BNA_INTR_T_INTX)
			txq->ib.intr_vector = (1 << txq->ib.intr_vector);
		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
		txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;

		/* TCB */

		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		txq->tcb->hw_consumer_index =
			(u32 *)txq->ib.ib_seg_host_addr_kva;
		txq->tcb->i_dbell = &txq->ib.door_bell;
		txq->tcb->intr_type = txq->ib.intr_type;
		txq->tcb->intr_vector = txq->ib.intr_vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				  res_u.mem_info.mdl[i]);

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
			txq->priority = txq->tcb->id;
		else
			txq->priority = tx_mod->default_prio;

		i++;
	}

	tx->txf_vlan_id = 0;

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	tx_mod->rid_mask |= (1 << tx->rid);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}

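/*
 * Usage sketch (added commentary; hypothetical caller loosely mirroring
 * bnad.c -- allocation of the res_info[] entries and error handling are
 * elided):
 *
 *	bna_tx_res_req(num_txq, txq_depth, res_info);
 *	... allocate every resource described in res_info[] ...
 *	tx = bna_tx_create(bna, bnad, &tx_cfg, &tx_cbfn, res_info, priv);
 *	bna_tx_enable(tx);
 */
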
void
bna_tx_destroy(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		if (tx->tcb_destroy_cbfn)
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
	}

	tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
	bna_tx_free(tx);
}

void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_ENET_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}

void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}

void
bna_tx_cleanup_complete(struct bna_tx *tx)
{
	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}

static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}

static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->enet);
	tx_mod->stop_cbfn = NULL;
}

void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
		tx_mod->tx[i].rid = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
	tx_mod->default_prio = 0;
	tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
	tx_mod->iscsi_prio = -1;
}

void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}

void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}

void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;

	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type) {
			bfa_wc_up(&tx_mod->tx_stop_wc);
			bna_tx_stop(tx);
		}
	}

	bfa_wc_wait(&tx_mod->tx_stop_wc);
}

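/*
 * Added commentary: bna_tx_mod_stop() uses the bfa wait-counter to fan in
 * the per-tx stop completions -- one bfa_wc_up() per matching tx before its
 * TX_E_STOP is fired, one bfa_wc_down() from bna_tx_mod_cb_tx_stopped()
 * when that tx reaches the stopped state.  bfa_wc_init() takes an initial
 * reference and bfa_wc_wait() drops it, so even when no tx matched,
 * bna_tx_mod_cb_tx_stopped_all() reports the module stopped exactly once.
 */
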
void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}

void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
	}
}