Lines Matching +full:num +full:- +full:rxq (drivers/net/ethernet/brocade/bna/bna_tx_rx.c)
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Linux network driver for QLogic BR-series Converged Network Adapter.
6 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
7 * Copyright (c) 2014-2015 QLogic Corporation
18 ib->coalescing_timeo = coalescing_timeo; in bna_ib_coalescing_timeo_set()
19 ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK( in bna_ib_coalescing_timeo_set()
20 (u32)ib->coalescing_timeo, 0); in bna_ib_coalescing_timeo_set()
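
bna_ib_coalescing_timeo_set() caches the timeout and precomputes the interrupt-block doorbell-ack word, so the hot interrupt path can write one ready-made value. A minimal user-space sketch of that precompute-ahead pattern; the bit layout in IB_INT_ACK below is an assumption for illustration, not the layout from bna_hw_defs.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed ack-word layout: bit 31 = ack flag, bits 16-23 = coalescing
 * timeout, low bits = acknowledged event count. Illustrative only. */
#define IB_INT_ACK(_timeout, _events) \
	(0x80000000u | ((uint32_t)(_timeout) << 16) | (_events))

struct ib {
	uint8_t  coalescing_timeo;	/* hardware tick units	  */
	uint32_t doorbell_ack;		/* precomputed MMIO value */
};

static void ib_coalescing_timeo_set(struct ib *ib, uint8_t timeo)
{
	ib->coalescing_timeo = timeo;
	ib->doorbell_ack = IB_INT_ACK(ib->coalescing_timeo, 0);
}

int main(void)
{
	struct ib ib;

	ib_coalescing_timeo_set(&ib, 12);
	printf("ack word: 0x%08x\n", (unsigned)ib.doorbell_ack);
	return 0;
}
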
27 (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \
28 (rxf)->vlan_strip_pending = true; \
33 if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \
34 (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \
80 /* No-op */ in bna_rxf_sm_stopped()
117 /* No-op */ in bna_rxf_sm_cfg_wait()
182 struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req; in bna_bfi_ucast_req()
184 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid); in bna_bfi_ucast_req()
185 req->mh.num_entries = htons( in bna_bfi_ucast_req()
187 ether_addr_copy(req->mac_addr, mac->addr); in bna_bfi_ucast_req()
188 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, in bna_bfi_ucast_req()
189 sizeof(struct bfi_enet_ucast_req), &req->mh); in bna_bfi_ucast_req()
190 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); in bna_bfi_ucast_req()
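
bna_bfi_ucast_req() is the template every request builder below follows: stamp the message header with class/id/rid, record the entry count, fill the payload, then set and post the command on the firmware message queue. A user-space sketch of that four-step shape; the struct definitions are illustrative stand-ins, not the bfi_enet ones:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg_hdr	 { uint8_t msg_class, msg_id; uint16_t num_entries; };
struct ucast_req { struct msg_hdr mh; uint8_t mac_addr[6]; };

/* Stand-in for bfa_msgq_cmd_set() + bfa_msgq_cmd_post(): the real driver
 * queues the buffer to firmware and resumes on the response. */
static void msgq_post(const struct msg_hdr *mh, size_t len)
{
	printf("posting %zu-byte command, id %u\n", len, mh->msg_id);
}

static void ucast_req_build(struct ucast_req *req, uint8_t req_type,
			    const uint8_t mac[6])
{
	req->mh.msg_class = 1;		/* BFI_MC_ENET in the driver */
	req->mh.msg_id = req_type;	/* e.g. ucast set/add/del    */
	req->mh.num_entries = 1;	/* driver: htons(bfi_msgq_num_cmd_entries(...)) */
	memcpy(req->mac_addr, mac, 6);	/* ether_addr_copy() in-kernel */
	msgq_post(&req->mh, sizeof(*req));
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	struct ucast_req req;

	ucast_req_build(&req, 2, mac);
	return 0;
}
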
197 &rxf->bfi_enet_cmd.mcast_add_req; in bna_bfi_mcast_add_req()
199 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ, in bna_bfi_mcast_add_req()
200 0, rxf->rx->rid); in bna_bfi_mcast_add_req()
201 req->mh.num_entries = htons( in bna_bfi_mcast_add_req()
203 ether_addr_copy(req->mac_addr, mac->addr); in bna_bfi_mcast_add_req()
204 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, in bna_bfi_mcast_add_req()
205 sizeof(struct bfi_enet_mcast_add_req), &req->mh); in bna_bfi_mcast_add_req()
206 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); in bna_bfi_mcast_add_req()
213 &rxf->bfi_enet_cmd.mcast_del_req; in bna_bfi_mcast_del_req()
215 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ, in bna_bfi_mcast_del_req()
216 0, rxf->rx->rid); in bna_bfi_mcast_del_req()
217 req->mh.num_entries = htons( in bna_bfi_mcast_del_req()
219 req->handle = htons(handle); in bna_bfi_mcast_del_req()
220 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, in bna_bfi_mcast_del_req()
221 sizeof(struct bfi_enet_mcast_del_req), &req->mh); in bna_bfi_mcast_del_req()
222 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); in bna_bfi_mcast_del_req()
228 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; in bna_bfi_mcast_filter_req()
230 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, in bna_bfi_mcast_filter_req()
231 BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid); in bna_bfi_mcast_filter_req()
232 req->mh.num_entries = htons( in bna_bfi_mcast_filter_req()
234 req->enable = status; in bna_bfi_mcast_filter_req()
235 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, in bna_bfi_mcast_filter_req()
236 sizeof(struct bfi_enet_enable_req), &req->mh); in bna_bfi_mcast_filter_req()
237 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); in bna_bfi_mcast_filter_req()
243 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; in bna_bfi_rx_promisc_req()
245 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, in bna_bfi_rx_promisc_req()
246 BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid); in bna_bfi_rx_promisc_req()
247 req->mh.num_entries = htons( in bna_bfi_rx_promisc_req()
249 req->enable = status; in bna_bfi_rx_promisc_req()
250 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, in bna_bfi_rx_promisc_req()
251 sizeof(struct bfi_enet_enable_req), &req->mh); in bna_bfi_rx_promisc_req()
252 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); in bna_bfi_rx_promisc_req()
258 struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req; in bna_bfi_rx_vlan_filter_set()
262 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, in bna_bfi_rx_vlan_filter_set()
263 BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid); in bna_bfi_rx_vlan_filter_set()
264 req->mh.num_entries = htons( in bna_bfi_rx_vlan_filter_set()
266 req->block_idx = block_idx; in bna_bfi_rx_vlan_filter_set()
269 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) in bna_bfi_rx_vlan_filter_set()
270 req->bit_mask[i] = in bna_bfi_rx_vlan_filter_set()
271 htonl(rxf->vlan_filter_table[j]); in bna_bfi_rx_vlan_filter_set()
273 req->bit_mask[i] = 0xFFFFFFFF; in bna_bfi_rx_vlan_filter_set()
275 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, in bna_bfi_rx_vlan_filter_set()
276 sizeof(struct bfi_enet_rx_vlan_req), &req->mh); in bna_bfi_rx_vlan_filter_set()
277 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); in bna_bfi_rx_vlan_filter_set()
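
bna_bfi_rx_vlan_filter_set() ships one block of the VLAN table per request: the real filter words (byte-swapped) when filtering is enabled, all-ones otherwise so every VLAN passes. A sketch of that chunking, assuming 4096 VLAN bits split into 8 blocks (the actual block size lives in the bfi_enet headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_MAX      4096
#define WORDS_TOTAL   (VLAN_MAX / 32)		/* 128 x 32-bit words   */
#define BLOCKS        8
#define WORDS_PER_BLK (WORDS_TOTAL / BLOCKS)	/* 16 words per request */

static uint32_t vlan_table[WORDS_TOTAL];

/* Build the bit_mask payload for one block, mirroring the i/j loop in
 * bna_bfi_rx_vlan_filter_set(). */
static void vlan_block_fill(uint32_t *bit_mask, int block_idx, bool filter_on)
{
	for (int i = 0, j = block_idx * WORDS_PER_BLK; i < WORDS_PER_BLK; i++, j++)
		bit_mask[i] = filter_on ? vlan_table[j] : 0xFFFFFFFFu;
}

int main(void)
{
	uint32_t req_mask[WORDS_PER_BLK];

	vlan_table[100 / 32] |= 1u << (100 % 32);	/* admit VLAN 100 */
	vlan_block_fill(req_mask, 0, true);
	printf("word %d = 0x%08x\n", 100 / 32, req_mask[100 / 32]);
	return 0;
}
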
283 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; in bna_bfi_vlan_strip_enable()
285 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, in bna_bfi_vlan_strip_enable()
286 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid); in bna_bfi_vlan_strip_enable()
287 req->mh.num_entries = htons( in bna_bfi_vlan_strip_enable()
289 req->enable = rxf->vlan_strip_status; in bna_bfi_vlan_strip_enable()
290 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, in bna_bfi_vlan_strip_enable()
291 sizeof(struct bfi_enet_enable_req), &req->mh); in bna_bfi_vlan_strip_enable()
292 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); in bna_bfi_vlan_strip_enable()
298 struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req; in bna_bfi_rit_cfg()
300 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, in bna_bfi_rit_cfg()
301 BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid); in bna_bfi_rit_cfg()
302 req->mh.num_entries = htons( in bna_bfi_rit_cfg()
304 req->size = htons(rxf->rit_size); in bna_bfi_rit_cfg()
305 memcpy(&req->table[0], rxf->rit, rxf->rit_size); in bna_bfi_rit_cfg()
306 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, in bna_bfi_rit_cfg()
307 sizeof(struct bfi_enet_rit_req), &req->mh); in bna_bfi_rit_cfg()
308 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); in bna_bfi_rit_cfg()
314 struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req; in bna_bfi_rss_cfg()
317 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, in bna_bfi_rss_cfg()
318 BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid); in bna_bfi_rss_cfg()
319 req->mh.num_entries = htons( in bna_bfi_rss_cfg()
321 req->cfg.type = rxf->rss_cfg.hash_type; in bna_bfi_rss_cfg()
322 req->cfg.mask = rxf->rss_cfg.hash_mask; in bna_bfi_rss_cfg()
324 req->cfg.key[i] = in bna_bfi_rss_cfg()
325 htonl(rxf->rss_cfg.toeplitz_hash_key[i]); in bna_bfi_rss_cfg()
326 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, in bna_bfi_rss_cfg()
327 sizeof(struct bfi_enet_rss_cfg_req), &req->mh); in bna_bfi_rss_cfg()
328 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); in bna_bfi_rss_cfg()
334 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; in bna_bfi_rss_enable()
336 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, in bna_bfi_rss_enable()
337 BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid); in bna_bfi_rss_enable()
338 req->mh.num_entries = htons( in bna_bfi_rss_enable()
340 req->enable = rxf->rss_status; in bna_bfi_rss_enable()
341 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, in bna_bfi_rss_enable()
342 sizeof(struct bfi_enet_enable_req), &req->mh); in bna_bfi_rss_enable()
343 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); in bna_bfi_rss_enable()
352 list_for_each_entry(mac, &rxf->mcast_active_q, qe) in bna_rxf_mcmac_get()
353 if (ether_addr_equal(mac->addr, mac_addr)) in bna_rxf_mcmac_get()
356 list_for_each_entry(mac, &rxf->mcast_pending_del_q, qe) in bna_rxf_mcmac_get()
357 if (ether_addr_equal(mac->addr, mac_addr)) in bna_rxf_mcmac_get()
368 list_for_each_entry(mchandle, &rxf->mcast_handle_q, qe) in bna_rxf_mchandle_get()
369 if (mchandle->handle == handle) in bna_rxf_mchandle_get()
384 mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod); in bna_rxf_mchandle_attach()
385 mchandle->handle = handle; in bna_rxf_mchandle_attach()
386 mchandle->refcnt = 0; in bna_rxf_mchandle_attach()
387 list_add_tail(&mchandle->qe, &rxf->mcast_handle_q); in bna_rxf_mchandle_attach()
389 mchandle->refcnt++; in bna_rxf_mchandle_attach()
390 mcmac->handle = mchandle; in bna_rxf_mchandle_attach()
400 mchandle = mac->handle; in bna_rxf_mcast_del()
404 mchandle->refcnt--; in bna_rxf_mcast_del()
405 if (mchandle->refcnt == 0) { in bna_rxf_mcast_del()
407 bna_bfi_mcast_del_req(rxf, mchandle->handle); in bna_rxf_mcast_del()
410 list_del(&mchandle->qe); in bna_rxf_mcast_del()
411 bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle); in bna_rxf_mcast_del()
413 mac->handle = NULL; in bna_rxf_mcast_del()
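
The multicast path refcounts firmware handles: bna_rxf_mchandle_attach() reuses an existing mchandle for a duplicate hardware id and bumps refcnt, and bna_rxf_mcast_del() issues the firmware delete and frees the handle only when the count reaches zero. A minimal sketch of that pattern:

#include <stdio.h>
#include <stdlib.h>

/* A firmware multicast handle shared by several MAC entries. */
struct mchandle { int hw_handle; int refcnt; };

/* Reuse the existing handle when one exists, else allocate. */
static struct mchandle *handle_attach(struct mchandle **slot, int hw_handle)
{
	if (!*slot) {
		*slot = calloc(1, sizeof(**slot));
		if (!*slot)
			return NULL;
		(*slot)->hw_handle = hw_handle;
	}
	(*slot)->refcnt++;
	return *slot;
}

/* Drop one reference; only the last drop talks to firmware and frees. */
static void handle_detach(struct mchandle **slot)
{
	if (--(*slot)->refcnt == 0) {
		printf("fw: delete handle %d\n", (*slot)->hw_handle);
		free(*slot);
		*slot = NULL;
	}
}

int main(void)
{
	struct mchandle *h = NULL;

	handle_attach(&h, 42);
	handle_attach(&h, 42);	/* second MAC mapping to the same handle */
	handle_detach(&h);	/* still referenced: no firmware delete  */
	handle_detach(&h);	/* last reference: delete goes out       */
	return 0;
}
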
425 while (!list_empty(&rxf->mcast_pending_del_q)) { in bna_rxf_mcast_cfg_apply()
426 mac = list_first_entry(&rxf->mcast_pending_del_q, in bna_rxf_mcast_cfg_apply()
429 list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna)); in bna_rxf_mcast_cfg_apply()
435 if (!list_empty(&rxf->mcast_pending_add_q)) { in bna_rxf_mcast_cfg_apply()
436 mac = list_first_entry(&rxf->mcast_pending_add_q, in bna_rxf_mcast_cfg_apply()
438 list_move_tail(&mac->qe, &rxf->mcast_active_q); in bna_rxf_mcast_cfg_apply()
452 if (rxf->vlan_pending_bitmask) { in bna_rxf_vlan_cfg_apply()
453 vlan_pending_bitmask = rxf->vlan_pending_bitmask; in bna_rxf_vlan_cfg_apply()
458 rxf->vlan_pending_bitmask &= ~BIT(block_idx); in bna_rxf_vlan_cfg_apply()
473 while (!list_empty(&rxf->mcast_pending_del_q)) { in bna_rxf_mcast_cfg_reset()
474 mac = list_first_entry(&rxf->mcast_pending_del_q, in bna_rxf_mcast_cfg_reset()
477 list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna)); in bna_rxf_mcast_cfg_reset()
483 while (!list_empty(&rxf->mcast_active_q)) { in bna_rxf_mcast_cfg_reset()
484 mac = list_first_entry(&rxf->mcast_active_q, in bna_rxf_mcast_cfg_reset()
486 list_move_tail(&mac->qe, &rxf->mcast_pending_add_q); in bna_rxf_mcast_cfg_reset()
497 if (rxf->rss_pending) { in bna_rxf_rss_cfg_apply()
498 if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) { in bna_rxf_rss_cfg_apply()
499 rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING; in bna_rxf_rss_cfg_apply()
504 if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) { in bna_rxf_rss_cfg_apply()
505 rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING; in bna_rxf_rss_cfg_apply()
510 if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) { in bna_rxf_rss_cfg_apply()
511 rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING; in bna_rxf_rss_cfg_apply()
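
bna_rxf_rss_cfg_apply() drains three staged flags in a fixed order, posting at most one firmware request per pass; the state machine re-enters on each response until nothing is pending. A sketch of that one-request-per-step dispatcher:

#include <stdio.h>

#define F_RIT    (1u << 0)
#define F_CFG    (1u << 1)
#define F_STATUS (1u << 2)

static unsigned rss_pending = F_RIT | F_CFG | F_STATUS;

/* Apply at most one pending item per call; return 1 if a request was
 * posted, so the caller knows to wait for the firmware response. */
static int rss_cfg_apply(void)
{
	if (rss_pending & F_RIT)    { rss_pending &= ~F_RIT;    puts("post RIT");    return 1; }
	if (rss_pending & F_CFG)    { rss_pending &= ~F_CFG;    puts("post CFG");    return 1; }
	if (rss_pending & F_STATUS) { rss_pending &= ~F_STATUS; puts("post ENABLE"); return 1; }
	return 0;
}

int main(void)
{
	while (rss_cfg_apply())
		;	/* in the driver the next pass runs on the firmware response */
	return 0;
}
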
561 struct bna_rx *rx = rxf->rx; in bna_rit_init()
565 rxf->rit_size = rit_size; in bna_rit_init()
566 list_for_each_entry(rxp, &rx->rxp_q, qe) { in bna_rit_init()
567 rxf->rit[offset] = rxp->cq.ccb->id; in bna_rit_init()
585 if (rsp->error) { in bna_bfi_rxf_ucast_set_rsp()
587 rxf->ucast_active_set = 0; in bna_bfi_rxf_ucast_set_rsp()
598 &rxf->bfi_enet_cmd.mcast_add_req; in bna_bfi_rxf_mcast_add_rsp()
602 bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr, in bna_bfi_rxf_mcast_add_rsp()
603 ntohs(rsp->handle)); in bna_bfi_rxf_mcast_add_rsp()
613 rxf->rx = rx; in bna_rxf_init()
615 INIT_LIST_HEAD(&rxf->ucast_pending_add_q); in bna_rxf_init()
616 INIT_LIST_HEAD(&rxf->ucast_pending_del_q); in bna_rxf_init()
617 rxf->ucast_pending_set = 0; in bna_rxf_init()
618 rxf->ucast_active_set = 0; in bna_rxf_init()
619 INIT_LIST_HEAD(&rxf->ucast_active_q); in bna_rxf_init()
620 rxf->ucast_pending_mac = NULL; in bna_rxf_init()
622 INIT_LIST_HEAD(&rxf->mcast_pending_add_q); in bna_rxf_init()
623 INIT_LIST_HEAD(&rxf->mcast_pending_del_q); in bna_rxf_init()
624 INIT_LIST_HEAD(&rxf->mcast_active_q); in bna_rxf_init()
625 INIT_LIST_HEAD(&rxf->mcast_handle_q); in bna_rxf_init()
627 rxf->rit = (u8 *) in bna_rxf_init()
629 bna_rit_init(rxf, q_config->num_paths); in bna_rxf_init()
631 rxf->rss_status = q_config->rss_status; in bna_rxf_init()
632 if (rxf->rss_status == BNA_STATUS_T_ENABLED) { in bna_rxf_init()
633 rxf->rss_cfg = q_config->rss_config; in bna_rxf_init()
634 rxf->rss_pending |= BNA_RSS_F_CFG_PENDING; in bna_rxf_init()
635 rxf->rss_pending |= BNA_RSS_F_RIT_PENDING; in bna_rxf_init()
636 rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING; in bna_rxf_init()
639 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED; in bna_rxf_init()
640 memset(rxf->vlan_filter_table, 0, in bna_rxf_init()
642 rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */ in bna_rxf_init()
643 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; in bna_rxf_init()
645 rxf->vlan_strip_status = q_config->vlan_strip_status; in bna_rxf_init()
655 rxf->ucast_pending_set = 0; in bna_rxf_uninit()
656 rxf->ucast_active_set = 0; in bna_rxf_uninit()
658 while (!list_empty(&rxf->ucast_pending_add_q)) { in bna_rxf_uninit()
659 mac = list_first_entry(&rxf->ucast_pending_add_q, in bna_rxf_uninit()
661 list_move_tail(&mac->qe, bna_ucam_mod_free_q(rxf->rx->bna)); in bna_rxf_uninit()
664 if (rxf->ucast_pending_mac) { in bna_rxf_uninit()
665 list_add_tail(&rxf->ucast_pending_mac->qe, in bna_rxf_uninit()
666 bna_ucam_mod_free_q(rxf->rx->bna)); in bna_rxf_uninit()
667 rxf->ucast_pending_mac = NULL; in bna_rxf_uninit()
670 while (!list_empty(&rxf->mcast_pending_add_q)) { in bna_rxf_uninit()
671 mac = list_first_entry(&rxf->mcast_pending_add_q, in bna_rxf_uninit()
673 list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna)); in bna_rxf_uninit()
676 rxf->rxmode_pending = 0; in bna_rxf_uninit()
677 rxf->rxmode_pending_bitmask = 0; in bna_rxf_uninit()
678 if (rxf->rx->bna->promisc_rid == rxf->rx->rid) in bna_rxf_uninit()
679 rxf->rx->bna->promisc_rid = BFI_INVALID_RID; in bna_rxf_uninit()
680 if (rxf->rx->bna->default_mode_rid == rxf->rx->rid) in bna_rxf_uninit()
681 rxf->rx->bna->default_mode_rid = BFI_INVALID_RID; in bna_rxf_uninit()
683 rxf->rss_pending = 0; in bna_rxf_uninit()
684 rxf->vlan_strip_pending = false; in bna_rxf_uninit()
686 rxf->rx = NULL; in bna_rxf_uninit()
698 rxf->start_cbfn = bna_rx_cb_rxf_started; in bna_rxf_start()
699 rxf->start_cbarg = rxf->rx; in bna_rxf_start()
712 rxf->stop_cbfn = bna_rx_cb_rxf_stopped; in bna_rxf_stop()
713 rxf->stop_cbarg = rxf->rx; in bna_rxf_stop()
726 struct bna_rxf *rxf = &rx->rxf; in bna_rx_ucast_set()
728 if (rxf->ucast_pending_mac == NULL) { in bna_rx_ucast_set()
729 rxf->ucast_pending_mac = in bna_rx_ucast_set()
730 bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna)); in bna_rx_ucast_set()
731 if (rxf->ucast_pending_mac == NULL) in bna_rx_ucast_set()
735 ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac); in bna_rx_ucast_set()
736 rxf->ucast_pending_set = 1; in bna_rx_ucast_set()
737 rxf->cam_fltr_cbfn = NULL; in bna_rx_ucast_set()
738 rxf->cam_fltr_cbarg = rx->bna->bnad; in bna_rx_ucast_set()
749 struct bna_rxf *rxf = &rx->rxf; in bna_rx_mcast_add()
753 if (bna_mac_find(&rxf->mcast_active_q, addr) || in bna_rx_mcast_add()
754 bna_mac_find(&rxf->mcast_pending_add_q, addr)) { in bna_rx_mcast_add()
756 cbfn(rx->bna->bnad, rx); in bna_rx_mcast_add()
760 mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna)); in bna_rx_mcast_add()
763 ether_addr_copy(mac->addr, addr); in bna_rx_mcast_add()
764 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q); in bna_rx_mcast_add()
766 rxf->cam_fltr_cbfn = cbfn; in bna_rx_mcast_add()
767 rxf->cam_fltr_cbarg = rx->bna->bnad; in bna_rx_mcast_add()
777 struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod; in bna_rx_ucast_listset()
778 struct bna_rxf *rxf = &rx->rxf; in bna_rx_ucast_listset()
785 while (!list_empty(&rxf->ucast_pending_add_q)) { in bna_rx_ucast_listset()
786 mac = list_first_entry(&rxf->ucast_pending_add_q, in bna_rx_ucast_listset()
788 list_move_tail(&mac->qe, &ucam_mod->free_q); in bna_rx_ucast_listset()
792 while (!list_empty(&rxf->ucast_active_q)) { in bna_rx_ucast_listset()
793 mac = list_first_entry(&rxf->ucast_active_q, in bna_rx_ucast_listset()
795 del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q); in bna_rx_ucast_listset()
796 ether_addr_copy(del_mac->addr, mac->addr); in bna_rx_ucast_listset()
797 del_mac->handle = mac->handle; in bna_rx_ucast_listset()
798 list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q); in bna_rx_ucast_listset()
799 list_move_tail(&mac->qe, &ucam_mod->free_q); in bna_rx_ucast_listset()
805 mac = bna_cam_mod_mac_get(&ucam_mod->free_q); in bna_rx_ucast_listset()
808 ether_addr_copy(mac->addr, mcaddr); in bna_rx_ucast_listset()
809 list_add_tail(&mac->qe, &list_head); in bna_rx_ucast_listset()
816 list_move_tail(&mac->qe, &rxf->ucast_pending_add_q); in bna_rx_ucast_listset()
826 list_move_tail(&mac->qe, &ucam_mod->free_q); in bna_rx_ucast_listset()
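
bna_rx_ucast_listset() swaps in an entirely new address list: purge the not-yet-applied pending adds, queue deletions for every currently active address, then stage copies of the new list before committing it to pending-add, so an allocation failure can back out without touching hardware state. A compact sketch of the stage-then-commit step (list type and helper names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mac { unsigned char addr[6]; struct mac *next; };

/* Copy every new address onto a private list first; on failure free the
 * partial list and report, leaving the caller's queues untouched. */
static struct mac *stage_list(const unsigned char (*addrs)[6], int n)
{
	struct mac *head = NULL;

	for (int i = 0; i < n; i++) {
		struct mac *m = malloc(sizeof(*m));

		if (!m) {	/* back out: nothing committed yet */
			while (head) {
				struct mac *t = head;
				head = head->next;
				free(t);
			}
			return NULL;
		}
		memcpy(m->addr, addrs[i], 6);
		m->next = head;
		head = m;
	}
	return head;
}

int main(void)
{
	const unsigned char addrs[2][6] = { { 2, 0, 0, 0, 0, 1 }, { 2, 0, 0, 0, 0, 2 } };
	struct mac *pending_add = stage_list(addrs, 2);

	printf("staged %s\n", pending_add ? "ok" : "failed");
	while (pending_add) {	/* the driver would splice these into pending-add */
		struct mac *t = pending_add;
		pending_add = pending_add->next;
		free(t);
	}
	return 0;
}
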
835 struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod; in bna_rx_mcast_listset()
836 struct bna_rxf *rxf = &rx->rxf; in bna_rx_mcast_listset()
843 while (!list_empty(&rxf->mcast_pending_add_q)) { in bna_rx_mcast_listset()
844 mac = list_first_entry(&rxf->mcast_pending_add_q, in bna_rx_mcast_listset()
846 list_move_tail(&mac->qe, &mcam_mod->free_q); in bna_rx_mcast_listset()
850 while (!list_empty(&rxf->mcast_active_q)) { in bna_rx_mcast_listset()
851 mac = list_first_entry(&rxf->mcast_active_q, in bna_rx_mcast_listset()
853 del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q); in bna_rx_mcast_listset()
854 ether_addr_copy(del_mac->addr, mac->addr); in bna_rx_mcast_listset()
855 del_mac->handle = mac->handle; in bna_rx_mcast_listset()
856 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q); in bna_rx_mcast_listset()
857 mac->handle = NULL; in bna_rx_mcast_listset()
858 list_move_tail(&mac->qe, &mcam_mod->free_q); in bna_rx_mcast_listset()
864 mac = bna_cam_mod_mac_get(&mcam_mod->free_q); in bna_rx_mcast_listset()
867 ether_addr_copy(mac->addr, mcaddr); in bna_rx_mcast_listset()
868 list_add_tail(&mac->qe, &list_head); in bna_rx_mcast_listset()
876 list_move_tail(&mac->qe, &rxf->mcast_pending_add_q); in bna_rx_mcast_listset()
886 list_move_tail(&mac->qe, &mcam_mod->free_q); in bna_rx_mcast_listset()
895 struct bna_rxf *rxf = &rx->rxf; in bna_rx_mcast_delall()
900 while (!list_empty(&rxf->mcast_pending_add_q)) { in bna_rx_mcast_delall()
901 mac = list_first_entry(&rxf->mcast_pending_add_q, in bna_rx_mcast_delall()
903 list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna)); in bna_rx_mcast_delall()
907 while (!list_empty(&rxf->mcast_active_q)) { in bna_rx_mcast_delall()
908 mac = list_first_entry(&rxf->mcast_active_q, in bna_rx_mcast_delall()
910 list_del(&mac->qe); in bna_rx_mcast_delall()
911 del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna)); in bna_rx_mcast_delall()
913 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q); in bna_rx_mcast_delall()
914 mac->handle = NULL; in bna_rx_mcast_delall()
915 list_add_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna)); in bna_rx_mcast_delall()
926 struct bna_rxf *rxf = &rx->rxf; in bna_rx_vlan_add()
931 rxf->vlan_filter_table[index] |= bit; in bna_rx_vlan_add()
932 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) { in bna_rx_vlan_add()
933 rxf->vlan_pending_bitmask |= BIT(group_id); in bna_rx_vlan_add()
941 struct bna_rxf *rxf = &rx->rxf; in bna_rx_vlan_del()
946 rxf->vlan_filter_table[index] &= ~bit; in bna_rx_vlan_del()
947 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) { in bna_rx_vlan_del()
948 rxf->vlan_pending_bitmask |= BIT(group_id); in bna_rx_vlan_del()
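
bna_rx_vlan_add() and bna_rx_vlan_del() index the 4096-bit table as 32-bit words and mark only the owning block dirty in vlan_pending_bitmask, so the next apply resends just that block. The index arithmetic, written out (a 512-VLAN block size is assumed, consistent with the 8-bit pending mask seen above):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static uint32_t vlan_filter_table[4096 / 32];
static uint8_t  vlan_pending_bitmask;	/* one dirty bit per 512-VLAN block */

static void vlan_add(unsigned vlan_id)
{
	unsigned index = vlan_id / 32;	/* which 32-bit word    */
	unsigned bit   = vlan_id % 32;	/* which bit in it      */
	unsigned group = vlan_id / 512;	/* which request block  */

	vlan_filter_table[index] |= BIT(bit);
	vlan_pending_bitmask |= BIT(group);
}

int main(void)
{
	vlan_add(1000);
	printf("word %u = 0x%08x, pending blocks = 0x%02x\n",
	       1000 / 32, vlan_filter_table[1000 / 32], vlan_pending_bitmask);
	return 0;
}
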
959 if (!list_empty(&rxf->ucast_pending_del_q)) { in bna_rxf_ucast_cfg_apply()
960 mac = list_first_entry(&rxf->ucast_pending_del_q, in bna_rxf_ucast_cfg_apply()
963 list_move_tail(&mac->qe, bna_ucam_mod_del_q(rxf->rx->bna)); in bna_rxf_ucast_cfg_apply()
968 if (rxf->ucast_pending_set) { in bna_rxf_ucast_cfg_apply()
969 rxf->ucast_pending_set = 0; in bna_rxf_ucast_cfg_apply()
970 ether_addr_copy(rxf->ucast_active_mac.addr, in bna_rxf_ucast_cfg_apply()
971 rxf->ucast_pending_mac->addr); in bna_rxf_ucast_cfg_apply()
972 rxf->ucast_active_set = 1; in bna_rxf_ucast_cfg_apply()
973 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac, in bna_rxf_ucast_cfg_apply()
979 if (!list_empty(&rxf->ucast_pending_add_q)) { in bna_rxf_ucast_cfg_apply()
980 mac = list_first_entry(&rxf->ucast_pending_add_q, in bna_rxf_ucast_cfg_apply()
982 list_move_tail(&mac->qe, &rxf->ucast_active_q); in bna_rxf_ucast_cfg_apply()
996 while (!list_empty(&rxf->ucast_pending_del_q)) { in bna_rxf_ucast_cfg_reset()
997 mac = list_first_entry(&rxf->ucast_pending_del_q, in bna_rxf_ucast_cfg_reset()
1000 list_move_tail(&mac->qe, in bna_rxf_ucast_cfg_reset()
1001 bna_ucam_mod_del_q(rxf->rx->bna)); in bna_rxf_ucast_cfg_reset()
1005 list_move_tail(&mac->qe, in bna_rxf_ucast_cfg_reset()
1006 bna_ucam_mod_del_q(rxf->rx->bna)); in bna_rxf_ucast_cfg_reset()
1012 while (!list_empty(&rxf->ucast_active_q)) { in bna_rxf_ucast_cfg_reset()
1013 mac = list_first_entry(&rxf->ucast_active_q, in bna_rxf_ucast_cfg_reset()
1015 list_move_tail(&mac->qe, &rxf->ucast_pending_add_q); in bna_rxf_ucast_cfg_reset()
1023 if (rxf->ucast_active_set) { in bna_rxf_ucast_cfg_reset()
1024 rxf->ucast_pending_set = 1; in bna_rxf_ucast_cfg_reset()
1025 rxf->ucast_active_set = 0; in bna_rxf_ucast_cfg_reset()
1027 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac, in bna_rxf_ucast_cfg_reset()
1039 struct bna *bna = rxf->rx->bna; in bna_rxf_promisc_cfg_apply()
1042 if (is_promisc_enable(rxf->rxmode_pending, in bna_rxf_promisc_cfg_apply()
1043 rxf->rxmode_pending_bitmask)) { in bna_rxf_promisc_cfg_apply()
1044 /* move promisc configuration from pending -> active */ in bna_rxf_promisc_cfg_apply()
1045 promisc_inactive(rxf->rxmode_pending, in bna_rxf_promisc_cfg_apply()
1046 rxf->rxmode_pending_bitmask); in bna_rxf_promisc_cfg_apply()
1047 rxf->rxmode_active |= BNA_RXMODE_PROMISC; in bna_rxf_promisc_cfg_apply()
1050 } else if (is_promisc_disable(rxf->rxmode_pending, in bna_rxf_promisc_cfg_apply()
1051 rxf->rxmode_pending_bitmask)) { in bna_rxf_promisc_cfg_apply()
1052 /* move promisc configuration from pending -> active */ in bna_rxf_promisc_cfg_apply()
1053 promisc_inactive(rxf->rxmode_pending, in bna_rxf_promisc_cfg_apply()
1054 rxf->rxmode_pending_bitmask); in bna_rxf_promisc_cfg_apply()
1055 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; in bna_rxf_promisc_cfg_apply()
1056 bna->promisc_rid = BFI_INVALID_RID; in bna_rxf_promisc_cfg_apply()
1067 struct bna *bna = rxf->rx->bna; in bna_rxf_promisc_cfg_reset()
1070 if (is_promisc_disable(rxf->rxmode_pending, in bna_rxf_promisc_cfg_reset()
1071 rxf->rxmode_pending_bitmask)) { in bna_rxf_promisc_cfg_reset()
1072 promisc_inactive(rxf->rxmode_pending, in bna_rxf_promisc_cfg_reset()
1073 rxf->rxmode_pending_bitmask); in bna_rxf_promisc_cfg_reset()
1074 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; in bna_rxf_promisc_cfg_reset()
1075 bna->promisc_rid = BFI_INVALID_RID; in bna_rxf_promisc_cfg_reset()
1082 /* Move promisc mode config from active -> pending */ in bna_rxf_promisc_cfg_reset()
1083 if (rxf->rxmode_active & BNA_RXMODE_PROMISC) { in bna_rxf_promisc_cfg_reset()
1084 promisc_enable(rxf->rxmode_pending, in bna_rxf_promisc_cfg_reset()
1085 rxf->rxmode_pending_bitmask); in bna_rxf_promisc_cfg_reset()
1086 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; in bna_rxf_promisc_cfg_reset()
1100 if (is_allmulti_enable(rxf->rxmode_pending, in bna_rxf_allmulti_cfg_apply()
1101 rxf->rxmode_pending_bitmask)) { in bna_rxf_allmulti_cfg_apply()
1102 /* move allmulti configuration from pending -> active */ in bna_rxf_allmulti_cfg_apply()
1103 allmulti_inactive(rxf->rxmode_pending, in bna_rxf_allmulti_cfg_apply()
1104 rxf->rxmode_pending_bitmask); in bna_rxf_allmulti_cfg_apply()
1105 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI; in bna_rxf_allmulti_cfg_apply()
1108 } else if (is_allmulti_disable(rxf->rxmode_pending, in bna_rxf_allmulti_cfg_apply()
1109 rxf->rxmode_pending_bitmask)) { in bna_rxf_allmulti_cfg_apply()
1110 /* move allmulti configuration from pending -> active */ in bna_rxf_allmulti_cfg_apply()
1111 allmulti_inactive(rxf->rxmode_pending, in bna_rxf_allmulti_cfg_apply()
1112 rxf->rxmode_pending_bitmask); in bna_rxf_allmulti_cfg_apply()
1113 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; in bna_rxf_allmulti_cfg_apply()
1125 if (is_allmulti_disable(rxf->rxmode_pending, in bna_rxf_allmulti_cfg_reset()
1126 rxf->rxmode_pending_bitmask)) { in bna_rxf_allmulti_cfg_reset()
1127 allmulti_inactive(rxf->rxmode_pending, in bna_rxf_allmulti_cfg_reset()
1128 rxf->rxmode_pending_bitmask); in bna_rxf_allmulti_cfg_reset()
1129 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; in bna_rxf_allmulti_cfg_reset()
1136 /* Move allmulti mode config from active -> pending */ in bna_rxf_allmulti_cfg_reset()
1137 if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { in bna_rxf_allmulti_cfg_reset()
1138 allmulti_enable(rxf->rxmode_pending, in bna_rxf_allmulti_cfg_reset()
1139 rxf->rxmode_pending_bitmask); in bna_rxf_allmulti_cfg_reset()
1140 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; in bna_rxf_allmulti_cfg_reset()
1153 struct bna *bna = rxf->rx->bna; in bna_rxf_promisc_enable()
1156 if (is_promisc_enable(rxf->rxmode_pending, in bna_rxf_promisc_enable()
1157 rxf->rxmode_pending_bitmask) || in bna_rxf_promisc_enable()
1158 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) { in bna_rxf_promisc_enable()
1160 } else if (is_promisc_disable(rxf->rxmode_pending, in bna_rxf_promisc_enable()
1161 rxf->rxmode_pending_bitmask)) { in bna_rxf_promisc_enable()
1163 promisc_inactive(rxf->rxmode_pending, in bna_rxf_promisc_enable()
1164 rxf->rxmode_pending_bitmask); in bna_rxf_promisc_enable()
1167 promisc_enable(rxf->rxmode_pending, in bna_rxf_promisc_enable()
1168 rxf->rxmode_pending_bitmask); in bna_rxf_promisc_enable()
1169 bna->promisc_rid = rxf->rx->rid; in bna_rxf_promisc_enable()
1179 struct bna *bna = rxf->rx->bna; in bna_rxf_promisc_disable()
1182 if (is_promisc_disable(rxf->rxmode_pending, in bna_rxf_promisc_disable()
1183 rxf->rxmode_pending_bitmask) || in bna_rxf_promisc_disable()
1184 (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) { in bna_rxf_promisc_disable()
1186 } else if (is_promisc_enable(rxf->rxmode_pending, in bna_rxf_promisc_disable()
1187 rxf->rxmode_pending_bitmask)) { in bna_rxf_promisc_disable()
1189 promisc_inactive(rxf->rxmode_pending, in bna_rxf_promisc_disable()
1190 rxf->rxmode_pending_bitmask); in bna_rxf_promisc_disable()
1191 bna->promisc_rid = BFI_INVALID_RID; in bna_rxf_promisc_disable()
1192 } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) { in bna_rxf_promisc_disable()
1194 promisc_disable(rxf->rxmode_pending, in bna_rxf_promisc_disable()
1195 rxf->rxmode_pending_bitmask); in bna_rxf_promisc_disable()
1207 if (is_allmulti_enable(rxf->rxmode_pending, in bna_rxf_allmulti_enable()
1208 rxf->rxmode_pending_bitmask) || in bna_rxf_allmulti_enable()
1209 (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) { in bna_rxf_allmulti_enable()
1211 } else if (is_allmulti_disable(rxf->rxmode_pending, in bna_rxf_allmulti_enable()
1212 rxf->rxmode_pending_bitmask)) { in bna_rxf_allmulti_enable()
1214 allmulti_inactive(rxf->rxmode_pending, in bna_rxf_allmulti_enable()
1215 rxf->rxmode_pending_bitmask); in bna_rxf_allmulti_enable()
1218 allmulti_enable(rxf->rxmode_pending, in bna_rxf_allmulti_enable()
1219 rxf->rxmode_pending_bitmask); in bna_rxf_allmulti_enable()
1231 if (is_allmulti_disable(rxf->rxmode_pending, in bna_rxf_allmulti_disable()
1232 rxf->rxmode_pending_bitmask) || in bna_rxf_allmulti_disable()
1233 (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) { in bna_rxf_allmulti_disable()
1235 } else if (is_allmulti_enable(rxf->rxmode_pending, in bna_rxf_allmulti_disable()
1236 rxf->rxmode_pending_bitmask)) { in bna_rxf_allmulti_disable()
1238 allmulti_inactive(rxf->rxmode_pending, in bna_rxf_allmulti_disable()
1239 rxf->rxmode_pending_bitmask); in bna_rxf_allmulti_disable()
1240 } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { in bna_rxf_allmulti_disable()
1242 allmulti_disable(rxf->rxmode_pending, in bna_rxf_allmulti_disable()
1243 rxf->rxmode_pending_bitmask); in bna_rxf_allmulti_disable()
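
The promisc and allmulti helpers above keep two views of each mode: pending (a request not yet sent to firmware) and active (what the hardware currently does). Asking to enable while a disable is still queued simply cancels the queued disable. A two-flag sketch of that protocol; the driver encodes pending state and direction in a (flags, bitmask) pair rather than two booleans:

#include <stdbool.h>
#include <stdio.h>

static bool pending_enable, pending_disable, active;

/* Mirrors the bna_rxf_promisc_enable() branch order. */
static void mode_enable(void)
{
	if (pending_enable || active)
		return;			/* already on, or turning on   */
	if (pending_disable) {
		pending_disable = false; /* cancel the queued disable  */
		return;
	}
	pending_enable = true;		/* schedule a firmware request */
}

/* One pass of *_cfg_apply(): push at most one pending op to firmware. */
static void apply(void)
{
	if (pending_enable) {
		pending_enable = false;
		active = true;
		puts("fw: enable");
	} else if (pending_disable) {
		pending_disable = false;
		active = false;
		puts("fw: disable");
	}
}

int main(void)
{
	mode_enable(); apply();		/* prints "fw: enable"        */
	mode_enable(); apply();		/* no-op: mode already active */
	return 0;
}
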
1253 if (rxf->vlan_strip_pending) { in bna_rxf_vlan_strip_cfg_apply()
1254 rxf->vlan_strip_pending = false; in bna_rxf_vlan_strip_cfg_apply()
1264 #define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1265 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1268 (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
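
BNA_GET_RXQS() returns one RxQ per path for single-queue path types and two otherwise; line 1268 is the tail of the neighbouring page-rounding helper (SIZE_TO_PAGES in the source), which converts a queue's byte footprint into whole pages. Its arithmetic, unrolled into a function:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* Whole pages, plus one more if a partial page remains: equivalent to
 * rounding size up to the next page boundary. */
static unsigned size_to_pages(unsigned size)
{
	return (size >> PAGE_SHIFT) +
	       (((size & (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT);
}

int main(void)
{
	printf("%u\n", size_to_pages(4096));	/* 1 */
	printf("%u\n", size_to_pages(4097));	/* 2 */
	return 0;
}
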
1272 if ((rx)->stop_cbfn) { \
1275 cbfn = (rx)->stop_cbfn; \
1276 cbarg = (rx)->stop_cbarg; \
1277 (rx)->stop_cbfn = NULL; \
1278 (rx)->stop_cbarg = NULL; \
1285 if ((rx)->rx_stall_cbfn) \
1286 (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
1292 *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \
1293 (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \
1294 (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \
1295 (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \
1296 (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \
1297 (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
1298 (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1344 /* no-op */ in bna_rx_sm_stopped()
1370 rx->rx_cleanup_cbfn(rx->bna->bnad, rx); in bna_rx_sm_stop_wait()
1407 rx->rx_post_cbfn(rx->bna->bnad, rx); in bna_rx_sm_rxf_start_wait_entry()
1408 bna_rxf_start(&rx->rxf); in bna_rx_sm_rxf_start_wait_entry()
1422 bna_rxf_fail(&rx->rxf); in bna_rx_sm_rxf_stop_wait()
1424 rx->rx_cleanup_cbfn(rx->bna->bnad, rx); in bna_rx_sm_rxf_stop_wait()
1428 bna_rxf_stop(&rx->rxf); in bna_rx_sm_rxf_stop_wait()
1471 int is_regular = (rx->type == BNA_RX_T_REGULAR); in bna_rx_sm_started_entry()
1474 list_for_each_entry(rxp, &rx->rxp_q, qe) in bna_rx_sm_started_entry()
1475 bna_ib_start(rx->bna, &rxp->cq.ib, is_regular); in bna_rx_sm_started_entry()
1477 bna_ethport_cb_rx_started(&rx->bna->ethport); in bna_rx_sm_started_entry()
1486 bna_ethport_cb_rx_stopped(&rx->bna->ethport); in bna_rx_sm_started()
1487 bna_rxf_stop(&rx->rxf); in bna_rx_sm_started()
1492 bna_ethport_cb_rx_stopped(&rx->bna->ethport); in bna_rx_sm_started()
1493 bna_rxf_fail(&rx->rxf); in bna_rx_sm_started()
1495 rx->rx_cleanup_cbfn(rx->bna->bnad, rx); in bna_rx_sm_started()
1514 bna_rxf_fail(&rx->rxf); in bna_rx_sm_rxf_start_wait()
1516 rx->rx_cleanup_cbfn(rx->bna->bnad, rx); in bna_rx_sm_rxf_start_wait()
1540 /* No-op */ in bna_rx_sm_cleanup_wait()
1573 /* No-op */ in bna_rx_sm_failed()
1615 struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req; in bna_bfi_rx_enet_start()
1620 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, in bna_bfi_rx_enet_start()
1621 BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid); in bna_bfi_rx_enet_start()
1622 cfg_req->mh.num_entries = htons( in bna_bfi_rx_enet_start()
1625 cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet); in bna_bfi_rx_enet_start()
1626 cfg_req->num_queue_sets = rx->num_paths; in bna_bfi_rx_enet_start()
1627 for (i = 0; i < rx->num_paths; i++) { in bna_bfi_rx_enet_start()
1629 : list_first_entry(&rx->rxp_q, struct bna_rxp, qe); in bna_bfi_rx_enet_start()
1631 switch (rxp->type) { in bna_bfi_rx_enet_start()
1634 /* Small RxQ */ in bna_bfi_rx_enet_start()
1635 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q, in bna_bfi_rx_enet_start()
1636 &q1->qpt); in bna_bfi_rx_enet_start()
1637 cfg_req->q_cfg[i].qs.rx_buffer_size = in bna_bfi_rx_enet_start()
1638 htons((u16)q1->buffer_size); in bna_bfi_rx_enet_start()
1642 /* Large/Single RxQ */ in bna_bfi_rx_enet_start()
1643 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q, in bna_bfi_rx_enet_start()
1644 &q0->qpt); in bna_bfi_rx_enet_start()
1645 if (q0->multi_buffer) in bna_bfi_rx_enet_start()
1646 /* multi-buffer is enabled by allocating in bna_bfi_rx_enet_start()
1648 * q0->buffer_size should be initialized to in bna_bfi_rx_enet_start()
1651 cfg_req->rx_cfg.multi_buffer = in bna_bfi_rx_enet_start()
1654 q0->buffer_size = in bna_bfi_rx_enet_start()
1655 bna_enet_mtu_get(&rx->bna->enet); in bna_bfi_rx_enet_start()
1656 cfg_req->q_cfg[i].ql.rx_buffer_size = in bna_bfi_rx_enet_start()
1657 htons((u16)q0->buffer_size); in bna_bfi_rx_enet_start()
1664 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q, in bna_bfi_rx_enet_start()
1665 &rxp->cq.qpt); in bna_bfi_rx_enet_start()
1667 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = in bna_bfi_rx_enet_start()
1668 rxp->cq.ib.ib_seg_host_addr.lsb; in bna_bfi_rx_enet_start()
1669 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = in bna_bfi_rx_enet_start()
1670 rxp->cq.ib.ib_seg_host_addr.msb; in bna_bfi_rx_enet_start()
1671 cfg_req->q_cfg[i].ib.intr.msix_index = in bna_bfi_rx_enet_start()
1672 htons((u16)rxp->cq.ib.intr_vector); in bna_bfi_rx_enet_start()
1675 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED; in bna_bfi_rx_enet_start()
1676 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; in bna_bfi_rx_enet_start()
1677 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; in bna_bfi_rx_enet_start()
1678 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED; in bna_bfi_rx_enet_start()
1679 cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX) in bna_bfi_rx_enet_start()
1682 cfg_req->ib_cfg.coalescing_timeout = in bna_bfi_rx_enet_start()
1683 htonl((u32)rxp->cq.ib.coalescing_timeo); in bna_bfi_rx_enet_start()
1684 cfg_req->ib_cfg.inter_pkt_timeout = in bna_bfi_rx_enet_start()
1685 htonl((u32)rxp->cq.ib.interpkt_timeo); in bna_bfi_rx_enet_start()
1686 cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count; in bna_bfi_rx_enet_start()
1688 switch (rxp->type) { in bna_bfi_rx_enet_start()
1690 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL; in bna_bfi_rx_enet_start()
1694 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS; in bna_bfi_rx_enet_start()
1695 cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type; in bna_bfi_rx_enet_start()
1696 cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset; in bna_bfi_rx_enet_start()
1697 cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset; in bna_bfi_rx_enet_start()
1701 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE; in bna_bfi_rx_enet_start()
1707 cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status; in bna_bfi_rx_enet_start()
1709 bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, in bna_bfi_rx_enet_start()
1710 sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh); in bna_bfi_rx_enet_start()
1711 bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd); in bna_bfi_rx_enet_start()
1717 struct bfi_enet_req *req = &rx->bfi_enet_cmd.req; in bna_bfi_rx_enet_stop()
1719 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, in bna_bfi_rx_enet_stop()
1720 BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid); in bna_bfi_rx_enet_stop()
1721 req->mh.num_entries = htons( in bna_bfi_rx_enet_stop()
1723 bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), in bna_bfi_rx_enet_stop()
1724 &req->mh); in bna_bfi_rx_enet_stop()
1725 bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd); in bna_bfi_rx_enet_stop()
1734 list_for_each_entry(rxp, &rx->rxp_q, qe) in bna_rx_enet_stop()
1735 bna_ib_stop(rx->bna, &rxp->cq.ib); in bna_rx_enet_stop()
1743 if ((rx_mod->rx_free_count == 0) || in bna_rx_res_check()
1744 (rx_mod->rxp_free_count == 0) || in bna_rx_res_check()
1745 (rx_mod->rxq_free_count == 0)) in bna_rx_res_check()
1748 if (rx_cfg->rxp_type == BNA_RXP_SINGLE) { in bna_rx_res_check()
1749 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || in bna_rx_res_check()
1750 (rx_mod->rxq_free_count < rx_cfg->num_paths)) in bna_rx_res_check()
1753 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || in bna_rx_res_check()
1754 (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths))) in bna_rx_res_check()
1764 struct bna_rxq *rxq = NULL; in bna_rxq_get() local
1766 rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe); in bna_rxq_get()
1767 list_del(&rxq->qe); in bna_rxq_get()
1768 rx_mod->rxq_free_count--; in bna_rxq_get()
1770 return rxq; in bna_rxq_get()
1774 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq) in bna_rxq_put() argument
1776 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q); in bna_rxq_put()
1777 rx_mod->rxq_free_count++; in bna_rxq_put()
1785 rxp = list_first_entry(&rx_mod->rxp_free_q, struct bna_rxp, qe); in bna_rxp_get()
1786 list_del(&rxp->qe); in bna_rxp_get()
1787 rx_mod->rxp_free_count--; in bna_rxp_get()
1795 list_add_tail(&rxp->qe, &rx_mod->rxp_free_q); in bna_rxp_put()
1796 rx_mod->rxp_free_count++; in bna_rxp_put()
1804 BUG_ON(list_empty(&rx_mod->rx_free_q)); in bna_rx_get()
1806 rx = list_first_entry(&rx_mod->rx_free_q, struct bna_rx, qe); in bna_rx_get()
1808 rx = list_last_entry(&rx_mod->rx_free_q, struct bna_rx, qe); in bna_rx_get()
1810 rx_mod->rx_free_count--; in bna_rx_get()
1811 list_move_tail(&rx->qe, &rx_mod->rx_active_q); in bna_rx_get()
1812 rx->type = type; in bna_rx_get()
1822 list_for_each_prev(qe, &rx_mod->rx_free_q) in bna_rx_put()
1823 if (((struct bna_rx *)qe)->rid < rx->rid) in bna_rx_put()
1826 list_add(&rx->qe, qe); in bna_rx_put()
1827 rx_mod->rx_free_count++; in bna_rx_put()
1834 switch (rxp->type) { in bna_rxp_add_rxqs()
1836 rxp->rxq.single.only = q0; in bna_rxp_add_rxqs()
1837 rxp->rxq.single.reserved = NULL; in bna_rxp_add_rxqs()
1840 rxp->rxq.slr.large = q0; in bna_rxp_add_rxqs()
1841 rxp->rxq.slr.small = q1; in bna_rxp_add_rxqs()
1844 rxp->rxq.hds.data = q0; in bna_rxp_add_rxqs()
1845 rxp->rxq.hds.hdr = q1; in bna_rxp_add_rxqs()
1853 bna_rxq_qpt_setup(struct bna_rxq *rxq, in bna_rxq_qpt_setup() argument
1866 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; in bna_rxq_qpt_setup()
1867 rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; in bna_rxq_qpt_setup()
1868 rxq->qpt.kv_qpt_ptr = qpt_mem->kva; in bna_rxq_qpt_setup()
1869 rxq->qpt.page_count = page_count; in bna_rxq_qpt_setup()
1870 rxq->qpt.page_size = page_size; in bna_rxq_qpt_setup()
1872 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva; in bna_rxq_qpt_setup()
1873 rxq->rcb->sw_q = page_mem->kva; in bna_rxq_qpt_setup()
1875 kva = page_mem->kva; in bna_rxq_qpt_setup()
1876 BNA_GET_DMA_ADDR(&page_mem->dma, dma); in bna_rxq_qpt_setup()
1878 for (i = 0; i < rxq->qpt.page_count; i++) { in bna_rxq_qpt_setup()
1879 rxq->rcb->sw_qpt[i] = kva; in bna_rxq_qpt_setup()
1883 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb = in bna_rxq_qpt_setup()
1885 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb = in bna_rxq_qpt_setup()
1904 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; in bna_rxp_cqpt_setup()
1905 rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; in bna_rxp_cqpt_setup()
1906 rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva; in bna_rxp_cqpt_setup()
1907 rxp->cq.qpt.page_count = page_count; in bna_rxp_cqpt_setup()
1908 rxp->cq.qpt.page_size = page_size; in bna_rxp_cqpt_setup()
1910 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva; in bna_rxp_cqpt_setup()
1911 rxp->cq.ccb->sw_q = page_mem->kva; in bna_rxp_cqpt_setup()
1913 kva = page_mem->kva; in bna_rxp_cqpt_setup()
1914 BNA_GET_DMA_ADDR(&page_mem->dma, dma); in bna_rxp_cqpt_setup()
1916 for (i = 0; i < rxp->cq.qpt.page_count; i++) { in bna_rxp_cqpt_setup()
1917 rxp->cq.ccb->sw_qpt[i] = kva; in bna_rxp_cqpt_setup()
1921 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb = in bna_rxp_cqpt_setup()
1923 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb = in bna_rxp_cqpt_setup()
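
bna_rxq_qpt_setup() and bna_rxp_cqpt_setup() both populate two parallel page tables over one contiguous queue allocation: a hardware QPT of DMA addresses for the NIC to walk, and a shadow sw_qpt of kernel virtual addresses for the CPU. A sketch of that dual bookkeeping (the bus address is fabricated):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u

struct qpt {
	uint64_t *hw_tbl;	/* DMA addresses the NIC walks         */
	void	**sw_tbl;	/* kernel virtual pointers for the CPU */
	unsigned  pages;
};

/* Fill both tables from one contiguous allocation, page by page. */
static void qpt_fill(struct qpt *q, void *kva, uint64_t dma)
{
	for (unsigned i = 0; i < q->pages; i++) {
		q->sw_tbl[i] = (char *)kva + i * PAGE_SZ;
		q->hw_tbl[i] = dma + i * PAGE_SZ;
	}
}

int main(void)
{
	static char mem[4 * PAGE_SZ];
	static uint64_t hw[4];
	static void *sw[4];
	struct qpt q = { hw, sw, 4 };

	qpt_fill(&q, mem, 0x10000000ull);	/* made-up bus address */
	printf("page 2: cpu %p, dma 0x%llx\n", sw[2],
	       (unsigned long long)hw[2]);
	return 0;
}
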
1934 bfa_wc_down(&rx_mod->rx_stop_wc); in bna_rx_mod_cb_rx_stopped()
1942 if (rx_mod->stop_cbfn) in bna_rx_mod_cb_rx_stopped_all()
1943 rx_mod->stop_cbfn(&rx_mod->bna->enet); in bna_rx_mod_cb_rx_stopped_all()
1944 rx_mod->stop_cbfn = NULL; in bna_rx_mod_cb_rx_stopped_all()
1950 rx->rx_flags |= BNA_RX_F_ENET_STARTED; in bna_rx_start()
1951 if (rx->rx_flags & BNA_RX_F_ENABLED) in bna_rx_start()
1958 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; in bna_rx_stop()
1959 if (rx->fsm == bna_rx_sm_stopped) in bna_rx_stop()
1960 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx); in bna_rx_stop()
1962 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped; in bna_rx_stop()
1963 rx->stop_cbarg = &rx->bna->rx_mod; in bna_rx_stop()
1972 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; in bna_rx_fail()
1981 rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED; in bna_rx_mod_start()
1983 rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK; in bna_rx_mod_start()
1985 list_for_each_entry(rx, &rx_mod->rx_active_q, qe) in bna_rx_mod_start()
1986 if (rx->type == type) in bna_rx_mod_start()
1995 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; in bna_rx_mod_stop()
1996 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; in bna_rx_mod_stop()
1998 rx_mod->stop_cbfn = bna_enet_cb_rx_stopped; in bna_rx_mod_stop()
2000 bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod); in bna_rx_mod_stop()
2002 list_for_each_entry(rx, &rx_mod->rx_active_q, qe) in bna_rx_mod_stop()
2003 if (rx->type == type) { in bna_rx_mod_stop()
2004 bfa_wc_up(&rx_mod->rx_stop_wc); in bna_rx_mod_stop()
2008 bfa_wc_wait(&rx_mod->rx_stop_wc); in bna_rx_mod_stop()
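
bna_rx_mod_stop() coordinates N asynchronous stops with a work counter: bfa_wc_init() arms a completion callback, bfa_wc_up() runs once per Rx being stopped, and bfa_wc_wait() closes the window so the callback fires when the last Rx reports back. A user-space sketch of that counter, assuming (as in bfa_cs.h) that init takes a +1 bias which wait releases:

#include <stdio.h>

struct wc {
	int count;
	void (*done)(void *arg);
	void *arg;
};

/* Arm the callback with a +1 bias so the callback cannot fire before
 * wc_wait() says all wc_up() calls have been issued. */
static void wc_init(struct wc *wc, void (*done)(void *), void *arg)
{
	wc->count = 1;
	wc->done = done;
	wc->arg = arg;
}

static void wc_up(struct wc *wc) { wc->count++; }

static void wc_down(struct wc *wc)
{
	if (--wc->count == 0)
		wc->done(wc->arg);
}

static void wc_wait(struct wc *wc) { wc_down(wc); }	/* drop the bias */

static void all_stopped(void *arg)
{
	(void)arg;
	puts("all rx stopped");
}

int main(void)
{
	struct wc wc;

	wc_init(&wc, all_stopped, NULL);
	wc_up(&wc);	/* first rx being stopped     */
	wc_up(&wc);	/* second rx being stopped    */
	wc_wait(&wc);	/* done issuing stops         */
	wc_down(&wc);	/* first rx reports back      */
	wc_down(&wc);	/* second rx: callback fires  */
	return 0;
}
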
2016 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; in bna_rx_mod_fail()
2017 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; in bna_rx_mod_fail()
2019 list_for_each_entry(rx, &rx_mod->rx_active_q, qe) in bna_rx_mod_fail()
2031 rx_mod->bna = bna; in bna_rx_mod_init()
2032 rx_mod->flags = 0; in bna_rx_mod_init()
2034 rx_mod->rx = (struct bna_rx *) in bna_rx_mod_init()
2036 rx_mod->rxp = (struct bna_rxp *) in bna_rx_mod_init()
2038 rx_mod->rxq = (struct bna_rxq *) in bna_rx_mod_init()
2042 INIT_LIST_HEAD(&rx_mod->rx_free_q); in bna_rx_mod_init()
2043 rx_mod->rx_free_count = 0; in bna_rx_mod_init()
2044 INIT_LIST_HEAD(&rx_mod->rxq_free_q); in bna_rx_mod_init()
2045 rx_mod->rxq_free_count = 0; in bna_rx_mod_init()
2046 INIT_LIST_HEAD(&rx_mod->rxp_free_q); in bna_rx_mod_init()
2047 rx_mod->rxp_free_count = 0; in bna_rx_mod_init()
2048 INIT_LIST_HEAD(&rx_mod->rx_active_q); in bna_rx_mod_init()
2051 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { in bna_rx_mod_init()
2052 rx_ptr = &rx_mod->rx[index]; in bna_rx_mod_init()
2054 INIT_LIST_HEAD(&rx_ptr->rxp_q); in bna_rx_mod_init()
2055 rx_ptr->bna = NULL; in bna_rx_mod_init()
2056 rx_ptr->rid = index; in bna_rx_mod_init()
2057 rx_ptr->stop_cbfn = NULL; in bna_rx_mod_init()
2058 rx_ptr->stop_cbarg = NULL; in bna_rx_mod_init()
2060 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q); in bna_rx_mod_init()
2061 rx_mod->rx_free_count++; in bna_rx_mod_init()
2064 /* build RX-path queue */ in bna_rx_mod_init()
2065 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { in bna_rx_mod_init()
2066 rxp_ptr = &rx_mod->rxp[index]; in bna_rx_mod_init()
2067 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q); in bna_rx_mod_init()
2068 rx_mod->rxp_free_count++; in bna_rx_mod_init()
2071 /* build RXQ queue */ in bna_rx_mod_init()
2072 for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) { in bna_rx_mod_init()
2073 rxq_ptr = &rx_mod->rxq[index]; in bna_rx_mod_init()
2074 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q); in bna_rx_mod_init()
2075 rx_mod->rxq_free_count++; in bna_rx_mod_init()
2082 rx_mod->bna = NULL; in bna_rx_mod_uninit()
2088 struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp; in bna_bfi_rx_enet_start_rsp()
2093 bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp, in bna_bfi_rx_enet_start_rsp()
2096 rx->hw_id = cfg_rsp->hw_id; in bna_bfi_rx_enet_start_rsp()
2098 for (i = 0, rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe); in bna_bfi_rx_enet_start_rsp()
2099 i < rx->num_paths; i++, rxp = list_next_entry(rxp, qe)) { in bna_bfi_rx_enet_start_rsp()
2103 rxp->cq.ccb->i_dbell->doorbell_addr = in bna_bfi_rx_enet_start_rsp()
2104 rx->bna->pcidev.pci_bar_kva in bna_bfi_rx_enet_start_rsp()
2105 + ntohl(cfg_rsp->q_handles[i].i_dbell); in bna_bfi_rx_enet_start_rsp()
2106 rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid; in bna_bfi_rx_enet_start_rsp()
2107 q0->rcb->q_dbell = in bna_bfi_rx_enet_start_rsp()
2108 rx->bna->pcidev.pci_bar_kva in bna_bfi_rx_enet_start_rsp()
2109 + ntohl(cfg_rsp->q_handles[i].ql_dbell); in bna_bfi_rx_enet_start_rsp()
2110 q0->hw_id = cfg_rsp->q_handles[i].hw_lqid; in bna_bfi_rx_enet_start_rsp()
2112 q1->rcb->q_dbell = in bna_bfi_rx_enet_start_rsp()
2113 rx->bna->pcidev.pci_bar_kva in bna_bfi_rx_enet_start_rsp()
2114 + ntohl(cfg_rsp->q_handles[i].qs_dbell); in bna_bfi_rx_enet_start_rsp()
2115 q1->hw_id = cfg_rsp->q_handles[i].hw_sqid; in bna_bfi_rx_enet_start_rsp()
2119 (*rxp->cq.ccb->hw_producer_index) = 0; in bna_bfi_rx_enet_start_rsp()
2120 rxp->cq.ccb->producer_index = 0; in bna_bfi_rx_enet_start_rsp()
2121 q0->rcb->producer_index = q0->rcb->consumer_index = 0; in bna_bfi_rx_enet_start_rsp()
2123 q1->rcb->producer_index = q1->rcb->consumer_index = 0; in bna_bfi_rx_enet_start_rsp()
2145 dq_depth = q_cfg->q0_depth; in bna_rx_res_req()
2146 hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth); in bna_rx_res_req()
2158 if (BNA_RXP_SINGLE != q_cfg->rxp_type) { in bna_rx_res_req()
2168 mem_info->mem_type = BNA_MEM_T_KVA; in bna_rx_res_req()
2169 mem_info->len = sizeof(struct bna_ccb); in bna_rx_res_req()
2170 mem_info->num = q_cfg->num_paths; in bna_rx_res_req()
2174 mem_info->mem_type = BNA_MEM_T_KVA; in bna_rx_res_req()
2175 mem_info->len = sizeof(struct bna_rcb); in bna_rx_res_req()
2176 mem_info->num = BNA_GET_RXQS(q_cfg); in bna_rx_res_req()
2180 mem_info->mem_type = BNA_MEM_T_DMA; in bna_rx_res_req()
2181 mem_info->len = cpage_count * sizeof(struct bna_dma_addr); in bna_rx_res_req()
2182 mem_info->num = q_cfg->num_paths; in bna_rx_res_req()
2186 mem_info->mem_type = BNA_MEM_T_KVA; in bna_rx_res_req()
2187 mem_info->len = cpage_count * sizeof(void *); in bna_rx_res_req()
2188 mem_info->num = q_cfg->num_paths; in bna_rx_res_req()
2192 mem_info->mem_type = BNA_MEM_T_DMA; in bna_rx_res_req()
2193 mem_info->len = PAGE_SIZE * cpage_count; in bna_rx_res_req()
2194 mem_info->num = q_cfg->num_paths; in bna_rx_res_req()
2198 mem_info->mem_type = BNA_MEM_T_DMA; in bna_rx_res_req()
2199 mem_info->len = dpage_count * sizeof(struct bna_dma_addr); in bna_rx_res_req()
2200 mem_info->num = q_cfg->num_paths; in bna_rx_res_req()
2204 mem_info->mem_type = BNA_MEM_T_KVA; in bna_rx_res_req()
2205 mem_info->len = dpage_count * sizeof(void *); in bna_rx_res_req()
2206 mem_info->num = q_cfg->num_paths; in bna_rx_res_req()
2210 mem_info->mem_type = BNA_MEM_T_DMA; in bna_rx_res_req()
2211 mem_info->len = PAGE_SIZE * dpage_count; in bna_rx_res_req()
2212 mem_info->num = q_cfg->num_paths; in bna_rx_res_req()
2216 mem_info->mem_type = BNA_MEM_T_DMA; in bna_rx_res_req()
2217 mem_info->len = hpage_count * sizeof(struct bna_dma_addr); in bna_rx_res_req()
2218 mem_info->num = (hpage_count ? q_cfg->num_paths : 0); in bna_rx_res_req()
2222 mem_info->mem_type = BNA_MEM_T_KVA; in bna_rx_res_req()
2223 mem_info->len = hpage_count * sizeof(void *); in bna_rx_res_req()
2224 mem_info->num = (hpage_count ? q_cfg->num_paths : 0); in bna_rx_res_req()
2228 mem_info->mem_type = BNA_MEM_T_DMA; in bna_rx_res_req()
2229 mem_info->len = PAGE_SIZE * hpage_count; in bna_rx_res_req()
2230 mem_info->num = (hpage_count ? q_cfg->num_paths : 0); in bna_rx_res_req()
2234 mem_info->mem_type = BNA_MEM_T_DMA; in bna_rx_res_req()
2235 mem_info->len = BFI_IBIDX_SIZE; in bna_rx_res_req()
2236 mem_info->num = q_cfg->num_paths; in bna_rx_res_req()
2240 mem_info->mem_type = BNA_MEM_T_KVA; in bna_rx_res_req()
2241 mem_info->len = BFI_ENET_RSS_RIT_MAX; in bna_rx_res_req()
2242 mem_info->num = 1; in bna_rx_res_req()
2246 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths; in bna_rx_res_req()
2256 struct bna_rx_mod *rx_mod = &bna->rx_mod; in bna_rx_create()
2307 rx = bna_rx_get(rx_mod, rx_cfg->rx_type); in bna_rx_create()
2308 rx->bna = bna; in bna_rx_create()
2309 rx->rx_flags = 0; in bna_rx_create()
2310 INIT_LIST_HEAD(&rx->rxp_q); in bna_rx_create()
2311 rx->stop_cbfn = NULL; in bna_rx_create()
2312 rx->stop_cbarg = NULL; in bna_rx_create()
2313 rx->priv = priv; in bna_rx_create()
2315 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn; in bna_rx_create()
2316 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn; in bna_rx_create()
2317 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn; in bna_rx_create()
2318 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn; in bna_rx_create()
2319 rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn; in bna_rx_create()
2321 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn; in bna_rx_create()
2322 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn; in bna_rx_create()
2324 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) { in bna_rx_create()
2325 switch (rx->type) { in bna_rx_create()
2327 if (!(rx->bna->rx_mod.flags & in bna_rx_create()
2329 rx->rx_flags |= BNA_RX_F_ENET_STARTED; in bna_rx_create()
2332 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK) in bna_rx_create()
2333 rx->rx_flags |= BNA_RX_F_ENET_STARTED; in bna_rx_create()
2338 rx->num_paths = rx_cfg->num_paths; in bna_rx_create()
2340 i < rx->num_paths; i++) { in bna_rx_create()
2342 list_add_tail(&rxp->qe, &rx->rxp_q); in bna_rx_create()
2343 rxp->type = rx_cfg->rxp_type; in bna_rx_create()
2344 rxp->rx = rx; in bna_rx_create()
2345 rxp->cq.rx = rx; in bna_rx_create()
2348 if (BNA_RXP_SINGLE == rx_cfg->rxp_type) in bna_rx_create()
2353 if (1 == intr_info->num) in bna_rx_create()
2354 rxp->vector = intr_info->idl[0].vector; in bna_rx_create()
2356 rxp->vector = intr_info->idl[i].vector; in bna_rx_create()
2360 rxp->cq.ib.ib_seg_host_addr.lsb = in bna_rx_create()
2362 rxp->cq.ib.ib_seg_host_addr.msb = in bna_rx_create()
2364 rxp->cq.ib.ib_seg_host_addr_kva = in bna_rx_create()
2366 rxp->cq.ib.intr_type = intr_info->intr_type; in bna_rx_create()
2367 if (intr_info->intr_type == BNA_INTR_T_MSIX) in bna_rx_create()
2368 rxp->cq.ib.intr_vector = rxp->vector; in bna_rx_create()
2370 rxp->cq.ib.intr_vector = BIT(rxp->vector); in bna_rx_create()
2371 rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo; in bna_rx_create()
2372 rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT; in bna_rx_create()
2373 rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO; in bna_rx_create()
2379 q0->rx = rx; in bna_rx_create()
2380 q0->rxp = rxp; in bna_rx_create()
2382 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; in bna_rx_create()
2383 q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva; in bna_rx_create()
2385 q0->rcb->q_depth = rx_cfg->q0_depth; in bna_rx_create()
2386 q0->q_depth = rx_cfg->q0_depth; in bna_rx_create()
2387 q0->multi_buffer = rx_cfg->q0_multi_buf; in bna_rx_create()
2388 q0->buffer_size = rx_cfg->q0_buf_size; in bna_rx_create()
2389 q0->num_vecs = rx_cfg->q0_num_vecs; in bna_rx_create()
2390 q0->rcb->rxq = q0; in bna_rx_create()
2391 q0->rcb->bnad = bna->bnad; in bna_rx_create()
2392 q0->rcb->id = 0; in bna_rx_create()
2393 q0->rx_packets = q0->rx_bytes = 0; in bna_rx_create()
2394 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; in bna_rx_create()
2395 q0->rxbuf_map_failed = 0; in bna_rx_create()
2400 if (rx->rcb_setup_cbfn) in bna_rx_create()
2401 rx->rcb_setup_cbfn(bnad, q0->rcb); in bna_rx_create()
2406 q1->rx = rx; in bna_rx_create()
2407 q1->rxp = rxp; in bna_rx_create()
2409 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; in bna_rx_create()
2410 q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva; in bna_rx_create()
2412 q1->rcb->q_depth = rx_cfg->q1_depth; in bna_rx_create()
2413 q1->q_depth = rx_cfg->q1_depth; in bna_rx_create()
2414 q1->multi_buffer = BNA_STATUS_T_DISABLED; in bna_rx_create()
2415 q1->num_vecs = 1; in bna_rx_create()
2416 q1->rcb->rxq = q1; in bna_rx_create()
2417 q1->rcb->bnad = bna->bnad; in bna_rx_create()
2418 q1->rcb->id = 1; in bna_rx_create()
2419 q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ? in bna_rx_create()
2420 rx_cfg->hds_config.forced_offset in bna_rx_create()
2421 : rx_cfg->q1_buf_size; in bna_rx_create()
2422 q1->rx_packets = q1->rx_bytes = 0; in bna_rx_create()
2423 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; in bna_rx_create()
2424 q1->rxbuf_map_failed = 0; in bna_rx_create()
2430 if (rx->rcb_setup_cbfn) in bna_rx_create()
2431 rx->rcb_setup_cbfn(bnad, q1->rcb); in bna_rx_create()
2436 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva; in bna_rx_create()
2437 cq_depth = rx_cfg->q0_depth + in bna_rx_create()
2438 ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ? in bna_rx_create()
2439 0 : rx_cfg->q1_depth); in bna_rx_create()
2440 /* if multi-buffer is enabled sum of q0_depth in bna_rx_create()
2444 rxp->cq.ccb->q_depth = cq_depth; in bna_rx_create()
2445 rxp->cq.ccb->cq = &rxp->cq; in bna_rx_create()
2446 rxp->cq.ccb->rcb[0] = q0->rcb; in bna_rx_create()
2447 q0->rcb->ccb = rxp->cq.ccb; in bna_rx_create()
2449 rxp->cq.ccb->rcb[1] = q1->rcb; in bna_rx_create()
2450 q1->rcb->ccb = rxp->cq.ccb; in bna_rx_create()
2452 rxp->cq.ccb->hw_producer_index = in bna_rx_create()
2453 (u32 *)rxp->cq.ib.ib_seg_host_addr_kva; in bna_rx_create()
2454 rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell; in bna_rx_create()
2455 rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type; in bna_rx_create()
2456 rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector; in bna_rx_create()
2457 rxp->cq.ccb->rx_coalescing_timeo = in bna_rx_create()
2458 rxp->cq.ib.coalescing_timeo; in bna_rx_create()
2459 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0; in bna_rx_create()
2460 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0; in bna_rx_create()
2461 rxp->cq.ccb->bnad = bna->bnad; in bna_rx_create()
2462 rxp->cq.ccb->id = i; in bna_rx_create()
2467 if (rx->ccb_setup_cbfn) in bna_rx_create()
2468 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb); in bna_rx_create()
2471 rx->hds_cfg = rx_cfg->hds_config; in bna_rx_create()
2473 bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info); in bna_rx_create()
2477 rx_mod->rid_mask |= BIT(rx->rid); in bna_rx_create()
2485 struct bna_rx_mod *rx_mod = &rx->bna->rx_mod; in bna_rx_destroy()
2491 bna_rxf_uninit(&rx->rxf); in bna_rx_destroy()
2493 while (!list_empty(&rx->rxp_q)) { in bna_rx_destroy()
2494 rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe); in bna_rx_destroy()
2495 list_del(&rxp->qe); in bna_rx_destroy()
2497 if (rx->rcb_destroy_cbfn) in bna_rx_destroy()
2498 rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb); in bna_rx_destroy()
2499 q0->rcb = NULL; in bna_rx_destroy()
2500 q0->rxp = NULL; in bna_rx_destroy()
2501 q0->rx = NULL; in bna_rx_destroy()
2505 if (rx->rcb_destroy_cbfn) in bna_rx_destroy()
2506 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb); in bna_rx_destroy()
2507 q1->rcb = NULL; in bna_rx_destroy()
2508 q1->rxp = NULL; in bna_rx_destroy()
2509 q1->rx = NULL; in bna_rx_destroy()
2512 rxp->rxq.slr.large = NULL; in bna_rx_destroy()
2513 rxp->rxq.slr.small = NULL; in bna_rx_destroy()
2515 if (rx->ccb_destroy_cbfn) in bna_rx_destroy()
2516 rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb); in bna_rx_destroy()
2517 rxp->cq.ccb = NULL; in bna_rx_destroy()
2518 rxp->rx = NULL; in bna_rx_destroy()
2522 list_for_each(qe, &rx_mod->rx_active_q) in bna_rx_destroy()
2523 if (qe == &rx->qe) { in bna_rx_destroy()
2524 list_del(&rx->qe); in bna_rx_destroy()
2528 rx_mod->rid_mask &= ~BIT(rx->rid); in bna_rx_destroy()
2530 rx->bna = NULL; in bna_rx_destroy()
2531 rx->priv = NULL; in bna_rx_destroy()
2538 if (rx->fsm != bna_rx_sm_stopped) in bna_rx_enable()
2541 rx->rx_flags |= BNA_RX_F_ENABLED; in bna_rx_enable()
2542 if (rx->rx_flags & BNA_RX_F_ENET_STARTED) in bna_rx_enable()
2552 (*cbfn)(rx->bna->bnad, rx); in bna_rx_disable()
2554 rx->stop_cbfn = cbfn; in bna_rx_disable()
2555 rx->stop_cbarg = rx->bna->bnad; in bna_rx_disable()
2557 rx->rx_flags &= ~BNA_RX_F_ENABLED; in bna_rx_disable()
2572 struct bna_rxf *rxf = &rx->rxf; in bna_rx_vlan_strip_enable()
2574 if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) { in bna_rx_vlan_strip_enable()
2575 rxf->vlan_strip_status = BNA_STATUS_T_ENABLED; in bna_rx_vlan_strip_enable()
2576 rxf->vlan_strip_pending = true; in bna_rx_vlan_strip_enable()
2584 struct bna_rxf *rxf = &rx->rxf; in bna_rx_vlan_strip_disable()
2586 if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) { in bna_rx_vlan_strip_disable()
2587 rxf->vlan_strip_status = BNA_STATUS_T_DISABLED; in bna_rx_vlan_strip_disable()
2588 rxf->vlan_strip_pending = true; in bna_rx_vlan_strip_disable()
2597 struct bna_rxf *rxf = &rx->rxf; in bna_rx_mode_set()
2604 if ((rx->bna->promisc_rid != BFI_INVALID_RID) && in bna_rx_mode_set()
2605 (rx->bna->promisc_rid != rxf->rx->rid)) in bna_rx_mode_set()
2609 if (rx->bna->default_mode_rid != BFI_INVALID_RID) in bna_rx_mode_set()
2619 if ((rx->bna->default_mode_rid != BFI_INVALID_RID) && in bna_rx_mode_set()
2620 (rx->bna->default_mode_rid != rxf->rx->rid)) { in bna_rx_mode_set()
2625 if (rx->bna->promisc_rid != BFI_INVALID_RID) in bna_rx_mode_set()
2650 rxf->cam_fltr_cbfn = NULL; in bna_rx_mode_set()
2651 rxf->cam_fltr_cbarg = rx->bna->bnad; in bna_rx_mode_set()
2664 struct bna_rxf *rxf = &rx->rxf; in bna_rx_vlanfilter_enable()
2666 if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) { in bna_rx_vlanfilter_enable()
2667 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED; in bna_rx_vlanfilter_enable()
2668 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; in bna_rx_vlanfilter_enable()
2678 list_for_each_entry(rxp, &rx->rxp_q, qe) { in bna_rx_coalescing_timeo_set()
2679 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo; in bna_rx_coalescing_timeo_set()
2680 bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo); in bna_rx_coalescing_timeo_set()
2691 bna->rx_mod.dim_vector[i][j] = vector[i][j]; in bna_rx_dim_reconfig()
2697 struct bna *bna = ccb->cq->rx->bna; in bna_rx_dim_update()
2702 if ((ccb->pkt_rate.small_pkt_cnt == 0) && in bna_rx_dim_update()
2703 (ccb->pkt_rate.large_pkt_cnt == 0)) in bna_rx_dim_update()
2708 small_rt = ccb->pkt_rate.small_pkt_cnt; in bna_rx_dim_update()
2709 large_rt = ccb->pkt_rate.large_pkt_cnt; in bna_rx_dim_update()
2735 ccb->pkt_rate.small_pkt_cnt = 0; in bna_rx_dim_update()
2736 ccb->pkt_rate.large_pkt_cnt = 0; in bna_rx_dim_update()
2738 coalescing_timeo = bna->rx_mod.dim_vector[load][bias]; in bna_rx_dim_update()
2739 ccb->rx_coalescing_timeo = coalescing_timeo; in bna_rx_dim_update()
2742 bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo); in bna_rx_dim_update()
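
/* bna_rx_dim_update() above classifies the last interval's traffic into a
 * (load, bias) pair, looks the new coalescing timeout up in a 2-D table, and
 * zeroes the per-interval packet counters.  A self-contained sketch; the
 * table values and the load threshold here are illustrative only: */
#include <stdint.h>

#define LOADS  2
#define BIASES 2

static const uint8_t dim_vector[LOADS][BIASES] = {
	{  5, 10 },	/* low load: small- vs large-packet bias */
	{ 20, 40 },	/* high load */
};

struct cq_stats {
	uint32_t small_pkt_cnt;
	uint32_t large_pkt_cnt;
	uint8_t coalescing_timeo;
};

static void dim_update(struct cq_stats *s)
{
	uint32_t total = s->small_pkt_cnt + s->large_pkt_cnt;
	int load, bias;

	if (!total)
		return;				/* idle: keep current timeout */

	load = (total > 1000);			/* threshold is made up */
	bias = (s->large_pkt_cnt >= s->small_pkt_cnt);

	s->coalescing_timeo = dim_vector[load][bias];
	s->small_pkt_cnt = 0;			/* restart the interval */
	s->large_pkt_cnt = 0;
}
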
2760 if ((tx)->stop_cbfn) { \
2763 cbfn = (tx)->stop_cbfn; \
2764 cbarg = (tx)->stop_cbarg; \
2765 (tx)->stop_cbfn = NULL; \
2766 (tx)->stop_cbarg = NULL; \
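
/* The block above is the classic call-and-clear callback macro: copy the
 * stored callback and argument into locals, NULL the stored pair, then
 * invoke, so the callback may safely arm a new one.  A sketch, simplified to
 * a single-argument callback: */
#define call_stop_cbfn(obj)					\
do {								\
	if ((obj)->stop_cbfn) {					\
		void (*cbfn)(void *);				\
		void *cbarg;					\
		cbfn = (obj)->stop_cbfn;			\
		cbarg = (obj)->stop_cbarg;			\
		(obj)->stop_cbfn = NULL;			\
		(obj)->stop_cbarg = NULL;			\
		cbfn(cbarg);					\
	}							\
} while (0)
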
2818 /* No-op */ in bna_tx_sm_stopped()
2822 /* No-op */ in bna_tx_sm_stopped()
2841 tx->flags &= ~BNA_TX_F_BW_UPDATED; in bna_tx_sm_start_wait()
2846 tx->flags &= ~BNA_TX_F_BW_UPDATED; in bna_tx_sm_start_wait()
2851 if (tx->flags & BNA_TX_F_BW_UPDATED) { in bna_tx_sm_start_wait()
2852 tx->flags &= ~BNA_TX_F_BW_UPDATED; in bna_tx_sm_start_wait()
2859 tx->flags |= BNA_TX_F_BW_UPDATED; in bna_tx_sm_start_wait()
2871 int is_regular = (tx->type == BNA_TX_T_REGULAR); in bna_tx_sm_started_entry()
2873 list_for_each_entry(txq, &tx->txq_q, qe) { in bna_tx_sm_started_entry()
2874 txq->tcb->priority = txq->priority; in bna_tx_sm_started_entry()
2876 bna_ib_start(tx->bna, &txq->ib, is_regular); in bna_tx_sm_started_entry()
2878 tx->tx_resume_cbfn(tx->bna->bnad, tx); in bna_tx_sm_started_entry()
2887 tx->tx_stall_cbfn(tx->bna->bnad, tx); in bna_tx_sm_started()
2893 tx->tx_stall_cbfn(tx->bna->bnad, tx); in bna_tx_sm_started()
2894 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); in bna_tx_sm_started()
2918 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); in bna_tx_sm_stop_wait()
2923 * We are here due to start_wait -> stop_wait transition on in bna_tx_sm_stop_wait()
2930 /* No-op */ in bna_tx_sm_stop_wait()
2949 /* No-op */ in bna_tx_sm_cleanup_wait()
2964 tx->tx_stall_cbfn(tx->bna->bnad, tx); in bna_tx_sm_prio_stop_wait_entry()
2978 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); in bna_tx_sm_prio_stop_wait()
2986 /* No-op */ in bna_tx_sm_prio_stop_wait()
2997 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); in bna_tx_sm_prio_cleanup_wait_entry()
3013 /* No-op */ in bna_tx_sm_prio_cleanup_wait()
3043 /* No-op */ in bna_tx_sm_failed()
3077 /* No-op */ in bna_tx_sm_quiesce_wait()
3088 struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req; in bna_bfi_tx_enet_start()
3092 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, in bna_bfi_tx_enet_start()
3093 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid); in bna_bfi_tx_enet_start()
3094 cfg_req->mh.num_entries = htons( in bna_bfi_tx_enet_start()
3097 cfg_req->num_queues = tx->num_txq; in bna_bfi_tx_enet_start()
3098 for (i = 0; i < tx->num_txq; i++) { in bna_bfi_tx_enet_start()
3100 : list_first_entry(&tx->txq_q, struct bna_txq, qe); in bna_bfi_tx_enet_start()
3101 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt); in bna_bfi_tx_enet_start()
3102 cfg_req->q_cfg[i].q.priority = txq->priority; in bna_bfi_tx_enet_start()
3104 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = in bna_bfi_tx_enet_start()
3105 txq->ib.ib_seg_host_addr.lsb; in bna_bfi_tx_enet_start()
3106 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = in bna_bfi_tx_enet_start()
3107 txq->ib.ib_seg_host_addr.msb; in bna_bfi_tx_enet_start()
3108 cfg_req->q_cfg[i].ib.intr.msix_index = in bna_bfi_tx_enet_start()
3109 htons((u16)txq->ib.intr_vector); in bna_bfi_tx_enet_start()
3112 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED; in bna_bfi_tx_enet_start()
3113 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; in bna_bfi_tx_enet_start()
3114 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; in bna_bfi_tx_enet_start()
3115 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED; in bna_bfi_tx_enet_start()
3116 cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX) in bna_bfi_tx_enet_start()
3118 cfg_req->ib_cfg.coalescing_timeout = in bna_bfi_tx_enet_start()
3119 htonl((u32)txq->ib.coalescing_timeo); in bna_bfi_tx_enet_start()
3120 cfg_req->ib_cfg.inter_pkt_timeout = in bna_bfi_tx_enet_start()
3121 htonl((u32)txq->ib.interpkt_timeo); in bna_bfi_tx_enet_start()
3122 cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count; in bna_bfi_tx_enet_start()
3124 cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI; in bna_bfi_tx_enet_start()
3125 cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id); in bna_bfi_tx_enet_start()
3126 cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED; in bna_bfi_tx_enet_start()
3127 cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED; in bna_bfi_tx_enet_start()
3129 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, in bna_bfi_tx_enet_start()
3130 sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh); in bna_bfi_tx_enet_start()
3131 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); in bna_bfi_tx_enet_start()
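
/* bna_bfi_tx_enet_start() above marshals the per-queue interrupt-block config
 * for firmware: the 64-bit IB host address travels as two pre-split 32-bit
 * halves, the MSI-X index goes through htons(), and the timeouts go through
 * htonl().  A stripped-down sketch; struct ib_cfg_wire is an invented wire
 * layout, not the driver's: */
#include <stdint.h>
#include <arpa/inet.h>

struct ib_cfg_wire {
	uint32_t addr_lo;		/* IB host address, low 32 bits  */
	uint32_t addr_hi;		/* IB host address, high 32 bits */
	uint16_t msix_index;		/* big-endian on the wire */
	uint32_t coalescing_timeout;	/* big-endian on the wire */
};

static void fill_ib_cfg(struct ib_cfg_wire *w, uint32_t addr_lsb,
			uint32_t addr_msb, unsigned int vector,
			uint8_t coalescing_timeo)
{
	w->addr_lo = addr_lsb;		/* halves arrive pre-split lsb/msb */
	w->addr_hi = addr_msb;
	w->msix_index = htons((uint16_t)vector);
	w->coalescing_timeout = htonl(coalescing_timeo);
}
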
3137 struct bfi_enet_req *req = &tx->bfi_enet_cmd.req; in bna_bfi_tx_enet_stop()
3139 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, in bna_bfi_tx_enet_stop()
3140 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid); in bna_bfi_tx_enet_stop()
3141 req->mh.num_entries = htons( in bna_bfi_tx_enet_stop()
3143 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), in bna_bfi_tx_enet_stop()
3144 &req->mh); in bna_bfi_tx_enet_stop()
3145 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); in bna_bfi_tx_enet_stop()
3154 list_for_each_entry(txq, &tx->txq_q, qe) in bna_tx_enet_stop()
3155 bna_ib_stop(tx->bna, &txq->ib); in bna_tx_enet_stop()
3171 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; in bna_txq_qpt_setup()
3172 txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; in bna_txq_qpt_setup()
3173 txq->qpt.kv_qpt_ptr = qpt_mem->kva; in bna_txq_qpt_setup()
3174 txq->qpt.page_count = page_count; in bna_txq_qpt_setup()
3175 txq->qpt.page_size = page_size; in bna_txq_qpt_setup()
3177 txq->tcb->sw_qpt = (void **) swqpt_mem->kva; in bna_txq_qpt_setup()
3178 txq->tcb->sw_q = page_mem->kva; in bna_txq_qpt_setup()
3180 kva = page_mem->kva; in bna_txq_qpt_setup()
3181 BNA_GET_DMA_ADDR(&page_mem->dma, dma); in bna_txq_qpt_setup()
3184 txq->tcb->sw_qpt[i] = kva; in bna_txq_qpt_setup()
3188 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb = in bna_txq_qpt_setup()
3190 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb = in bna_txq_qpt_setup()
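
/* bna_txq_qpt_setup() above fills two parallel page tables from one
 * contiguous allocation: a software table of kernel virtual addresses and a
 * hardware table of DMA addresses split into lsb/msb halves.  A sketch;
 * PAGE_SZ and the types are illustrative: */
#include <stdint.h>

#define PAGE_SZ 4096u

struct dma_addr32 {
	uint32_t lsb;
	uint32_t msb;
};

static void qpt_fill(void **sw_qpt, struct dma_addr32 *hw_qpt,
		     int page_count, void *kva, uint64_t dma)
{
	int i;

	for (i = 0; i < page_count; i++) {
		sw_qpt[i] = (char *)kva + (uint64_t)i * PAGE_SZ;
		hw_qpt[i].lsb = (uint32_t)(dma & 0xffffffffu);
		hw_qpt[i].msb = (uint32_t)(dma >> 32);
		dma += PAGE_SZ;
	}
}
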
3201 if (list_empty(&tx_mod->tx_free_q)) in bna_tx_get()
3204 tx = list_first_entry(&tx_mod->tx_free_q, struct bna_tx, qe); in bna_tx_get()
3206 tx = list_last_entry(&tx_mod->tx_free_q, struct bna_tx, qe); in bna_tx_get()
3207 list_del(&tx->qe); in bna_tx_get()
3208 tx->type = type; in bna_tx_get()
3216 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod; in bna_tx_free()
3220 while (!list_empty(&tx->txq_q)) { in bna_tx_free()
3221 txq = list_first_entry(&tx->txq_q, struct bna_txq, qe); in bna_tx_free()
3222 txq->tcb = NULL; in bna_tx_free()
3223 txq->tx = NULL; in bna_tx_free()
3224 list_move_tail(&txq->qe, &tx_mod->txq_free_q); in bna_tx_free()
3227 list_for_each(qe, &tx_mod->tx_active_q) { in bna_tx_free()
3228 if (qe == &tx->qe) { in bna_tx_free()
3229 list_del(&tx->qe); in bna_tx_free()
3234 tx->bna = NULL; in bna_tx_free()
3235 tx->priv = NULL; in bna_tx_free()
3237 list_for_each_prev(qe, &tx_mod->tx_free_q) in bna_tx_free()
3238 if (((struct bna_tx *)qe)->rid < tx->rid) in bna_tx_free()
3241 list_add(&tx->qe, qe); in bna_tx_free()
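
/* bna_tx_free() above walks the free list backwards and re-inserts the tx
 * after the last entry with a smaller rid, keeping the free list sorted so
 * that allocation by rid stays deterministic.  The equivalent idea on a plain
 * singly linked list: */
struct node {
	struct node *next;
	int rid;
};

static void sorted_insert(struct node **head, struct node *n)
{
	struct node **pp = head;

	while (*pp && (*pp)->rid < n->rid)
		pp = &(*pp)->next;
	n->next = *pp;		/* insert before first larger rid */
	*pp = n;
}
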
3247 tx->flags |= BNA_TX_F_ENET_STARTED; in bna_tx_start()
3248 if (tx->flags & BNA_TX_F_ENABLED) in bna_tx_start()
3255 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped; in bna_tx_stop()
3256 tx->stop_cbarg = &tx->bna->tx_mod; in bna_tx_stop()
3258 tx->flags &= ~BNA_TX_F_ENET_STARTED; in bna_tx_stop()
3265 tx->flags &= ~BNA_TX_F_ENET_STARTED; in bna_tx_fail()
3272 struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp; in bna_bfi_tx_enet_start_rsp()
3276 bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp, in bna_bfi_tx_enet_start_rsp()
3279 tx->hw_id = cfg_rsp->hw_id; in bna_bfi_tx_enet_start_rsp()
3281 for (i = 0, txq = list_first_entry(&tx->txq_q, struct bna_txq, qe); in bna_bfi_tx_enet_start_rsp()
3282 i < tx->num_txq; i++, txq = list_next_entry(txq, qe)) { in bna_bfi_tx_enet_start_rsp()
3284 txq->tcb->i_dbell->doorbell_addr = in bna_bfi_tx_enet_start_rsp()
3285 tx->bna->pcidev.pci_bar_kva in bna_bfi_tx_enet_start_rsp()
3286 + ntohl(cfg_rsp->q_handles[i].i_dbell); in bna_bfi_tx_enet_start_rsp()
3287 txq->tcb->q_dbell = in bna_bfi_tx_enet_start_rsp()
3288 tx->bna->pcidev.pci_bar_kva in bna_bfi_tx_enet_start_rsp()
3289 + ntohl(cfg_rsp->q_handles[i].q_dbell); in bna_bfi_tx_enet_start_rsp()
3290 txq->hw_id = cfg_rsp->q_handles[i].hw_qid; in bna_bfi_tx_enet_start_rsp()
3293 (*txq->tcb->hw_consumer_index) = 0; in bna_bfi_tx_enet_start_rsp()
3294 txq->tcb->producer_index = txq->tcb->consumer_index = 0; in bna_bfi_tx_enet_start_rsp()
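
/* bna_bfi_tx_enet_start_rsp() above turns the firmware reply into live
 * queues: each queue handle carries network-order byte offsets that are added
 * to the mapped PCI BAR to form doorbell addresses, and the producer/consumer
 * indices are zeroed before traffic starts.  A sketch with invented types: */
#include <stdint.h>
#include <arpa/inet.h>

struct q_handle_wire {
	uint32_t i_dbell;	/* interrupt doorbell offset, big-endian */
	uint32_t q_dbell;	/* queue doorbell offset, big-endian */
};

struct queue {
	volatile uint8_t *i_dbell;
	volatile uint8_t *q_dbell;
	uint32_t producer_index;
	uint32_t consumer_index;
};

static void queue_arm(struct queue *q, uint8_t *pci_bar_kva,
		      const struct q_handle_wire *h)
{
	q->i_dbell = pci_bar_kva + ntohl(h->i_dbell);
	q->q_dbell = pci_bar_kva + ntohl(h->q_dbell);
	q->producer_index = 0;
	q->consumer_index = 0;
}
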
3311 list_for_each_entry(tx, &tx_mod->tx_active_q, qe) in bna_bfi_bw_update_aen()
3324 mem_info->mem_type = BNA_MEM_T_KVA; in bna_tx_res_req()
3325 mem_info->len = sizeof(struct bna_tcb); in bna_tx_res_req()
3326 mem_info->num = num_txq; in bna_tx_res_req()
3334 mem_info->mem_type = BNA_MEM_T_DMA; in bna_tx_res_req()
3335 mem_info->len = page_count * sizeof(struct bna_dma_addr); in bna_tx_res_req()
3336 mem_info->num = num_txq; in bna_tx_res_req()
3340 mem_info->mem_type = BNA_MEM_T_KVA; in bna_tx_res_req()
3341 mem_info->len = page_count * sizeof(void *); in bna_tx_res_req()
3342 mem_info->num = num_txq; in bna_tx_res_req()
3346 mem_info->mem_type = BNA_MEM_T_DMA; in bna_tx_res_req()
3347 mem_info->len = PAGE_SIZE * page_count; in bna_tx_res_req()
3348 mem_info->num = num_txq; in bna_tx_res_req()
3352 mem_info->mem_type = BNA_MEM_T_DMA; in bna_tx_res_req()
3353 mem_info->len = BFI_IBIDX_SIZE; in bna_tx_res_req()
3354 mem_info->num = num_txq; in bna_tx_res_req()
3359 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq; in bna_tx_res_req()
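
/* bna_tx_res_req() above sizes five memory resources plus one completion
 * interrupt per TXQ.  A sketch of the arithmetic, assuming a work-item size
 * wi_sz; the real driver derives page_count from txq_depth in a comparable
 * way: */
#include <stdint.h>

#define PAGE_SZ 4096u

static uint32_t txq_page_count(uint32_t txq_depth, uint32_t wi_sz)
{
	uint64_t q_size = (uint64_t)txq_depth * wi_sz;

	return (uint32_t)((q_size + PAGE_SZ - 1) / PAGE_SZ);	/* round up */
}
/*
 * Per TXQ the requests above then come out to roughly:
 *   TCB:         sizeof(struct bna_tcb)           (kernel VA memory)
 *   QPT:         page_count * sizeof(dma addr)    (DMA-able)
 *   shadow QPT:  page_count * sizeof(void *)      (kernel VA memory)
 *   queue pages: page_count * PAGE_SZ             (DMA-able)
 *   IB index:    one index segment (BFI_IBIDX_SIZE, DMA-able)
 * plus one TX-completion interrupt vector per TXQ.
 */
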
3369 struct bna_tx_mod *tx_mod = &bna->tx_mod; in bna_tx_create()
3383 if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq)) in bna_tx_create()
3388 tx = bna_tx_get(tx_mod, tx_cfg->tx_type); in bna_tx_create()
3391 tx->bna = bna; in bna_tx_create()
3392 tx->priv = priv; in bna_tx_create()
3396 INIT_LIST_HEAD(&tx->txq_q); in bna_tx_create()
3397 for (i = 0; i < tx_cfg->num_txq; i++) { in bna_tx_create()
3398 if (list_empty(&tx_mod->txq_free_q)) in bna_tx_create()
3401 txq = list_first_entry(&tx_mod->txq_free_q, struct bna_txq, qe); in bna_tx_create()
3402 list_move_tail(&txq->qe, &tx->txq_q); in bna_tx_create()
3403 txq->tx = tx; in bna_tx_create()
3412 tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn; in bna_tx_create()
3413 tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn; in bna_tx_create()
3415 tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn; in bna_tx_create()
3416 tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn; in bna_tx_create()
3417 tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn; in bna_tx_create()
3419 list_add_tail(&tx->qe, &tx_mod->tx_active_q); in bna_tx_create()
3421 tx->num_txq = tx_cfg->num_txq; in bna_tx_create()
3423 tx->flags = 0; in bna_tx_create()
3424 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) { in bna_tx_create()
3425 switch (tx->type) { in bna_tx_create()
3427 if (!(tx->bna->tx_mod.flags & in bna_tx_create()
3429 tx->flags |= BNA_TX_F_ENET_STARTED; in bna_tx_create()
3432 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK) in bna_tx_create()
3433 tx->flags |= BNA_TX_F_ENET_STARTED; in bna_tx_create()
3441 list_for_each_entry(txq, &tx->txq_q, qe) { in bna_tx_create()
3442 txq->tcb = (struct bna_tcb *) in bna_tx_create()
3444 txq->tx_packets = 0; in bna_tx_create()
3445 txq->tx_bytes = 0; in bna_tx_create()
3448 txq->ib.ib_seg_host_addr.lsb = in bna_tx_create()
3450 txq->ib.ib_seg_host_addr.msb = in bna_tx_create()
3452 txq->ib.ib_seg_host_addr_kva = in bna_tx_create()
3454 txq->ib.intr_type = intr_info->intr_type; in bna_tx_create()
3455 txq->ib.intr_vector = (intr_info->num == 1) ? in bna_tx_create()
3456 intr_info->idl[0].vector : in bna_tx_create()
3457 intr_info->idl[i].vector; in bna_tx_create()
3458 if (intr_info->intr_type == BNA_INTR_T_INTX) in bna_tx_create()
3459 txq->ib.intr_vector = BIT(txq->ib.intr_vector); in bna_tx_create()
3460 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo; in bna_tx_create()
3461 txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO; in bna_tx_create()
3462 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT; in bna_tx_create()
3466 txq->tcb->q_depth = tx_cfg->txq_depth; in bna_tx_create()
3467 txq->tcb->unmap_q = (void *) in bna_tx_create()
3469 txq->tcb->hw_consumer_index = in bna_tx_create()
3470 (u32 *)txq->ib.ib_seg_host_addr_kva; in bna_tx_create()
3471 txq->tcb->i_dbell = &txq->ib.door_bell; in bna_tx_create()
3472 txq->tcb->intr_type = txq->ib.intr_type; in bna_tx_create()
3473 txq->tcb->intr_vector = txq->ib.intr_vector; in bna_tx_create()
3474 txq->tcb->txq = txq; in bna_tx_create()
3475 txq->tcb->bnad = bnad; in bna_tx_create()
3476 txq->tcb->id = i; in bna_tx_create()
3486 if (tx->tcb_setup_cbfn) in bna_tx_create()
3487 (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb); in bna_tx_create()
3489 if (tx_cfg->num_txq == BFI_TX_MAX_PRIO) in bna_tx_create()
3490 txq->priority = txq->tcb->id; in bna_tx_create()
3492 txq->priority = tx_mod->default_prio; in bna_tx_create()
3497 tx->txf_vlan_id = 0; in bna_tx_create()
3501 tx_mod->rid_mask |= BIT(tx->rid); in bna_tx_create()
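
/* In bna_tx_create() above, every TXQ either shares one interrupt vector
 * (intr_info->num == 1) or gets its own per-queue MSI-X vector, and for
 * legacy INTx the "vector" becomes a bit position via BIT().  A sketch of
 * that selection with invented names: */
static unsigned int pick_vector(const unsigned int *vectors,
				int num_vectors, int qid, int is_intx)
{
	unsigned int v = (num_vectors == 1) ? vectors[0] : vectors[qid];

	return is_intx ? (1u << v) : v;	/* INTx: bit mask, MSI-X: index */
}
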
3515 list_for_each_entry(txq, &tx->txq_q, qe) in bna_tx_destroy()
3516 if (tx->tcb_destroy_cbfn) in bna_tx_destroy()
3517 (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb); in bna_tx_destroy()
3519 tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid); in bna_tx_destroy()
3526 if (tx->fsm != bna_tx_sm_stopped) in bna_tx_enable()
3529 tx->flags |= BNA_TX_F_ENABLED; in bna_tx_enable()
3531 if (tx->flags & BNA_TX_F_ENET_STARTED) in bna_tx_enable()
3540 (*cbfn)(tx->bna->bnad, tx); in bna_tx_disable()
3544 tx->stop_cbfn = cbfn; in bna_tx_disable()
3545 tx->stop_cbarg = tx->bna->bnad; in bna_tx_disable()
3547 tx->flags &= ~BNA_TX_F_ENABLED; in bna_tx_disable()
3563 bfa_wc_down(&tx_mod->tx_stop_wc); in bna_tx_mod_cb_tx_stopped()
3571 if (tx_mod->stop_cbfn) in bna_tx_mod_cb_tx_stopped_all()
3572 tx_mod->stop_cbfn(&tx_mod->bna->enet); in bna_tx_mod_cb_tx_stopped_all()
3573 tx_mod->stop_cbfn = NULL; in bna_tx_mod_cb_tx_stopped_all()
3582 tx_mod->bna = bna; in bna_tx_mod_init()
3583 tx_mod->flags = 0; in bna_tx_mod_init()
3585 tx_mod->tx = (struct bna_tx *) in bna_tx_mod_init()
3587 tx_mod->txq = (struct bna_txq *) in bna_tx_mod_init()
3590 INIT_LIST_HEAD(&tx_mod->tx_free_q); in bna_tx_mod_init()
3591 INIT_LIST_HEAD(&tx_mod->tx_active_q); in bna_tx_mod_init()
3593 INIT_LIST_HEAD(&tx_mod->txq_free_q); in bna_tx_mod_init()
3595 for (i = 0; i < bna->ioceth.attr.num_txq; i++) { in bna_tx_mod_init()
3596 tx_mod->tx[i].rid = i; in bna_tx_mod_init()
3597 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q); in bna_tx_mod_init()
3598 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q); in bna_tx_mod_init()
3601 tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL; in bna_tx_mod_init()
3602 tx_mod->default_prio = 0; in bna_tx_mod_init()
3603 tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED; in bna_tx_mod_init()
3604 tx_mod->iscsi_prio = -1; in bna_tx_mod_init()
3610 tx_mod->bna = NULL; in bna_tx_mod_uninit()
3618 tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED; in bna_tx_mod_start()
3620 tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK; in bna_tx_mod_start()
3622 list_for_each_entry(tx, &tx_mod->tx_active_q, qe) in bna_tx_mod_start()
3623 if (tx->type == type) in bna_tx_mod_start()
3632 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; in bna_tx_mod_stop()
3633 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; in bna_tx_mod_stop()
3635 tx_mod->stop_cbfn = bna_enet_cb_tx_stopped; in bna_tx_mod_stop()
3637 bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod); in bna_tx_mod_stop()
3639 list_for_each_entry(tx, &tx_mod->tx_active_q, qe) in bna_tx_mod_stop()
3640 if (tx->type == type) { in bna_tx_mod_stop()
3641 bfa_wc_up(&tx_mod->tx_stop_wc); in bna_tx_mod_stop()
3645 bfa_wc_wait(&tx_mod->tx_stop_wc); in bna_tx_mod_stop()
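
/* bna_tx_mod_stop() above uses a wait-counter (bfa_wc): init takes an
 * implicit reference, each in-flight tx stop takes another (wc_up), each
 * completion drops one (wc_down), and the stopped-all callback fires when the
 * count reaches zero; wc_wait() drops the init reference.  A minimal
 * re-implementation of the idea: */
typedef void (*wc_resume_t)(void *arg);

struct wc {
	int count;
	wc_resume_t resume;
	void *arg;
};

static void wc_init(struct wc *w, wc_resume_t resume, void *arg)
{
	w->count = 1;		/* initial ref, dropped by wc_wait() */
	w->resume = resume;
	w->arg = arg;
}

static void wc_down(struct wc *w)
{
	if (--w->count == 0)
		w->resume(w->arg);	/* last stop completed */
}

static void wc_up(struct wc *w)
{
	w->count++;			/* one more stop in flight */
}

static void wc_wait(struct wc *w)
{
	wc_down(w);			/* may fire resume immediately */
}
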
3653 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; in bna_tx_mod_fail()
3654 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; in bna_tx_mod_fail()
3656 list_for_each_entry(tx, &tx_mod->tx_active_q, qe) in bna_tx_mod_fail()
3665 list_for_each_entry(txq, &tx->txq_q, qe) in bna_tx_coalescing_timeo_set()
3666 bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo); in bna_tx_coalescing_timeo_set()