Lines Matching +full:tx +full:- +full:port +full:- +full:mapping
3 * Copyright (c) 2007-2013 Broadcom Corporation
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll); in bnx2x_add_all_napi_cnic()
57 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll); in bnx2x_add_all_napi()
74 * bnx2x_move_fp - move content of the fastpath structure.
80 * Makes sure the contents of the bp->fp[to].napi are kept
88 struct bnx2x_fastpath *from_fp = &bp->fp[from]; in bnx2x_move_fp()
89 struct bnx2x_fastpath *to_fp = &bp->fp[to]; in bnx2x_move_fp()
90 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from]; in bnx2x_move_fp()
91 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to]; in bnx2x_move_fp()
92 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from]; in bnx2x_move_fp()
93 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; in bnx2x_move_fp()
96 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info; in bnx2x_move_fp()
99 from_fp->napi = to_fp->napi; in bnx2x_move_fp()
103 to_fp->index = to; in bnx2x_move_fp()
108 to_fp->tpa_info = old_tpa_info; in bnx2x_move_fp()
121 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos; in bnx2x_move_fp()
122 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) * in bnx2x_move_fp()
123 (bp)->max_cos; in bnx2x_move_fp()
129 memcpy(&bp->bnx2x_txq[new_txdata_index], in bnx2x_move_fp()
130 &bp->bnx2x_txq[old_txdata_index], in bnx2x_move_fp()
132 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index]; in bnx2x_move_fp()
136 * bnx2x_fill_fw_str - Fill buffer with FW version string.
149 bnx2x_get_ext_phy_fw_version(&bp->link_params, in bnx2x_fill_fw_str()
154 bp->fw_ver, in bnx2x_fill_fw_str()
155 (bp->common.bc_ver & 0xff0000) >> 16, in bnx2x_fill_fw_str()
156 (bp->common.bc_ver & 0xff00) >> 8, in bnx2x_fill_fw_str()
157 (bp->common.bc_ver & 0xff), in bnx2x_fill_fw_str()
165 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
174 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer in bnx2x_shrink_eth_fp()
177 for (cos = 1; cos < bp->max_cos; cos++) { in bnx2x_shrink_eth_fp()
178 for (i = 0; i < old_eth_num - delta; i++) { in bnx2x_shrink_eth_fp()
179 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_shrink_eth_fp()
180 int new_idx = cos * (old_eth_num - delta) + i; in bnx2x_shrink_eth_fp()
182 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos], in bnx2x_shrink_eth_fp()
184 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx]; in bnx2x_shrink_eth_fp()
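
The two fragments above (bnx2x_move_fp() and bnx2x_shrink_eth_fp()) both recompute the flat index into bp->bnx2x_txq[] as cos * <number of ethernet queues> + <queue index>. A minimal standalone sketch of that arithmetic, not driver code; the queue counts below are made-up example values:

#include <stdio.h>

/* Mirrors: new_idx = cos * (old_eth_num - delta) + i from bnx2x_shrink_eth_fp(). */
static int txq_index(int cos, int queue, int num_eth_queues)
{
        return cos * num_eth_queues + queue;
}

int main(void)
{
        int old_eth_num = 8, delta = 2, max_cos = 3;    /* assumed example values */

        for (int cos = 1; cos < max_cos; cos++)
                for (int q = 0; q < old_eth_num - delta; q++)
                        printf("cos=%d queue=%d -> bnx2x_txq[%d]\n",
                               cos, q, txq_index(cos, q, old_eth_num - delta));
        return 0;
}
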
189 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
198 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx]; in bnx2x_free_tx_pkt()
201 struct sk_buff *skb = tx_buf->skb; in bnx2x_free_tx_pkt()
202 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; in bnx2x_free_tx_pkt()
207 prefetch(&skb->end); in bnx2x_free_tx_pkt()
209 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", in bnx2x_free_tx_pkt()
210 txdata->txq_index, idx, tx_buf, skb); in bnx2x_free_tx_pkt()
212 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; in bnx2x_free_tx_pkt()
214 nbd = le16_to_cpu(tx_start_bd->nbd) - 1; in bnx2x_free_tx_pkt()
216 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { in bnx2x_free_tx_pkt()
221 new_cons = nbd + tx_buf->first_bd; in bnx2x_free_tx_pkt()
227 --nbd; in bnx2x_free_tx_pkt()
230 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) { in bnx2x_free_tx_pkt()
232 --nbd; in bnx2x_free_tx_pkt()
236 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */ in bnx2x_free_tx_pkt()
237 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { in bnx2x_free_tx_pkt()
238 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; in bnx2x_free_tx_pkt()
240 --nbd; in bnx2x_free_tx_pkt()
245 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), in bnx2x_free_tx_pkt()
252 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; in bnx2x_free_tx_pkt()
253 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), in bnx2x_free_tx_pkt()
255 if (--nbd) in bnx2x_free_tx_pkt()
263 (*bytes_compl) += skb->len; in bnx2x_free_tx_pkt()
267 tx_buf->first_bd = 0; in bnx2x_free_tx_pkt()
268 tx_buf->skb = NULL; in bnx2x_free_tx_pkt()
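
The unmap sequence above walks one packet's BD chain: the start BD is unmapped with dma_unmap_single(), any parsing/split BDs only consume nbd counts, and the remaining data BDs are unmapped with dma_unmap_page() until nbd runs out. A hedged standalone sketch of that walk in plain C (no DMA; the struct and fields are simplified stand-ins, not the driver's types):

#include <stdio.h>

struct fake_bd { int is_start; int len; };      /* illustrative only */

/* "Unmap" the start BD, then each remaining data BD while nbd counts down,
 * mirroring the loop shape in bnx2x_free_tx_pkt(). */
static int free_pkt(struct fake_bd *ring, int first, int nbd, int ring_size)
{
        int idx = first;

        printf("unmap start BD %d (dma_unmap_single)\n", idx);
        idx = (idx + 1) % ring_size;
        --nbd;                          /* start BD consumed */

        while (nbd > 0) {
                printf("unmap data BD %d (dma_unmap_page), len %d\n",
                       idx, ring[idx].len);
                idx = (idx + 1) % ring_size;
                --nbd;
        }
        return idx;                     /* new consumer index */
}

int main(void)
{
        struct fake_bd ring[8] = { { 1, 64 }, { 0, 1500 }, { 0, 700 } };

        free_pkt(ring, 0, 3, 8);
        return 0;
}
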
276 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons; in bnx2x_tx_int()
280 if (unlikely(bp->panic)) in bnx2x_tx_int()
281 return -1; in bnx2x_tx_int()
284 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); in bnx2x_tx_int()
285 hw_cons = le16_to_cpu(*txdata->tx_cons_sb); in bnx2x_tx_int()
286 sw_cons = txdata->tx_pkt_cons; in bnx2x_tx_int()
298 txdata->txq_index, hw_cons, sw_cons, pkt_cons); in bnx2x_tx_int()
308 txdata->tx_pkt_cons = sw_cons; in bnx2x_tx_int()
309 txdata->tx_bd_cons = bd_cons; in bnx2x_tx_int()
323 /* Taking tx_lock() is needed to prevent re-enabling the queue in bnx2x_tx_int()
328 * stops the queue->sees fresh tx_bd_cons->releases the queue-> in bnx2x_tx_int()
329 * sends some packets consuming the whole queue again-> in bnx2x_tx_int()
336 (bp->state == BNX2X_STATE_OPEN) && in bnx2x_tx_int()
348 u16 last_max = fp->last_max_sge; in bnx2x_update_last_max_sge()
351 fp->last_max_sge = idx; in bnx2x_update_last_max_sge()
358 struct bnx2x *bp = fp->bp; in bnx2x_update_sge_prod()
368 BIT_VEC64_CLEAR_BIT(fp->sge_mask, in bnx2x_update_sge_prod()
369 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i]))); in bnx2x_update_sge_prod()
371 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", in bnx2x_update_sge_prod()
372 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1])); in bnx2x_update_sge_prod()
375 prefetch((void *)(fp->sge_mask)); in bnx2x_update_sge_prod()
377 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1])); in bnx2x_update_sge_prod()
379 last_max = RX_SGE(fp->last_max_sge); in bnx2x_update_sge_prod()
381 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; in bnx2x_update_sge_prod()
389 if (likely(fp->sge_mask[i])) in bnx2x_update_sge_prod()
392 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; in bnx2x_update_sge_prod()
397 fp->rx_sge_prod += delta; in bnx2x_update_sge_prod()
398 /* clear page-end entries */ in bnx2x_update_sge_prod()
403 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n", in bnx2x_update_sge_prod()
404 fp->last_max_sge, fp->rx_sge_prod); in bnx2x_update_sge_prod()
415 if ((bp->dev->features & NETIF_F_RXHASH) && in bnx2x_get_rxhash()
416 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) { in bnx2x_get_rxhash()
419 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE; in bnx2x_get_rxhash()
424 return le32_to_cpu(cqe->rss_hash_result); in bnx2x_get_rxhash()
434 struct bnx2x *bp = fp->bp; in bnx2x_tpa_start()
435 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; in bnx2x_tpa_start()
436 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; in bnx2x_tpa_start()
437 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; in bnx2x_tpa_start()
438 dma_addr_t mapping; in bnx2x_tpa_start() local
439 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; in bnx2x_tpa_start()
440 struct sw_rx_bd *first_buf = &tpa_info->first_buf; in bnx2x_tpa_start()
443 if (tpa_info->tpa_state != BNX2X_TPA_STOP) in bnx2x_tpa_start()
447 mapping = dma_map_single(&bp->pdev->dev, in bnx2x_tpa_start()
448 first_buf->data + NET_SKB_PAD, in bnx2x_tpa_start()
449 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_tpa_start()
451 * ...if it fails - move the skb from the consumer to the producer in bnx2x_tpa_start()
456 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_tpa_start()
459 tpa_info->tpa_state = BNX2X_TPA_ERROR; in bnx2x_tpa_start()
464 prod_rx_buf->data = first_buf->data; in bnx2x_tpa_start()
465 dma_unmap_addr_set(prod_rx_buf, mapping, mapping); in bnx2x_tpa_start()
467 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); in bnx2x_tpa_start()
468 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); in bnx2x_tpa_start()
474 tpa_info->parsing_flags = in bnx2x_tpa_start()
475 le16_to_cpu(cqe->pars_flags.flags); in bnx2x_tpa_start()
476 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); in bnx2x_tpa_start()
477 tpa_info->tpa_state = BNX2X_TPA_START; in bnx2x_tpa_start()
478 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); in bnx2x_tpa_start()
479 tpa_info->placement_offset = cqe->placement_offset; in bnx2x_tpa_start()
480 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type); in bnx2x_tpa_start()
481 if (fp->mode == TPA_MODE_GRO) { in bnx2x_tpa_start()
482 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len); in bnx2x_tpa_start()
483 tpa_info->full_page = SGE_PAGES / gro_size * gro_size; in bnx2x_tpa_start()
484 tpa_info->gro_size = gro_size; in bnx2x_tpa_start()
488 fp->tpa_queue_used |= (1 << queue); in bnx2x_tpa_start()
489 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", in bnx2x_tpa_start()
490 fp->tpa_queue_used); in bnx2x_tpa_start()
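
bnx2x_tpa_start() above swaps buffers rather than copying packet data: the spare buffer parked in tpa_info->first_buf is mapped and installed at the producer slot so the ring stays full, while the consumer buffer holding the start of the aggregation moves into first_buf. A rough standalone sketch of just that pointer swap (illustrative; the real code also programs the hardware BD address, records TPA state and handles DMA-mapping failure):

#include <stdio.h>

struct buf { const char *data; };       /* stand-in for struct sw_rx_bd */

static void tpa_start_swap(struct buf *cons, struct buf *prod, struct buf *first_buf)
{
        struct buf spare = *first_buf;  /* previously parked empty buffer  */

        *first_buf = *cons;             /* partial packet moves to the pool */
        *prod = spare;                  /* spare buffer refills the ring    */
}

int main(void)
{
        struct buf cons = { "packet-start" }, prod = { NULL }, pool = { "spare" };

        tpa_start_swap(&cons, &prod, &pool);
        printf("first_buf=%s prod=%s\n", pool.data, prod.data);
        return 0;
}
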
500 * bnx2x_set_gro_params - compute GRO values
525 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in bnx2x_set_gro_params()
528 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in bnx2x_set_gro_params()
539 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len; in bnx2x_set_gro_params()
541 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count in bnx2x_set_gro_params()
542 * to skb_shinfo(skb)->gso_segs in bnx2x_set_gro_params()
544 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs; in bnx2x_set_gro_params()
550 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; in bnx2x_alloc_rx_sge()
551 struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; in bnx2x_alloc_rx_sge()
552 struct bnx2x_alloc_pool *pool = &fp->page_pool; in bnx2x_alloc_rx_sge()
553 dma_addr_t mapping; in bnx2x_alloc_rx_sge() local
555 if (!pool->page) { in bnx2x_alloc_rx_sge()
556 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); in bnx2x_alloc_rx_sge()
557 if (unlikely(!pool->page)) in bnx2x_alloc_rx_sge()
558 return -ENOMEM; in bnx2x_alloc_rx_sge()
560 pool->offset = 0; in bnx2x_alloc_rx_sge()
563 mapping = dma_map_page(&bp->pdev->dev, pool->page, in bnx2x_alloc_rx_sge()
564 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE); in bnx2x_alloc_rx_sge()
565 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_alloc_rx_sge()
567 return -ENOMEM; in bnx2x_alloc_rx_sge()
570 sw_buf->page = pool->page; in bnx2x_alloc_rx_sge()
571 sw_buf->offset = pool->offset; in bnx2x_alloc_rx_sge()
573 dma_unmap_addr_set(sw_buf, mapping, mapping); in bnx2x_alloc_rx_sge()
575 sge->addr_hi = cpu_to_le32(U64_HI(mapping)); in bnx2x_alloc_rx_sge()
576 sge->addr_lo = cpu_to_le32(U64_LO(mapping)); in bnx2x_alloc_rx_sge()
578 pool->offset += SGE_PAGE_SIZE; in bnx2x_alloc_rx_sge()
579 if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE) in bnx2x_alloc_rx_sge()
580 get_page(pool->page); in bnx2x_alloc_rx_sge()
582 pool->page = NULL; in bnx2x_alloc_rx_sge()
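
bnx2x_alloc_rx_sge() above carves SGE_PAGE_SIZE chunks out of one higher-order page: each chunk is mapped and handed to the ring, get_page() bumps the refcount while another chunk still fits, and the page is dropped from the pool once exhausted. A hedged sketch of the offset bookkeeping (standalone C with made-up page-size constants):

#include <stdio.h>

#define FAKE_PAGE_SIZE     16384        /* stands in for PAGE_SIZE       */
#define FAKE_SGE_PAGE_SIZE  4096        /* stands in for SGE_PAGE_SIZE   */

struct pool { int have_page; int offset; int refs; };

/* Hand out one chunk; mirrors the pool->offset advance and the
 * "keep the page only if another chunk still fits" decision. */
static int alloc_sge(struct pool *p)
{
        if (!p->have_page) {
                p->have_page = 1;       /* alloc_pages() in the driver    */
                p->offset = 0;
                p->refs = 1;
        }

        int chunk_off = p->offset;

        p->offset += FAKE_SGE_PAGE_SIZE;
        if (FAKE_PAGE_SIZE - p->offset >= FAKE_SGE_PAGE_SIZE)
                p->refs++;              /* get_page(): ring still shares it */
        else
                p->have_page = 0;       /* pool forgets the exhausted page  */

        return chunk_off;
}

int main(void)
{
        struct pool p = { 0 };

        for (int i = 0; i < 5; i++)
                printf("sge %d -> page offset %d\n", i, alloc_sge(&p));
        return 0;
}
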
596 u16 len_on_bd = tpa_info->len_on_bd; in bnx2x_fill_frag_skb()
599 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd; in bnx2x_fill_frag_skb()
601 if (fp->mode == TPA_MODE_GRO) { in bnx2x_fill_frag_skb()
602 gro_size = tpa_info->gro_size; in bnx2x_fill_frag_skb()
603 full_page = tpa_info->full_page; in bnx2x_fill_frag_skb()
608 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd, in bnx2x_fill_frag_skb()
609 le16_to_cpu(cqe->pkt_len), in bnx2x_fill_frag_skb()
610 le16_to_cpu(cqe->num_of_coalesced_segs)); in bnx2x_fill_frag_skb()
616 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len); in bnx2x_fill_frag_skb()
618 return -EINVAL; in bnx2x_fill_frag_skb()
624 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j])); in bnx2x_fill_frag_skb()
628 if (fp->mode == TPA_MODE_GRO) in bnx2x_fill_frag_skb()
633 rx_pg = &fp->rx_page_ring[sge_idx]; in bnx2x_fill_frag_skb()
640 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_fill_frag_skb()
644 dma_unmap_page(&bp->pdev->dev, in bnx2x_fill_frag_skb()
645 dma_unmap_addr(&old_rx_pg, mapping), in bnx2x_fill_frag_skb()
648 if (fp->mode == TPA_MODE_LRO) in bnx2x_fill_frag_skb()
654 for (rem = frag_len; rem > 0; rem -= gro_size) { in bnx2x_fill_frag_skb()
666 skb->data_len += frag_len; in bnx2x_fill_frag_skb()
667 skb->truesize += SGE_PAGES; in bnx2x_fill_frag_skb()
668 skb->len += frag_len; in bnx2x_fill_frag_skb()
670 frag_size -= frag_len; in bnx2x_fill_frag_skb()
681 if (fp->rx_frag_size) in bnx2x_build_skb()
682 skb = build_skb(data, fp->rx_frag_size); in bnx2x_build_skb()
690 if (fp->rx_frag_size) in bnx2x_frag_free()
698 if (fp->rx_frag_size) { in bnx2x_frag_alloc()
703 return napi_alloc_frag(fp->rx_frag_size); in bnx2x_frag_alloc()
706 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask); in bnx2x_frag_alloc()
718 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), in bnx2x_gro_ip_csum()
719 iph->saddr, iph->daddr, 0); in bnx2x_gro_ip_csum()
730 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), in bnx2x_gro_ipv6_csum()
731 &iph->saddr, &iph->daddr, 0); in bnx2x_gro_ipv6_csum()
747 if (skb_shinfo(skb)->gso_size) { in bnx2x_gro_receive()
748 switch (be16_to_cpu(skb->protocol)) { in bnx2x_gro_receive()
756 netdev_WARN_ONCE(bp->dev, in bnx2x_gro_receive()
758 be16_to_cpu(skb->protocol)); in bnx2x_gro_receive()
762 skb_record_rx_queue(skb, fp->rx_queue); in bnx2x_gro_receive()
763 napi_gro_receive(&fp->napi, skb); in bnx2x_gro_receive()
772 struct sw_rx_bd *rx_buf = &tpa_info->first_buf; in bnx2x_tpa_stop()
773 u8 pad = tpa_info->placement_offset; in bnx2x_tpa_stop()
774 u16 len = tpa_info->len_on_bd; in bnx2x_tpa_stop()
776 u8 *new_data, *data = rx_buf->data; in bnx2x_tpa_stop()
777 u8 old_tpa_state = tpa_info->tpa_state; in bnx2x_tpa_stop()
779 tpa_info->tpa_state = BNX2X_TPA_STOP; in bnx2x_tpa_stop()
781 /* If there was an error during the handling of the TPA_START - in bnx2x_tpa_stop()
792 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), in bnx2x_tpa_stop()
793 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_tpa_stop()
799 if (pad + len > fp->rx_buf_size) { in bnx2x_tpa_stop()
801 pad, len, fp->rx_buf_size); in bnx2x_tpa_stop()
810 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type); in bnx2x_tpa_stop()
812 skb->protocol = eth_type_trans(skb, bp->dev); in bnx2x_tpa_stop()
813 skb->ip_summed = CHECKSUM_UNNECESSARY; in bnx2x_tpa_stop()
817 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) in bnx2x_tpa_stop()
818 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag); in bnx2x_tpa_stop()
822 "Failed to allocate new pages - dropping packet!\n"); in bnx2x_tpa_stop()
827 rx_buf->data = new_data; in bnx2x_tpa_stop()
836 "Failed to allocate or map a new skb - dropping packet!\n"); in bnx2x_tpa_stop()
837 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; in bnx2x_tpa_stop()
844 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; in bnx2x_alloc_rx_data()
845 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; in bnx2x_alloc_rx_data()
846 dma_addr_t mapping; in bnx2x_alloc_rx_data() local
850 return -ENOMEM; in bnx2x_alloc_rx_data()
852 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, in bnx2x_alloc_rx_data()
853 fp->rx_buf_size, in bnx2x_alloc_rx_data()
855 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_alloc_rx_data()
858 return -ENOMEM; in bnx2x_alloc_rx_data()
861 rx_buf->data = data; in bnx2x_alloc_rx_data()
862 dma_unmap_addr_set(rx_buf, mapping, mapping); in bnx2x_alloc_rx_data()
864 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); in bnx2x_alloc_rx_data()
865 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); in bnx2x_alloc_rx_data()
880 if (cqe->fast_path_cqe.status_flags & in bnx2x_csum_validate()
886 if (cqe->fast_path_cqe.type_error_flags & in bnx2x_csum_validate()
889 qstats->hw_csum_err++; in bnx2x_csum_validate()
891 skb->ip_summed = CHECKSUM_UNNECESSARY; in bnx2x_csum_validate()
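
bnx2x_csum_validate() above only marks the skb CHECKSUM_UNNECESSARY when the CQE reports a completed L4 checksum and no checksum-error flag is set; otherwise the hw_csum_err counter is bumped and the stack verifies in software. A compact sketch of that decision (the flag names are illustrative, not the firmware bit definitions):

#include <stdio.h>
#include <stdbool.h>

struct fake_cqe { bool l4_csum_done; bool l4_csum_err; };   /* stand-ins */

enum { CSUM_NONE, CSUM_UNNECESSARY };

static int csum_validate(const struct fake_cqe *cqe, int *hw_csum_err)
{
        if (!cqe->l4_csum_done)
                return CSUM_NONE;       /* no offload result to trust       */
        if (cqe->l4_csum_err) {
                (*hw_csum_err)++;       /* qstats->hw_csum_err++            */
                return CSUM_NONE;       /* let the stack verify in software */
        }
        return CSUM_UNNECESSARY;
}

int main(void)
{
        int err = 0;
        struct fake_cqe good = { true, false }, bad = { true, true };

        printf("good=%d bad=%d hw_csum_err=%d\n",
               csum_validate(&good, &err), csum_validate(&bad, &err), err);
        return 0;
}
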
896 struct bnx2x *bp = fp->bp; in bnx2x_rx_int()
904 if (unlikely(bp->panic)) in bnx2x_rx_int()
910 bd_cons = fp->rx_bd_cons; in bnx2x_rx_int()
911 bd_prod = fp->rx_bd_prod; in bnx2x_rx_int()
913 sw_comp_cons = fp->rx_comp_cons; in bnx2x_rx_int()
914 sw_comp_prod = fp->rx_comp_prod; in bnx2x_rx_int()
917 cqe = &fp->rx_comp_ring[comp_ring_cons]; in bnx2x_rx_int()
918 cqe_fp = &cqe->fast_path_cqe; in bnx2x_rx_int()
921 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons); in bnx2x_rx_int()
934 if (unlikely(bp->panic)) in bnx2x_rx_int()
946 * stale data. Without the barrier TPA state-machine might in bnx2x_rx_int()
948 * provided with incorrect packet description - these lead in bnx2x_rx_int()
953 cqe_fp_flags = cqe_fp->type_error_flags; in bnx2x_rx_int()
959 cqe_fp_flags, cqe_fp->status_flags, in bnx2x_rx_int()
960 le32_to_cpu(cqe_fp->rss_hash_result), in bnx2x_rx_int()
961 le16_to_cpu(cqe_fp->vlan_tag), in bnx2x_rx_int()
962 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len)); in bnx2x_rx_int()
970 rx_buf = &fp->rx_buf_ring[bd_cons]; in bnx2x_rx_int()
971 data = rx_buf->data; in bnx2x_rx_int()
978 if (fp->mode == TPA_MODE_DISABLED && in bnx2x_rx_int()
986 u16 queue = cqe_fp->queue_index; in bnx2x_rx_int()
997 queue = cqe->end_agg_cqe.queue_index; in bnx2x_rx_int()
998 tpa_info = &fp->tpa_info[queue]; in bnx2x_rx_int()
1003 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) - in bnx2x_rx_int()
1004 tpa_info->len_on_bd; in bnx2x_rx_int()
1006 if (fp->mode == TPA_MODE_GRO) in bnx2x_rx_int()
1007 pages = (frag_size + tpa_info->full_page - 1) / in bnx2x_rx_int()
1008 tpa_info->full_page; in bnx2x_rx_int()
1014 &cqe->end_agg_cqe, comp_ring_cons); in bnx2x_rx_int()
1016 if (bp->panic) in bnx2x_rx_int()
1020 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe); in bnx2x_rx_int()
1024 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len); in bnx2x_rx_int()
1025 pad = cqe_fp->placement_offset; in bnx2x_rx_int()
1026 dma_sync_single_for_cpu(&bp->pdev->dev, in bnx2x_rx_int()
1027 dma_unmap_addr(rx_buf, mapping), in bnx2x_rx_int()
1037 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; in bnx2x_rx_int()
1044 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && in bnx2x_rx_int()
1046 skb = napi_alloc_skb(&fp->napi, len); in bnx2x_rx_int()
1050 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1053 memcpy(skb->data, data + pad, len); in bnx2x_rx_int()
1058 dma_unmap_single(&bp->pdev->dev, in bnx2x_rx_int()
1059 dma_unmap_addr(rx_buf, mapping), in bnx2x_rx_int()
1060 fp->rx_buf_size, in bnx2x_rx_int()
1065 bnx2x_fp_qstats(bp, fp)-> in bnx2x_rx_int()
1073 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1081 skb->protocol = eth_type_trans(skb, bp->dev); in bnx2x_rx_int()
1083 /* Set Toeplitz hash for a non-LRO skb */ in bnx2x_rx_int()
1089 if (bp->dev->features & NETIF_F_RXCSUM) in bnx2x_rx_int()
1093 skb_record_rx_queue(skb, fp->rx_queue); in bnx2x_rx_int()
1096 if (unlikely(cqe->fast_path_cqe.type_error_flags & in bnx2x_rx_int()
1100 if (le16_to_cpu(cqe_fp->pars_flags.flags) & in bnx2x_rx_int()
1103 le16_to_cpu(cqe_fp->vlan_tag)); in bnx2x_rx_int()
1105 napi_gro_receive(&fp->napi, skb); in bnx2x_rx_int()
1107 rx_buf->data = NULL; in bnx2x_rx_int()
1124 cqe = &fp->rx_comp_ring[comp_ring_cons]; in bnx2x_rx_int()
1125 cqe_fp = &cqe->fast_path_cqe; in bnx2x_rx_int()
1128 fp->rx_bd_cons = bd_cons; in bnx2x_rx_int()
1129 fp->rx_bd_prod = bd_prod_fw; in bnx2x_rx_int()
1130 fp->rx_comp_cons = sw_comp_cons; in bnx2x_rx_int()
1131 fp->rx_comp_prod = sw_comp_prod; in bnx2x_rx_int()
1135 fp->rx_sge_prod); in bnx2x_rx_int()
1143 struct bnx2x *bp = fp->bp; in bnx2x_msix_fp_int()
1147 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n", in bnx2x_msix_fp_int()
1148 fp->index, fp->fw_sb_id, fp->igu_sb_id); in bnx2x_msix_fp_int()
1150 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); in bnx2x_msix_fp_int()
1153 if (unlikely(bp->panic)) in bnx2x_msix_fp_int()
1157 /* Handle Rx and Tx according to MSI-X vector */ in bnx2x_msix_fp_int()
1159 prefetch(fp->txdata_ptr[cos]->tx_cons_sb); in bnx2x_msix_fp_int()
1161 prefetch(&fp->sb_running_index[SM_RX_ID]); in bnx2x_msix_fp_int()
1162 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); in bnx2x_msix_fp_int()
1167 /* HW Lock for shared dual port PHYs */
1170 mutex_lock(&bp->port.phy_mutex); in bnx2x_acquire_phy_lock()
1179 mutex_unlock(&bp->port.phy_mutex); in bnx2x_release_phy_lock()
1185 u16 line_speed = bp->link_vars.line_speed; in bnx2x_get_mf_speed()
1188 bp->mf_config[BP_VN(bp)]); in bnx2x_get_mf_speed()
1207 * bnx2x_fill_report_data - fill link report data to report
1212 * It uses non-atomic bit operations because it is called under the mutex.
1221 data->line_speed = bnx2x_get_mf_speed(bp); in bnx2x_fill_report_data()
1224 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS)) in bnx2x_fill_report_data()
1226 &data->link_report_flags); in bnx2x_fill_report_data()
1230 &data->link_report_flags); in bnx2x_fill_report_data()
1233 if (bp->link_vars.duplex == DUPLEX_FULL) in bnx2x_fill_report_data()
1235 &data->link_report_flags); in bnx2x_fill_report_data()
1238 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) in bnx2x_fill_report_data()
1240 &data->link_report_flags); in bnx2x_fill_report_data()
1242 /* Tx Flow Control is ON */ in bnx2x_fill_report_data()
1243 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) in bnx2x_fill_report_data()
1245 &data->link_report_flags); in bnx2x_fill_report_data()
1247 *data = bp->vf_link_vars; in bnx2x_fill_report_data()
1252 * bnx2x_link_report - report link status to OS.
1269 * __bnx2x_link_report - report link status to OS.
1280 if (bp->force_link_down) { in __bnx2x_link_report()
1281 bp->link_vars.link_up = 0; in __bnx2x_link_report()
1293 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) || in __bnx2x_link_report()
1295 &bp->last_reported_link.link_report_flags) && in __bnx2x_link_report()
1300 bp->link_cnt++; in __bnx2x_link_report()
1302 /* We are going to report new link parameters now - in __bnx2x_link_report()
1305 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data)); in __bnx2x_link_report()
1313 netif_carrier_off(bp->dev); in __bnx2x_link_report()
1314 netdev_err(bp->dev, "NIC Link is Down\n"); in __bnx2x_link_report()
1320 netif_carrier_on(bp->dev); in __bnx2x_link_report()
1337 flow = "ON - receive & transmit"; in __bnx2x_link_report()
1339 flow = "ON - receive"; in __bnx2x_link_report()
1341 flow = "ON - transmit"; in __bnx2x_link_report()
1346 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", in __bnx2x_link_report()
1358 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; in bnx2x_set_next_page_sgl()
1359 sge->addr_hi = in bnx2x_set_next_page_sgl()
1360 cpu_to_le32(U64_HI(fp->rx_sge_mapping + in bnx2x_set_next_page_sgl()
1363 sge->addr_lo = in bnx2x_set_next_page_sgl()
1364 cpu_to_le32(U64_LO(fp->rx_sge_mapping + in bnx2x_set_next_page_sgl()
1375 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; in bnx2x_free_tpa_pool()
1376 struct sw_rx_bd *first_buf = &tpa_info->first_buf; in bnx2x_free_tpa_pool()
1377 u8 *data = first_buf->data; in bnx2x_free_tpa_pool()
1383 if (tpa_info->tpa_state == BNX2X_TPA_START) in bnx2x_free_tpa_pool()
1384 dma_unmap_single(&bp->pdev->dev, in bnx2x_free_tpa_pool()
1385 dma_unmap_addr(first_buf, mapping), in bnx2x_free_tpa_pool()
1386 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_free_tpa_pool()
1388 first_buf->data = NULL; in bnx2x_free_tpa_pool()
1397 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings_cnic()
1399 fp->rx_bd_cons = 0; in bnx2x_init_rx_rings_cnic()
1406 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings_cnic()
1407 fp->rx_sge_prod); in bnx2x_init_rx_rings_cnic()
1419 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings()
1422 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); in bnx2x_init_rx_rings()
1424 if (fp->mode != TPA_MODE_DISABLED) { in bnx2x_init_rx_rings()
1425 /* Fill the per-aggregation pool */ in bnx2x_init_rx_rings()
1428 &fp->tpa_info[i]; in bnx2x_init_rx_rings()
1430 &tpa_info->first_buf; in bnx2x_init_rx_rings()
1432 first_buf->data = in bnx2x_init_rx_rings()
1434 if (!first_buf->data) { in bnx2x_init_rx_rings()
1435 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n", in bnx2x_init_rx_rings()
1438 fp->mode = TPA_MODE_DISABLED; in bnx2x_init_rx_rings()
1441 dma_unmap_addr_set(first_buf, mapping, 0); in bnx2x_init_rx_rings()
1442 tpa_info->tpa_state = BNX2X_TPA_STOP; in bnx2x_init_rx_rings()
1466 fp->mode = TPA_MODE_DISABLED; in bnx2x_init_rx_rings()
1473 fp->rx_sge_prod = ring_prod; in bnx2x_init_rx_rings()
1478 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings()
1480 fp->rx_bd_cons = 0; in bnx2x_init_rx_rings()
1487 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings()
1488 fp->rx_sge_prod); in bnx2x_init_rx_rings()
1496 U64_LO(fp->rx_comp_mapping)); in bnx2x_init_rx_rings()
1499 U64_HI(fp->rx_comp_mapping)); in bnx2x_init_rx_rings()
1507 struct bnx2x *bp = fp->bp; in bnx2x_free_tx_skbs_queue()
1510 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_free_tx_skbs_queue()
1513 u16 sw_prod = txdata->tx_pkt_prod; in bnx2x_free_tx_skbs_queue()
1514 u16 sw_cons = txdata->tx_pkt_cons; in bnx2x_free_tx_skbs_queue()
1523 netdev_get_tx_queue(bp->dev, in bnx2x_free_tx_skbs_queue()
1524 txdata->txq_index)); in bnx2x_free_tx_skbs_queue()
1533 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs_cnic()
1542 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs()
1548 struct bnx2x *bp = fp->bp; in bnx2x_free_rx_bds()
1552 if (fp->rx_buf_ring == NULL) in bnx2x_free_rx_bds()
1556 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; in bnx2x_free_rx_bds()
1557 u8 *data = rx_buf->data; in bnx2x_free_rx_bds()
1561 dma_unmap_single(&bp->pdev->dev, in bnx2x_free_rx_bds()
1562 dma_unmap_addr(rx_buf, mapping), in bnx2x_free_rx_bds()
1563 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_free_rx_bds()
1565 rx_buf->data = NULL; in bnx2x_free_rx_bds()
1575 bnx2x_free_rx_bds(&bp->fp[j]); in bnx2x_free_rx_skbs_cnic()
1584 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_free_rx_skbs()
1588 if (fp->mode != TPA_MODE_DISABLED) in bnx2x_free_rx_skbs()
1608 u32 mf_cfg = bp->mf_config[BP_VN(bp)]; in bnx2x_update_max_mf_config()
1623 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1637 free_irq(bp->msix_table[offset].vector, bp->dev); in bnx2x_free_msix_irqs()
1639 bp->msix_table[offset].vector); in bnx2x_free_msix_irqs()
1652 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n", in bnx2x_free_msix_irqs()
1653 i, bp->msix_table[offset].vector); in bnx2x_free_msix_irqs()
1655 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); in bnx2x_free_msix_irqs()
1661 if (bp->flags & USING_MSIX_FLAG && in bnx2x_free_irq()
1662 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { in bnx2x_free_irq()
1671 free_irq(bp->dev->irq, bp->dev); in bnx2x_free_irq()
1681 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1683 bp->msix_table[0].entry); in bnx2x_enable_msix()
1689 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1691 msix_vec, bp->msix_table[msix_vec].entry); in bnx2x_enable_msix()
1697 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1706 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], in bnx2x_enable_msix()
1709 * reconfigure number of tx/rx queues according to available in bnx2x_enable_msix()
1710 * MSI-X vectors in bnx2x_enable_msix()
1712 if (rc == -ENOSPC) { in bnx2x_enable_msix()
1714 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1); in bnx2x_enable_msix()
1716 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n", in bnx2x_enable_msix()
1721 BNX2X_DEV_INFO("Using single MSI-X vector\n"); in bnx2x_enable_msix()
1722 bp->flags |= USING_SINGLE_MSIX_FLAG; in bnx2x_enable_msix()
1725 bp->num_ethernet_queues = 1; in bnx2x_enable_msix()
1726 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_enable_msix()
1728 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); in bnx2x_enable_msix()
1732 int diff = msix_vec - rc; in bnx2x_enable_msix()
1734 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc); in bnx2x_enable_msix()
1739 bp->num_ethernet_queues -= diff; in bnx2x_enable_msix()
1740 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_enable_msix()
1743 bp->num_queues); in bnx2x_enable_msix()
1746 bp->flags |= USING_MSIX_FLAG; in bnx2x_enable_msix()
1752 if (rc == -ENOMEM) in bnx2x_enable_msix()
1753 bp->flags |= DISABLE_MSI_FLAG; in bnx2x_enable_msix()
1764 rc = request_irq(bp->msix_table[offset++].vector, in bnx2x_req_msix_irqs()
1766 bp->dev->name, bp->dev); in bnx2x_req_msix_irqs()
1769 return -EBUSY; in bnx2x_req_msix_irqs()
1777 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_req_msix_irqs()
1778 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", in bnx2x_req_msix_irqs()
1779 bp->dev->name, i); in bnx2x_req_msix_irqs()
1781 rc = request_irq(bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1782 bnx2x_msix_fp_int, 0, fp->name, fp); in bnx2x_req_msix_irqs()
1785 bp->msix_table[offset].vector, rc); in bnx2x_req_msix_irqs()
1787 return -EBUSY; in bnx2x_req_msix_irqs()
1796 netdev_info(bp->dev, in bnx2x_req_msix_irqs()
1797 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n", in bnx2x_req_msix_irqs()
1798 bp->msix_table[0].vector, in bnx2x_req_msix_irqs()
1799 0, bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1800 i - 1, bp->msix_table[offset + i - 1].vector); in bnx2x_req_msix_irqs()
1803 netdev_info(bp->dev, in bnx2x_req_msix_irqs()
1804 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n", in bnx2x_req_msix_irqs()
1805 0, bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1806 i - 1, bp->msix_table[offset + i - 1].vector); in bnx2x_req_msix_irqs()
1815 rc = pci_enable_msi(bp->pdev); in bnx2x_enable_msi()
1818 return -1; in bnx2x_enable_msi()
1820 bp->flags |= USING_MSI_FLAG; in bnx2x_enable_msi()
1830 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG)) in bnx2x_req_irq()
1835 if (bp->flags & USING_MSIX_FLAG) in bnx2x_req_irq()
1836 irq = bp->msix_table[0].vector; in bnx2x_req_irq()
1838 irq = bp->pdev->irq; in bnx2x_req_irq()
1840 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); in bnx2x_req_irq()
1846 if (bp->flags & USING_MSIX_FLAG && in bnx2x_setup_irqs()
1847 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { in bnx2x_setup_irqs()
1857 if (bp->flags & USING_MSI_FLAG) { in bnx2x_setup_irqs()
1858 bp->dev->irq = bp->pdev->irq; in bnx2x_setup_irqs()
1859 netdev_info(bp->dev, "using MSI IRQ %d\n", in bnx2x_setup_irqs()
1860 bp->dev->irq); in bnx2x_setup_irqs()
1862 if (bp->flags & USING_MSIX_FLAG) { in bnx2x_setup_irqs()
1863 bp->dev->irq = bp->msix_table[0].vector; in bnx2x_setup_irqs()
1864 netdev_info(bp->dev, "using MSIX IRQ %d\n", in bnx2x_setup_irqs()
1865 bp->dev->irq); in bnx2x_setup_irqs()
1910 if (netif_running(bp->dev)) { in bnx2x_netif_start()
1915 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_netif_start()
1916 netif_tx_wake_all_queues(bp->dev); in bnx2x_netif_start()
1934 struct ethhdr *hdr = (struct ethhdr *)skb->data; in bnx2x_select_queue()
1935 u16 ether_type = ntohs(hdr->h_proto); in bnx2x_select_queue()
1941 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); in bnx2x_select_queue()
1944 /* If ethertype is FCoE or FIP - use FCoE ring */ in bnx2x_select_queue()
1949 /* select a non-FCoE queue */ in bnx2x_select_queue()
1951 (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); in bnx2x_select_queue()
1957 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp); in bnx2x_set_num_queues()
1961 bp->num_ethernet_queues = 1; in bnx2x_set_num_queues()
1964 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */ in bnx2x_set_num_queues()
1965 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_set_num_queues()
1967 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); in bnx2x_set_num_queues()
1971 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1976 * We currently support at most 16 Tx queues for each CoS, thus we will
1978 * bp->max_cos.
1980 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1983 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1985 * 16..31,...) with indices that are not coupled with any real Tx queue.
1987 * The proper configuration of skb->queue_mapping is handled by
1991 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1995 int rc, tx, rx; in bnx2x_set_real_num_queues() local
1997 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; in bnx2x_set_real_num_queues()
2003 tx++; in bnx2x_set_real_num_queues()
2006 rc = netif_set_real_num_tx_queues(bp->dev, tx); in bnx2x_set_real_num_queues()
2008 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc); in bnx2x_set_real_num_queues()
2011 rc = netif_set_real_num_rx_queues(bp->dev, rx); in bnx2x_set_real_num_queues()
2017 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n", in bnx2x_set_real_num_queues()
2018 tx, rx); in bnx2x_set_real_num_queues()
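
The comment and code above size the netdev queue arrays as <ethernet queues> x max_cos Tx queues and <ethernet queues> Rx queues, plus one extra Tx/Rx queue when an FCoE L2 ring is present. A small sketch of that accounting (standalone C; the counts are made-up examples, and the matching rx++ for FCoE is inferred from the doc comment rather than shown in the fragments above):

#include <stdio.h>
#include <stdbool.h>

struct counts { int tx; int rx; };

/* Mirrors: tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos (plus the FCoE queue). */
static struct counts real_num_queues(int num_eth_queues, int max_cos, bool has_fcoe)
{
        struct counts c = {
                .tx = num_eth_queues * max_cos,
                .rx = num_eth_queues,
        };

        if (has_fcoe) {
                c.tx++;
                c.rx++;
        }
        return c;
}

int main(void)
{
        struct counts c = real_num_queues(8, 3, true);  /* assumed example */

        printf("real_num_tx_queues=%d real_num_rx_queues=%d\n", c.tx, c.rx);
        return 0;
}
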
2028 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_set_rx_buf_size()
2031 /* Always use a mini-jumbo MTU for the FCoE L2 ring */ in bnx2x_set_rx_buf_size()
2041 mtu = bp->dev->mtu; in bnx2x_set_rx_buf_size()
2042 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + in bnx2x_set_rx_buf_size()
2047 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size); in bnx2x_set_rx_buf_size()
2049 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) in bnx2x_set_rx_buf_size()
2050 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; in bnx2x_set_rx_buf_size()
2052 fp->rx_frag_size = 0; in bnx2x_set_rx_buf_size()
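
bnx2x_set_rx_buf_size() above derives the receive buffer size from the MTU plus FW alignment and padding, then decides how buffers will be allocated: if the padded buffer still fits in a page, page fragments are used (rx_frag_size != 0), otherwise rx_frag_size is cleared and kmalloc() is used instead (see bnx2x_frag_alloc() earlier in the listing). A hedged sketch of just that decision; the overhead and page-size constants below are illustrative stand-ins:

#include <stdio.h>

#define FAKE_PAGE_SIZE   4096           /* stands in for PAGE_SIZE           */
#define FAKE_NET_SKB_PAD   64           /* stands in for NET_SKB_PAD         */
#define FAKE_OVERHEAD     128           /* stands in for FW align + padding  */

/* Returns the frag size to use, or 0 when the buffer no longer fits in a
 * page and kmalloc() should be used (mirrors the rx_frag_size logic). */
static int rx_frag_size(int mtu)
{
        int rx_buf_size = FAKE_OVERHEAD + mtu;

        if (rx_buf_size + FAKE_NET_SKB_PAD <= FAKE_PAGE_SIZE)
                return rx_buf_size + FAKE_NET_SKB_PAD;
        return 0;
}

int main(void)
{
        printf("mtu 1500 -> frag %d, mtu 9000 -> frag %d\n",
               rx_frag_size(1500), rx_frag_size(9000));
        return 0;
}
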
2064 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) in bnx2x_init_rss()
2065 bp->rss_conf_obj.ind_table[i] = in bnx2x_init_rss()
2066 bp->fp->cl_id + in bnx2x_init_rss()
2071 * per-port, so if explicit configuration is needed, do it only in bnx2x_init_rss()
2074 * For 57712 and newer on the other hand it's a per-function in bnx2x_init_rss()
2077 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); in bnx2x_init_rss()
2089 * bp->multi_mode = ETH_RSS_MODE_DISABLED; in bnx2x_rss()
2104 if (rss_obj->udp_rss_v4) in bnx2x_rss()
2106 if (rss_obj->udp_rss_v6) in bnx2x_rss()
2124 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); in bnx2x_rss()
2145 func_params.f_obj = &bp->func_obj; in bnx2x_init_hw()
2162 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; in bnx2x_squeeze_objects()
2173 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, in bnx2x_squeeze_objects()
2181 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, in bnx2x_squeeze_objects()
2187 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_squeeze_objects()
2190 /* Add a DEL command... - Since we're doing a driver cleanup only, in bnx2x_squeeze_objects()
2194 netif_addr_lock_bh(bp->dev); in bnx2x_squeeze_objects()
2197 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n", in bnx2x_squeeze_objects()
2204 BNX2X_ERR("Failed to clean multi-cast object: %d\n", in bnx2x_squeeze_objects()
2206 netif_addr_unlock_bh(bp->dev); in bnx2x_squeeze_objects()
2212 netif_addr_unlock_bh(bp->dev); in bnx2x_squeeze_objects()
2218 (bp)->state = BNX2X_STATE_ERROR; \
2224 bp->cnic_loaded = false; \
2230 (bp)->state = BNX2X_STATE_ERROR; \
2231 (bp)->panic = 1; \
2232 return -EBUSY; \
2236 bp->cnic_loaded = false; \
2237 (bp)->panic = 1; \
2238 return -EBUSY; \
2244 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, in bnx2x_free_fw_stats_mem()
2245 bp->fw_stats_data_sz + bp->fw_stats_req_sz); in bnx2x_free_fw_stats_mem()
2258 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper in bnx2x_alloc_fw_stats_mem()
2262 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; in bnx2x_alloc_fw_stats_mem()
2265 * the VFs themselves. We don't include them in the bp->fw_stats_num as in bnx2x_alloc_fw_stats_mem()
2278 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) + in bnx2x_alloc_fw_stats_mem()
2279 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? in bnx2x_alloc_fw_stats_mem()
2283 bp->fw_stats_num, vf_headroom, num_groups); in bnx2x_alloc_fw_stats_mem()
2284 bp->fw_stats_req_sz = sizeof(struct stats_query_header) + in bnx2x_alloc_fw_stats_mem()
2288 * stats_counter holds per-STORM counters that are incremented in bnx2x_alloc_fw_stats_mem()
2295 bp->fw_stats_data_sz = sizeof(struct per_port_stats) + in bnx2x_alloc_fw_stats_mem()
2301 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, in bnx2x_alloc_fw_stats_mem()
2302 bp->fw_stats_data_sz + bp->fw_stats_req_sz); in bnx2x_alloc_fw_stats_mem()
2303 if (!bp->fw_stats) in bnx2x_alloc_fw_stats_mem()
2307 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; in bnx2x_alloc_fw_stats_mem()
2308 bp->fw_stats_req_mapping = bp->fw_stats_mapping; in bnx2x_alloc_fw_stats_mem()
2309 bp->fw_stats_data = (struct bnx2x_fw_stats_data *) in bnx2x_alloc_fw_stats_mem()
2310 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); in bnx2x_alloc_fw_stats_mem()
2311 bp->fw_stats_data_mapping = bp->fw_stats_mapping + in bnx2x_alloc_fw_stats_mem()
2312 bp->fw_stats_req_sz; in bnx2x_alloc_fw_stats_mem()
2315 U64_HI(bp->fw_stats_req_mapping), in bnx2x_alloc_fw_stats_mem()
2316 U64_LO(bp->fw_stats_req_mapping)); in bnx2x_alloc_fw_stats_mem()
2318 U64_HI(bp->fw_stats_data_mapping), in bnx2x_alloc_fw_stats_mem()
2319 U64_LO(bp->fw_stats_data_mapping)); in bnx2x_alloc_fw_stats_mem()
2325 return -ENOMEM; in bnx2x_alloc_fw_stats_mem()
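
bnx2x_alloc_fw_stats_mem() above makes a single DMA-coherent allocation that holds the statistics request followed immediately by the statistics data: the request pointer and mapping alias the start of the block, and the data pointer and mapping sit fw_stats_req_sz bytes in. A small sketch of that carve-up, using plain malloc instead of the PCI allocation helpers and arbitrary example sizes:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
        size_t req_sz = 256, data_sz = 1024;            /* example sizes only */
        uint8_t *fw_stats = malloc(req_sz + data_sz);   /* one block          */

        if (!fw_stats)
                return 1;

        uint8_t *fw_stats_req  = fw_stats;              /* request at offset 0 */
        uint8_t *fw_stats_data = fw_stats + req_sz;     /* data right after it */

        printf("req at +%td, data at +%td of a %zu-byte block\n",
               fw_stats_req - fw_stats, fw_stats_data - fw_stats,
               req_sz + data_sz);
        free(fw_stats);
        return 0;
}
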
2334 bp->fw_seq = in bnx2x_nic_load_request()
2337 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); in bnx2x_nic_load_request()
2340 bp->fw_drv_pulse_wr_seq = in bnx2x_nic_load_request()
2343 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); in bnx2x_nic_load_request()
2356 return -EBUSY; in bnx2x_nic_load_request()
2359 /* If mcp refused (e.g. other port is in diagnostic mode) we in bnx2x_nic_load_request()
2364 return -EBUSY; in bnx2x_nic_load_request()
2402 return -EBUSY; in bnx2x_compare_fw_ver()
2409 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port) in bnx2x_nic_load_no_mcp() argument
2413 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", in bnx2x_nic_load_no_mcp()
2417 bnx2x_load_count[path][1 + port]++; in bnx2x_nic_load_no_mcp()
2418 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", in bnx2x_nic_load_no_mcp()
2423 else if (bnx2x_load_count[path][1 + port] == 1) in bnx2x_nic_load_no_mcp()
2435 bp->port.pmf = 1; in bnx2x_nic_load_pmf()
2437 * writing to bp->port.pmf here and reading it from the in bnx2x_nic_load_pmf()
2442 bp->port.pmf = 0; in bnx2x_nic_load_pmf()
2445 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); in bnx2x_nic_load_pmf()
2452 (bp->common.shmem2_base)) { in bnx2x_nic_load_afex_dcc()
2463 bp->afex_def_vlan_tag = -1; in bnx2x_nic_load_afex_dcc()
2467 * bnx2x_bz_fp - zero content of the fastpath structure.
2472 * Makes sure the contents of the bp->fp[index].napi is kept
2477 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_bz_fp()
2479 struct napi_struct orig_napi = fp->napi; in bnx2x_bz_fp()
2480 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; in bnx2x_bz_fp()
2483 if (fp->tpa_info) in bnx2x_bz_fp()
2484 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * in bnx2x_bz_fp()
2489 fp->napi = orig_napi; in bnx2x_bz_fp()
2490 fp->tpa_info = orig_tpa_info; in bnx2x_bz_fp()
2491 fp->bp = bp; in bnx2x_bz_fp()
2492 fp->index = index; in bnx2x_bz_fp()
2494 fp->max_cos = bp->max_cos; in bnx2x_bz_fp()
2497 fp->max_cos = 1; in bnx2x_bz_fp()
2501 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; in bnx2x_bz_fp()
2504 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * in bnx2x_bz_fp()
2510 if (bp->dev->features & NETIF_F_LRO) in bnx2x_bz_fp()
2511 fp->mode = TPA_MODE_LRO; in bnx2x_bz_fp()
2512 else if (bp->dev->features & NETIF_F_GRO_HW) in bnx2x_bz_fp()
2513 fp->mode = TPA_MODE_GRO; in bnx2x_bz_fp()
2515 fp->mode = TPA_MODE_DISABLED; in bnx2x_bz_fp()
2520 if (bp->disable_tpa || IS_FCOE_FP(fp)) in bnx2x_bz_fp()
2521 fp->mode = TPA_MODE_DISABLED; in bnx2x_bz_fp()
2532 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n", in bnx2x_set_os_driver_state()
2540 int i, rc, port = BP_PORT(bp); in bnx2x_load_cnic() local
2542 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n"); in bnx2x_load_cnic()
2544 mutex_init(&bp->cnic_mutex); in bnx2x_load_cnic()
2580 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); in bnx2x_load_cnic()
2584 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); in bnx2x_load_cnic()
2595 /* re-read iscsi info */ in bnx2x_load_cnic()
2599 bp->cnic_loaded = true; in bnx2x_load_cnic()
2600 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_load_cnic()
2603 DP(NETIF_MSG_IFUP, "CNIC-related load completed successfully\n"); in bnx2x_load_cnic()
2610 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); in bnx2x_load_cnic()
2618 BNX2X_ERR("CNIC-related load failed\n"); in bnx2x_load_cnic()
2628 int port = BP_PORT(bp); in bnx2x_nic_load() local
2636 if (unlikely(bp->panic)) { in bnx2x_nic_load()
2638 return -EPERM; in bnx2x_nic_load()
2642 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; in bnx2x_nic_load()
2645 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); in bnx2x_nic_load()
2647 &bp->last_reported_link.link_report_flags); in bnx2x_nic_load()
2656 * Also set fp->mode and txdata_ptr. in bnx2x_nic_load()
2658 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); in bnx2x_nic_load()
2661 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + in bnx2x_nic_load()
2662 bp->num_cnic_queues) * in bnx2x_nic_load()
2665 bp->fcoe_init = false; in bnx2x_nic_load()
2700 * bp->num_queues, bnx2x_set_real_num_queues() should always in bnx2x_nic_load()
2713 bnx2x_setup_tc(bp->dev, bp->max_cos); in bnx2x_nic_load()
2719 bp->nic_stopped = false; in bnx2x_nic_load()
2739 load_code = bnx2x_nic_load_no_mcp(bp, port); in bnx2x_nic_load()
2768 /* Init per-function objects */ in bnx2x_nic_load()
2777 bp->afex_def_vlan_tag = -1; in bnx2x_nic_load()
2779 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; in bnx2x_nic_load()
2794 rc = -EBUSY; in bnx2x_nic_load()
2813 rc = bnx2x_setup_queue(bp, &bp->fp[i], false); in bnx2x_nic_load()
2815 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); in bnx2x_nic_load()
2830 bp->state = BNX2X_STATE_OPEN; in bnx2x_nic_load()
2836 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, in bnx2x_nic_load()
2843 if (IS_PF(bp) && bp->pending_max) { in bnx2x_nic_load()
2844 bnx2x_update_max_mf_config(bp, bp->pending_max); in bnx2x_nic_load()
2845 bp->pending_max = 0; in bnx2x_nic_load()
2848 bp->force_link_down = false; in bnx2x_nic_load()
2849 if (bp->port.pmf) { in bnx2x_nic_load()
2854 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN; in bnx2x_nic_load()
2858 /* Re-configure vlan filters */ in bnx2x_nic_load()
2866 if (bp->flags & PTP_SUPPORTED) { in bnx2x_nic_load()
2871 /* Start Tx */ in bnx2x_nic_load()
2874 /* Tx queue should be only re-enabled */ in bnx2x_nic_load()
2875 netif_tx_wake_all_queues(bp->dev); in bnx2x_nic_load()
2879 netif_tx_start_all_queues(bp->dev); in bnx2x_nic_load()
2885 bp->state = BNX2X_STATE_DIAG; in bnx2x_nic_load()
2892 if (bp->port.pmf) in bnx2x_nic_load()
2898 mod_timer(&bp->timer, jiffies + bp->current_interval); in bnx2x_nic_load()
2911 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT); in bnx2x_nic_load()
2921 return -EBUSY; in bnx2x_nic_load()
2924 /* Update driver data for On-Chip MFW dump. */ in bnx2x_nic_load()
2928 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ in bnx2x_nic_load()
2929 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) in bnx2x_nic_load()
2951 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_load()
2961 bp->port.pmf = 0; in bnx2x_nic_load()
2965 bp->nic_stopped = true; in bnx2x_nic_load()
2983 /* Wait until tx fastpath tasks complete */ in bnx2x_drain_tx_queues()
2985 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_drain_tx_queues()
2988 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); in bnx2x_drain_tx_queues()
3014 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE && in bnx2x_nic_unload()
3015 (bp->state == BNX2X_STATE_CLOSED || in bnx2x_nic_unload()
3016 bp->state == BNX2X_STATE_ERROR)) { in bnx2x_nic_unload()
3024 bp->recovery_state = BNX2X_RECOVERY_DONE; in bnx2x_nic_unload()
3025 bp->is_leader = 0; in bnx2x_nic_unload()
3031 return -EINVAL; in bnx2x_nic_unload()
3035 * have not completed successfully - all resources are released. in bnx2x_nic_unload()
3038 * dev->IFF_UP flag is still on. in bnx2x_nic_unload()
3040 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR) in bnx2x_nic_unload()
3043 /* It's important to set the bp->state to the value different from in bnx2x_nic_unload()
3044 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() in bnx2x_nic_unload()
3045 * may restart the Tx from the NAPI context (see bnx2x_tx_int()). in bnx2x_nic_unload()
3047 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; in bnx2x_nic_unload()
3056 /* Stop Tx */ in bnx2x_nic_unload()
3058 netdev_reset_tc(bp->dev); in bnx2x_nic_unload()
3060 bp->rx_mode = BNX2X_RX_MODE_NONE; in bnx2x_nic_unload()
3062 del_timer_sync(&bp->timer); in bnx2x_nic_unload()
3066 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; in bnx2x_nic_unload()
3074 * to wait for the queues to complete all Tx. in bnx2x_nic_unload()
3101 if (!bp->nic_stopped) { in bnx2x_nic_unload()
3110 bp->nic_stopped = true; in bnx2x_nic_unload()
3125 bp->sp_state = 0; in bnx2x_nic_unload()
3127 bp->port.pmf = 0; in bnx2x_nic_unload()
3130 bp->sp_rtnl_state = 0; in bnx2x_nic_unload()
3138 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_unload()
3150 bp->state = BNX2X_STATE_CLOSED; in bnx2x_nic_unload()
3151 bp->cnic_loaded = false; in bnx2x_nic_unload()
3157 /* Check if there are pending parity attentions. If there are - set in bnx2x_nic_unload()
3186 if (!bp->pdev->pm_cap) { in bnx2x_set_power_state()
3191 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr); in bnx2x_set_power_state()
3195 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, in bnx2x_set_power_state()
3207 if (atomic_read(&bp->pdev->enable_cnt) != 1) in bnx2x_set_power_state()
3216 if (bp->wol) in bnx2x_set_power_state()
3219 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, in bnx2x_set_power_state()
3228 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state); in bnx2x_set_power_state()
3229 return -EINVAL; in bnx2x_set_power_state()
3241 struct bnx2x *bp = fp->bp; in bnx2x_poll()
3246 if (unlikely(bp->panic)) { in bnx2x_poll()
3252 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) in bnx2x_poll()
3253 bnx2x_tx_int(bp, fp->txdata_ptr[cos]); in bnx2x_poll()
3283 /* Re-enable interrupts */ in bnx2x_poll()
3285 "Update index to %d\n", fp->fp_hc_idx); in bnx2x_poll()
3286 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, in bnx2x_poll()
3287 le16_to_cpu(fp->fp_hc_idx), in bnx2x_poll()
3301 * we use one mapping for both BDs
3311 dma_addr_t mapping; in bnx2x_tx_split() local
3312 int old_len = le16_to_cpu(h_tx_bd->nbytes); in bnx2x_tx_split()
3315 h_tx_bd->nbytes = cpu_to_le16(hlen); in bnx2x_tx_split()
3318 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo); in bnx2x_tx_split()
3323 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; in bnx2x_tx_split()
3325 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), in bnx2x_tx_split()
3326 le32_to_cpu(h_tx_bd->addr_lo)) + hlen; in bnx2x_tx_split()
3328 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); in bnx2x_tx_split()
3329 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); in bnx2x_tx_split()
3330 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); in bnx2x_tx_split()
3332 /* this marks the BD as one that has no individual mapping */ in bnx2x_tx_split()
3333 tx_buf->flags |= BNX2X_TSO_SPLIT_BD; in bnx2x_tx_split()
3337 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); in bnx2x_tx_split()
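
bnx2x_tx_split() above splits one mapped header+data buffer into two BDs without remapping: the header BD is shrunk to hlen bytes, and the new data BD points hlen bytes into the same DMA mapping with the remaining old_len - hlen bytes; the BNX2X_TSO_SPLIT_BD flag records that the second BD has no mapping of its own to unmap. A minimal sketch of the address/length arithmetic, using plain integers instead of the little-endian BD fields:

#include <stdio.h>
#include <stdint.h>

struct fake_bd { uint64_t addr; uint16_t nbytes; };     /* illustrative only */

/* Split h_bd into a header BD of hlen bytes plus a data BD that reuses the
 * same mapping at an hlen-byte offset (mirrors bnx2x_tx_split()). */
static struct fake_bd tx_split(struct fake_bd *h_bd, uint16_t hlen)
{
        struct fake_bd d_bd = {
                .addr   = h_bd->addr + hlen,
                .nbytes = (uint16_t)(h_bd->nbytes - hlen),
        };

        h_bd->nbytes = hlen;
        return d_bd;
}

int main(void)
{
        struct fake_bd h = { 0x1000, 1514 };
        struct fake_bd d = tx_split(&h, 54);

        printf("header: addr 0x%llx len %u, data: addr 0x%llx len %u\n",
               (unsigned long long)h.addr, h.nbytes,
               (unsigned long long)d.addr, d.nbytes);
        return 0;
}
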
3353 csum_partial(t_header - fix, fix, 0))); in bnx2x_csum_fix()
3357 csum_partial(t_header, -fix, 0))); in bnx2x_csum_fix()
3368 if (skb->ip_summed != CHECKSUM_PARTIAL) in bnx2x_xmit_type()
3374 prot = ipv6_hdr(skb)->nexthdr; in bnx2x_xmit_type()
3377 prot = ip_hdr(skb)->protocol; in bnx2x_xmit_type()
3380 if (!CHIP_IS_E1x(bp) && skb->encapsulation) { in bnx2x_xmit_type()
3381 if (inner_ip_hdr(skb)->version == 6) { in bnx2x_xmit_type()
3383 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) in bnx2x_xmit_type()
3387 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP) in bnx2x_xmit_type()
3415 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3428 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) { in bnx2x_pkt_req_lin()
3430 unsigned short lso_mss = skb_shinfo(skb)->gso_size; in bnx2x_pkt_req_lin()
3431 int wnd_size = MAX_FETCH_BD - num_tso_win_sub; in bnx2x_pkt_req_lin()
3433 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; in bnx2x_pkt_req_lin()
3445 first_bd_sz = skb_headlen(skb) - hlen; in bnx2x_pkt_req_lin()
3449 /* Calculate the first sum - it's special */ in bnx2x_pkt_req_lin()
3450 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) in bnx2x_pkt_req_lin()
3452 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]); in bnx2x_pkt_req_lin()
3454 /* If there was data in the linear part of the skb - check it */ in bnx2x_pkt_req_lin()
3461 wnd_sum -= first_bd_sz; in bnx2x_pkt_req_lin()
3468 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]); in bnx2x_pkt_req_lin()
3474 wnd_sum -= in bnx2x_pkt_req_lin()
3475 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]); in bnx2x_pkt_req_lin()
3478 /* in the non-LSO case a too fragmented packet should always in bnx2x_pkt_req_lin()
3488 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", in bnx2x_pkt_req_lin()
3489 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); in bnx2x_pkt_req_lin()
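
bnx2x_pkt_req_lin() above decides whether an LSO skb must be linearized: for every window of wnd_size consecutive fragments (wnd_size being MAX_FETCH_BD minus the BDs reserved per packet) it sums the fragment sizes and requires the sum to cover at least one MSS; otherwise the hardware could fetch a full window of BDs without completing a segment. A hedged standalone sketch of that sliding-window check, simplified to ignore the special handling of the linear/headlen part:

#include <stdio.h>
#include <stdbool.h>

/* Returns true when the frag layout would force linearization: some window
 * of wnd_size consecutive frags sums to less than one MSS. */
static bool needs_linearize(const int *frag_sz, int nr_frags,
                            int wnd_size, int mss)
{
        if (nr_frags < wnd_size)
                return false;           /* whole skb fits in one window  */

        int wnd_sum = 0;

        for (int i = 0; i < wnd_size; i++)
                wnd_sum += frag_sz[i];

        for (int start = 0; ; start++) {
                if (wnd_sum < mss)
                        return true;    /* window cannot carry one MSS   */
                if (start + wnd_size >= nr_frags)
                        return false;
                /* slide the window forward by one fragment */
                wnd_sum += frag_sz[start + wnd_size];
                wnd_sum -= frag_sz[start];
        }
}

int main(void)
{
        int frags[] = { 400, 300, 200, 100, 4000 };     /* example sizes */

        printf("needs_linearize=%d\n", needs_linearize(frags, 5, 3, 1400));
        return 0;
}
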
3496 * bnx2x_set_pbd_gso - update PBD in GSO case.
3506 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); in bnx2x_set_pbd_gso()
3507 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq); in bnx2x_set_pbd_gso()
3508 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb)); in bnx2x_set_pbd_gso()
3511 pbd->ip_id = bswab16(ip_hdr(skb)->id); in bnx2x_set_pbd_gso()
3512 pbd->tcp_pseudo_csum = in bnx2x_set_pbd_gso()
3513 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, in bnx2x_set_pbd_gso()
3514 ip_hdr(skb)->daddr, in bnx2x_set_pbd_gso()
3517 pbd->tcp_pseudo_csum = in bnx2x_set_pbd_gso()
3518 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in bnx2x_set_pbd_gso()
3519 &ipv6_hdr(skb)->daddr, in bnx2x_set_pbd_gso()
3523 pbd->global_data |= in bnx2x_set_pbd_gso()
3528 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3554 * No need to pass the UDP header length - it's a constant. in bnx2x_set_pbd_csum_enc()
3560 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3585 * No need to pass the UDP header length - it's a constant. in bnx2x_set_pbd_csum_e2()
3595 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; in bnx2x_set_sbd_csum()
3598 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6; in bnx2x_set_sbd_csum()
3601 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; in bnx2x_set_sbd_csum()
3605 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3619 pbd->global_data = in bnx2x_set_pbd_csum()
3621 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << in bnx2x_set_pbd_csum()
3624 pbd->ip_hlen_w = skb_network_header_len(skb) >> 1; in bnx2x_set_pbd_csum()
3626 hlen += pbd->ip_hlen_w; in bnx2x_set_pbd_csum()
3634 pbd->total_hlen_w = cpu_to_le16(hlen); in bnx2x_set_pbd_csum()
3638 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check); in bnx2x_set_pbd_csum()
3645 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb)); in bnx2x_set_pbd_csum()
3648 pbd->tcp_pseudo_csum = in bnx2x_set_pbd_csum()
3653 pbd->tcp_pseudo_csum); in bnx2x_set_pbd_csum()
3674 pbd2->fw_ip_hdr_to_payload_w = hlen_w; in bnx2x_update_pbds_gso_enc()
3679 u32 csum = (__force u32)(~iph->check) - in bnx2x_update_pbds_gso_enc()
3680 (__force u32)iph->tot_len - in bnx2x_update_pbds_gso_enc()
3681 (__force u32)iph->frag_off; in bnx2x_update_pbds_gso_enc()
3683 outerip_len = iph->ihl << 1; in bnx2x_update_pbds_gso_enc()
3685 pbd2->fw_ip_csum_wo_len_flags_frag = in bnx2x_update_pbds_gso_enc()
3688 pbd2->fw_ip_hdr_to_payload_w = in bnx2x_update_pbds_gso_enc()
3689 hlen_w - ((sizeof(struct ipv6hdr)) >> 1); in bnx2x_update_pbds_gso_enc()
3690 pbd_e2->data.tunnel_data.flags |= in bnx2x_update_pbds_gso_enc()
3694 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); in bnx2x_update_pbds_gso_enc()
3696 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb)); in bnx2x_update_pbds_gso_enc()
3700 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id); in bnx2x_update_pbds_gso_enc()
3702 pbd_e2->data.tunnel_data.pseudo_csum = in bnx2x_update_pbds_gso_enc()
3704 inner_ip_hdr(skb)->saddr, in bnx2x_update_pbds_gso_enc()
3705 inner_ip_hdr(skb)->daddr, in bnx2x_update_pbds_gso_enc()
3708 pbd_e2->data.tunnel_data.pseudo_csum = in bnx2x_update_pbds_gso_enc()
3710 &inner_ipv6_hdr(skb)->saddr, in bnx2x_update_pbds_gso_enc()
3711 &inner_ipv6_hdr(skb)->daddr, in bnx2x_update_pbds_gso_enc()
3721 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << in bnx2x_update_pbds_gso_enc()
3724 if (ip_hdr(skb)->protocol == IPPROTO_UDP) { in bnx2x_update_pbds_gso_enc()
3726 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1; in bnx2x_update_pbds_gso_enc()
3743 if (ipv6->nexthdr == NEXTHDR_IPV6) in bnx2x_set_ipv6_ext_e2()
3766 dma_addr_t mapping; in bnx2x_start_xmit() local
3775 if (unlikely(bp->panic)) in bnx2x_start_xmit()
3784 txdata = &bp->bnx2x_txq[txq_index]; in bnx2x_start_xmit()
3793 txdata->cid, fp_index, txdata_index, txdata, fp); */ in bnx2x_start_xmit()
3796 skb_shinfo(skb)->nr_frags + in bnx2x_start_xmit()
3800 if (txdata->tx_ring_size == 0) { in bnx2x_start_xmit()
3802 bnx2x_fp_qstats(bp, txdata->parent_fp); in bnx2x_start_xmit()
3803 q_stats->driver_filtered_tx_pkt++; in bnx2x_start_xmit()
3807 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; in bnx2x_start_xmit()
3809 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); in bnx2x_start_xmit()
3816 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, in bnx2x_start_xmit()
3817 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type, in bnx2x_start_xmit()
3818 skb->len); in bnx2x_start_xmit()
3820 eth = (struct ethhdr *)skb->data; in bnx2x_start_xmit()
3823 if (unlikely(is_multicast_ether_addr(eth->h_dest))) { in bnx2x_start_xmit()
3824 if (is_broadcast_ether_addr(eth->h_dest)) in bnx2x_start_xmit()
3830 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) in bnx2x_start_xmit()
3836 bp->lin_cnt++; in bnx2x_start_xmit()
3839 "SKB linearization failed - silently dropping this SKB\n"); in bnx2x_start_xmit()
3846 mapping = dma_map_single(&bp->pdev->dev, skb->data, in bnx2x_start_xmit()
3848 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_start_xmit()
3850 "SKB mapping failed - silently dropping this SKB\n"); in bnx2x_start_xmit()
3860 And above all, all pbd sizes are in words - NOT DWORDS! in bnx2x_start_xmit()
3863 /* get current pkt produced now - advance it just before sending packet in bnx2x_start_xmit()
3864 * since mapping of pages may fail and cause packet to be dropped in bnx2x_start_xmit()
3866 pkt_prod = txdata->tx_pkt_prod; in bnx2x_start_xmit()
3867 bd_prod = TX_BD(txdata->tx_bd_prod); in bnx2x_start_xmit()
3873 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; in bnx2x_start_xmit()
3874 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; in bnx2x_start_xmit()
3877 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; in bnx2x_start_xmit()
3879 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { in bnx2x_start_xmit()
3880 if (!(bp->flags & TX_TIMESTAMPING_EN)) { in bnx2x_start_xmit()
3881 bp->eth_stats.ptp_skip_tx_ts++; in bnx2x_start_xmit()
3882 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n"); in bnx2x_start_xmit()
3883 } else if (bp->ptp_tx_skb) { in bnx2x_start_xmit()
3884 bp->eth_stats.ptp_skip_tx_ts++; in bnx2x_start_xmit()
3885 netdev_err_once(bp->dev, in bnx2x_start_xmit()
3888 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in bnx2x_start_xmit()
3889 /* schedule check for Tx timestamp */ in bnx2x_start_xmit()
3890 bp->ptp_tx_skb = skb_get(skb); in bnx2x_start_xmit()
3891 bp->ptp_tx_start = jiffies; in bnx2x_start_xmit()
3892 schedule_work(&bp->ptp_task); in bnx2x_start_xmit()
3897 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT; in bnx2x_start_xmit()
3900 tx_buf->first_bd = txdata->tx_bd_prod; in bnx2x_start_xmit()
3901 tx_buf->skb = skb; in bnx2x_start_xmit()
3902 tx_buf->flags = 0; in bnx2x_start_xmit()
3906 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); in bnx2x_start_xmit()
3909 tx_start_bd->vlan_or_ethertype = in bnx2x_start_xmit()
3911 tx_start_bd->bd_flags.as_bitfield |= in bnx2x_start_xmit()
3923 tx_start_bd->vlan_or_ethertype = in bnx2x_start_xmit()
3924 cpu_to_le16(ntohs(eth->h_proto)); in bnx2x_start_xmit()
3926 tx_start_bd->bd_flags.as_bitfield |= in bnx2x_start_xmit()
3929 tx_start_bd->vlan_or_ethertype = in bnx2x_start_xmit()
3935 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); in bnx2x_start_xmit()
3949 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; in bnx2x_start_xmit()
3963 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd; in bnx2x_start_xmit()
3967 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w = in bnx2x_start_xmit()
3968 (skb_inner_network_header(skb) - in bnx2x_start_xmit()
3969 skb->data) >> 1; in bnx2x_start_xmit()
3976 pbd2->global_data = cpu_to_le16(global_data); in bnx2x_start_xmit()
3979 SET_FLAG(tx_start_bd->general_data, in bnx2x_start_xmit()
3982 SET_FLAG(tx_start_bd->general_data, in bnx2x_start_xmit()
3985 tx_buf->flags |= BNX2X_HAS_SECOND_PBD; in bnx2x_start_xmit()
3997 * Tx Switching is enabled. in bnx2x_start_xmit()
4001 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, in bnx2x_start_xmit()
4002 &pbd_e2->data.mac_addr.src_mid, in bnx2x_start_xmit()
4003 &pbd_e2->data.mac_addr.src_lo, in bnx2x_start_xmit()
4004 eth->h_source); in bnx2x_start_xmit()
4006 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, in bnx2x_start_xmit()
4007 &pbd_e2->data.mac_addr.dst_mid, in bnx2x_start_xmit()
4008 &pbd_e2->data.mac_addr.dst_lo, in bnx2x_start_xmit()
4009 eth->h_dest); in bnx2x_start_xmit()
4011 if (bp->flags & TX_SWITCHING) in bnx2x_start_xmit()
4013 &pbd_e2->data.mac_addr.dst_hi, in bnx2x_start_xmit()
4014 &pbd_e2->data.mac_addr.dst_mid, in bnx2x_start_xmit()
4015 &pbd_e2->data.mac_addr.dst_lo, in bnx2x_start_xmit()
4016 eth->h_dest); in bnx2x_start_xmit()
4018 /* Enforce security is always set in Stop on Error - in bnx2x_start_xmit()
4021 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, in bnx2x_start_xmit()
4022 &pbd_e2->data.mac_addr.src_mid, in bnx2x_start_xmit()
4023 &pbd_e2->data.mac_addr.src_lo, in bnx2x_start_xmit()
4024 eth->h_source); in bnx2x_start_xmit()
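bnx2x_set_fw_mac_addr() above only repacks the six MAC bytes into the three 16-bit hi/mid/lo words used by the firmware structures. A standalone illustration of that repacking (the real helper additionally stores the words as little-endian fields, omitted here; all names are hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	/* Pack a 6-byte MAC into hi/mid/lo words, first byte in the high half. */
	static void example_mac_to_fw_words(const uint8_t mac[6],
					    uint16_t *hi, uint16_t *mid, uint16_t *lo)
	{
		*hi  = (uint16_t)(mac[0] << 8) | mac[1];
		*mid = (uint16_t)(mac[2] << 8) | mac[3];
		*lo  = (uint16_t)(mac[4] << 8) | mac[5];
	}

	int main(void)
	{
		const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
		uint16_t hi, mid, lo;

		example_mac_to_fw_words(mac, &hi, &mid, &lo);
		printf("%04x %04x %04x\n", hi, mid, lo);	/* 0010 18ab cdef */
		return 0;
	}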
4032 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; in bnx2x_start_xmit()
4040 pbd_e1x->global_data |= cpu_to_le16(global_data); in bnx2x_start_xmit()
4044 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); in bnx2x_start_xmit()
4045 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); in bnx2x_start_xmit()
4046 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); in bnx2x_start_xmit()
4047 pkt_size = tx_start_bd->nbytes; in bnx2x_start_xmit()
4051 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, in bnx2x_start_xmit()
4052 le16_to_cpu(tx_start_bd->nbytes), in bnx2x_start_xmit()
4053 tx_start_bd->bd_flags.as_bitfield, in bnx2x_start_xmit()
4054 le16_to_cpu(tx_start_bd->vlan_or_ethertype)); in bnx2x_start_xmit()
4060 skb->len, hlen, skb_headlen(skb), in bnx2x_start_xmit()
4061 skb_shinfo(skb)->gso_size); in bnx2x_start_xmit()
4063 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; in bnx2x_start_xmit()
4073 (skb_shinfo(skb)->gso_size << in bnx2x_start_xmit()
4084 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data); in bnx2x_start_xmit()
4089 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in bnx2x_start_xmit()
4090 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in bnx2x_start_xmit()
4092 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, in bnx2x_start_xmit()
4094 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_start_xmit()
4098 "Unable to map page - dropping packet...\n"); in bnx2x_start_xmit()
4102 * first_bd->nbd needs to be properly updated in bnx2x_start_xmit()
4105 first_bd->nbd = cpu_to_le16(nbd); in bnx2x_start_xmit()
4107 TX_BD(txdata->tx_pkt_prod), in bnx2x_start_xmit()
4113 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; in bnx2x_start_xmit()
4115 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; in bnx2x_start_xmit()
4117 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); in bnx2x_start_xmit()
4118 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); in bnx2x_start_xmit()
4119 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag)); in bnx2x_start_xmit()
4125 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, in bnx2x_start_xmit()
4126 le16_to_cpu(tx_data_bd->nbytes)); in bnx2x_start_xmit()
4132 first_bd->nbd = cpu_to_le16(nbd); in bnx2x_start_xmit()
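The fragment loop has to be able to unwind: if mapping frag i fails, the BDs already built (header plus frags 0..i-1) must be unmapped before the packet is dropped, which is why nbd is written back into first_bd before calling the regular free routine (which also releases the skb). A condensed sketch, with declarations and statistics updates omitted:

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
			unsigned int pkts_compl = 0, bytes_compl = 0;

			/* tell the free routine how many BDs really exist */
			first_bd->nbd = cpu_to_le16(nbd);
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(txdata->tx_pkt_prod),
					  &pkts_compl, &bytes_compl);
			return NETDEV_TX_OK;
		}

		/* normal path: advance the producer and fill a data BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes  = cpu_to_le16(skb_frag_size(frag));
		nbd++;
	}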
4136 /* now send a tx doorbell, counting the next BD in bnx2x_start_xmit()
4146 * have to) in order to save some CPU cycles in a non-LSO in bnx2x_start_xmit()
4150 total_pkt_bd->total_pkt_bytes = pkt_size; in bnx2x_start_xmit()
4155 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, in bnx2x_start_xmit()
4156 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, in bnx2x_start_xmit()
4157 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, in bnx2x_start_xmit()
4158 le16_to_cpu(pbd_e1x->total_hlen_w)); in bnx2x_start_xmit()
4163 pbd_e2->data.mac_addr.dst_hi, in bnx2x_start_xmit()
4164 pbd_e2->data.mac_addr.dst_mid, in bnx2x_start_xmit()
4165 pbd_e2->data.mac_addr.dst_lo, in bnx2x_start_xmit()
4166 pbd_e2->data.mac_addr.src_hi, in bnx2x_start_xmit()
4167 pbd_e2->data.mac_addr.src_mid, in bnx2x_start_xmit()
4168 pbd_e2->data.mac_addr.src_lo, in bnx2x_start_xmit()
4169 pbd_e2->parsing_data); in bnx2x_start_xmit()
4172 netdev_tx_sent_queue(txq, skb->len); in bnx2x_start_xmit()
4176 txdata->tx_pkt_prod++; in bnx2x_start_xmit()
4180 * This is only applicable for weak-ordered memory model archs such in bnx2x_start_xmit()
4181 * as IA-64. The following barrier is also mandatory since FW will in bnx2x_start_xmit()
4186 txdata->tx_db.data.prod += nbd; in bnx2x_start_xmit()
4190 DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw); in bnx2x_start_xmit()
4192 txdata->tx_bd_prod += nbd; in bnx2x_start_xmit()
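Between the producer update and the doorbell (partly elided above) sits a write barrier: on weakly ordered CPUs the MMIO doorbell write could otherwise overtake BD stores still sitting in write buffers, and the firmware would fetch stale descriptors. The sequence, condensed as a sketch:

	txdata->tx_db.data.prod += nbd;	/* new producer, in the doorbell shadow */

	/* Make all BD and doorbell-data stores visible to the device before
	 * the MMIO write that tells it to go fetch them.
	 */
	wmb();

	DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);

	txdata->tx_bd_prod += nbd;	/* driver-side bookkeeping only */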
4199 * txdata->tx_bd_cons */ in bnx2x_start_xmit()
4202 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; in bnx2x_start_xmit()
4206 txdata->tx_pkt++; in bnx2x_start_xmit()
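The queue-stop logic whose pieces appear above is the standard stop/wake race-avoidance idiom: stop the queue when the ring can no longer hold a worst-case packet, issue a full barrier so the stopped bit is visible before the consumer index is re-read, then re-check and wake immediately if the completion path freed space in the meantime. A condensed sketch:

	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
		netif_tx_stop_queue(txq);

		/* Pairs with the barrier in the TX completion path: the stopped
		 * bit must be visible before we re-read the consumer index.
		 */
		smp_mb();

		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
			netif_tx_wake_queue(txq);	/* completion already freed room */
	}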
4247 * bnx2x_setup_tc - routine to configure net_device for multi tc
4270 if (num_tc > bp->max_cos) { in bnx2x_setup_tc()
4272 num_tc, bp->max_cos); in bnx2x_setup_tc()
4273 return -EINVAL; in bnx2x_setup_tc()
4279 return -EINVAL; in bnx2x_setup_tc()
4284 /* configure priority to traffic class mapping */ in bnx2x_setup_tc()
4288 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]); in bnx2x_setup_tc()
4290 "mapping priority %d to tc %d\n", in bnx2x_setup_tc()
4291 outer_prio, bp->prio_to_cos[outer_prio]); in bnx2x_setup_tc()
4298 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0); in bnx2x_setup_tc()
4301 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1); in bnx2x_setup_tc()
4304 /* configure traffic class to transmission queue mapping */ in bnx2x_setup_tc()
4305 for (cos = 0; cos < bp->max_cos; cos++) { in bnx2x_setup_tc()
4310 "mapping tc %d to offset %d count %d\n", in bnx2x_setup_tc()
4323 return -EOPNOTSUPP; in __bnx2x_setup_tc()
4325 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; in __bnx2x_setup_tc()
4327 return bnx2x_setup_tc(dev, mqprio->num_tc); in __bnx2x_setup_tc()
4337 if (!is_valid_ether_addr(addr->sa_data)) { in bnx2x_change_mac_addr()
4339 return -EINVAL; in bnx2x_change_mac_addr()
4344 return -EINVAL; in bnx2x_change_mac_addr()
4353 eth_hw_addr_set(dev, addr->sa_data); in bnx2x_change_mac_addr()
4367 struct bnx2x_fastpath *fp = &bp->fp[fp_index]; in bnx2x_free_fp_mem_at()
4374 fp->status_blk_mapping = 0; in bnx2x_free_fp_mem_at()
4378 BNX2X_PCI_FREE(sb->e2_sb, in bnx2x_free_fp_mem_at()
4383 BNX2X_PCI_FREE(sb->e1x_sb, in bnx2x_free_fp_mem_at()
4411 /* Tx */ in bnx2x_free_fp_mem_at()
4413 /* fastpath tx rings: tx_buf tx_desc */ in bnx2x_free_fp_mem_at()
4415 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_free_fp_mem_at()
4418 "freeing tx memory of fp %d cos %d cid %d\n", in bnx2x_free_fp_mem_at()
4419 fp_index, cos, txdata->cid); in bnx2x_free_fp_mem_at()
4421 BNX2X_FREE(txdata->tx_buf_ring); in bnx2x_free_fp_mem_at()
4422 BNX2X_PCI_FREE(txdata->tx_desc_ring, in bnx2x_free_fp_mem_at()
4423 txdata->tx_desc_mapping, in bnx2x_free_fp_mem_at()
4449 (__le16 *)status_blk.e2_sb->sb.index_values; in set_sb_shortcuts()
4451 (__le16 *)status_blk.e2_sb->sb.running_index; in set_sb_shortcuts()
4454 (__le16 *)status_blk.e1x_sb->sb.index_values; in set_sb_shortcuts()
4456 (__le16 *)status_blk.e1x_sb->sb.running_index; in set_sb_shortcuts()
4464 struct bnx2x *bp = fp->bp; in bnx2x_alloc_rx_bds()
4468 fp->rx_comp_cons = 0; in bnx2x_alloc_rx_bds()
4472 * fp->eth_q_stats.rx_skb_alloc_failed = 0 in bnx2x_alloc_rx_bds()
4481 WARN_ON(ring_prod <= (i - failure_cnt)); in bnx2x_alloc_rx_bds()
4486 i - failure_cnt, fp->index); in bnx2x_alloc_rx_bds()
4488 fp->rx_bd_prod = ring_prod; in bnx2x_alloc_rx_bds()
4490 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, in bnx2x_alloc_rx_bds()
4493 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; in bnx2x_alloc_rx_bds()
4495 return i - failure_cnt; in bnx2x_alloc_rx_bds()
4506 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; in bnx2x_set_next_page_rx_cq()
4507 nextpg->addr_hi = in bnx2x_set_next_page_rx_cq()
4508 cpu_to_le32(U64_HI(fp->rx_comp_mapping + in bnx2x_set_next_page_rx_cq()
4510 nextpg->addr_lo = in bnx2x_set_next_page_rx_cq()
4511 cpu_to_le32(U64_LO(fp->rx_comp_mapping + in bnx2x_set_next_page_rx_cq()
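Only fragments of bnx2x_set_next_page_rx_cq() appear above; its whole job is to chain the completion-queue pages into a ring. The last slot of every page holds not a completion but the DMA address of the next page, and the final page points back at the first. Approximately (driver names, reconstructed and commented as a sketch):

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		/* the last entry of each page is reserved as a "next page" pointer */
		struct eth_rx_cqe_next_page *nextpg =
			(struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		/* i % NUM_RCQ_RINGS wraps the final page back to page 0 */
		dma_addr_t next = fp->rx_comp_mapping +
				  BCM_PAGE_SIZE * (i % NUM_RCQ_RINGS);

		nextpg->addr_hi = cpu_to_le32(U64_HI(next));
		nextpg->addr_lo = cpu_to_le32(U64_LO(next));
	}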
4519 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_alloc_fp_mem_at()
4524 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) { in bnx2x_alloc_fp_mem_at()
4526 bp->rx_ring_size = rx_ring_size; in bnx2x_alloc_fp_mem_at()
4527 } else if (!bp->rx_ring_size) { in bnx2x_alloc_fp_mem_at()
4542 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : in bnx2x_alloc_fp_mem_at()
4545 bp->rx_ring_size = rx_ring_size; in bnx2x_alloc_fp_mem_at()
4546 } else /* if rx_ring_size specified - use it */ in bnx2x_alloc_fp_mem_at()
4547 rx_ring_size = bp->rx_ring_size; in bnx2x_alloc_fp_mem_at()
4557 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), in bnx2x_alloc_fp_mem_at()
4559 if (!sb->e2_sb) in bnx2x_alloc_fp_mem_at()
4562 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), in bnx2x_alloc_fp_mem_at()
4564 if (!sb->e1x_sb) in bnx2x_alloc_fp_mem_at()
4575 /* Tx */ in bnx2x_alloc_fp_mem_at()
4577 /* fastpath tx rings: tx_buf tx_desc */ in bnx2x_alloc_fp_mem_at()
4579 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_alloc_fp_mem_at()
4582 "allocating tx memory of fp %d cos %d\n", in bnx2x_alloc_fp_mem_at()
4585 txdata->tx_buf_ring = kcalloc(NUM_TX_BD, in bnx2x_alloc_fp_mem_at()
4588 if (!txdata->tx_buf_ring) in bnx2x_alloc_fp_mem_at()
4590 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping, in bnx2x_alloc_fp_mem_at()
4592 if (!txdata->tx_desc_ring) in bnx2x_alloc_fp_mem_at()
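Each TX ring is really two allocations: a plain host array that shadows what was handed to hardware (the kcalloc above) and a DMA-coherent descriptor ring the NIC reads directly (BNX2X_PCI_ALLOC, which boils down to dma_alloc_coherent()). A generic sketch of that pairing, with illustrative types and names:

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>
	#include <linux/slab.h>

	struct example_tx_desc { __le32 addr_hi, addr_lo; __le16 nbytes, flags; };
	struct example_tx_buf  { struct sk_buff *skb; u16 first_bd; u8 flags; };

	struct example_txq {
		struct example_tx_buf	*buf_ring;	/* CPU-only shadow ring */
		struct example_tx_desc	*desc_ring;	/* device-visible ring */
		dma_addr_t		desc_mapping;	/* bus address given to HW */
	};

	static int example_alloc_txq(struct device *dev, struct example_txq *txq,
				     unsigned int nr_bd)
	{
		txq->buf_ring = kcalloc(nr_bd, sizeof(*txq->buf_ring), GFP_KERNEL);
		if (!txq->buf_ring)
			return -ENOMEM;

		txq->desc_ring = dma_alloc_coherent(dev,
						    nr_bd * sizeof(*txq->desc_ring),
						    &txq->desc_mapping, GFP_KERNEL);
		if (!txq->desc_ring) {
			kfree(txq->buf_ring);
			return -ENOMEM;
		}
		return 0;
	}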
4648 * Min size is different for OOO, TPA and non-TPA queues in bnx2x_alloc_fp_mem_at()
4650 if (ring_size < (fp->mode == TPA_MODE_DISABLED ? in bnx2x_alloc_fp_mem_at()
4654 return -ENOMEM; in bnx2x_alloc_fp_mem_at()
4667 return -ENOMEM; in bnx2x_alloc_fp_mem_cnic()
4676 /* 1. Allocate FP for leading - fatal if error in bnx2x_alloc_fp_mem()
4677 * 2. Allocate RSS - fix number of queues if error in bnx2x_alloc_fp_mem()
4682 return -ENOMEM; in bnx2x_alloc_fp_mem()
4691 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; in bnx2x_alloc_fp_mem()
4702 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); in bnx2x_alloc_fp_mem()
4703 bp->num_ethernet_queues -= delta; in bnx2x_alloc_fp_mem()
4704 bp->num_queues = bp->num_ethernet_queues + in bnx2x_alloc_fp_mem()
4705 bp->num_cnic_queues; in bnx2x_alloc_fp_mem()
4707 bp->num_queues + delta, bp->num_queues); in bnx2x_alloc_fp_mem()
4717 for (i = 0; i < bp->fp_array_size; i++) in bnx2x_free_mem_bp()
4718 kfree(bp->fp[i].tpa_info); in bnx2x_free_mem_bp()
4719 kfree(bp->fp); in bnx2x_free_mem_bp()
4720 kfree(bp->sp_objs); in bnx2x_free_mem_bp()
4721 kfree(bp->fp_stats); in bnx2x_free_mem_bp()
4722 kfree(bp->bnx2x_txq); in bnx2x_free_mem_bp()
4723 kfree(bp->msix_table); in bnx2x_free_mem_bp()
4724 kfree(bp->ilt); in bnx2x_free_mem_bp()
4737 * The biggest MSI-X table we might need is the maximum number of fast in bnx2x_alloc_mem_bp()
4740 msix_table_size = bp->igu_sb_cnt; in bnx2x_alloc_mem_bp()
4747 bp->fp_array_size = fp_array_size; in bnx2x_alloc_mem_bp()
4748 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size); in bnx2x_alloc_mem_bp()
4750 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL); in bnx2x_alloc_mem_bp()
4753 for (i = 0; i < bp->fp_array_size; i++) { in bnx2x_alloc_mem_bp()
4761 bp->fp = fp; in bnx2x_alloc_mem_bp()
4764 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs), in bnx2x_alloc_mem_bp()
4766 if (!bp->sp_objs) in bnx2x_alloc_mem_bp()
4770 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats), in bnx2x_alloc_mem_bp()
4772 if (!bp->fp_stats) in bnx2x_alloc_mem_bp()
4780 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata), in bnx2x_alloc_mem_bp()
4782 if (!bp->bnx2x_txq) in bnx2x_alloc_mem_bp()
4789 bp->msix_table = tbl; in bnx2x_alloc_mem_bp()
4795 bp->ilt = ilt; in bnx2x_alloc_mem_bp()
4800 return -ENOMEM; in bnx2x_alloc_mem_bp()
4817 if (bp->link_params.num_phys <= 1) in bnx2x_get_cur_phy_idx()
4820 if (bp->link_vars.link_up) { in bnx2x_get_cur_phy_idx()
4823 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && in bnx2x_get_cur_phy_idx()
4824 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) in bnx2x_get_cur_phy_idx()
4828 switch (bnx2x_phy_selection(&bp->link_params)) { in bnx2x_get_cur_phy_idx()
4852 if (bp->link_params.multi_phy_config & in bnx2x_get_link_cfg_idx()
4866 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_fcoe_get_wwn()
4870 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi, in bnx2x_fcoe_get_wwn()
4871 cp->fcoe_wwn_node_name_lo); in bnx2x_fcoe_get_wwn()
4874 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi, in bnx2x_fcoe_get_wwn()
4875 cp->fcoe_wwn_port_name_lo); in bnx2x_fcoe_get_wwn()
4878 BNX2X_ERR("Wrong WWN type requested - %d\n", type); in bnx2x_fcoe_get_wwn()
4879 return -EINVAL; in bnx2x_fcoe_get_wwn()
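HILO_U64() simply glues the two 32-bit halves that firmware keeps for a WWN into one 64-bit value. A trivial standalone equivalent (names and values are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define EXAMPLE_HILO_U64(hi, lo)  (((uint64_t)(hi) << 32) | (uint32_t)(lo))

	int main(void)
	{
		uint32_t wwn_hi = 0x20000010, wwn_lo = 0x18abcdef;

		printf("wwn = %016llx\n",
		       (unsigned long long)EXAMPLE_HILO_U64(wwn_hi, wwn_lo));
		return 0;
	}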
4891 if (pci_num_vf(bp->pdev)) { in bnx2x_change_mtu()
4893 return -EPERM; in bnx2x_change_mtu()
4896 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { in bnx2x_change_mtu()
4898 return -EAGAIN; in bnx2x_change_mtu()
4905 WRITE_ONCE(dev->mtu, new_mtu); in bnx2x_change_mtu()
4908 dev->features &= ~NETIF_F_GRO_HW; in bnx2x_change_mtu()
4921 if (pci_num_vf(bp->pdev)) { in bnx2x_fix_features()
4922 netdev_features_t changed = dev->features ^ features; in bnx2x_fix_features()
4927 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) { in bnx2x_fix_features()
4929 features |= dev->features & NETIF_F_RXCSUM; in bnx2x_fix_features()
4934 features |= dev->features & NETIF_F_LOOPBACK; in bnx2x_fix_features()
4942 if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu)) in bnx2x_fix_features()
4953 netdev_features_t changes = features ^ dev->features; in bnx2x_set_features()
4958 if (!pci_num_vf(bp->pdev)) { in bnx2x_set_features()
4960 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { in bnx2x_set_features()
4961 bp->link_params.loopback_mode = LOOPBACK_BMAC; in bnx2x_set_features()
4965 if (bp->link_params.loopback_mode != LOOPBACK_NONE) { in bnx2x_set_features()
4966 bp->link_params.loopback_mode = LOOPBACK_NONE; in bnx2x_set_features()
4979 if (bp->recovery_state == BNX2X_RECOVERY_DONE) { in bnx2x_set_features()
4980 dev->features = features; in bnx2x_set_features()
4997 if (!bp->panic) in bnx2x_tx_timeout()
5015 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); in bnx2x_suspend()
5016 return -ENODEV; in bnx2x_suspend()
5044 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); in bnx2x_resume()
5045 return -ENODEV; in bnx2x_resume()
5049 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { in bnx2x_resume()
5051 return -EAGAIN; in bnx2x_resume()
5081 cxt->ustorm_ag_context.cdu_usage = in bnx2x_set_ctx_validation()
5085 cxt->xstorm_ag_context.cdu_reserved = in bnx2x_set_ctx_validation()
5090 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, in storm_memset_hc_timeout() argument
5098 "port %x fw_sb_id %d sb_index %d ticks %d\n", in storm_memset_hc_timeout()
5099 port, fw_sb_id, sb_index, ticks); in storm_memset_hc_timeout()
5102 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, in storm_memset_hc_disable() argument
5115 "port %x fw_sb_id %d sb_index %d disable %d\n", in storm_memset_hc_disable()
5116 port, fw_sb_id, sb_index, disable); in storm_memset_hc_disable()
5122 int port = BP_PORT(bp); in bnx2x_update_coalesce_sb_index() local
5125 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); in bnx2x_update_coalesce_sb_index()
5128 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); in bnx2x_update_coalesce_sb_index()
5135 set_bit(flag, &bp->sp_rtnl_state); in bnx2x_schedule_sp_rtnl()
5139 schedule_delayed_work(&bp->sp_rtnl_task, 0); in bnx2x_schedule_sp_rtnl()
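bnx2x_schedule_sp_rtnl() is the driver's generic "do this later under rtnl" mechanism: record the requested action as a bit in sp_rtnl_state, then kick the delayed work that will take the rtnl lock and act on every bit it finds set. A condensed sketch of the producer side (the wrapper name is illustrative; the barriers keep the flag visible before the work can run):

	static void example_schedule_sp_rtnl(struct bnx2x *bp, int flag)
	{
		smp_mb__before_atomic();
		set_bit(flag, &bp->sp_rtnl_state);
		smp_mb__after_atomic();		/* publish the bit before queuing work */

		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}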