Lines Matching +full:atomic +full:- +full:threshold +full:- +full:us
1 // SPDX-License-Identifier: GPL-2.0-only
25 "RX descriptor ring refill threshold (%)");
29 * This must be at least 1 to prevent overflow, plus one packet-worth
37 struct efx_nic *efx = rx_queue->efx; in efx_reuse_page()
42 if (unlikely(!rx_queue->page_ring)) in efx_reuse_page()
44 index = rx_queue->page_remove & rx_queue->page_ptr_mask; in efx_reuse_page()
45 page = rx_queue->page_ring[index]; in efx_reuse_page()
49 rx_queue->page_ring[index] = NULL; in efx_reuse_page()
51 if (rx_queue->page_remove != rx_queue->page_add) in efx_reuse_page()
52 ++rx_queue->page_remove; in efx_reuse_page()
56 ++rx_queue->page_recycle_count; in efx_reuse_page()
60 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, in efx_reuse_page()
61 PAGE_SIZE << efx->rx_buffer_order, in efx_reuse_page()
64 ++rx_queue->page_recycle_failed; in efx_reuse_page()
78 struct efx_nic *efx = rx_queue->efx; in efx_recycle_rx_page()
79 struct page *page = rx_buf->page; in efx_recycle_rx_page()
83 if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE)) in efx_recycle_rx_page()
86 index = rx_queue->page_add & rx_queue->page_ptr_mask; in efx_recycle_rx_page()
87 if (rx_queue->page_ring[index] == NULL) { in efx_recycle_rx_page()
88 unsigned int read_index = rx_queue->page_remove & in efx_recycle_rx_page()
89 rx_queue->page_ptr_mask; in efx_recycle_rx_page()
96 ++rx_queue->page_remove; in efx_recycle_rx_page()
97 rx_queue->page_ring[index] = page; in efx_recycle_rx_page()
98 ++rx_queue->page_add; in efx_recycle_rx_page()
101 ++rx_queue->page_recycle_full; in efx_recycle_rx_page()
103 put_page(rx_buf->page); in efx_recycle_rx_page()
113 if (unlikely(!rx_queue->page_ring)) in efx_recycle_rx_pages()
119 } while (--n_frags); in efx_recycle_rx_pages()
136 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_recycle_ring()
140 efx->rx_bufs_per_page); in efx_init_rx_recycle_ring()
141 rx_queue->page_ring = kcalloc(page_ring_size, in efx_init_rx_recycle_ring()
142 sizeof(*rx_queue->page_ring), GFP_KERNEL); in efx_init_rx_recycle_ring()
143 if (!rx_queue->page_ring) in efx_init_rx_recycle_ring()
144 rx_queue->page_ptr_mask = 0; in efx_init_rx_recycle_ring()
146 rx_queue->page_ptr_mask = page_ring_size - 1; in efx_init_rx_recycle_ring()
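The matched lines above are the driver's page recycle ring: a power-of-two array of page pointers indexed by the free-running page_add and page_remove counters masked with page_ptr_mask. Below is a minimal userspace sketch of that counter-and-mask scheme, with simplified stand-in names (recycle_ring, ring_reuse, ring_recycle are not the driver's API); it shows why no explicit full/empty flags are needed: the producer only stores into an empty slot, and the consumer only advances its counter while it lags the producer.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the driver's recycle ring: the size is a power
 * of two, so "counter & mask" is a cheap modulo and the free-running
 * counters may wrap without breaking the index arithmetic.
 */
struct recycle_ring {
	void **slots;
	unsigned int mask;	/* ring size - 1, like page_ptr_mask */
	unsigned int add;	/* producer counter, like page_add */
	unsigned int remove;	/* consumer counter, like page_remove */
};

static int ring_init(struct recycle_ring *r, unsigned int order)
{
	unsigned int size = 1u << order;

	r->slots = calloc(size, sizeof(*r->slots));
	if (!r->slots)
		return -1;
	r->mask = size - 1;
	r->remove = 0;
	r->add = size;		/* mirrors page_add = page_ptr_mask + 1 */
	return 0;
}

/* Consumer side, like efx_reuse_page(): take a page if one is queued. */
static void *ring_reuse(struct recycle_ring *r)
{
	unsigned int index = r->remove & r->mask;
	void *page = r->slots[index];

	if (!page)
		return NULL;
	r->slots[index] = NULL;
	if (r->remove != r->add)
		++r->remove;
	return page;
}

/* Producer side, like efx_recycle_rx_page(): store only into an empty slot;
 * a full ring means the caller releases the page instead of recycling it.
 */
static int ring_recycle(struct recycle_ring *r, void *page)
{
	unsigned int index = r->add & r->mask;

	if (r->slots[index])
		return -1;
	r->slots[index] = page;
	++r->add;
	return 0;
}

int main(void)
{
	struct recycle_ring r;
	char a, b;
	void *first, *second;

	if (ring_init(&r, 2))
		return 1;
	ring_recycle(&r, &a);
	ring_recycle(&r, &b);
	first = ring_reuse(&r);
	second = ring_reuse(&r);
	printf("reused %p then %p (a=%p, b=%p)\n", first, second,
	       (void *)&a, (void *)&b);
	free(r.slots);
	return 0;
}

The real efx_recycle_rx_page() also bumps page_remove when the slot being filled is the one the read pointer currently points at (the read_index check above); that detail is omitted from this sketch.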
151 struct efx_nic *efx = rx_queue->efx; in efx_fini_rx_recycle_ring()
154 if (unlikely(!rx_queue->page_ring)) in efx_fini_rx_recycle_ring()
158 for (i = 0; i <= rx_queue->page_ptr_mask; i++) { in efx_fini_rx_recycle_ring()
159 struct page *page = rx_queue->page_ring[i]; in efx_fini_rx_recycle_ring()
166 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, in efx_fini_rx_recycle_ring()
167 PAGE_SIZE << efx->rx_buffer_order, in efx_fini_rx_recycle_ring()
171 kfree(rx_queue->page_ring); in efx_fini_rx_recycle_ring()
172 rx_queue->page_ring = NULL; in efx_fini_rx_recycle_ring()
179 if (rx_buf->page) in efx_fini_rx_buffer()
180 put_page(rx_buf->page); in efx_fini_rx_buffer()
183 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { in efx_fini_rx_buffer()
184 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); in efx_fini_rx_buffer()
187 rx_buf->page = NULL; in efx_fini_rx_buffer()
192 struct efx_nic *efx = rx_queue->efx; in efx_probe_rx_queue()
196 /* Create the smallest power-of-two aligned ring */ in efx_probe_rx_queue()
197 entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE); in efx_probe_rx_queue()
199 rx_queue->ptr_mask = entries - 1; in efx_probe_rx_queue()
201 netif_dbg(efx, probe, efx->net_dev, in efx_probe_rx_queue()
203 efx_rx_queue_index(rx_queue), efx->rxq_entries, in efx_probe_rx_queue()
204 rx_queue->ptr_mask); in efx_probe_rx_queue()
207 rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), in efx_probe_rx_queue()
209 if (!rx_queue->buffer) in efx_probe_rx_queue()
210 return -ENOMEM; in efx_probe_rx_queue()
214 kfree(rx_queue->buffer); in efx_probe_rx_queue()
215 rx_queue->buffer = NULL; in efx_probe_rx_queue()
224 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_queue()
227 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_init_rx_queue()
231 rx_queue->added_count = 0; in efx_init_rx_queue()
232 rx_queue->notified_count = 0; in efx_init_rx_queue()
233 rx_queue->granted_count = 0; in efx_init_rx_queue()
234 rx_queue->removed_count = 0; in efx_init_rx_queue()
235 rx_queue->min_fill = -1U; in efx_init_rx_queue()
238 rx_queue->page_remove = 0; in efx_init_rx_queue()
239 rx_queue->page_add = rx_queue->page_ptr_mask + 1; in efx_init_rx_queue()
240 rx_queue->page_recycle_count = 0; in efx_init_rx_queue()
241 rx_queue->page_recycle_failed = 0; in efx_init_rx_queue()
242 rx_queue->page_recycle_full = 0; in efx_init_rx_queue()
245 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; in efx_init_rx_queue()
247 max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page; in efx_init_rx_queue()
256 rx_queue->max_fill = max_fill; in efx_init_rx_queue()
257 rx_queue->fast_fill_trigger = trigger; in efx_init_rx_queue()
258 rx_queue->refill_enabled = true; in efx_init_rx_queue()
261 rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev, in efx_init_rx_queue()
262 rx_queue->core_index, 0); in efx_init_rx_queue()
265 netif_err(efx, rx_err, efx->net_dev, in efx_init_rx_queue()
268 efx->xdp_rxq_info_failed = true; in efx_init_rx_queue()
270 rx_queue->xdp_rxq_info_valid = true; in efx_init_rx_queue()
282 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_fini_rx_queue()
285 del_timer_sync(&rx_queue->slow_fill); in efx_fini_rx_queue()
286 if (rx_queue->grant_credits) in efx_fini_rx_queue()
287 flush_work(&rx_queue->grant_work); in efx_fini_rx_queue()
290 if (rx_queue->buffer) { in efx_fini_rx_queue()
291 for (i = rx_queue->removed_count; i < rx_queue->added_count; in efx_fini_rx_queue()
293 unsigned int index = i & rx_queue->ptr_mask; in efx_fini_rx_queue()
302 if (rx_queue->xdp_rxq_info_valid) in efx_fini_rx_queue()
303 xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info); in efx_fini_rx_queue()
305 rx_queue->xdp_rxq_info_valid = false; in efx_fini_rx_queue()
310 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_remove_rx_queue()
315 kfree(rx_queue->buffer); in efx_remove_rx_queue()
316 rx_queue->buffer = NULL; in efx_remove_rx_queue()
319 /* Unmap a DMA-mapped page. This function is only called for the final RX
325 struct page *page = rx_buf->page; in efx_unmap_rx_buffer()
330 dma_unmap_page(&efx->pci_dev->dev, in efx_unmap_rx_buffer()
331 state->dma_addr, in efx_unmap_rx_buffer()
332 PAGE_SIZE << efx->rx_buffer_order, in efx_unmap_rx_buffer()
342 if (rx_buf->page) { in efx_free_rx_buffers()
343 put_page(rx_buf->page); in efx_free_rx_buffers()
344 rx_buf->page = NULL; in efx_free_rx_buffers()
347 } while (--num_bufs); in efx_free_rx_buffers()
356 ++rx_queue->slow_fill_count; in efx_rx_slow_fill()
361 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10)); in efx_schedule_slow_fill()
364 /* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
373 static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) in efx_init_rx_buffers() argument
376 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_buffers()
387 (atomic ? GFP_ATOMIC : GFP_KERNEL), in efx_init_rx_buffers()
388 efx->rx_buffer_order); in efx_init_rx_buffers()
390 return -ENOMEM; in efx_init_rx_buffers()
392 dma_map_page(&efx->pci_dev->dev, page, 0, in efx_init_rx_buffers()
393 PAGE_SIZE << efx->rx_buffer_order, in efx_init_rx_buffers()
395 if (unlikely(dma_mapping_error(&efx->pci_dev->dev, in efx_init_rx_buffers()
397 __free_pages(page, efx->rx_buffer_order); in efx_init_rx_buffers()
398 return -EIO; in efx_init_rx_buffers()
401 state->dma_addr = dma_addr; in efx_init_rx_buffers()
404 dma_addr = state->dma_addr; in efx_init_rx_buffers()
411 index = rx_queue->added_count & rx_queue->ptr_mask; in efx_init_rx_buffers()
413 rx_buf->dma_addr = dma_addr + efx->rx_ip_align + in efx_init_rx_buffers()
415 rx_buf->page = page; in efx_init_rx_buffers()
416 rx_buf->page_offset = page_offset + efx->rx_ip_align + in efx_init_rx_buffers()
418 rx_buf->len = efx->rx_dma_len; in efx_init_rx_buffers()
419 rx_buf->flags = 0; in efx_init_rx_buffers()
420 ++rx_queue->added_count; in efx_init_rx_buffers()
422 dma_addr += efx->rx_page_buf_step; in efx_init_rx_buffers()
423 page_offset += efx->rx_page_buf_step; in efx_init_rx_buffers()
424 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); in efx_init_rx_buffers()
426 rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE; in efx_init_rx_buffers()
427 } while (++count < efx->rx_pages_per_batch); in efx_init_rx_buffers()
434 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align + in efx_rx_config_page_split()
437 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : in efx_rx_config_page_split()
438 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / in efx_rx_config_page_split()
439 efx->rx_page_buf_step); in efx_rx_config_page_split()
440 efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / in efx_rx_config_page_split()
441 efx->rx_bufs_per_page; in efx_rx_config_page_split()
442 efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, in efx_rx_config_page_split()
443 efx->rx_bufs_per_page); in efx_rx_config_page_split()
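efx_rx_config_page_split() decides how many receive buffers are carved out of each page: the per-buffer step is the DMA length plus IP alignment (and, implied by the surrounding source, XDP headroom) rounded up to the buffer alignment; an order-0 page then holds as many steps as fit after the efx_rx_page_state header, and the batch size is the preferred batch rounded up to whole pages. A worked sketch of that arithmetic follows; every constant below is an illustrative assumption, not the driver's actual configuration.

#include <stdio.h>

#define PAGE_SIZE	4096u
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Stand-in for struct efx_rx_page_state, which lives at the start of the page. */
struct page_state { unsigned long dma_addr; };

int main(void)
{
	unsigned int rx_dma_len = 1536;		/* assumed MTU-derived length */
	unsigned int rx_ip_align = 2;		/* assumed NET_IP_ALIGN */
	unsigned int headroom = 256;		/* assumed XDP headroom */
	unsigned int align = 64;		/* assumed buffer alignment */
	unsigned int buffer_order = 0;		/* order 0: a single 4 KiB page */
	unsigned int preferred_batch = 8;	/* like EFX_RX_PREFERRED_BATCH */

	unsigned int buf_step = ALIGN_UP(rx_dma_len + rx_ip_align + headroom,
					 align);
	unsigned int bufs_per_page = buffer_order ? 1 :
		(unsigned int)((PAGE_SIZE - sizeof(struct page_state)) / buf_step);
	unsigned int truesize = (PAGE_SIZE << buffer_order) / bufs_per_page;
	unsigned int pages_per_batch =
		(preferred_batch + bufs_per_page - 1) / bufs_per_page;

	printf("step=%u bufs/page=%u truesize=%u pages/batch=%u\n",
	       buf_step, bufs_per_page, truesize, pages_per_batch);
	return 0;
}

With these assumed numbers each page yields two 1856-byte buffers, each accounted as 2048 bytes of truesize, and a preferred batch of eight buffers costs four pages.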
446 /* efx_fast_push_rx_descriptors - push new RX descriptors quickly
450 * @rx_queue->@max_fill. If there is insufficient atomic
457 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) in efx_fast_push_rx_descriptors() argument
459 struct efx_nic *efx = rx_queue->efx; in efx_fast_push_rx_descriptors()
463 if (!rx_queue->refill_enabled) in efx_fast_push_rx_descriptors()
467 fill_level = (rx_queue->added_count - rx_queue->removed_count); in efx_fast_push_rx_descriptors()
468 EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries); in efx_fast_push_rx_descriptors()
469 if (fill_level >= rx_queue->fast_fill_trigger) in efx_fast_push_rx_descriptors()
473 if (unlikely(fill_level < rx_queue->min_fill)) { in efx_fast_push_rx_descriptors()
475 rx_queue->min_fill = fill_level; in efx_fast_push_rx_descriptors()
478 batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page; in efx_fast_push_rx_descriptors()
479 space = rx_queue->max_fill - fill_level; in efx_fast_push_rx_descriptors()
482 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_fast_push_rx_descriptors()
483 "RX queue %d fast-filling descriptor ring from" in efx_fast_push_rx_descriptors()
486 rx_queue->max_fill); in efx_fast_push_rx_descriptors()
489 rc = efx_init_rx_buffers(rx_queue, atomic); in efx_fast_push_rx_descriptors()
495 } while ((space -= batch_size) >= batch_size); in efx_fast_push_rx_descriptors()
497 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_fast_push_rx_descriptors()
498 "RX queue %d fast-filled descriptor ring " in efx_fast_push_rx_descriptors()
500 rx_queue->added_count - rx_queue->removed_count); in efx_fast_push_rx_descriptors()
503 if (rx_queue->notified_count != rx_queue->added_count) in efx_fast_push_rx_descriptors()
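efx_fast_push_rx_descriptors() measures the fill level as the difference of two free-running counters (added_count - removed_count), returns early while that level is still at or above fast_fill_trigger, and otherwise refills in whole batches up to max_fill; the atomic argument only selects GFP_ATOMIC over GFP_KERNEL inside efx_init_rx_buffers(), and an allocation failure falls back to the slow-fill timer. Below is a minimal sketch of that decision logic under those assumptions, with simplified names and the page allocation stubbed out.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the RX queue's fill accounting. */
struct rxq {
	unsigned int added;	/* like added_count */
	unsigned int removed;	/* like removed_count */
	unsigned int max_fill;	/* ring entries minus descriptor headroom */
	unsigned int trigger;	/* max_fill minus one batch */
	unsigned int batch;	/* buffers added per refill batch */
};

/* Stub for one efx_init_rx_buffers() call. In the driver this is where the
 * atomic flag picks GFP_ATOMIC vs GFP_KERNEL; returning false models an
 * allocation failure that would schedule a slow fill instead.
 */
static bool fill_one_batch(struct rxq *q, bool atomic)
{
	(void)atomic;
	q->added += q->batch;
	return true;
}

static void fast_push(struct rxq *q, bool atomic)
{
	unsigned int fill_level = q->added - q->removed;
	unsigned int space;

	if (fill_level >= q->trigger)
		return;			/* still comfortably full */

	space = q->max_fill - fill_level;
	do {
		if (!fill_one_batch(q, atomic))
			break;		/* would schedule the slow fill */
	} while ((space -= q->batch) >= q->batch);

	printf("refilled to %u/%u\n", q->added - q->removed, q->max_fill);
}

int main(void)
{
	struct rxq q = { .added = 512, .removed = 400,
			 .max_fill = 496, .trigger = 480, .batch = 16 };

	fast_push(&q, true);	/* fill level 112 is below the trigger */
	return 0;
}

Because the trigger is max_fill minus one batch (see the efx_init_rx_queue() lines above), reaching the loop guarantees room for at least one whole batch, which is why its first iteration can run unconditionally.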
514 struct napi_struct *napi = &channel->napi_str; in efx_rx_packet_gro()
515 struct efx_nic *efx = channel->efx; in efx_rx_packet_gro()
527 if (efx->net_dev->features & NETIF_F_RXHASH && in efx_rx_packet_gro()
532 skb->csum = csum; in efx_rx_packet_gro()
533 skb->ip_summed = CHECKSUM_COMPLETE; in efx_rx_packet_gro()
535 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? in efx_rx_packet_gro()
538 skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); in efx_rx_packet_gro()
541 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, in efx_rx_packet_gro()
542 rx_buf->page, rx_buf->page_offset, in efx_rx_packet_gro()
543 rx_buf->len); in efx_rx_packet_gro()
544 rx_buf->page = NULL; in efx_rx_packet_gro()
545 skb->len += rx_buf->len; in efx_rx_packet_gro()
546 if (skb_shinfo(skb)->nr_frags == n_frags) in efx_rx_packet_gro()
549 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); in efx_rx_packet_gro()
552 skb->data_len = skb->len; in efx_rx_packet_gro()
553 skb->truesize += n_frags * efx->rx_buffer_truesize; in efx_rx_packet_gro()
555 skb_record_rx_queue(skb, channel->rx_queue.core_index); in efx_rx_packet_gro()
565 WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock)); in efx_find_rss_context_entry()
567 ctx = xa_load(&efx->net_dev->ethtool->rss_ctx, id); in efx_find_rss_context_entry()
577 for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); i++) in efx_set_default_rx_indir_table()
578 indir[i] = ethtool_rxfh_indir_default(i, efx->rss_spread); in efx_set_default_rx_indir_table()
582 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
585 * Return: %true if the specification is a non-drop RX filter that
592 if (!(spec->flags & EFX_FILTER_FLAG_RX) || in efx_filter_is_mc_recipient()
593 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) in efx_filter_is_mc_recipient()
596 if (spec->match_flags & in efx_filter_is_mc_recipient()
598 is_multicast_ether_addr(spec->loc_mac)) in efx_filter_is_mc_recipient()
601 if ((spec->match_flags & in efx_filter_is_mc_recipient()
604 if (spec->ether_type == htons(ETH_P_IP) && in efx_filter_is_mc_recipient()
605 ipv4_is_multicast(spec->loc_host[0])) in efx_filter_is_mc_recipient()
607 if (spec->ether_type == htons(ETH_P_IPV6) && in efx_filter_is_mc_recipient()
608 ((const u8 *)spec->loc_host)[0] == 0xff) in efx_filter_is_mc_recipient()
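efx_filter_is_mc_recipient() classifies a non-drop RX filter as multicast-bound by whichever match fields are present: a multicast destination MAC, an IPv4 destination in 224.0.0.0/4, or an IPv6 destination whose first byte is 0xff. The same three address tests can be written in plain C as below; the helper names are local stand-ins for is_multicast_ether_addr() and ipv4_is_multicast(), and the sample addresses are arbitrary.

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A multicast Ethernet address has the group bit set in its first octet. */
static bool mac_is_multicast(const uint8_t mac[6])
{
	return mac[0] & 0x01;
}

/* IPv4 multicast is 224.0.0.0/4, i.e. the top nibble is 0xe. */
static bool ipv4_is_mcast(uint32_t addr_be)
{
	return (ntohl(addr_be) & 0xf0000000u) == 0xe0000000u;
}

/* IPv6 multicast is ff00::/8, i.e. the first byte is 0xff, which is exactly
 * the cast-and-index test on loc_host in the matched line above.
 */
static bool ipv6_is_mcast(const uint8_t addr[16])
{
	return addr[0] == 0xff;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	const uint8_t v6[16] = { 0xff, 0x02, [15] = 0x01 };
	uint32_t v4 = htonl(0xe0000001u);	/* 224.0.0.1 */

	printf("mac:%d v4:%d v6:%d\n", mac_is_multicast(mac),
	       ipv4_is_mcast(v4), ipv6_is_mcast(v6));
	return 0;
}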
618 if ((left->match_flags ^ right->match_flags) | in efx_filter_spec_equal()
619 ((left->flags ^ right->flags) & in efx_filter_spec_equal()
623 return memcmp(&left->vport_id, &right->vport_id, in efx_filter_spec_equal()
624 sizeof(struct efx_filter_spec) - in efx_filter_spec_equal()
631 return jhash2((const u32 *)&spec->vport_id, in efx_filter_spec_hash()
632 (sizeof(struct efx_filter_spec) - in efx_filter_spec_hash()
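efx_filter_spec_equal() and efx_filter_spec_hash() treat everything from the vport_id member to the end of struct efx_filter_spec as one opaque key: equality is the flag comparison followed by a memcmp over that region, and the hash is jhash2() over the same bytes. Here is a sketch of that offsetof-based pattern with a toy struct; the layout is invented for illustration and a byte-wise FNV-1a mix stands in for jhash2().

#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for struct efx_filter_spec: a couple of flag words followed
 * by the match fields that form the lookup key.
 */
struct toy_spec {
	uint32_t match_flags;
	uint32_t flags;
	uint32_t vport_id;	/* the key region starts here */
	uint32_t ether_type;
	uint32_t loc_host[4];
	uint16_t loc_port;
	uint16_t rem_port;
};

#define KEY_OFFSET	offsetof(struct toy_spec, vport_id)
#define KEY_LEN		(sizeof(struct toy_spec) - KEY_OFFSET)

static bool spec_equal(const struct toy_spec *l, const struct toy_spec *r)
{
	if (l->match_flags != r->match_flags || l->flags != r->flags)
		return false;
	return !memcmp(&l->vport_id, &r->vport_id, KEY_LEN);
}

/* Stand-in for jhash2(): any mix over the same key region works for the
 * sketch, as long as equal keys hash equally.
 */
static uint32_t spec_hash(const struct toy_spec *s)
{
	const unsigned char *p = (const unsigned char *)&s->vport_id;
	uint32_t h = 0x811c9dc5u;
	size_t n;

	for (n = 0; n < KEY_LEN; n++)
		h = (h ^ p[n]) * 0x01000193u;
	return h;
}

int main(void)
{
	struct toy_spec a = { .match_flags = 1, .vport_id = 7, .loc_port = 80 };
	struct toy_spec b;

	memcpy(&b, &a, sizeof(b));
	printf("equal=%d hash=%08" PRIx32 "\n", spec_equal(&a, &b),
	       spec_hash(&a));
	return 0;
}

Keying from vport_id onward keeps the earlier fields, which are compared (or deliberately ignored) separately, out of the hashed region.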
641 if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) { in efx_rps_check_rule()
645 if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) { in efx_rps_check_rule()
649 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; in efx_rps_check_rule()
652 } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */ in efx_rps_check_rule()
670 lockdep_assert_held(&efx->rps_hash_lock); in efx_rps_hash_bucket()
671 if (!efx->rps_hash_table) in efx_rps_hash_bucket()
673 return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; in efx_rps_hash_bucket()
688 if (efx_filter_spec_equal(spec, &rule->spec)) in efx_rps_hash_find()
707 if (efx_filter_spec_equal(spec, &rule->spec)) { in efx_rps_hash_add()
715 memcpy(&rule->spec, spec, sizeof(rule->spec)); in efx_rps_hash_add()
716 hlist_add_head(&rule->node, head); in efx_rps_hash_add()
732 if (efx_filter_spec_equal(spec, &rule->spec)) { in efx_rps_hash_del()
739 if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING) in efx_rps_hash_del()
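The efx_rps_hash_find/add/del helpers keep ARFS rules in a fixed array of hlist buckets selected by the spec hash modulo EFX_ARFS_HASH_TABLE_SIZE, and all three walk a single bucket comparing full specs. A compact userspace sketch of that bucket scheme using a singly linked list follows; the key type, table size, and helper names are illustrative stand-ins, and efx_rps_hash_add()'s find-or-create behaviour is mirrored by the `new` out-parameter.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TABLE_SIZE	8u	/* stand-in for EFX_ARFS_HASH_TABLE_SIZE */

struct key { unsigned int proto, port; };	/* stand-in for the spec */

struct rule {
	struct key key;
	int filter_id;
	struct rule *next;
};

static struct rule *table[TABLE_SIZE];

static unsigned int key_hash(const struct key *k)
{
	return (k->proto * 31u + k->port) % TABLE_SIZE;
}

static bool key_equal(const struct key *a, const struct key *b)
{
	return !memcmp(a, b, sizeof(*a));
}

/* Find-or-add, like efx_rps_hash_add(): *new reports whether the rule was
 * created by this call.
 */
static struct rule *rule_add(const struct key *k, bool *new)
{
	struct rule **head = &table[key_hash(k)], *r;

	for (r = *head; r; r = r->next)
		if (key_equal(&r->key, k)) {
			*new = false;
			return r;
		}
	r = calloc(1, sizeof(*r));
	if (!r)
		return NULL;
	r->key = *k;
	r->next = *head;
	*head = r;
	*new = true;
	return r;
}

/* Remove a rule by key, like efx_rps_hash_del() without its state checks. */
static void rule_del(const struct key *k)
{
	struct rule **pp = &table[key_hash(k)];

	while (*pp) {
		if (key_equal(&(*pp)->key, k)) {
			struct rule *dead = *pp;

			*pp = dead->next;
			free(dead);
			return;
		}
		pp = &(*pp)->next;
	}
}

int main(void)
{
	struct key k = { .proto = 6, .port = 443 };
	bool new = false;

	rule_add(&k, &new);
	printf("first add: new=%d\n", new);
	rule_add(&k, &new);
	printf("second add: new=%d\n", new);
	rule_del(&k);
	return 0;
}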
755 mutex_lock(&efx->mac_lock); in efx_probe_filters()
756 rc = efx->type->filter_table_probe(efx); in efx_probe_filters()
761 if (efx->type->offload_features & NETIF_F_NTUPLE) { in efx_probe_filters()
766 channel->rps_flow_id = in efx_probe_filters()
767 kcalloc(efx->type->max_rx_ip_filters, in efx_probe_filters()
768 sizeof(*channel->rps_flow_id), in efx_probe_filters()
770 if (!channel->rps_flow_id) in efx_probe_filters()
774 i < efx->type->max_rx_ip_filters; in efx_probe_filters()
776 channel->rps_flow_id[i] = in efx_probe_filters()
778 channel->rfs_expire_index = 0; in efx_probe_filters()
779 channel->rfs_filter_count = 0; in efx_probe_filters()
784 kfree(channel->rps_flow_id); in efx_probe_filters()
785 channel->rps_flow_id = NULL; in efx_probe_filters()
787 efx->type->filter_table_remove(efx); in efx_probe_filters()
788 rc = -ENOMEM; in efx_probe_filters()
794 mutex_unlock(&efx->mac_lock); in efx_probe_filters()
804 cancel_delayed_work_sync(&channel->filter_work); in efx_remove_filters()
805 kfree(channel->rps_flow_id); in efx_remove_filters()
806 channel->rps_flow_id = NULL; in efx_remove_filters()
809 efx->type->filter_table_remove(efx); in efx_remove_filters()
818 struct efx_nic *efx = efx_netdev_priv(req->net_dev); in efx_filter_rfs_work()
819 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); in efx_filter_rfs_work()
820 int slot_idx = req - efx->rps_slot; in efx_filter_rfs_work()
825 rc = efx->type->filter_insert(efx, &req->spec, true); in efx_filter_rfs_work()
828 rc %= efx->type->max_rx_ip_filters; in efx_filter_rfs_work()
829 if (efx->rps_hash_table) { in efx_filter_rfs_work()
830 spin_lock_bh(&efx->rps_hash_lock); in efx_filter_rfs_work()
831 rule = efx_rps_hash_find(efx, &req->spec); in efx_filter_rfs_work()
835 * tying us to an arfs_id, meaning that as soon as the filter in efx_filter_rfs_work()
840 rule->filter_id = EFX_ARFS_FILTER_ID_ERROR; in efx_filter_rfs_work()
842 rule->filter_id = rc; in efx_filter_rfs_work()
843 arfs_id = rule->arfs_id; in efx_filter_rfs_work()
845 spin_unlock_bh(&efx->rps_hash_lock); in efx_filter_rfs_work()
851 mutex_lock(&efx->rps_mutex); in efx_filter_rfs_work()
852 if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID) in efx_filter_rfs_work()
853 channel->rfs_filter_count++; in efx_filter_rfs_work()
854 channel->rps_flow_id[rc] = req->flow_id; in efx_filter_rfs_work()
855 mutex_unlock(&efx->rps_mutex); in efx_filter_rfs_work()
857 if (req->spec.ether_type == htons(ETH_P_IP)) in efx_filter_rfs_work()
858 netif_info(efx, rx_status, efx->net_dev, in efx_filter_rfs_work()
860 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", in efx_filter_rfs_work()
861 req->spec.rem_host, ntohs(req->spec.rem_port), in efx_filter_rfs_work()
862 req->spec.loc_host, ntohs(req->spec.loc_port), in efx_filter_rfs_work()
863 req->rxq_index, req->flow_id, rc, arfs_id); in efx_filter_rfs_work()
865 netif_info(efx, rx_status, efx->net_dev, in efx_filter_rfs_work()
867 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", in efx_filter_rfs_work()
868 req->spec.rem_host, ntohs(req->spec.rem_port), in efx_filter_rfs_work()
869 req->spec.loc_host, ntohs(req->spec.loc_port), in efx_filter_rfs_work()
870 req->rxq_index, req->flow_id, rc, arfs_id); in efx_filter_rfs_work()
871 channel->n_rfs_succeeded++; in efx_filter_rfs_work()
873 if (req->spec.ether_type == htons(ETH_P_IP)) in efx_filter_rfs_work()
874 netif_dbg(efx, rx_status, efx->net_dev, in efx_filter_rfs_work()
876 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", in efx_filter_rfs_work()
877 req->spec.rem_host, ntohs(req->spec.rem_port), in efx_filter_rfs_work()
878 req->spec.loc_host, ntohs(req->spec.loc_port), in efx_filter_rfs_work()
879 req->rxq_index, req->flow_id, rc, arfs_id); in efx_filter_rfs_work()
881 netif_dbg(efx, rx_status, efx->net_dev, in efx_filter_rfs_work()
883 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", in efx_filter_rfs_work()
884 req->spec.rem_host, ntohs(req->spec.rem_port), in efx_filter_rfs_work()
885 req->spec.loc_host, ntohs(req->spec.loc_port), in efx_filter_rfs_work()
886 req->rxq_index, req->flow_id, rc, arfs_id); in efx_filter_rfs_work()
887 channel->n_rfs_failed++; in efx_filter_rfs_work()
891 __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, in efx_filter_rfs_work()
896 clear_bit(slot_idx, &efx->rps_slot_map); in efx_filter_rfs_work()
897 dev_put(req->net_dev); in efx_filter_rfs_work()
913 if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) in efx_filter_rfs()
916 return -EBUSY; in efx_filter_rfs()
919 rc = -EINVAL; in efx_filter_rfs()
924 rc = -EPROTONOSUPPORT; in efx_filter_rfs()
929 rc = -EPROTONOSUPPORT; in efx_filter_rfs()
933 rc = -EPROTONOSUPPORT; in efx_filter_rfs()
937 req = efx->rps_slot + slot_idx; in efx_filter_rfs()
938 efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, in efx_filter_rfs()
939 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, in efx_filter_rfs()
941 req->spec.match_flags = in efx_filter_rfs()
945 req->spec.ether_type = fk.basic.n_proto; in efx_filter_rfs()
946 req->spec.ip_proto = fk.basic.ip_proto; in efx_filter_rfs()
949 req->spec.rem_host[0] = fk.addrs.v4addrs.src; in efx_filter_rfs()
950 req->spec.loc_host[0] = fk.addrs.v4addrs.dst; in efx_filter_rfs()
952 memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src, in efx_filter_rfs()
954 memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst, in efx_filter_rfs()
958 req->spec.rem_port = fk.ports.src; in efx_filter_rfs()
959 req->spec.loc_port = fk.ports.dst; in efx_filter_rfs()
961 if (efx->rps_hash_table) { in efx_filter_rfs()
963 spin_lock(&efx->rps_hash_lock); in efx_filter_rfs()
964 rule = efx_rps_hash_add(efx, &req->spec, &new); in efx_filter_rfs()
966 rc = -ENOMEM; in efx_filter_rfs()
970 rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; in efx_filter_rfs()
971 rc = rule->arfs_id; in efx_filter_rfs()
973 if (!new && rule->rxq_index == rxq_index && in efx_filter_rfs()
974 rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING) in efx_filter_rfs()
976 rule->rxq_index = rxq_index; in efx_filter_rfs()
977 rule->filter_id = EFX_ARFS_FILTER_ID_PENDING; in efx_filter_rfs()
978 spin_unlock(&efx->rps_hash_lock); in efx_filter_rfs()
989 dev_hold(req->net_dev = net_dev); in efx_filter_rfs()
990 INIT_WORK(&req->work, efx_filter_rfs_work); in efx_filter_rfs()
991 req->rxq_index = rxq_index; in efx_filter_rfs()
992 req->flow_id = flow_id; in efx_filter_rfs()
993 schedule_work(&req->work); in efx_filter_rfs()
996 spin_unlock(&efx->rps_hash_lock); in efx_filter_rfs()
998 clear_bit(slot_idx, &efx->rps_slot_map); in efx_filter_rfs()
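At the top of efx_filter_rfs() (the earlier lines in this group), the driver claims one of a small fixed set of request slots by looping over a bitmap with test_and_set_bit(), returns -EBUSY when every slot is taken, and releases the slot with clear_bit() on the error paths here and when the deferred work completes. Below is a userspace sketch of that slot-claiming pattern using C11 atomics for the bitmap word; the slot count and helper names are stand-ins, not the driver's.

#include <stdatomic.h>
#include <stdio.h>

#define MAX_IN_FLIGHT	4	/* stand-in for the driver's slot count */
#define NO_SLOT		(-1)	/* models returning -EBUSY */

static atomic_ulong slot_map;	/* one bit per in-flight request slot */

/* Claim a free slot: the atomic fetch_or both sets the bit and reports
 * whether it was already set, mirroring test_and_set_bit().
 */
static int slot_get(void)
{
	int idx;

	for (idx = 0; idx < MAX_IN_FLIGHT; idx++) {
		unsigned long bit = 1ul << idx;

		if (!(atomic_fetch_or(&slot_map, bit) & bit))
			return idx;
	}
	return NO_SLOT;
}

/* Release a slot, mirroring clear_bit(). */
static void slot_put(int idx)
{
	atomic_fetch_and(&slot_map, ~(1ul << idx));
}

int main(void)
{
	int a = slot_get();
	int b = slot_get();

	printf("got slots %d and %d\n", a, b);
	slot_put(b);
	slot_put(a);
	return 0;
}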
1005 struct efx_nic *efx = channel->efx; in __efx_filter_rfs_expire()
1009 if (!mutex_trylock(&efx->rps_mutex)) in __efx_filter_rfs_expire()
1011 expire_one = efx->type->filter_rfs_expire_one; in __efx_filter_rfs_expire()
1012 index = channel->rfs_expire_index; in __efx_filter_rfs_expire()
1014 size = efx->type->max_rx_ip_filters; in __efx_filter_rfs_expire()
1016 flow_id = channel->rps_flow_id[index]; in __efx_filter_rfs_expire()
1019 quota--; in __efx_filter_rfs_expire()
1021 netif_info(efx, rx_status, efx->net_dev, in __efx_filter_rfs_expire()
1023 index, channel->channel, flow_id); in __efx_filter_rfs_expire()
1024 channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; in __efx_filter_rfs_expire()
1025 channel->rfs_filter_count--; in __efx_filter_rfs_expire()
1032 * if two callers race), ensure that we don't loop forever - in __efx_filter_rfs_expire()
1039 channel->rfs_expire_index = index; in __efx_filter_rfs_expire()
1040 mutex_unlock(&efx->rps_mutex); in __efx_filter_rfs_expire()
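__efx_filter_rfs_expire() scans the per-channel rps_flow_id table as a bounded circular walk: it resumes from rfs_expire_index, spends quota only on live entries, asks the NIC-specific filter_rfs_expire_one hook whether each one has gone idle, and the wrap-around check described in the matched comment above stops the walk so racing callers can never loop forever. A simplified sketch of that bounded scan follows; expire_one() here is a stand-in for the NIC hook, and this version stops after a single lap rather than reproducing the driver's exact wrap logic.

#include <stdbool.h>
#include <stdio.h>

#define TABLE_SIZE	8u
#define FLOW_INVALID	0u	/* stand-in for RPS_FLOW_ID_INVALID */

static unsigned int flow_id[TABLE_SIZE] = { 0, 3, 0, 7, 0, 0, 9, 0 };

/* Stand-in for the driver's filter_rfs_expire_one() hook: pretend every
 * installed flow has gone idle and can be expired.
 */
static bool expire_one(unsigned int index, unsigned int id)
{
	printf("expiring filter %u (flow %u)\n", index, id);
	return true;
}

/* Check at most `quota` live entries, resuming from *expire_index and never
 * visiting more than one full lap of the table.
 */
static void expire_some(unsigned int *expire_index, unsigned int quota)
{
	unsigned int index = *expire_index;
	unsigned int start = index;

	while (quota) {
		unsigned int id = flow_id[index];

		if (id != FLOW_INVALID) {
			quota--;
			if (expire_one(index, id))
				flow_id[index] = FLOW_INVALID;
		}
		index = (index + 1) % TABLE_SIZE;
		if (index == start)
			break;		/* scanned the whole table once */
	}
	*expire_index = index;	/* like writing back rfs_expire_index */
}

int main(void)
{
	unsigned int expire_index = 5;

	expire_some(&expire_index, 2);
	printf("next scan resumes at %u\n", expire_index);
	return 0;
}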