// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"

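/* Return the AF_XDP buffer pool bound to this ring's queue id, or NULL
 * when no XDP program is loaded or the queue is not in zero-copy mode.
 */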
struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(adapter->netdev, qid);
}

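/* Attach an AF_XDP buffer pool to queue pair @qid: DMA-map the pool,
 * quiesce the ring pair if the interface is running, mark the queue as
 * zero-copy enabled and bring the ring pair back up.
 */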
static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
				 struct xsk_buff_pool *pool,
				 u16 qid)
{
	struct net_device *netdev = adapter->netdev;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
	if (err)
		return err;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	set_bit(qid, adapter->af_xdp_zc_qps);

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);

		/* Kick start the NAPI context so that receiving will start */
		err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
		if (err) {
			clear_bit(qid, adapter->af_xdp_zc_qps);
			xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}

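/* Detach the AF_XDP buffer pool from queue pair @qid and undo its DMA
 * mapping, quiescing the ring pair around the change if it is active.
 */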
static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
{
	struct xsk_buff_pool *pool;
	bool if_running;

	pool = xsk_get_pool_from_qid(adapter->netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	clear_bit(qid, adapter->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);

	if (if_running)
		ixgbe_txrx_ring_enable(adapter, qid);

	return 0;
}

int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
			 struct xsk_buff_pool *pool,
			 u16 qid)
{
	return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
		ixgbe_xsk_pool_disable(adapter, qid);
}

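/* Run the XDP program on a zero-copy Rx buffer. XDP_REDIRECT is handled
 * first as the expected fast path for AF_XDP; the return value tells the
 * caller whether the buffer was passed up, transmitted, redirected,
 * consumed, or whether NAPI should stop early (IXGBE_XDP_EXIT).
 */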
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ixgbe_ring *ring;
	struct xdp_frame *xdpf;
	u32 act;

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return IXGBE_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = IXGBE_XDP_EXIT;
		else
			result = IXGBE_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto out_failure;
		ring = ixgbe_determine_xdp_ring(adapter);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_lock(&ring->tx_lock);
		result = ixgbe_xmit_xdp_ring(ring, xdpf);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_unlock(&ring->tx_lock);
		if (result == IXGBE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = IXGBE_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
	}
	return result;
}

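/* Refill @count Rx descriptors with buffers taken from the XSK pool.
 * Returns true if all requested buffers were placed, false if the pool
 * ran empty before the ring could be fully refilled.
 */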
bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	dma_addr_t dma;
	bool ok = true;

	/* nothing to do */
	if (!count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!bi->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(bi->xdp);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		count--;
	} while (count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}

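/* XDP_PASS path: copy the zero-copy buffer contents (including metadata)
 * into a freshly allocated skb so the frame can enter the regular stack.
 */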
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      const struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	return skb;
}

static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}

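/* Zero-copy variant of the Rx cleanup routine: consume written-back
 * descriptors, run XDP on each buffer, refill the ring in batches and
 * update the XSK pool's need_wakeup state before returning.
 */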
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_zc(rx_ring,
							     cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			ixgbe_inc_ntc(rx_ring);
			next_bi =
			       &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->discard = true;
			continue;
		}

		if (unlikely(bi->discard)) {
			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			bi->discard = false;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		bi->xdp->data_end = bi->xdp->data + size;
		xsk_buff_dma_sync_for_cpu(bi->xdp);
		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);

		if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
			xdp_xmit |= xdp_res;
		} else if (xdp_res == IXGBE_XDP_EXIT) {
			failure = true;
			break;
		} else if (xdp_res == IXGBE_XDP_CONSUMED) {
			xsk_buff_free(bi->xdp);
		} else if (xdp_res == IXGBE_XDP_PASS) {
			goto construct_skb;
		}

		bi->xdp = NULL;
		total_rx_packets++;
		total_rx_bytes += size;

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);
		continue;

construct_skb:
		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);

		ixgbe_xdp_ring_update_tail_locked(ring);
	}

	ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,
				   total_rx_bytes);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	struct ixgbe_rx_buffer *bi;
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		bi = &rx_ring->rx_buffer_info[i];

		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}

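/* Pull Tx descriptors from the XSK pool and post them on the XDP ring,
 * bounded by @budget and by the number of free ring descriptors.
 * Returns true unless the budget or the ring's free descriptors ran out.
 */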
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;
	u32 cmd_type;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
			work_done = false;
			break;
		}

		if (!netif_carrier_ok(xdp_ring->netdev))
			break;

		if (!xsk_tx_peek_desc(pool, &desc))
			break;

		dma = xsk_buff_raw_get_dma(pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;
		tx_bi->xdpf = NULL;
		tx_bi->gso_segs = 1;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= desc.len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_tx_release(pool);
	}

	return !!budget && work_done;
}

static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

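/* Reclaim completed descriptors on an XDP Tx ring: unmap and free any
 * XDP frames, report zero-copy completions back to the XSK pool and
 * kick off a new transmit pass via ixgbe_xmit_zc().
 */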
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	unsigned int total_packets = 0, total_bytes = 0;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	tx_bi = &tx_ring->tx_buffer_info[ntc];
	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);

	while (ntc != ntu) {
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		ntc++;
		if (unlikely(ntc == tx_ring->count)) {
			ntc = 0;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);
	}

	tx_ring->next_to_clean = ntc;
	ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
				   total_bytes);

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);

	if (xsk_uses_need_wakeup(pool))
		xsk_set_tx_need_wakeup(pool);

	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}

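/* ndo_xsk_wakeup() handler: validate the queue and, unless NAPI is
 * already scheduled for this vector, rearm its interrupt so the pending
 * work gets processed.
 */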
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -EINVAL;

	if (qid >= adapter->num_xdp_queues)
		return -EINVAL;

	ring = adapter->xdp_ring[qid];

	if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
		return -ENETDOWN;

	if (!ring->xsk_pool)
		return -EINVAL;

	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}

void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_buffer_info[ntc];

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc == tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);
}