/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_TXRX_LIB_H_
#define _ICE_TXRX_LIB_H_
#include "ice.h"

/**
 * ice_set_rx_bufs_act - propagate Rx buffer action to frags
 * @xdp: XDP buffer representing frame (linear and frags part)
 * @rx_ring: Rx ring struct
 * @act: action to store onto Rx buffers related to XDP buffer parts
 *
 * Set the action that should be taken before putting back the Rx buffers,
 * from the first frag to the last.
 */
static inline void
ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
		    const unsigned int act)
{
	u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
	u32 nr_frags = rx_ring->nr_frags + 1;
	u32 idx = rx_ring->first_desc;
	u32 cnt = rx_ring->count;
	struct ice_rx_buf *buf;

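	/* Propagate the action to every Rx buffer that belongs to this frame;
	 * rx_ring->nr_frags counts the frag buffers only, so one is added for
	 * the head (linear) buffer, and the index wraps at the end of the ring.
	 */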
	for (int i = 0; i < nr_frags; i++) {
		buf = &rx_ring->rx_buf[idx];
		buf->act = act;

		if (++idx == cnt)
			idx = 0;
	}

	/* adjust pagecnt_bias on frags freed by XDP prog */
	if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
		u32 delta = rx_ring->nr_frags - sinfo_frags;

		while (delta) {
			if (idx == 0)
				idx = cnt - 1;
			else
				idx--;
			buf = &rx_ring->rx_buf[idx];
			buf->pagecnt_bias--;
			delta--;
		}
	}
}

/**
 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 * @status_err_n: Rx descriptor status_error0 or status_error1 bits
 * @stat_err_bits: value to mask
 *
 * This function does a fast bitwise AND of the requested bits against the
 * descriptor field and returns the result as a boolean, which is all the
 * mask is ever used for. No shifting is needed because the status_error
 * fields begin at bit offset zero.
 */
static inline bool
ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)
{
	return !!(status_err_n & cpu_to_le16(stat_err_bits));
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */
static inline bool
ice_is_non_eop(const struct ice_rx_ring *rx_ring,
	       const union ice_32b_rx_flex_desc *rx_desc)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
		return false;

	rx_ring->ring_stats->rx_stats.non_eop_descs++;

	return true;
}

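/**
 * ice_build_ctob - build the cmd_type_offset_bsz field of a Tx data descriptor
 * @td_cmd: Tx descriptor command bits
 * @td_offset: Tx descriptor header offsets
 * @size: size of the Tx buffer in bytes
 * @td_tag: L2 tag 1 value (e.g. VLAN tag) to insert
 *
 * Return: the descriptor's cmd_type_offset_bsz quad word in little-endian
 * byte order, with the DATA descriptor type already set.
 */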
static inline __le64
ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd    << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag    << ICE_TXD_QW1_L2TAG1_S));
}
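
/* Example (a sketch, not verbatim driver code): filling a data descriptor for
 * a DMA-mapped buffer of @size bytes with EOP requested:
 *
 *	tx_desc->buf_addr = cpu_to_le64(dma);
 *	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
 *						      size, 0);
 */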

/**
 * ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor
 * @rx_desc: Rx 32b flex descriptor with RXDID=2
 *
 * The OS and current PF implementation only support stripping a single VLAN tag
 * at a time, so there should only ever be 0 or 1 tags in the l2tag* fields. If
 * one is found, return the tag; otherwise return 0 to mean no VLAN tag was found.
 */
static inline u16
ice_get_vlan_tci(const union ice_32b_rx_flex_desc *rx_desc)
{
	u16 stat_err_bits;

	stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
	if (ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
		return le16_to_cpu(rx_desc->wb.l2tag1);

	stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
	if (ice_test_staterr(rx_desc->wb.status_error1, stat_err_bits))
		return le16_to_cpu(rx_desc->wb.l2tag2_2nd);

	return 0;
}

/**
 * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
 * @xdp_ring: XDP Tx ring
 *
 * This function updates the XDP Tx ring tail register.
 */
static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
{
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();
	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}

/**
 * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 *
 * Return: index of the descriptor that the RS bit was set on
 */
static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
{
	u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct ice_tx_desc *tx_desc;

	tx_desc = ICE_TX_DESC(xdp_ring, rs_idx);
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);

	return rs_idx;
}
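
/* Typical flush sequence once an Rx NAPI poll has produced XDP_TX/REDIRECT
 * descriptors (a simplified sketch of the pattern in ice_finalize_xdp_rx();
 * locking omitted, and first_tx_buf stands in for the first Tx buffer of the
 * batch):
 *
 *	if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
 *		first_tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
 *		ice_xdp_ring_update_tail(xdp_ring);
 *	}
 */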

void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
			bool frame);
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb);
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci);

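/**
 * ice_xdp_meta_set_desc - store the Rx descriptor in the ice XDP buffer wrapper
 * @xdp: pointer to the xdp_buff embedded in a struct ice_xdp_buff
 * @eop_desc: EOP (end of packet) Rx descriptor of the current frame
 *
 * Stash the descriptor so that the XDP metadata hints (e.g. Rx hash, timestamp
 * or VLAN tag kfuncs) can read its fields after the XDP program has run.
 */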
static inline void
ice_xdp_meta_set_desc(struct xdp_buff *xdp,
		      union ice_32b_rx_flex_desc *eop_desc)
{
	struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff,
						    xdp_buff);

	xdp_ext->eop_desc = eop_desc;
}
#endif /* !_ICE_TXRX_LIB_H_ */