/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2019 Netronome Systems, Inc. */

#ifndef _NFP_NET_DP_
#define _NFP_NET_DP_

#include "nfp_net.h"

static inline dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
{
	return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
				    dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
				    dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline void
nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
	dma_sync_single_for_device(dp->dev, dma_addr,
				   dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
				   dp->rx_dma_dir);
}

static inline void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp,
					dma_addr_t dma_addr)
{
	dma_unmap_single_attrs(dp->dev, dma_addr,
			       dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
			       dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp,
					   dma_addr_t dma_addr,
					   unsigned int len)
{
	dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
				len, dp->rx_dma_dir);
}
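
/* Informal note on the helpers above: the RX buffer is mapped once with
 * DMA_ATTR_SKIP_CPU_SYNC, so CPU/device ownership hand-offs are explicit.
 * A refill path would sync for the device before posting the buffer, and
 * the RX path syncs only the received bytes back for the CPU.  Sketch of
 * a hypothetical refill caller:
 *
 *	dma_addr = nfp_net_dma_map_rx(dp, frag);
 *	if (dma_mapping_error(dp->dev, dma_addr))
 *		goto err_free_frag;
 *	nfp_net_dma_sync_dev_rx(dp, dma_addr);
 */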

/**
 * nfp_net_tx_full() - check if the TX ring is full
 * @tx_ring: TX ring to check
 * @dcnt:    Number of descriptors that need to be enqueued (must be >= 1)
 *
 * This function checks, based on the *host copy* of the read/write
 * pointers, whether a given TX ring is full.  The real TX queue may have
 * some newly made available slots.
 *
 * Return: True if the ring is full.
 */
static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
{
	return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
}
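
/* Usage sketch (hypothetical xmit path): because the check is based on the
 * host copies of the pointers, callers typically stop the queue and retry
 * later rather than treating a full ring as an error, e.g.
 *
 *	if (nfp_net_tx_full(tx_ring, nr_frags + 1)) {
 *		netif_tx_stop_queue(nd_q);
 *		return NETDEV_TX_BUSY;
 *	}
 */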

static inline void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
	wmb(); /* drain writebuffer */
	nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
	tx_ring->wr_ptr_add = 0;
}
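
/* Sketch of the intended batching (assumed caller): wr_ptr_add accumulates
 * descriptors posted across consecutive skbs, and the doorbell is rung only
 * once the xmit_more train ends or the queue is stopped, e.g.
 *
 *	if (!netdev_xmit_more() || netif_xmit_stopped(nd_q))
 *		nfp_net_tx_xmit_more_flush(tx_ring);
 */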

static inline u32
nfp_net_read_tx_cmpl(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp)
{
	if (tx_ring->txrwb)
		return *tx_ring->txrwb;
	return nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
}
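
/* The device reports TX completion progress either through a write-back
 * location (tx_ring->txrwb, when configured) or through a read of the QCP
 * read pointer.  A completion handler might consume it roughly like this
 * (sketch, assuming free-running u32 pointer arithmetic):
 *
 *	u32 done = nfp_net_read_tx_cmpl(tx_ring, dp);
 *	u32 todo = done - tx_ring->qcp_rd_p;
 */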

static inline void nfp_net_free_frag(void *frag, bool xdp)
{
	if (!xdp)
		skb_free_frag(frag);
	else
		__free_page(virt_to_page(frag));
}
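
/* Freeing must mirror the allocation: the skb path is expected to use
 * page-fragment buffers (netdev_alloc_frag()-style), while the XDP path
 * uses whole pages, hence the split above.  Callers pass the data path's
 * XDP state as @xdp so the matching free routine is picked.
 */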

/**
 * nfp_net_irq_unmask() - Unmask automasked interrupt
 * @nn:       NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * Clear the ICR for the IRQ entry.
 */
static inline void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
{
	nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
	nn_pci_flush(nn);
}
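
/* Typical use (sketch, assumed NAPI completion path): vectors automask when
 * they fire, so the poll loop re-arms the interrupt once it is done, e.g.
 *
 *	if (napi_complete_done(napi, pkts_polled))
 *		nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
 */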

struct seq_file;

/* Common */
void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_rx_ring *rx_ring, unsigned int idx);
void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_tx_ring *tx_ring, unsigned int idx);
void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx);

void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr);
int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);
bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
			const struct nfp_meta_parsed *meta);

enum nfp_nfd_version {
	NFP_NFD_VER_NFD3,
	NFP_NFD_VER_NFDK,
};

/**
 * struct nfp_dp_ops - Hooks wrapping the different data path implementations
 * @version:			Indicates the data path type
 * @tx_min_desc_per_pkt:	Minimal number of TX descs needed per packet
 * @cap_mask:			Mask of supported features
 * @dma_mask:			DMA addressing capability
 * @poll:			NAPI poll for normal RX/TX
 * @xsk_poll:			NAPI poll when XSK is enabled
 * @ctrl_poll:			Tasklet poll for control RX/TX
 * @xmit:			Xmit for the normal path
 * @ctrl_tx_one:		Xmit for the control path
 * @rx_ring_fill_freelist:	Give buffers from the ring to the FW
 * @tx_ring_alloc:		Allocate resources for a TX ring
 * @tx_ring_reset:		Free any untransmitted buffers and reset pointers
 * @tx_ring_free:		Free resources allocated to a TX ring
 * @tx_ring_bufs_alloc:		Allocate resources for each TX buffer
 * @tx_ring_bufs_free:		Free resources allocated to each TX buffer
 * @print_tx_descs:		Show a TX ring's state for debugging
 */
struct nfp_dp_ops {
	enum nfp_nfd_version version;
	unsigned int tx_min_desc_per_pkt;
	u32 cap_mask;
	u64 dma_mask;

	int (*poll)(struct napi_struct *napi, int budget);
	int (*xsk_poll)(struct napi_struct *napi, int budget);
	void (*ctrl_poll)(struct tasklet_struct *t);
	netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *netdev);
	bool (*ctrl_tx_one)(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
			    struct sk_buff *skb, bool old);
	void (*rx_ring_fill_freelist)(struct nfp_net_dp *dp,
				      struct nfp_net_rx_ring *rx_ring);
	int (*tx_ring_alloc)(struct nfp_net_dp *dp,
			     struct nfp_net_tx_ring *tx_ring);
	void (*tx_ring_reset)(struct nfp_net_dp *dp,
			      struct nfp_net_tx_ring *tx_ring);
	void (*tx_ring_free)(struct nfp_net_tx_ring *tx_ring);
	int (*tx_ring_bufs_alloc)(struct nfp_net_dp *dp,
				  struct nfp_net_tx_ring *tx_ring);
	void (*tx_ring_bufs_free)(struct nfp_net_dp *dp,
				  struct nfp_net_tx_ring *tx_ring);

	void (*print_tx_descs)(struct seq_file *file,
			       struct nfp_net_r_vector *r_vec,
			       struct nfp_net_tx_ring *tx_ring,
			       u32 d_rd_p, u32 d_wr_p);
};
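
/* Each data path provides a const instance of these ops (declared below as
 * nfp_nfd3_ops/nfp_nfdk_ops).  Abridged sketch of what such a definition
 * might look like, with illustrative values only:
 *
 *	const struct nfp_dp_ops nfp_nfd3_ops = {
 *		.version		= NFP_NFD_VER_NFD3,
 *		.tx_min_desc_per_pkt	= 1,
 *		.poll			= nfp_nfd3_poll,
 *		.xmit			= nfp_nfd3_tx,
 *		...
 *	};
 */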

static inline void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	dp->ops->tx_ring_reset(dp, tx_ring);
}

static inline void
nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
			      struct nfp_net_rx_ring *rx_ring)
{
	dp->ops->rx_ring_fill_freelist(dp, rx_ring);
}

static inline int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	return dp->ops->tx_ring_alloc(dp, tx_ring);
}

static inline void
nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	dp->ops->tx_ring_free(tx_ring);
}

static inline int
nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
			   struct nfp_net_tx_ring *tx_ring)
{
	return dp->ops->tx_ring_bufs_alloc(dp, tx_ring);
}

static inline void
nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
			  struct nfp_net_tx_ring *tx_ring)
{
	dp->ops->tx_ring_bufs_free(dp, tx_ring);
}

static inline void
nfp_net_debugfs_print_tx_descs(struct seq_file *file, struct nfp_net_dp *dp,
			       struct nfp_net_r_vector *r_vec,
			       struct nfp_net_tx_ring *tx_ring,
			       u32 d_rd_p, u32 d_wr_p)
{
	dp->ops->print_tx_descs(file, r_vec, tx_ring, d_rd_p, d_wr_p);
}

extern const struct nfp_dp_ops nfp_nfd3_ops;
extern const struct nfp_dp_ops nfp_nfdk_ops;

netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev);

#endif /* _NFP_NET_DP_ */