/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/bpf.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_cb_desc;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

#define XSK_PRIV_MAX 24

struct xdp_buff_xsk {
	struct xdp_buff xdp;
	u8 cb[XSK_PRIV_MAX];
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	u64 orig_addr;
	struct list_head free_list_node;
	struct list_head xskb_list_node;
};

#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t))
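
/* Usage sketch (hypothetical driver type, not defined in this header): a ZC
 * driver that wraps struct xdp_buff with its own per-buffer state relies on
 * XSK_CHECK_PRIV_TYPE() to fail the build if that state no longer fits inside
 * the cb[] area above.
 *
 *	struct my_drv_xdp_buff {
 *		struct xdp_buff xdp;	// must come first
 *		void *rx_desc;		// hypothetical driver-private field
 *	};
 *
 *	XSK_CHECK_PRIV_TYPE(struct my_drv_xdp_buff);
 */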

struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct list_head list; /* Protected by the RTNL_LOCK */
	u32 dma_pages_cnt;
};

struct xsk_buff_pool {
	/* Members only used in the control path first. */
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct xdp_umem *umem;
	struct work_struct work;
	struct list_head free_list;
	struct list_head xskb_list;
	u32 heads_cnt;
	u16 queue_id;

	/* Data path members, kept as close to free_heads (at the end of the
	 * struct) as possible.
	 */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	struct xsk_queue *cq;
	/* For performance reasons, each buff pool has its own array of dma_pages
	 * even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	struct xdp_desc *tx_descs;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 chunk_shift;
	u32 frame_len;
	u8 tx_metadata_len; /* inherited from umem */
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool unaligned;
	bool tx_sw_csum;
	void *addrs;
	/* Mutual exclusion of the completion ring in SKB mode. Two cases to
	 * protect: the NAPI TX thread racing with the sendmsg error paths in
	 * the SKB destructor callback, and sockets sharing a single cq when
	 * the same netdev and queue id are shared.
	 */
	spinlock_t cq_lock;
	struct xdp_buff_xsk *free_heads[];
};

/* Masks for xdp_umem_page flags.
 * The low 12 bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
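
/* Illustrative note (the code that sets the bit lives in the pool
 * implementation, not this header): each pool->dma_pages[] entry holds a
 * page-aligned DMA address, and bit 0 (XSK_NEXT_PG_CONTIG_MASK) is expected
 * to be set when the following page is contiguous in DMA address space,
 * conceptually:
 *
 *	dma_pages[i] = page_dma | (next_is_contig ? XSK_NEXT_PG_CONTIG_MASK : 0);
 *
 * Readers such as xp_init_xskb_dma() and xp_desc_crosses_non_contig_pg()
 * below therefore mask the bit off before using the address.
 */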

/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				     u64 addr)
{
	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}

static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				    dma_addr_t *dma_pages, u64 addr)
{
	xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
			  (addr & ~PAGE_MASK);
	xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
}
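
/* Worked example (illustrative numbers only): with a 4096-byte PAGE_SIZE, an
 * addr of 0x1040 indexes dma_pages[1]; after clearing the contiguity bit,
 * frame_dma is that page's DMA address plus the in-page offset 0x40, and dma
 * additionally skips pool->headroom and XDP_PACKET_HEADROOM so that it points
 * at the start of packet data rather than the start of the frame.
 */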

/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_for_cpu(xskb->pool->dev, xskb->dma,
				xskb->pool->frame_len,
				DMA_BIDIRECTIONAL);
}

static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	dma_sync_single_for_device(pool->dev, dma, size, DMA_BIDIRECTIONAL);
}

static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	return pool->dma_pages &&
	       !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}
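
/* Example (illustrative values): with a 4096-byte PAGE_SIZE, addr == 4032 and
 * len == 128 spill 64 bytes into the next page. That only matters when the
 * pool is DMA-mapped and the contiguity bit for addr's page is clear, i.e. the
 * two pages are not adjacent in DMA address space.
 */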

static inline bool xp_mb_desc(struct xdp_desc *desc)
{
	return desc->options & XDP_PKT_CONTD;
}

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
	       xp_unaligned_extract_offset(addr);
}

static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
}

static inline void xp_release(struct xdp_buff_xsk *xskb)
{
	if (xskb->pool->unaligned)
		xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
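
/* Encoding note (derived from the helpers above): in aligned mode the handle
 * is a plain umem address, orig_addr + offset. In unaligned mode the handle
 * keeps the chunk's base address in the low XSK_UNALIGNED_BUF_OFFSET_SHIFT
 * bits and the data offset in the bits above it, which is why
 * xp_unaligned_extract_offset() is a right shift and
 * xp_unaligned_add_offset_to_addr() recombines the two fields.
 */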

static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)
{
	return pool->tx_metadata_len > 0;
}

#endif /* XSK_BUFF_POOL_H_ */