xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_lro.c (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2015-2017, 2019 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_lro.c
22  * QCA driver framework(QDF) Large Receive Offload
23  */
24 
25 #include <qdf_lro.h>
26 #include <qdf_trace.h>
27 #include <qdf_types.h>
28 
29 #include <linux/list.h>
30 #include <net/tcp.h>
31 
32 /**
33  * qdf_lro_desc_pool_init() - Initialize the free pool of LRO
34  * descriptors
35  * @lro_desc_pool: free pool of the LRO descriptors
36  * @lro_mgr: LRO manager
37  *
38  * Initialize a list that holds the free LRO descriptors
39  *
40  * Return: none
41  */
42 static void qdf_lro_desc_pool_init(struct qdf_lro_desc_pool *lro_desc_pool,
43 	 struct net_lro_mgr *lro_mgr)
44 {
45 	int i;
46 
47 	INIT_LIST_HEAD(&lro_desc_pool->lro_free_list_head);
48 
49 	for (i = 0; i < QDF_LRO_DESC_POOL_SZ; i++) {
50 		lro_desc_pool->lro_desc_array[i].lro_desc =
51 			 &lro_mgr->lro_arr[i];
52 		list_add_tail(&lro_desc_pool->lro_desc_array[i].lro_node,
53 			 &lro_desc_pool->lro_free_list_head);
54 	}
55 }
56 
57 /**
58  * qdf_lro_desc_info_init() - Initialize the LRO descriptors
59  * @qdf_info: QDF LRO data structure
60  *
61  * Initialize the free pool of LRO descriptors and the entries
62  * of the hash table
63  *
64  * Return: none
65  */
66 static void qdf_lro_desc_info_init(struct qdf_lro_s *qdf_info)
67 {
68 	int i;
69 
70 	/* Initialize pool of free LRO desc.*/
71 	qdf_lro_desc_pool_init(&qdf_info->lro_desc_info.lro_desc_pool,
72 		 qdf_info->lro_mgr);
73 
74 	/* Initialize the hash table of LRO desc.*/
75 	for (i = 0; i < QDF_LRO_DESC_TABLE_SZ; i++) {
76 		/* initialize the flows in the hash table */
77 		INIT_LIST_HEAD(&qdf_info->lro_desc_info.
78 			 lro_hash_table[i].lro_desc_list);
79 	}
80 
81 }
82 
83 /**
84  * qdf_lro_get_skb_header() - LRO callback function
85  * @skb: network buffer
86  * @ip_hdr: contains a pointer to the IP header
87  * @tcpudp_hdr: contains a pointer to the TCP header
88  * @hdr_flags: indicates if this is a TCP, IPV4 frame
89  * @priv: private driver specific opaque pointer
90  *
91  * Get the IP and TCP headers from the skb
92  *
93  * Return: 0 - success, < 0 - failure
94  */
95 static int qdf_lro_get_skb_header(struct sk_buff *skb, void **ip_hdr,
96 	void **tcpudp_hdr, u64 *hdr_flags, void *priv)
97 {
98 	if (QDF_NBUF_CB_RX_IPV6_PROTO(skb)) {
99 		hdr_flags = 0;
100 		return -EINVAL;
101 	}
102 
103 	*hdr_flags |= (LRO_IPV4 | LRO_TCP);
104 	(*ip_hdr) = skb->data;
105 	(*tcpudp_hdr) = skb->data + QDF_NBUF_CB_RX_TCP_OFFSET(skb);
106 	return 0;
107 }
108 
109 /**
110  * qdf_lro_init() - LRO initialization function
111  *
112  * Return: LRO context
113  */
114 qdf_lro_ctx_t qdf_lro_init(void)
115 {
116 	struct qdf_lro_s *lro_ctx;
117 	size_t lro_info_sz, lro_mgr_sz, desc_arr_sz, desc_pool_sz;
118 	size_t hash_table_sz;
119 	uint8_t *lro_mem_ptr;
120 
121 	/*
122 	* Allocate all the LRO data structures at once and then carve
123 	* them up as needed
124 	*/
125 	lro_info_sz = sizeof(struct qdf_lro_s);
126 	lro_mgr_sz = sizeof(struct net_lro_mgr);
127 	desc_arr_sz =
128 		 (QDF_LRO_DESC_POOL_SZ * sizeof(struct net_lro_desc));
129 	desc_pool_sz =
130 		 (QDF_LRO_DESC_POOL_SZ * sizeof(struct qdf_lro_desc_entry));
131 	hash_table_sz =
132 		 (sizeof(struct qdf_lro_desc_table) * QDF_LRO_DESC_TABLE_SZ);
133 
134 	lro_mem_ptr = qdf_mem_malloc(lro_info_sz + lro_mgr_sz + desc_arr_sz +
135 					desc_pool_sz + hash_table_sz);
136 
137 	if (unlikely(!lro_mem_ptr))
138 		return NULL;
139 
140 	lro_ctx = (struct qdf_lro_s *)lro_mem_ptr;
141 	lro_mem_ptr += lro_info_sz;
142 	/* LRO manager */
143 	lro_ctx->lro_mgr = (struct net_lro_mgr *)lro_mem_ptr;
144 	lro_mem_ptr += lro_mgr_sz;
145 
146 	/* LRO descriptor array */
147 	lro_ctx->lro_mgr->lro_arr = (struct net_lro_desc *)lro_mem_ptr;
148 	lro_mem_ptr += desc_arr_sz;
149 
150 	/* LRO descriptor pool */
151 	lro_ctx->lro_desc_info.lro_desc_pool.lro_desc_array =
152 		 (struct qdf_lro_desc_entry *)lro_mem_ptr;
153 	lro_mem_ptr += desc_pool_sz;
154 
155 	/* hash table to store the LRO descriptors */
156 	lro_ctx->lro_desc_info.lro_hash_table =
157 		 (struct qdf_lro_desc_table *)lro_mem_ptr;
158 
159 	/* Initialize the LRO descriptors */
160 	qdf_lro_desc_info_init(lro_ctx);
161 
162 	/* LRO TODO - NAPI or RX thread */
163 	lro_ctx->lro_mgr->features |= LRO_F_NAPI;
164 
165 	lro_ctx->lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
166 	lro_ctx->lro_mgr->max_aggr = QDF_LRO_MAX_AGGR_SIZE;
167 	lro_ctx->lro_mgr->get_skb_header = qdf_lro_get_skb_header;
168 	lro_ctx->lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
169 	lro_ctx->lro_mgr->max_desc = QDF_LRO_DESC_POOL_SZ;
170 
171 	return lro_ctx;
172 }
173 
174 /**
175  * qdf_lro_deinit() - LRO deinitialization function
176  * @lro_ctx: LRO context
177  *
178  * Return: nothing
179  */
180 void qdf_lro_deinit(qdf_lro_ctx_t lro_ctx)
181 {
182 	if (likely(lro_ctx)) {
183 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
184 			 "LRO instance %pK is being freed", lro_ctx);
185 		qdf_mem_free(lro_ctx);
186 	}
187 }
188 
189 /**
190  * qdf_lro_tcp_flow_match() - function to check for a flow match
191  * @iph: IP header
192  * @tcph: TCP header
193  * @lro_desc: LRO descriptor
194  *
195  * Checks if the descriptor belongs to the same flow as the one
196  * indicated by the TCP and IP header.
197  *
198  * Return: true - flow match, false - flow does not match
199  */
200 static inline bool qdf_lro_tcp_flow_match(struct net_lro_desc *lro_desc,
201 	 struct iphdr *iph,
202 	 struct tcphdr *tcph)
203 {
204 	if ((lro_desc->tcph->source != tcph->source) ||
205 		 (lro_desc->tcph->dest != tcph->dest) ||
206 		 (lro_desc->iph->saddr != iph->saddr) ||
207 		 (lro_desc->iph->daddr != iph->daddr))
208 		return false;
209 
210 	return true;
211 
212 }
213 
214 /**
215  * qdf_lro_desc_find() - LRO descriptor look-up function
216  *
217  * @lro_ctx: LRO context
218  * @skb: network buffer
219  * @iph: IP header
220  * @tcph: TCP header
221  * @flow_hash: toeplitz hash
222  * @lro_desc: LRO descriptor to be returned
223  *
224  * Look-up the LRO descriptor in the hash table based on the
225  * flow ID toeplitz. If the flow is not found, allocates a new
226  * LRO descriptor and places it in the hash table
227  *
228  * Return: 0 - success, < 0 - failure
229  */
230 static int qdf_lro_desc_find(struct qdf_lro_s *lro_ctx,
231 	 struct sk_buff *skb, struct iphdr *iph, struct tcphdr *tcph,
232 	 uint32_t flow_hash, struct net_lro_desc **lro_desc)
233 {
234 	uint32_t i;
235 	struct qdf_lro_desc_table *lro_hash_table;
236 	struct list_head *ptr;
237 	struct qdf_lro_desc_entry *entry;
238 	struct qdf_lro_desc_pool *free_pool;
239 	struct qdf_lro_desc_info *desc_info = &lro_ctx->lro_desc_info;
240 
241 	*lro_desc = NULL;
242 	i = flow_hash & QDF_LRO_DESC_TABLE_SZ_MASK;
243 
244 	lro_hash_table = &desc_info->lro_hash_table[i];
245 
246 	if (unlikely(!lro_hash_table)) {
247 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
248 			 "Invalid hash entry");
249 		QDF_ASSERT(0);
250 		return -EINVAL;
251 	}
252 
253 	/* Check if this flow exists in the descriptor list */
254 	list_for_each(ptr, &lro_hash_table->lro_desc_list) {
255 		struct net_lro_desc *tmp_lro_desc = NULL;
256 
257 		entry = list_entry(ptr, struct qdf_lro_desc_entry, lro_node);
258 		tmp_lro_desc = entry->lro_desc;
259 			if (qdf_lro_tcp_flow_match(entry->lro_desc, iph, tcph)) {
260 				*lro_desc = entry->lro_desc;
261 				return 0;
262 			}
263 	}
264 
265 	/* no existing flow found, a new LRO desc needs to be allocated */
266 	free_pool = &lro_ctx->lro_desc_info.lro_desc_pool;
267 	entry = list_first_entry_or_null(
268 		 &free_pool->lro_free_list_head,
269 		 struct qdf_lro_desc_entry, lro_node);
270 	if (unlikely(!entry)) {
271 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
272 			 "Could not allocate LRO desc!");
273 		return -ENOMEM;
274 	}
275 
276 	list_del_init(&entry->lro_node);
277 
278 	if (unlikely(!entry->lro_desc)) {
279 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
280 			 "entry->lro_desc is NULL!");
281 		return -EINVAL;
282 	}
283 
284 	memset(entry->lro_desc, 0, sizeof(struct net_lro_desc));
285 
286 	/*
287 	 * lro_desc->active should be 0 and lro_desc->tcp_rcv_tsval
288 	 * should be 0 for newly allocated lro descriptors
289 	 */
290 	list_add_tail(&entry->lro_node,
291 		 &lro_hash_table->lro_desc_list);
292 
293 	*lro_desc = entry->lro_desc;
294 	return 0;
295 }
296 
/**
 * qdf_lro_get_info() - Update the LRO information
 *
 * @lro_ctx: LRO context
 * @nbuf: network buffer
 * @info: LRO related information passed in by the caller
 * @plro_desc: lro information returned as output
 *
 * Look-up the LRO descriptor based on the LRO information and
 * the network buffer provided. Update the skb cb with the
 * descriptor found. For flows that already aggregated packets,
 * additionally validates the TCP timestamp option against the
 * descriptor's cached timestamp state.
 *
 * Return: true: LRO eligible false: LRO ineligible
 */
bool qdf_lro_get_info(qdf_lro_ctx_t lro_ctx, qdf_nbuf_t nbuf,
						 struct qdf_lro_info *info,
						 void **plro_desc)
{
	struct net_lro_desc *lro_desc;
	struct iphdr *iph;
	struct tcphdr *tcph;
	/* hardware marked the frame LRO-eligible and it is not a pure ACK */
	int hw_lro_eligible =
		 QDF_NBUF_CB_RX_LRO_ELIGIBLE(nbuf) &&
		 (!QDF_NBUF_CB_RX_TCP_PURE_ACK(nbuf));

	if (unlikely(!lro_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			 "Invalid LRO context");
		return false;
	}

	if (!hw_lro_eligible)
		return false;

	iph = (struct iphdr *)info->iph;
	tcph = (struct tcphdr *)info->tcph;
	/* find (or allocate) the descriptor for this flow; the result is
	 * written to *plro_desc for the caller */
	if (0 != qdf_lro_desc_find(lro_ctx, nbuf, iph, tcph,
		 QDF_NBUF_CB_RX_FLOW_ID(nbuf),
		 (struct net_lro_desc **)plro_desc)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			 "finding the LRO desc failed");
		return false;
	}

	lro_desc = (struct net_lro_desc *)(*plro_desc);
	if (unlikely(!lro_desc)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			 "finding the LRO desc failed");
		return false;
	}

	/* if this is not the first skb, check the timestamp option */
	if (lro_desc->tcp_rcv_tsval) {
		/* doff == 8 means a 32-byte TCP header: 20 bytes base plus
		 * 12 bytes of options, the layout of NOP/NOP/TIMESTAMP */
		if (tcph->doff == 8) {
			__be32 *topt = (__be32 *)(tcph + 1);

			/* if the option block is not the canonical
			 * NOP/NOP/TIMESTAMP layout, skip validation and
			 * accept the packet */
			if (*topt != htonl((TCPOPT_NOP << 24)
				 |(TCPOPT_NOP << 16)
				 | (TCPOPT_TIMESTAMP << 8)
				 | TCPOLEN_TIMESTAMP))
				return true;

			/* timestamp should be in right order */
			topt++;
			if (after(ntohl(lro_desc->tcp_rcv_tsval),
					 ntohl(*topt)))
				return false;

			/* timestamp reply should not be zero */
			topt++;
			if (*topt == 0)
				return false;
		}
	}

	return true;
}
374 
375 /**
376  * qdf_lro_desc_free() - Free the LRO descriptor
377  * @desc: LRO descriptor
378  * @lro_ctx: LRO context
379  *
380  * Return the LRO descriptor to the free pool
381  *
382  * Return: none
383  */
384 void qdf_lro_desc_free(qdf_lro_ctx_t lro_ctx,
385 	 void *data)
386 {
387 	struct qdf_lro_desc_entry *entry;
388 	struct net_lro_mgr *lro_mgr;
389 	struct net_lro_desc *arr_base;
390 	struct qdf_lro_desc_info *desc_info;
391 	int i;
392 	struct net_lro_desc *desc = (struct net_lro_desc *)data;
393 
394 	qdf_assert(desc);
395 	qdf_assert(lro_ctx);
396 
397 	if (unlikely(!desc || !lro_ctx)) {
398 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
399 			 "invalid input");
400 		return;
401 	}
402 
403 	lro_mgr = lro_ctx->lro_mgr;
404 	arr_base = lro_mgr->lro_arr;
405 	i = desc - arr_base;
406 
407 	if (unlikely(i >= QDF_LRO_DESC_POOL_SZ)) {
408 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
409 			 "invalid index %d", i);
410 		return;
411 	}
412 
413 	desc_info =  &lro_ctx->lro_desc_info;
414 	entry = &desc_info->lro_desc_pool.lro_desc_array[i];
415 
416 	list_del_init(&entry->lro_node);
417 
418 	list_add_tail(&entry->lro_node, &desc_info->
419 		 lro_desc_pool.lro_free_list_head);
420 }
421 
422 /**
423  * qdf_lro_flush() - LRO flush API
424  * @lro_ctx: LRO context
425  *
426  * Flush all the packets aggregated in the LRO manager for all
427  * the flows
428  *
429  * Return: none
430  */
431 void qdf_lro_flush(qdf_lro_ctx_t lro_ctx)
432 {
433 	struct net_lro_mgr *lro_mgr = lro_ctx->lro_mgr;
434 	int i;
435 
436 	for (i = 0; i < lro_mgr->max_desc; i++) {
437 		if (lro_mgr->lro_arr[i].active) {
438 			qdf_lro_desc_free(lro_ctx, &lro_mgr->lro_arr[i]);
439 			lro_flush_desc(lro_mgr, &lro_mgr->lro_arr[i]);
440 		}
441 	}
442 }
443 /**
444  * qdf_lro_get_desc() - LRO descriptor look-up function
445  * @iph: IP header
446  * @tcph: TCP header
447  * @lro_arr: Array of LRO descriptors
448  * @lro_mgr: LRO manager
449  *
450  * Looks-up the LRO descriptor for a given flow
451  *
452  * Return: LRO descriptor
453  */
454 static struct net_lro_desc *qdf_lro_get_desc(struct net_lro_mgr *lro_mgr,
455 	 struct net_lro_desc *lro_arr,
456 	 struct iphdr *iph,
457 	 struct tcphdr *tcph)
458 {
459 	int i;
460 
461 	for (i = 0; i < lro_mgr->max_desc; i++) {
462 		if (lro_arr[i].active)
463 			if (qdf_lro_tcp_flow_match(&lro_arr[i], iph, tcph))
464 				return &lro_arr[i];
465 	}
466 
467 	return NULL;
468 }
469 
470 /**
471  * qdf_lro_flush_pkt() - function to flush the LRO flow
472  * @info: LRO related information passed by the caller
473  * @lro_ctx: LRO context
474  *
475  * Flush all the packets aggregated in the LRO manager for the
476  * flow indicated by the TCP and IP header
477  *
478  * Return: none
479  */
480 void qdf_lro_flush_pkt(qdf_lro_ctx_t lro_ctx,
481 	 struct qdf_lro_info *info)
482 {
483 	struct net_lro_desc *lro_desc;
484 	struct net_lro_mgr *lro_mgr = lro_ctx->lro_mgr;
485 	struct iphdr *iph = (struct iphdr *) info->iph;
486 	struct tcphdr *tcph = (struct tcphdr *) info->tcph;
487 
488 	lro_desc = qdf_lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
489 
490 	if (lro_desc) {
491 		/* statistics */
492 		qdf_lro_desc_free(lro_ctx, lro_desc);
493 		lro_flush_desc(lro_mgr, lro_desc);
494 	}
495 }
496