xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_lro.c (revision 4865edfd190c086bbe2c69aae12a8226f877b91e)
1 /*
2  * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_lro.c
21  * QCA driver framework(QDF) Large Receive Offload
22  */
23 
24 #include <qdf_lro.h>
25 #include <qdf_trace.h>
26 #include <qdf_types.h>
27 
28 #include <linux/list.h>
29 #include <net/tcp.h>
30 
31 /**
32  * qdf_lro_desc_pool_init() - Initialize the free pool of LRO
33  * descriptors
34  * @lro_desc_pool: free pool of the LRO descriptors
35  * @lro_mgr: LRO manager
36  *
37  * Initialize a list that holds the free LRO descriptors
38  *
39  * Return: none
40  */
41 static void qdf_lro_desc_pool_init(struct qdf_lro_desc_pool *lro_desc_pool,
42 	 struct net_lro_mgr *lro_mgr)
43 {
44 	int i;
45 
46 	INIT_LIST_HEAD(&lro_desc_pool->lro_free_list_head);
47 
48 	for (i = 0; i < QDF_LRO_DESC_POOL_SZ; i++) {
49 		lro_desc_pool->lro_desc_array[i].lro_desc =
50 			 &lro_mgr->lro_arr[i];
51 		list_add_tail(&lro_desc_pool->lro_desc_array[i].lro_node,
52 			 &lro_desc_pool->lro_free_list_head);
53 	}
54 }
55 
56 /**
57  * qdf_lro_desc_info_init() - Initialize the LRO descriptors
58  * @qdf_info: QDF LRO data structure
59  *
60  * Initialize the free pool of LRO descriptors and the entries
61  * of the hash table
62  *
63  * Return: none
64  */
65 static void qdf_lro_desc_info_init(struct qdf_lro_s *qdf_info)
66 {
67 	int i;
68 
69 	/* Initialize pool of free LRO desc.*/
70 	qdf_lro_desc_pool_init(&qdf_info->lro_desc_info.lro_desc_pool,
71 		 qdf_info->lro_mgr);
72 
73 	/* Initialize the hash table of LRO desc.*/
74 	for (i = 0; i < QDF_LRO_DESC_TABLE_SZ; i++) {
75 		/* initialize the flows in the hash table */
76 		INIT_LIST_HEAD(&qdf_info->lro_desc_info.
77 			 lro_hash_table[i].lro_desc_list);
78 	}
79 
80 }
81 
82 /**
83  * qdf_lro_get_skb_header() - LRO callback function
84  * @skb: network buffer
85  * @ip_hdr: contains a pointer to the IP header
86  * @tcpudp_hdr: contains a pointer to the TCP header
87  * @hdr_flags: indicates if this is a TCP, IPV4 frame
88  * @priv: private driver specific opaque pointer
89  *
90  * Get the IP and TCP headers from the skb
91  *
92  * Return: 0 - success, < 0 - failure
93  */
94 static int qdf_lro_get_skb_header(struct sk_buff *skb, void **ip_hdr,
95 	void **tcpudp_hdr, u64 *hdr_flags, void *priv)
96 {
97 	if (QDF_NBUF_CB_RX_IPV6_PROTO(skb)) {
98 		hdr_flags = 0;
99 		return -EINVAL;
100 	}
101 
102 	*hdr_flags |= (LRO_IPV4 | LRO_TCP);
103 	(*ip_hdr) = skb->data;
104 	(*tcpudp_hdr) = skb->data + QDF_NBUF_CB_RX_TCP_OFFSET(skb);
105 	return 0;
106 }
107 
108 /**
109  * qdf_lro_init() - LRO initialization function
110  *
111  * Return: LRO context
112  */
113 qdf_lro_ctx_t qdf_lro_init(void)
114 {
115 	struct qdf_lro_s *lro_ctx;
116 	size_t lro_info_sz, lro_mgr_sz, desc_arr_sz, desc_pool_sz;
117 	size_t hash_table_sz;
118 	uint8_t *lro_mem_ptr;
119 
120 	/*
121 	* Allocate all the LRO data structures at once and then carve
122 	* them up as needed
123 	*/
124 	lro_info_sz = sizeof(struct qdf_lro_s);
125 	lro_mgr_sz = sizeof(struct net_lro_mgr);
126 	desc_arr_sz =
127 		 (QDF_LRO_DESC_POOL_SZ * sizeof(struct net_lro_desc));
128 	desc_pool_sz =
129 		 (QDF_LRO_DESC_POOL_SZ * sizeof(struct qdf_lro_desc_entry));
130 	hash_table_sz =
131 		 (sizeof(struct qdf_lro_desc_table) * QDF_LRO_DESC_TABLE_SZ);
132 
133 	lro_mem_ptr = qdf_mem_malloc(lro_info_sz + lro_mgr_sz + desc_arr_sz +
134 					desc_pool_sz + hash_table_sz);
135 
136 	if (unlikely(!lro_mem_ptr)) {
137 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
138 			 "Unable to allocate memory for LRO");
139 		return NULL;
140 	}
141 
142 	lro_ctx = (struct qdf_lro_s *)lro_mem_ptr;
143 	lro_mem_ptr += lro_info_sz;
144 	/* LRO manager */
145 	lro_ctx->lro_mgr = (struct net_lro_mgr *)lro_mem_ptr;
146 	lro_mem_ptr += lro_mgr_sz;
147 
148 	/* LRO decriptor array */
149 	lro_ctx->lro_mgr->lro_arr = (struct net_lro_desc *)lro_mem_ptr;
150 	lro_mem_ptr += desc_arr_sz;
151 
152 	/* LRO descriptor pool */
153 	lro_ctx->lro_desc_info.lro_desc_pool.lro_desc_array =
154 		 (struct qdf_lro_desc_entry *)lro_mem_ptr;
155 	lro_mem_ptr += desc_pool_sz;
156 
157 	/* hash table to store the LRO descriptors */
158 	lro_ctx->lro_desc_info.lro_hash_table =
159 		 (struct qdf_lro_desc_table *)lro_mem_ptr;
160 
161 	/* Initialize the LRO descriptors */
162 	qdf_lro_desc_info_init(lro_ctx);
163 
164 	/* LRO TODO - NAPI or RX thread */
165 	lro_ctx->lro_mgr->features |= LRO_F_NAPI;
166 
167 	lro_ctx->lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
168 	lro_ctx->lro_mgr->max_aggr = QDF_LRO_MAX_AGGR_SIZE;
169 	lro_ctx->lro_mgr->get_skb_header = qdf_lro_get_skb_header;
170 	lro_ctx->lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
171 	lro_ctx->lro_mgr->max_desc = QDF_LRO_DESC_POOL_SZ;
172 
173 	return lro_ctx;
174 }
175 
176 /**
177  * qdf_lro_deinit() - LRO deinitialization function
178  * @lro_ctx: LRO context
179  *
180  * Return: nothing
181  */
182 void qdf_lro_deinit(qdf_lro_ctx_t lro_ctx)
183 {
184 	if (likely(lro_ctx)) {
185 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
186 			 "LRO instance %pK is being freed", lro_ctx);
187 		qdf_mem_free(lro_ctx);
188 	}
189 }
190 
191 /**
192  * qdf_lro_tcp_flow_match() - function to check for a flow match
193  * @iph: IP header
194  * @tcph: TCP header
195  * @lro_desc: LRO decriptor
196  *
197  * Checks if the descriptor belongs to the same flow as the one
198  * indicated by the TCP and IP header.
199  *
200  * Return: true - flow match, false - flow does not match
201  */
202 static inline bool qdf_lro_tcp_flow_match(struct net_lro_desc *lro_desc,
203 	 struct iphdr *iph,
204 	 struct tcphdr *tcph)
205 {
206 	if ((lro_desc->tcph->source != tcph->source) ||
207 		 (lro_desc->tcph->dest != tcph->dest) ||
208 		 (lro_desc->iph->saddr != iph->saddr) ||
209 		 (lro_desc->iph->daddr != iph->daddr))
210 		return false;
211 
212 	return true;
213 
214 }
215 
216 /**
217  * qdf_lro_desc_find() - LRO descriptor look-up function
218  *
219  * @lro_ctx: LRO context
220  * @skb: network buffer
221  * @iph: IP header
222  * @tcph: TCP header
223  * @flow_hash: toeplitz hash
224  * @lro_desc: LRO descriptor to be returned
225  *
226  * Look-up the LRO descriptor in the hash table based on the
227  * flow ID toeplitz. If the flow is not found, allocates a new
228  * LRO descriptor and places it in the hash table
229  *
230  * Return: 0 - success, < 0 - failure
231  */
232 static int qdf_lro_desc_find(struct qdf_lro_s *lro_ctx,
233 	 struct sk_buff *skb, struct iphdr *iph, struct tcphdr *tcph,
234 	 uint32_t flow_hash, struct net_lro_desc **lro_desc)
235 {
236 	uint32_t i;
237 	struct qdf_lro_desc_table *lro_hash_table;
238 	struct list_head *ptr;
239 	struct qdf_lro_desc_entry *entry;
240 	struct qdf_lro_desc_pool *free_pool;
241 	struct qdf_lro_desc_info *desc_info = &lro_ctx->lro_desc_info;
242 
243 	*lro_desc = NULL;
244 	i = flow_hash & QDF_LRO_DESC_TABLE_SZ_MASK;
245 
246 	lro_hash_table = &desc_info->lro_hash_table[i];
247 
248 	if (unlikely(!lro_hash_table)) {
249 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
250 			 "Invalid hash entry");
251 		QDF_ASSERT(0);
252 		return -EINVAL;
253 	}
254 
255 	/* Check if this flow exists in the descriptor list */
256 	list_for_each(ptr, &lro_hash_table->lro_desc_list) {
257 		struct net_lro_desc *tmp_lro_desc = NULL;
258 
259 		entry = list_entry(ptr, struct qdf_lro_desc_entry, lro_node);
260 		tmp_lro_desc = entry->lro_desc;
261 			if (qdf_lro_tcp_flow_match(entry->lro_desc, iph, tcph)) {
262 				*lro_desc = entry->lro_desc;
263 				return 0;
264 			}
265 	}
266 
267 	/* no existing flow found, a new LRO desc needs to be allocated */
268 	free_pool = &lro_ctx->lro_desc_info.lro_desc_pool;
269 	entry = list_first_entry_or_null(
270 		 &free_pool->lro_free_list_head,
271 		 struct qdf_lro_desc_entry, lro_node);
272 	if (unlikely(!entry)) {
273 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
274 			 "Could not allocate LRO desc!");
275 		return -ENOMEM;
276 	}
277 
278 	list_del_init(&entry->lro_node);
279 
280 	if (unlikely(!entry->lro_desc)) {
281 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
282 			 "entry->lro_desc is NULL!");
283 		return -EINVAL;
284 	}
285 
286 	memset(entry->lro_desc, 0, sizeof(struct net_lro_desc));
287 
288 	/*
289 	 * lro_desc->active should be 0 and lro_desc->tcp_rcv_tsval
290 	 * should be 0 for newly allocated lro descriptors
291 	 */
292 	list_add_tail(&entry->lro_node,
293 		 &lro_hash_table->lro_desc_list);
294 
295 	*lro_desc = entry->lro_desc;
296 	return 0;
297 }
298 
/**
 *  qdf_lro_get_info() - Update the LRO information
 *
 * @lro_ctx: LRO context
 * @nbuf: network buffer
 * @info: LRO related information passed in by the caller
 * @plro_desc: lro information returned as output
 *
 * Look-up the LRO descriptor based on the LRO information and
 * the network buffer provided. Update the skb cb with the
 * descriptor found
 *
 * Return: true: LRO eligible false: LRO ineligible
 */
bool qdf_lro_get_info(qdf_lro_ctx_t lro_ctx, qdf_nbuf_t nbuf,
						 struct qdf_lro_info *info,
						 void **plro_desc)
{
	struct net_lro_desc *lro_desc;
	struct iphdr *iph;
	struct tcphdr *tcph;
	/* hardware marked the frame LRO-eligible and it is not a pure ACK */
	int hw_lro_eligible =
		 QDF_NBUF_CB_RX_LRO_ELIGIBLE(nbuf) &&
		 (!QDF_NBUF_CB_RX_TCP_PURE_ACK(nbuf));

	if (unlikely(!lro_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			 "Invalid LRO context");
		return false;
	}

	if (!hw_lro_eligible)
		return false;

	/* look up (or allocate) the descriptor for this flow, keyed by
	 * the hardware-computed flow hash stored in the nbuf cb */
	iph = (struct iphdr *)info->iph;
	tcph = (struct tcphdr *)info->tcph;
	if (0 != qdf_lro_desc_find(lro_ctx, nbuf, iph, tcph,
		 QDF_NBUF_CB_RX_FLOW_ID(nbuf),
		 (struct net_lro_desc **)plro_desc)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			 "finding the LRO desc failed");
		return false;
	}

	lro_desc = (struct net_lro_desc *)(*plro_desc);
	if (unlikely(!lro_desc)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			 "finding the LRO desc failed");
		return false;
	}

	/* if this is not the first skb, check the timestamp option */
	if (lro_desc->tcp_rcv_tsval) {
		/* doff == 8 means a 32-byte TCP header: 20 base bytes plus
		 * exactly the 12-byte NOP/NOP/TIMESTAMP option layout */
		if (tcph->doff == 8) {
			__be32 *topt = (__be32 *)(tcph + 1);

			/* any other option layout: accept without checking
			 * timestamps (cannot validate them here) */
			if (*topt != htonl((TCPOPT_NOP << 24)
				 |(TCPOPT_NOP << 16)
				 | (TCPOPT_TIMESTAMP << 8)
				 | TCPOLEN_TIMESTAMP))
				return true;

			/* timestamp should be in right order */
			topt++;
			if (after(ntohl(lro_desc->tcp_rcv_tsval),
					 ntohl(*topt)))
				return false;

			/* timestamp reply should not be zero */
			topt++;
			if (*topt == 0)
				return false;
		}
	}

	return true;
}
376 
377 /**
378  * qdf_lro_desc_free() - Free the LRO descriptor
379  * @desc: LRO descriptor
380  * @lro_ctx: LRO context
381  *
382  * Return the LRO descriptor to the free pool
383  *
384  * Return: none
385  */
386 void qdf_lro_desc_free(qdf_lro_ctx_t lro_ctx,
387 	 void *data)
388 {
389 	struct qdf_lro_desc_entry *entry;
390 	struct net_lro_mgr *lro_mgr;
391 	struct net_lro_desc *arr_base;
392 	struct qdf_lro_desc_info *desc_info;
393 	int i;
394 	struct net_lro_desc *desc = (struct net_lro_desc *)data;
395 
396 	qdf_assert(desc);
397 	qdf_assert(lro_ctx);
398 
399 	if (unlikely(!desc || !lro_ctx)) {
400 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
401 			 "invalid input");
402 		return;
403 	}
404 
405 	lro_mgr = lro_ctx->lro_mgr;
406 	arr_base = lro_mgr->lro_arr;
407 	i = desc - arr_base;
408 
409 	if (unlikely(i >= QDF_LRO_DESC_POOL_SZ)) {
410 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
411 			 "invalid index %d", i);
412 		return;
413 	}
414 
415 	desc_info =  &lro_ctx->lro_desc_info;
416 	entry = &desc_info->lro_desc_pool.lro_desc_array[i];
417 
418 	list_del_init(&entry->lro_node);
419 
420 	list_add_tail(&entry->lro_node, &desc_info->
421 		 lro_desc_pool.lro_free_list_head);
422 }
423 
424 /**
425  * qdf_lro_flush() - LRO flush API
426  * @lro_ctx: LRO context
427  *
428  * Flush all the packets aggregated in the LRO manager for all
429  * the flows
430  *
431  * Return: none
432  */
433 void qdf_lro_flush(qdf_lro_ctx_t lro_ctx)
434 {
435 	struct net_lro_mgr *lro_mgr = lro_ctx->lro_mgr;
436 	int i;
437 
438 	for (i = 0; i < lro_mgr->max_desc; i++) {
439 		if (lro_mgr->lro_arr[i].active) {
440 			qdf_lro_desc_free(lro_ctx, &lro_mgr->lro_arr[i]);
441 			lro_flush_desc(lro_mgr, &lro_mgr->lro_arr[i]);
442 		}
443 	}
444 }
445 /**
446  * qdf_lro_get_desc() - LRO descriptor look-up function
447  * @iph: IP header
448  * @tcph: TCP header
449  * @lro_arr: Array of LRO decriptors
450  * @lro_mgr: LRO manager
451  *
452  * Looks-up the LRO descriptor for a given flow
453  *
454  * Return: LRO descriptor
455  */
456 static struct net_lro_desc *qdf_lro_get_desc(struct net_lro_mgr *lro_mgr,
457 	 struct net_lro_desc *lro_arr,
458 	 struct iphdr *iph,
459 	 struct tcphdr *tcph)
460 {
461 	int i;
462 
463 	for (i = 0; i < lro_mgr->max_desc; i++) {
464 		if (lro_arr[i].active)
465 			if (qdf_lro_tcp_flow_match(&lro_arr[i], iph, tcph))
466 				return &lro_arr[i];
467 	}
468 
469 	return NULL;
470 }
471 
472 /**
473  * qdf_lro_flush_pkt() - function to flush the LRO flow
474  * @info: LRO related information passed by the caller
475  * @lro_ctx: LRO context
476  *
477  * Flush all the packets aggregated in the LRO manager for the
478  * flow indicated by the TCP and IP header
479  *
480  * Return: none
481  */
482 void qdf_lro_flush_pkt(qdf_lro_ctx_t lro_ctx,
483 	 struct qdf_lro_info *info)
484 {
485 	struct net_lro_desc *lro_desc;
486 	struct net_lro_mgr *lro_mgr = lro_ctx->lro_mgr;
487 	struct iphdr *iph = (struct iphdr *) info->iph;
488 	struct tcphdr *tcph = (struct tcphdr *) info->tcph;
489 
490 	lro_desc = qdf_lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
491 
492 	if (lro_desc) {
493 		/* statistics */
494 		qdf_lro_desc_free(lro_ctx, lro_desc);
495 		lro_flush_desc(lro_mgr, lro_desc);
496 	}
497 }
498