/* xref: /wlan-dirver/qcacld-3.0/core/dp/txrx/ol_tx_ll.c (revision e5679f29adc217242693121cc12db3ac733311de) */
/*
 * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_atomic.h>         /* qdf_atomic_inc, etc. */
#include <qdf_lock.h>           /* qdf_os_spinlock */
#include <qdf_time.h>           /* qdf_system_ticks, etc. */
#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <qdf_net_types.h>      /* QDF_NBUF_TX_EXT_TID_INVALID */

#include "queue.h"          /* TAILQ */
#ifdef QCA_COMPUTE_TX_DELAY
#include <enet.h>               /* ethernet_hdr_t, etc. */
#include <ipv6_defs.h>          /* ipv6_traffic_class */
#endif

#include <ol_txrx_api.h>        /* ol_txrx_vdev_handle, etc. */
#include <ol_htt_tx_api.h>      /* htt_tx_compl_desc_id */
#include <ol_txrx_htt_api.h>    /* htt_tx_status */

#include <ol_ctrl_txrx_api.h>
#include <cdp_txrx_tx_delay.h>
#include <ol_txrx_types.h>      /* ol_txrx_vdev_t, etc */
#include <ol_tx_desc.h>         /* ol_tx_desc_find, ol_tx_desc_frame_free */
#ifdef QCA_COMPUTE_TX_DELAY
#include <ol_tx_classify.h>     /* ol_tx_dest_addr_find */
#endif
#include <ol_txrx_internal.h>   /* OL_TX_DESC_NO_REFS, etc. */
#include <ol_osif_txrx_api.h>
#include <ol_tx.h>              /* ol_tx_reinject */
#include <ol_tx_send.h>

#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#include <ol_tx_sched.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
#endif
#include <ol_tx_queue.h>
#include <ol_txrx.h>
#include <pktlog_ac_fmt.h>
#include <cdp_txrx_handle.h>

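/**
 * ol_tx_init_pdev() - initialize the tx credit count for a pdev
 * @pdev: physical device instance to initialize
 *
 * Seed the host's shadow of the target's tx credit count with the
 * credit value configured for the target.
 *
 * Return: none
 */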
void ol_tx_init_pdev(ol_txrx_pdev_handle pdev)
{
	qdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
		       &pdev->target_tx_credit);
}

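/**
 * ol_tx_reinject() - reinject a frame into the tx path
 * @vdev: virtual device the frame is transmitted on
 * @msdu: frame to be reinjected
 * @peer_id: ID of the peer the frame is directed to
 *
 * Allocate a new tx descriptor for the frame, mark the HTT tx
 * descriptor as postponed, set the destination peer ID, and send the
 * frame to the target.
 *
 * Return: NULL if the frame was accepted, or the msdu if no tx
 *	   descriptor could be allocated
 */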
qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
			  qdf_nbuf_t msdu, uint16_t peer_id)
{
	struct ol_tx_desc_t *tx_desc = NULL;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
	msdu_info.peer = NULL;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;

	tx_desc = ol_tx_prepare_ll(vdev, msdu, &msdu_info);
	if (!tx_desc)
		return msdu;

	HTT_TX_DESC_POSTPONED_SET(*((uint32_t *)(tx_desc->htt_tx_desc)), true);

	htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);

	ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);

	return NULL;
}

/*
 * The TXRX module doesn't accept tx frames unless the target has
 * enough descriptors for them.
 * For LL, the TXRX descriptor pool is sized to match the target's
 * descriptor pool.  Hence, if the descriptor allocation in TXRX
 * succeeds, that guarantees that the target has room to accept
 * the new tx frame.
 */
struct ol_tx_desc_t *
ol_tx_prepare_ll(ol_txrx_vdev_handle vdev,
		 qdf_nbuf_t msdu,
		 struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	(msdu_info)->htt.info.frame_type = pdev->htt_pkt_type;
	tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info);
	if (qdf_unlikely(!tx_desc)) {
		/*
		 * If TSO packet, free associated
		 * remaining TSO segment descriptors
		 */
		if (qdf_nbuf_is_tso(msdu))
			ol_free_remaining_tso_segs(
					vdev, msdu_info, true);
		TXRX_STATS_MSDU_LIST_INCR(
				pdev, tx.dropped.host_reject, msdu);
		return NULL;
	}

	return tx_desc;
}

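/**
 * ol_tx_non_std_ll() - transmit a list of non-standard frames (LL path)
 * @vdev: virtual device transmitting the frames
 * @tx_spec: what non-standard handling to apply to the frames
 * @msdu_list: NULL-terminated list of frames to transmit
 *
 * Return: NULL if all frames were accepted by the tx path, else a
 *	   pointer to the first frame that could not be sent (the
 *	   remainder of the list is still linked behind it)
 */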
qdf_nbuf_t
ol_tx_non_std_ll(struct ol_txrx_vdev_t *vdev,
		 enum ol_tx_spec tx_spec,
		 qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;

	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc = NULL;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		msdu_info.tso_info.is_tso = 0;

		tx_desc = ol_tx_prepare_ll(vdev, msdu, &msdu_info);
		if (!tx_desc)
			return msdu;

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);

		if (tx_spec != OL_TX_SPEC_STD) {
			if (tx_spec & OL_TX_SPEC_NO_FREE) {
				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
			} else if (tx_spec & OL_TX_SPEC_TSO) {
				tx_desc->pkt_type = OL_TX_FRM_TSO;
			} else if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT) {
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_native_wifi,
						 sub_type);
			} else if (ol_txrx_tx_is_raw(tx_spec)) {
				/* different types of raw frames */
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_raw, sub_type);
			}
		}
		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
		msdu = next;
	}
	return NULL;            /* all MSDUs were accepted */
}

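/**
 * ol_tx_trace_pkt() - record a tx packet in the DP trace buffers
 * @skb: network buffer being transmitted
 * @msdu_id: tx descriptor ID assigned to the buffer
 * @vdev_id: ID of the transmitting virtual device
 * @op_mode: operating mode of the virtual device
 *
 * Return: none
 */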
void ol_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id, uint8_t vdev_id,
		     enum QDF_OPMODE op_mode)
{
	DPTRACE(qdf_dp_trace_ptr(skb,
				 QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(skb),
				 sizeof(qdf_nbuf_data(skb)),
				 msdu_id, vdev_id, 0,
				 op_mode));

	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID,
			     op_mode);

	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
				      QDF_DP_TRACE_TX_PACKET_RECORD,
				      msdu_id, QDF_TX));
}

#if defined(HELIUMPLUS)
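/**
 * ol_txrx_dump_frag_desc() - dump the fragment descriptor of a tx descriptor
 * @msg: label describing the calling context
 * @tx_desc: tx descriptor whose HTT and fragment descriptors are dumped
 *
 * Return: none
 */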
void ol_txrx_dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
{
	uint32_t                *frag_ptr_i_p;
	int                     i;

	ol_txrx_err("OL TX Descriptor 0x%pK msdu_id %d",
		    tx_desc, tx_desc->id);
	ol_txrx_err("HTT TX Descriptor vaddr: 0x%pK paddr: %pad",
		    tx_desc->htt_tx_desc, &tx_desc->htt_tx_desc_paddr);
	ol_txrx_err("Fragment Descriptor 0x%pK (paddr=%pad)",
		    tx_desc->htt_frag_desc, &tx_desc->htt_frag_desc_paddr);

	/*
	 * From htt_tx_desc_frag() it appears that tx_desc->htt_frag_desc
	 * is already dereferenceable (i.e. a virtual address).
	 */
	frag_ptr_i_p = tx_desc->htt_frag_desc;

	/* Dump 6 words of TSO flags */
	print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags:  ",
		       DUMP_PREFIX_NONE, 8, 4,
		       frag_ptr_i_p, 24, true);

	frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */

	i = 0;
	while (*frag_ptr_i_p) {
		print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr:  ",
			       DUMP_PREFIX_NONE, 8, 4,
			       frag_ptr_i_p, 8, true);
		i++;
		if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
			break;
		/* jump to next pointer - skip length */
		frag_ptr_i_p += 2;
	}
}
#endif /* HELIUMPLUS */

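/**
 * ol_txrx_mgmt_tx_desc_alloc() - allocate a tx descriptor for a management
 *				  frame
 * @pdev: physical device transmitting the frame
 * @vdev: virtual device transmitting the frame
 * @tx_mgmt_frm: management frame to be transmitted
 * @tx_msdu_info: meta data describing the frame
 *
 * Return: tx descriptor for the frame, or NULL if none could be allocated
 */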
struct ol_tx_desc_t *
ol_txrx_mgmt_tx_desc_alloc(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t tx_mgmt_frm,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_tx_desc_t *tx_desc;

	/* For LL, tx_comp_req is not used, so initialize it to 0 */
	tx_msdu_info->htt.action.tx_comp_req = 0;
	tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
	/* FIX THIS -
	 * The FW currently has trouble using the host's fragments table
	 * for management frames.  Until this is fixed, rather than
	 * specifying the fragment table to the FW, specify just the
	 * address of the initial fragment.
	 */
#if defined(HELIUMPLUS)
	/* ol_txrx_dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
	 *			  tx_desc);
	 */
#endif /* defined(HELIUMPLUS) */
	if (tx_desc) {
		/*
		 * Following the call to ol_tx_desc_ll, frag 0 is the
		 * HTT tx HW descriptor, and the frame payload is in
		 * frag 1.
		 */
		htt_tx_desc_frags_table_set(
				pdev->htt_pdev,
				tx_desc->htt_tx_desc,
				qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
				0, 0);
#if defined(HELIUMPLUS) && defined(HELIUMPLUS_DEBUG)
		ol_txrx_dump_frag_desc(
				"after htt_tx_desc_frags_table_set",
				tx_desc);
#endif /* defined(HELIUMPLUS) */
	}

	return tx_desc;
}

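/**
 * ol_txrx_mgmt_send_frame() - send a management frame to the target
 * @vdev: virtual device transmitting the frame
 * @tx_desc: tx descriptor previously allocated for the frame
 * @tx_mgmt_frm: management frame to be transmitted
 * @tx_msdu_info: meta data describing the frame
 * @chanfreq: channel frequency on which the frame should be sent
 *
 * Return: 0
 */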
int ol_txrx_mgmt_send_frame(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_mgmt_frm,
	struct ol_txrx_msdu_info_t *tx_msdu_info,
	uint16_t chanfreq)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
	QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
					QDF_NBUF_TX_PKT_MGMT_TRACK;
	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
			  htt_pkt_type_mgmt);

	return 0;
}

#if defined(FEATURE_TSO)
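/**
 * ol_free_remaining_tso_segs() - free the TSO segments held by an msdu_info
 * @vdev: virtual device that owns the frame
 * @msdu_info: meta data holding the TSO segment list
 * @is_tso_seg_mapping_done: true if the segments have already been DMA mapped
 *
 * If the segments have been mapped, each one is unmapped (and the common
 * num-seg element is freed along with the last segment) before being
 * returned to the free pool; otherwise the segments are simply returned
 * to the pool.
 *
 * Return: none
 */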
void ol_free_remaining_tso_segs(ol_txrx_vdev_handle vdev,
				struct ol_txrx_msdu_info_t *msdu_info,
				bool is_tso_seg_mapping_done)
{
	struct qdf_tso_seg_elem_t *next_seg;
	struct qdf_tso_seg_elem_t *free_seg = msdu_info->tso_info.curr_seg;
	struct ol_txrx_pdev_t *pdev;
	bool is_last_seg = false;

	if (qdf_unlikely(!vdev)) {
		ol_txrx_err("vdev is null");
		return;
	}

	pdev = vdev->pdev;
	if (qdf_unlikely(!pdev)) {
		ol_txrx_err("pdev is null");
		return;
	}

	/*
	 * The TSO segments are already mapped, therefore:
	 * 1. unmap the TSO segments,
	 * 2. free the TSO num-seg element when the last segment is freed, and
	 * 3. free the TSO segments.
	 */

	if (is_tso_seg_mapping_done) {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
				msdu_info->tso_info.tso_num_seg_list;

		if (qdf_unlikely(!tso_num_desc)) {
			ol_txrx_err("TSO common info is NULL!");
			return;
		}

		while (free_seg) {
			qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
			tso_num_desc->num_seg.tso_cmn_num_seg--;

			is_last_seg = (tso_num_desc->num_seg.tso_cmn_num_seg ==
				       0) ? true : false;
			qdf_nbuf_unmap_tso_segment(pdev->osdev, free_seg,
						   is_last_seg);
			qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);

			if (is_last_seg) {
				ol_tso_num_seg_free(pdev,
						    msdu_info->tso_info.
						    tso_num_seg_list);
				msdu_info->tso_info.tso_num_seg_list = NULL;
			}

			next_seg = free_seg->next;
			free_seg->force_free = 1;
			ol_tso_free_segment(pdev, free_seg);
			free_seg = next_seg;
		}
	} else {
		/*
		 * The TSO segments are not mapped, therefore
		 * only free the TSO segments.
		 */
		while (free_seg) {
			next_seg = free_seg->next;
			free_seg->force_free = 1;
			ol_tso_free_segment(pdev, free_seg);
			free_seg = next_seg;
		}
	}
}

/**
 * ol_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO
 * related information in the msdu_info meta data
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: 0 - success, >0 - error
 */
uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
			  qdf_nbuf_t msdu,
			  struct ol_txrx_msdu_info_t *msdu_info)
{
	msdu_info->tso_info.curr_seg = NULL;
	if (qdf_nbuf_is_tso(msdu)) {
		int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
		struct qdf_tso_num_seg_elem_t *tso_num_seg;

		msdu_info->tso_info.tso_num_seg_list = NULL;
		msdu_info->tso_info.tso_seg_list = NULL;
		msdu_info->tso_info.num_segs = num_seg;
		while (num_seg) {
			struct qdf_tso_seg_elem_t *tso_seg =
				ol_tso_alloc_segment(vdev->pdev);
			if (tso_seg) {
				qdf_tso_seg_dbg_record(tso_seg,
						       TSOSEG_LOC_PREPARETSO);
				tso_seg->next =
					msdu_info->tso_info.tso_seg_list;
				msdu_info->tso_info.tso_seg_list
					= tso_seg;
				num_seg--;
			} else {
				/* Free the TSO segments allocated so far */
				msdu_info->tso_info.curr_seg =
					msdu_info->tso_info.tso_seg_list;
				ol_free_remaining_tso_segs(vdev, msdu_info,
							   false);
				return 1;
			}
		}
		tso_num_seg = ol_tso_num_seg_alloc(vdev->pdev);
		if (tso_num_seg) {
			tso_num_seg->next = msdu_info->tso_info.
						tso_num_seg_list;
			msdu_info->tso_info.tso_num_seg_list = tso_num_seg;
		} else {
			/* Free the already allocated num of segments */
			msdu_info->tso_info.curr_seg =
				msdu_info->tso_info.tso_seg_list;
			ol_free_remaining_tso_segs(vdev, msdu_info, false);
			return 1;
		}

		if (qdf_unlikely(!qdf_nbuf_get_tso_info(vdev->pdev->osdev,
						msdu, &msdu_info->tso_info))) {
			/* Free the already allocated num of segments */
			msdu_info->tso_info.curr_seg =
				msdu_info->tso_info.tso_seg_list;
			ol_free_remaining_tso_segs(vdev, msdu_info, false);
			return 1;
		}

		msdu_info->tso_info.curr_seg =
			msdu_info->tso_info.tso_seg_list;
		num_seg = msdu_info->tso_info.num_segs;
	} else {
		msdu_info->tso_info.is_tso = 0;
		msdu_info->tso_info.num_segs = 1;
	}
	return 0;
}

/**
 * ol_tx_tso_update_stats() - update TSO stats
 * @pdev: pointer to ol_txrx_pdev_t structure
 * @tso_info: TSO info for the msdu
 * @msdu: TSO msdu for which stats are updated
 * @tso_msdu_idx: stats index in the global TSO stats array where stats will be
 *                updated
 *
 * Return: None
 */
void ol_tx_tso_update_stats(struct ol_txrx_pdev_t *pdev,
			    struct qdf_tso_info_t *tso_info, qdf_nbuf_t msdu,
			    uint32_t tso_msdu_idx)
{
	TXRX_STATS_TSO_HISTOGRAM(pdev, tso_info->num_segs);
	TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, tso_msdu_idx,
				       qdf_nbuf_tcp_tso_size(msdu));
	TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev,
					tso_msdu_idx, qdf_nbuf_len(msdu));
	TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, tso_msdu_idx,
					qdf_nbuf_get_nr_frags(msdu));
}

/**
 * ol_tx_tso_get_stats_idx() - retrieve global TSO stats index and increment it
 * @pdev: pointer to ol_txrx_pdev_t structure
 *
 * Retrieve the current value of the global index and increment it. This is
 * done with a spinlock held, as the global TSO stats may be accessed in
 * parallel by multiple TX streams.
 *
 * Return: The current value of TSO stats index.
 */
uint32_t ol_tx_tso_get_stats_idx(struct ol_txrx_pdev_t *pdev)
{
	uint32_t msdu_stats_idx = 0;

	qdf_spin_lock_bh(&pdev->stats.pub.tx.tso.tso_stats_lock);
	msdu_stats_idx = pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx;
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx++;
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx &=
					NUM_MAX_TSO_MSDUS_MASK;
	qdf_spin_unlock_bh(&pdev->stats.pub.tx.tso.tso_stats_lock);

	TXRX_STATS_TSO_RESET_MSDU(pdev, msdu_stats_idx);

	return msdu_stats_idx;
}

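/*
 * Illustrative pairing of the two helpers above (a usage sketch under
 * the assumption that the caller holds a populated msdu_info; not a
 * call site copied from this file): the TSO transmit path is expected
 * to reserve a history slot first and then record the msdu's TSO
 * properties in that slot, e.g.
 *
 *	uint32_t tso_msdu_idx = ol_tx_tso_get_stats_idx(pdev);
 *
 *	ol_tx_tso_update_stats(pdev, &msdu_info->tso_info, msdu,
 *			       tso_msdu_idx);
 */
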
/**
 * ol_tso_seg_list_init() - function to initialise the tso seg freelist
 * @pdev: the data physical device sending the data
 * @num_seg: number of segments to be initialized
 *
 * Return: none
 */
void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
{
	int i = 0;
	struct qdf_tso_seg_elem_t *c_element;

	/* Host should not allocate any c_element. */
	if (num_seg <= 0) {
		ol_txrx_err("Pool size passed is 0");
		QDF_BUG(0);
		pdev->tso_seg_pool.pool_size = i;
		qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
		return;
	}

	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
	pdev->tso_seg_pool.freelist = c_element;
	for (i = 0; i < (num_seg - 1); i++) {
		if (qdf_unlikely(!c_element)) {
			ol_txrx_err("c_element NULL for seg %d", i);
			QDF_BUG(0);
			pdev->tso_seg_pool.pool_size = i;
			pdev->tso_seg_pool.num_free = i;
			qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
			return;
		}
		/* set the freelist bit and magic cookie */
		c_element->on_freelist = 1;
		c_element->cookie = TSO_SEG_MAGIC_COOKIE;
#ifdef TSOSEG_DEBUG
		c_element->dbg.txdesc = NULL;
		qdf_atomic_init(&c_element->dbg.cur); /* history empty */
		qdf_tso_seg_dbg_record(c_element, TSOSEG_LOC_INIT1);
#endif /* TSOSEG_DEBUG */
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		c_element = c_element->next;
	}
	/*
	 * NULL check for the last c_element of the list or
	 * first c_element if num_seg is equal to 1.
	 */
	if (qdf_unlikely(!c_element)) {
		ol_txrx_err("c_element NULL for seg %d", i);
		QDF_BUG(0);
		pdev->tso_seg_pool.pool_size = i;
		pdev->tso_seg_pool.num_free = i;
		qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
		return;
	}
	c_element->on_freelist = 1;
	c_element->cookie = TSO_SEG_MAGIC_COOKIE;
#ifdef TSOSEG_DEBUG
	qdf_tso_seg_dbg_init(c_element);
	qdf_tso_seg_dbg_record(c_element, TSOSEG_LOC_INIT2);
#endif /* TSOSEG_DEBUG */
	c_element->next = NULL;
	pdev->tso_seg_pool.pool_size = num_seg;
	pdev->tso_seg_pool.num_free = num_seg;
	qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
}

/**
 * ol_tso_seg_list_deinit() - function to de-initialise the tso seg freelist
 * @pdev: the data physical device sending the data
 *
 * Return: none
 */
void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	/* pool size 0 implies that tso seg list is not initialised */
	if (!pdev->tso_seg_pool.freelist &&
	    pdev->tso_seg_pool.pool_size == 0)
		return;

	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	c_element = pdev->tso_seg_pool.freelist;
	i = pdev->tso_seg_pool.pool_size;

	pdev->tso_seg_pool.freelist = NULL;
	pdev->tso_seg_pool.num_free = 0;
	pdev->tso_seg_pool.pool_size = 0;

	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
	qdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);

	while (i-- > 0 && c_element) {
		temp = c_element->next;
		if (c_element->on_freelist != 1) {
			qdf_tso_seg_dbg_bug("seg already freed (double?)");
			return;
		} else if (c_element->cookie != TSO_SEG_MAGIC_COOKIE) {
			qdf_tso_seg_dbg_bug("seg cookie is bad (corruption?)");
			return;
		}
		/* free this seg, so reset the cookie value */
		c_element->cookie = 0;
		qdf_mem_free(c_element);
		c_element = temp;
	}
}

/**
 * ol_tso_num_seg_list_init() - function to initialise the freelist of elements
 *				used to count the number of TSO segments in a
 *				jumbo skb packet
 * @pdev: the data physical device sending the data
 * @num_seg: number of elements to be initialized
 *
 * Return: none
 */
void ol_tso_num_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
{
	int i = 0;
	struct qdf_tso_num_seg_elem_t *c_element;

	/* Host should not allocate any c_element. */
	if (num_seg <= 0) {
		ol_txrx_err("Pool size passed is 0");
		QDF_BUG(0);
		pdev->tso_num_seg_pool.num_seg_pool_size = i;
		qdf_spinlock_create(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
		return;
	}

	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
	pdev->tso_num_seg_pool.freelist = c_element;
	for (i = 0; i < (num_seg - 1); i++) {
		if (qdf_unlikely(!c_element)) {
			ol_txrx_err("c_element NULL for num of seg %d", i);
			QDF_BUG(0);
			pdev->tso_num_seg_pool.num_seg_pool_size = i;
			pdev->tso_num_seg_pool.num_free = i;
			qdf_spinlock_create(&pdev->tso_num_seg_pool.
							tso_num_seg_mutex);
			return;
		}
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
		c_element = c_element->next;
	}
	/*
	 * NULL check for the last c_element of the list or
	 * first c_element if num_seg is equal to 1.
	 */
	if (qdf_unlikely(!c_element)) {
		ol_txrx_err("c_element NULL for num of seg %d", i);
		QDF_BUG(0);
		pdev->tso_num_seg_pool.num_seg_pool_size = i;
		pdev->tso_num_seg_pool.num_free = i;
		qdf_spinlock_create(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
		return;
	}
	c_element->next = NULL;
	pdev->tso_num_seg_pool.num_seg_pool_size = num_seg;
	pdev->tso_num_seg_pool.num_free = num_seg;
	qdf_spinlock_create(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
}

/**
 * ol_tso_num_seg_list_deinit() - function to de-initialise the freelist of
 *				  elements used to count the number of TSO
 *				  segments in a jumbo skb packet
 * @pdev: the data physical device sending the data
 *
 * Return: none
 */
void ol_tso_num_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	/* pool size 0 implies that tso num seg list is not initialised */
	if (!pdev->tso_num_seg_pool.freelist &&
	    pdev->tso_num_seg_pool.num_seg_pool_size == 0)
		return;

	qdf_spin_lock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
	c_element = pdev->tso_num_seg_pool.freelist;
	i = pdev->tso_num_seg_pool.num_seg_pool_size;

	pdev->tso_num_seg_pool.freelist = NULL;
	pdev->tso_num_seg_pool.num_free = 0;
	pdev->tso_num_seg_pool.num_seg_pool_size = 0;

	qdf_spin_unlock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
	qdf_spinlock_destroy(&pdev->tso_num_seg_pool.tso_num_seg_mutex);

	while (i-- > 0 && c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}
}
#endif /* FEATURE_TSO */

#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
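/**
 * ol_txrx_tso_stats_init() - create the lock that protects the TSO stats
 * @pdev: physical device for which TSO stats are collected
 *
 * Return: none
 */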
void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
{
	qdf_spinlock_create(&pdev->stats.pub.tx.tso.tso_stats_lock);
}

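/**
 * ol_txrx_tso_stats_deinit() - destroy the lock that protects the TSO stats
 * @pdev: physical device for which TSO stats were collected
 *
 * Return: none
 */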
void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
{
	qdf_spinlock_destroy(&pdev->stats.pub.tx.tso.tso_stats_lock);
}

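/**
 * ol_txrx_stats_display_tso() - print the accumulated TSO statistics
 * @pdev: physical device whose TSO stats are displayed
 *
 * Dump the TSO packet and byte counters, the segment-count histogram,
 * and the per-msdu TSO history buffer to the log.
 *
 * Return: none
 */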
void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
	int msdu_idx;
	int seg_idx;

	txrx_nofl_info("TSO Statistics:");
	txrx_nofl_info("TSO pkts %lld, bytes %lld",
		       pdev->stats.pub.tx.tso.tso_pkts.pkts,
		       pdev->stats.pub.tx.tso.tso_pkts.bytes);

	txrx_nofl_info("TSO Histogram for numbers of segments:\n"
		       "Single segment	%d\n"
		       "  2-5 segments	%d\n"
		       " 6-10 segments	%d\n"
		       "11-15 segments	%d\n"
		       "16-20 segments	%d\n"
		       "  20+ segments	%d\n",
		       pdev->stats.pub.tx.tso.tso_hist.pkts_1,
		       pdev->stats.pub.tx.tso.tso_hist.pkts_2_5,
		       pdev->stats.pub.tx.tso.tso_hist.pkts_6_10,
		       pdev->stats.pub.tx.tso.tso_hist.pkts_11_15,
		       pdev->stats.pub.tx.tso.tso_hist.pkts_16_20,
		       pdev->stats.pub.tx.tso.tso_hist.pkts_20_plus);

	txrx_nofl_info("TSO History Buffer: Total size %d, current_index %d",
		       NUM_MAX_TSO_MSDUS,
		       TXRX_STATS_TSO_MSDU_IDX(pdev));

	for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
		if (TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx) == 0)
			continue;
		txrx_nofl_info("jumbo pkt idx: %d num segs %d gso_len %d total_len %d nr_frags %d",
			       msdu_idx,
			       TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx),
			       TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, msdu_idx),
			       TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx),
			       TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, msdu_idx));

		for (seg_idx = 0;
			 ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev,
			   msdu_idx)) && (seg_idx < NUM_MAX_TSO_SEGS));
			 seg_idx++) {
			struct qdf_tso_seg_t tso_seg =
				 TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);

			txrx_nofl_info("seg idx: %d", seg_idx);
			txrx_nofl_info("tso_enable: %d",
				       tso_seg.tso_flags.tso_enable);
			txrx_nofl_info("fin %d syn %d rst %d psh %d ack %d urg %d ece %d cwr %d ns %d",
				       tso_seg.tso_flags.fin,
				       tso_seg.tso_flags.syn,
				       tso_seg.tso_flags.rst,
				       tso_seg.tso_flags.psh,
				       tso_seg.tso_flags.ack,
				       tso_seg.tso_flags.urg,
				       tso_seg.tso_flags.ece,
				       tso_seg.tso_flags.cwr,
				       tso_seg.tso_flags.ns);
			txrx_nofl_info("tcp_seq_num: 0x%x ip_id: %d",
				       tso_seg.tso_flags.tcp_seq_num,
				       tso_seg.tso_flags.ip_id);
		}
	}
}

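/**
 * ol_txrx_tso_stats_clear() - reset the accumulated TSO statistics
 * @pdev: physical device whose TSO stats are cleared
 *
 * Return: none
 */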
void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev)
{
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_pkts,
		     sizeof(struct ol_txrx_stats_elem));
#if defined(FEATURE_TSO)
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_info,
		     sizeof(struct ol_txrx_stats_tso_info));
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_hist,
		     sizeof(struct ol_txrx_tso_histogram));
#endif
}
#endif /* defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG) */