xref: /wlan-dirver/qcacld-3.0/core/dp/txrx/ol_tx_desc.c (revision bf14ba81a9dde77532035d124922099fe95cd35d)
1 /*
2  * Copyright (c) 2011, 2014-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_net_types.h>      /* QDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */
20 #include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
21 #include <qdf_util.h>           /* qdf_assert */
22 #include <qdf_lock.h>           /* qdf_spinlock */
23 #include <qdf_trace.h>          /* qdf_tso_seg_dbg stuff */
24 #ifdef QCA_COMPUTE_TX_DELAY
25 #include <qdf_time.h>           /* qdf_system_ticks */
26 #endif
27 
28 #include <ol_htt_tx_api.h>      /* htt_tx_desc_id */
29 
30 #include <ol_tx_desc.h>
31 #include <ol_txrx_internal.h>
32 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
33 #include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
34 #endif
35 #include <ol_txrx.h>
36 
37 #ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
38 static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
39 					struct ol_tx_desc_t *tx_desc)
40 {
41 	if (tx_desc->pkt_type != ol_tx_frm_freed) {
42 		ol_txrx_err("Potential tx_desc corruption pkt_type:0x%x pdev:0x%pK",
43 			    tx_desc->pkt_type, pdev);
44 		qdf_assert(0);
45 	}
46 }
47 static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
48 {
49 	tx_desc->pkt_type = ol_tx_frm_freed;
50 }
51 #ifdef QCA_COMPUTE_TX_DELAY
52 static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
53 {
54 	if (tx_desc->entry_timestamp_ticks != 0xffffffff) {
55 		ol_txrx_err("Timestamp:0x%x", tx_desc->entry_timestamp_ticks);
56 		qdf_assert(0);
57 	}
58 	tx_desc->entry_timestamp_ticks = qdf_system_ticks();
59 }
60 static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
61 {
62 	tx_desc->entry_timestamp_ticks = 0xffffffff;
63 }
64 #endif
65 #else
66 static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
67 						struct ol_tx_desc_t *tx_desc)
68 {
69 }
70 static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
71 {
72 }
73 static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
74 {
75 }
76 static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
77 {
78 }
79 #endif
80 
81 #ifdef DESC_TIMESTAMP_DEBUG_INFO
82 static inline void ol_tx_desc_update_tx_ts(struct ol_tx_desc_t *tx_desc)
83 {
84 	tx_desc->desc_debug_info.prev_tx_ts = tx_desc
85 						->desc_debug_info.curr_tx_ts;
86 	tx_desc->desc_debug_info.curr_tx_ts = qdf_get_log_timestamp();
87 }
88 #else
89 static inline void ol_tx_desc_update_tx_ts(struct ol_tx_desc_t *tx_desc)
90 {
91 }
92 #endif
93 
94 /**
95  * ol_tx_desc_vdev_update() - assign vdev to the tx descriptor.
96  * @tx_desc: tx descriptor pointer
97  * @vdev: vdev handle
98  *
99  * Return: None
100  */
101 static inline void
102 ol_tx_desc_vdev_update(struct ol_tx_desc_t *tx_desc,
103 		       struct ol_txrx_vdev_t *vdev)
104 {
105 	tx_desc->vdev = vdev;
106 	tx_desc->vdev_id = vdev->vdev_id;
107 }
108 
109 #ifdef QCA_HL_NETDEV_FLOW_CONTROL
110 
111 /**
112  * ol_tx_desc_count_inc() - increment the vdev tx desc count on allocation.
113  * @vdev: vdev handle
114  *
115  * Return: None
116  */
117 static inline void
118 ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
119 {
120 	qdf_atomic_inc(&vdev->tx_desc_count);
121 }
122 #else
123 
124 static inline void
125 ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
126 {
127 }
128 
129 #endif
130 
131 #ifndef QCA_LL_TX_FLOW_CONTROL_V2
132 #ifdef QCA_LL_PDEV_TX_FLOW_CONTROL
133 /**
134  * ol_tx_do_pdev_flow_control_pause() - pause queues when stop_th is reached.
135  * @pdev: pdev handle
136  *
137  * Return: void
138  */
139 static void ol_tx_do_pdev_flow_control_pause(struct ol_txrx_pdev_t *pdev)
140 {
141 	struct ol_txrx_vdev_t *vdev;
142 
143 	if (qdf_unlikely(pdev->tx_desc.num_free <
144 				pdev->tx_desc.stop_th &&
145 			pdev->tx_desc.num_free >=
146 			 pdev->tx_desc.stop_priority_th &&
147 			pdev->tx_desc.status ==
148 			 FLOW_POOL_ACTIVE_UNPAUSED)) {
149 		pdev->tx_desc.status = FLOW_POOL_NON_PRIO_PAUSED;
150 		/* pause network NON PRIORITY queues */
151 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
152 			pdev->pause_cb(vdev->vdev_id,
153 				       WLAN_STOP_NON_PRIORITY_QUEUE,
154 				       WLAN_DATA_FLOW_CONTROL);
155 		}
156 	} else if (qdf_unlikely((pdev->tx_desc.num_free <
157 				 pdev->tx_desc.stop_priority_th) &&
158 			pdev->tx_desc.status ==
159 			FLOW_POOL_NON_PRIO_PAUSED)) {
160 		pdev->tx_desc.status = FLOW_POOL_ACTIVE_PAUSED;
161 		/* pause priority queue */
162 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
163 			pdev->pause_cb(vdev->vdev_id,
164 				       WLAN_NETIF_PRIORITY_QUEUE_OFF,
165 				       WLAN_DATA_FLOW_CONTROL_PRIORITY);
166 		}
167 	}
168 }
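/*
 * State-machine sketch (derived from the code above and below; not part of
 * the driver logic): the pdev-level flow control walks through
 *
 *	FLOW_POOL_ACTIVE_UNPAUSED --(num_free < stop_th)--> FLOW_POOL_NON_PRIO_PAUSED
 *	FLOW_POOL_NON_PRIO_PAUSED --(num_free < stop_priority_th)--> FLOW_POOL_ACTIVE_PAUSED
 *	FLOW_POOL_ACTIVE_PAUSED   --(num_free > start_priority_th)--> FLOW_POOL_NON_PRIO_PAUSED
 *	FLOW_POOL_NON_PRIO_PAUSED --(num_free > start_th)--> FLOW_POOL_ACTIVE_UNPAUSED
 *
 * so the start_* thresholds are expected to sit above their stop_*
 * counterparts to provide hysteresis.
 */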
169 
170 /**
171  * ol_tx_do_pdev_flow_control_unpause() - unpause queues once start_th is restored.
172  * @pdev: pdev handle
173  *
174  * Return: void
175  */
176 static void ol_tx_do_pdev_flow_control_unpause(struct ol_txrx_pdev_t *pdev)
177 {
178 	struct ol_txrx_vdev_t *vdev;
179 
180 	switch (pdev->tx_desc.status) {
181 	case FLOW_POOL_ACTIVE_PAUSED:
182 		if (pdev->tx_desc.num_free >
183 		    pdev->tx_desc.start_priority_th) {
184 			/* unpause priority queue */
185 			TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
186 				pdev->pause_cb(vdev->vdev_id,
187 				       WLAN_NETIF_PRIORITY_QUEUE_ON,
188 				       WLAN_DATA_FLOW_CONTROL_PRIORITY);
189 			}
190 			pdev->tx_desc.status = FLOW_POOL_NON_PRIO_PAUSED;
191 		}
192 		break;
193 	case FLOW_POOL_NON_PRIO_PAUSED:
194 		if (pdev->tx_desc.num_free > pdev->tx_desc.start_th) {
195 			TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
196 				pdev->pause_cb(vdev->vdev_id,
197 					       WLAN_WAKE_NON_PRIORITY_QUEUE,
198 					       WLAN_DATA_FLOW_CONTROL);
199 			}
200 			pdev->tx_desc.status = FLOW_POOL_ACTIVE_UNPAUSED;
201 		}
202 		break;
203 	case FLOW_POOL_INVALID:
204 		if (pdev->tx_desc.num_free == pdev->tx_desc.pool_size)
205 			ol_txrx_err("pool is INVALID State!!");
206 		break;
207 	case FLOW_POOL_ACTIVE_UNPAUSED:
208 		break;
209 	default:
210 		ol_txrx_err("pool is INACTIVE State!!\n");
211 		break;
212 	};
213 }
214 #else
215 static inline void
216 ol_tx_do_pdev_flow_control_pause(struct ol_txrx_pdev_t *pdev)
217 {
218 }
219 
220 static inline void
221 ol_tx_do_pdev_flow_control_unpause(struct ol_txrx_pdev_t *pdev)
222 {
223 }
224 #endif
225 /**
226  * ol_tx_desc_alloc() - allocate descriptor from freelist
227  * @pdev: pdev handle
228  * @vdev: vdev handle
229  *
230  * Return: tx descriptor pointer, or NULL in case of error
231  */
232 static
233 struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
234 					     struct ol_txrx_vdev_t *vdev)
235 {
236 	struct ol_tx_desc_t *tx_desc = NULL;
237 
238 	qdf_spin_lock_bh(&pdev->tx_mutex);
239 	if (pdev->tx_desc.freelist) {
240 		tx_desc = ol_tx_get_desc_global_pool(pdev);
241 		if (!tx_desc) {
242 			qdf_spin_unlock_bh(&pdev->tx_mutex);
243 			return NULL;
244 		}
245 		ol_tx_desc_dup_detect_set(pdev, tx_desc);
246 		ol_tx_do_pdev_flow_control_pause(pdev);
247 		ol_tx_desc_sanity_checks(pdev, tx_desc);
248 		ol_tx_desc_compute_delay(tx_desc);
249 		ol_tx_desc_vdev_update(tx_desc, vdev);
250 		ol_tx_desc_count_inc(vdev);
251 		ol_tx_desc_update_tx_ts(tx_desc);
252 		qdf_atomic_inc(&tx_desc->ref_cnt);
253 	}
254 	qdf_spin_unlock_bh(&pdev->tx_mutex);
255 	return tx_desc;
256 }
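/*
 * Usage sketch (illustrative only, hypothetical caller within this file):
 * a legacy/global-pool transmit path pairs this allocation with the
 * ol_tx_desc_free() defined later in this file, e.g.
 *
 *	struct ol_tx_desc_t *tx_desc = ol_tx_desc_alloc(pdev, vdev);
 *
 *	if (!tx_desc)
 *		return NULL;		// global freelist exhausted
 *	// ... attach the netbuf and initialize the HTT descriptor ...
 *	ol_tx_desc_free(pdev, tx_desc);	// on error, return it to the pool
 */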
257 
258 /**
259  * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
260  * @pdev: pdev handle
261  * @vdev: vdev handle
262  * @msdu_info: msdu info
263  *
264  * Return: tx descriptor or NULL
265  */
266 struct ol_tx_desc_t *
267 ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
268 			 struct ol_txrx_vdev_t *vdev,
269 			 struct ol_txrx_msdu_info_t *msdu_info)
270 {
271 	return ol_tx_desc_alloc(pdev, vdev);
272 }
273 
274 #else
275 /**
276  * ol_tx_desc_alloc() - allocate tx descriptor
277  * @pdev: pdev handle
278  * @vdev: vdev handle
279  * @pool: flow pool
280  *
281  * Return: tx descriptor or NULL
282  */
283 static
284 struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
285 				      struct ol_txrx_vdev_t *vdev,
286 				      struct ol_tx_flow_pool_t *pool)
287 {
288 	struct ol_tx_desc_t *tx_desc = NULL;
289 
290 	if (!pool) {
291 		pdev->pool_stats.pkt_drop_no_pool++;
292 		goto end;
293 	}
294 
295 	qdf_spin_lock_bh(&pool->flow_pool_lock);
296 	if (pool->avail_desc) {
297 		tx_desc = ol_tx_get_desc_flow_pool(pool);
298 		ol_tx_desc_dup_detect_set(pdev, tx_desc);
299 		if (qdf_unlikely(pool->avail_desc < pool->stop_th &&
300 				(pool->avail_desc >= pool->stop_priority_th) &&
301 				(pool->status == FLOW_POOL_ACTIVE_UNPAUSED))) {
302 			pool->status = FLOW_POOL_NON_PRIO_PAUSED;
303 			/* pause network NON PRIORITY queues */
304 			pdev->pause_cb(vdev->vdev_id,
305 				       WLAN_STOP_NON_PRIORITY_QUEUE,
306 				       WLAN_DATA_FLOW_CONTROL);
307 		} else if (qdf_unlikely((pool->avail_desc <
308 						pool->stop_priority_th) &&
309 				pool->status == FLOW_POOL_NON_PRIO_PAUSED)) {
310 			pool->status = FLOW_POOL_ACTIVE_PAUSED;
311 			/* pause priority queue */
312 			pdev->pause_cb(vdev->vdev_id,
313 				       WLAN_NETIF_PRIORITY_QUEUE_OFF,
314 				       WLAN_DATA_FLOW_CONTROL_PRIORITY);
315 		}
316 
317 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
318 
319 		ol_tx_desc_sanity_checks(pdev, tx_desc);
320 		ol_tx_desc_compute_delay(tx_desc);
321 		ol_tx_desc_update_tx_ts(tx_desc);
322 		ol_tx_desc_vdev_update(tx_desc, vdev);
323 		qdf_atomic_inc(&tx_desc->ref_cnt);
324 	} else {
325 		pool->pkt_drop_no_desc++;
326 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
327 	}
328 
329 end:
330 	return tx_desc;
331 }
332 
333 /**
334  * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
335  * @pdev: pdev handle
336  * @vdev: vdev handle
337  * @msdu_info: msdu info
338  *
339  * Return: tx descriptor or NULL
340  */
341 #ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
342 struct ol_tx_desc_t *
343 ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
344 			 struct ol_txrx_vdev_t *vdev,
345 			 struct ol_txrx_msdu_info_t *msdu_info)
346 {
347 	if (qdf_unlikely(msdu_info->htt.info.frame_type == htt_pkt_type_mgmt))
348 		return ol_tx_desc_alloc(pdev, vdev, pdev->mgmt_pool);
349 	else
350 		return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
351 }
352 #else
353 struct ol_tx_desc_t *
354 ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
355 			 struct ol_txrx_vdev_t *vdev,
356 			 struct ol_txrx_msdu_info_t *msdu_info)
357 {
358 	return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
359 }
360 #endif
361 #endif
362 
363 /**
364  * ol_tx_desc_alloc_hl() - allocate tx descriptor
365  * @pdev: pdev handle
366  * @vdev: vdev handle
367  * @msdu_info: tx msdu info
368  *
369  * Return: tx descriptor pointer, or NULL in case of error
370  */
371 static struct ol_tx_desc_t *
372 ol_tx_desc_alloc_hl(struct ol_txrx_pdev_t *pdev,
373 		    struct ol_txrx_vdev_t *vdev,
374 		    struct ol_txrx_msdu_info_t *msdu_info)
375 {
376 	struct ol_tx_desc_t *tx_desc;
377 
378 	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
379 	if (!tx_desc)
380 		return NULL;
381 
382 	qdf_atomic_dec(&pdev->tx_queue.rsrc_cnt);
383 
384 	return tx_desc;
385 }
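/*
 * Usage sketch (illustrative, hypothetical HL caller): the qdf_atomic_dec()
 * above charges the descriptor against the HL tx queue resource count, so a
 * caller treats a NULL return as a resource failure and keeps ownership of
 * the nbuf, e.g.
 *
 *	tx_desc = ol_tx_desc_alloc_hl(pdev, vdev, &msdu_info);
 *	if (!tx_desc)
 *		return msdu;	// nbuf is not consumed on failure
 */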
386 
387 #ifdef QCA_HL_NETDEV_FLOW_CONTROL
388 
389 /**
390  * ol_tx_desc_vdev_rm() - decrement the tx desc count for vdev.
391  * @tx_desc: tx desc
392  *
393  * Return: None
394  */
395 static inline void
396 ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
397 {
398 	/*
399 	 * In module exit context, vdev handle could be destroyed but still
400 	 * we need to free pending completion tx_desc.
401 	 */
402 	if (!tx_desc || !tx_desc->vdev)
403 		return;
404 
405 	qdf_atomic_dec(&tx_desc->vdev->tx_desc_count);
406 	tx_desc->vdev = NULL;
407 }
408 #else
409 
410 static inline void
411 ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
412 {
413 }
414 #endif
415 
416 #ifdef FEATURE_TSO
417 /**
418  * ol_tso_unmap_tso_segment() - Unmap TSO segment
419  * @pdev: pointer to ol_txrx_pdev_t structure
420  * @tx_desc: pointer to ol_tx_desc_t containing the TSO segment
421  *
422  * Unmap the TSO segment (frag[1]). If it is the last TSO segment
423  * corresponding to the nbuf, also unmap the EIT header (frag[0]).
424  *
425  * Return: None
426  */
427 static void ol_tso_unmap_tso_segment(struct ol_txrx_pdev_t *pdev,
428 						struct ol_tx_desc_t *tx_desc)
429 {
430 	bool is_last_seg = false;
431 	struct qdf_tso_num_seg_elem_t *tso_num_desc = NULL;
432 
433 	if (qdf_unlikely(!tx_desc->tso_desc)) {
434 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
435 			  "%s %d TSO desc is NULL!",
436 			  __func__, __LINE__);
437 		qdf_assert(0);
438 		return;
439 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
440 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
441 			  "%s %d TSO common info is NULL!",
442 			  __func__, __LINE__);
443 		qdf_assert(0);
444 		return;
445 	}
446 
447 	tso_num_desc = tx_desc->tso_num_desc;
448 
449 	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
450 
451 	tso_num_desc->num_seg.tso_cmn_num_seg--;
452 	is_last_seg = (tso_num_desc->num_seg.tso_cmn_num_seg == 0) ?
453 								true : false;
454 	qdf_nbuf_unmap_tso_segment(pdev->osdev, tx_desc->tso_desc, is_last_seg);
455 
456 	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
457 
458 }
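/*
 * Worked example (illustrative): if an nbuf was split into three TSO
 * segments, tso_cmn_num_seg starts at 3 and successive unmap calls see
 *
 *	3 -> 2	is_last_seg = false, unmap the segment's frag[1] only
 *	2 -> 1	is_last_seg = false, unmap the segment's frag[1] only
 *	1 -> 0	is_last_seg = true,  also unmap the shared EIT header (frag[0])
 */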
459 
460 /**
461  * ol_tx_tso_desc_free() - add the TSO segment elements back to the freelist
462  * @pdev: pointer to ol_txrx_pdev_t structure
463  * @tx_desc: pointer to ol_tx_desc_t containing the TSO segment
464  *
465  * Add the qdf_tso_seg_elem_t corresponding to the TSO seg back to the freelist.
466  * If it is the last segment of the jumbo skb, also add the
467  * qdf_tso_num_seg_elem_t to the free list.
468  *
469  * Return: None
470  */
471 static void ol_tx_tso_desc_free(struct ol_txrx_pdev_t *pdev,
472 				struct ol_tx_desc_t *tx_desc)
473 {
474 	bool is_last_seg;
475 	struct qdf_tso_num_seg_elem_t *tso_num_desc = tx_desc->tso_num_desc;
476 
477 	is_last_seg = (tso_num_desc->num_seg.tso_cmn_num_seg == 0) ?
478 								true : false;
479 	if (is_last_seg) {
480 		ol_tso_num_seg_free(pdev, tx_desc->tso_num_desc);
481 		tx_desc->tso_num_desc = NULL;
482 	}
483 
484 	ol_tso_free_segment(pdev, tx_desc->tso_desc);
485 	tx_desc->tso_desc = NULL;
486 }
487 
488 #else
489 static inline void ol_tx_tso_desc_free(struct ol_txrx_pdev_t *pdev,
490 				       struct ol_tx_desc_t *tx_desc)
491 {
492 }
493 
494 static inline void ol_tso_unmap_tso_segment(
495 					struct ol_txrx_pdev_t *pdev,
496 					struct ol_tx_desc_t *tx_desc)
497 {
498 }
499 #endif
500 
501 /**
502  * ol_tx_desc_free_common() - common steps to free a tx_desc for all flow control versions
503  * @pdev: pdev handle
504  * @tx_desc: tx descriptor
505  *
506  * Common steps needed by QCA_LL_TX_FLOW_CONTROL_V2 and older versions of
507  * flow control. Must be called with the owning descriptor-pool lock held.
508  *
509  * Return: None
510  */
511 static void ol_tx_desc_free_common(struct ol_txrx_pdev_t *pdev,
512 						struct ol_tx_desc_t *tx_desc)
513 {
514 	ol_tx_desc_dup_detect_reset(pdev, tx_desc);
515 
516 	if (tx_desc->pkt_type == OL_TX_FRM_TSO)
517 		ol_tx_tso_desc_free(pdev, tx_desc);
518 
519 	ol_tx_desc_reset_pkt_type(tx_desc);
520 	ol_tx_desc_reset_timestamp(tx_desc);
521 	/* clear the ref cnt */
522 	qdf_atomic_init(&tx_desc->ref_cnt);
523 	tx_desc->vdev_id = OL_TXRX_INVALID_VDEV_ID;
524 }
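/*
 * Locking sketch (mirrors the callers below; not new behavior): both
 * ol_tx_desc_free() variants invoke this helper with their descriptor-pool
 * lock held, e.g.
 *
 *	qdf_spin_lock_bh(&pdev->tx_mutex);	// legacy/global pool variant
 *	ol_tx_desc_free_common(pdev, tx_desc);
 *	qdf_spin_unlock_bh(&pdev->tx_mutex);
 *
 * or, with QCA_LL_TX_FLOW_CONTROL_V2, under pool->flow_pool_lock.
 */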
525 
526 #ifndef QCA_LL_TX_FLOW_CONTROL_V2
527 /**
528  * ol_tx_desc_free() - put descriptor to freelist
529  * @pdev: pdev handle
530  * @tx_desc: tx descriptor
531  *
532  * Return: None
533  */
534 void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
535 {
536 	qdf_spin_lock_bh(&pdev->tx_mutex);
537 
538 	ol_tx_desc_free_common(pdev, tx_desc);
539 
540 	ol_tx_put_desc_global_pool(pdev, tx_desc);
541 	ol_tx_desc_vdev_rm(tx_desc);
542 	ol_tx_do_pdev_flow_control_unpause(pdev);
543 
544 	qdf_spin_unlock_bh(&pdev->tx_mutex);
545 }
546 
547 #else
548 
549 /**
550  * ol_tx_update_free_desc_to_pool() - return a freed descriptor to its pool
551  * @pdev: pdev handle
552  * @tx_desc: descriptor
553  *
554  * Return: true if descriptor distribution is required, false otherwise
555  */
556 #ifdef QCA_LL_TX_FLOW_CONTROL_RESIZE
557 static inline bool ol_tx_update_free_desc_to_pool(struct ol_txrx_pdev_t *pdev,
558 						  struct ol_tx_desc_t *tx_desc)
559 {
560 	struct ol_tx_flow_pool_t *pool = tx_desc->pool;
561 	bool distribute_desc = false;
562 
563 	if (unlikely(pool->overflow_desc)) {
564 		ol_tx_put_desc_global_pool(pdev, tx_desc);
565 		--pool->overflow_desc;
566 		distribute_desc = true;
567 	} else {
568 		ol_tx_put_desc_flow_pool(pool, tx_desc);
569 	}
570 
571 	return distribute_desc;
572 }
573 #else
574 static inline bool ol_tx_update_free_desc_to_pool(struct ol_txrx_pdev_t *pdev,
575 						  struct ol_tx_desc_t *tx_desc)
576 {
577 	ol_tx_put_desc_flow_pool(tx_desc->pool, tx_desc);
578 	return false;
579 }
580 #endif
581 
582 /**
583  * ol_tx_desc_free() - put descriptor to pool freelist
584  * @pdev: pdev handle
585  * @tx_desc: tx descriptor
586  *
587  * Return: None
588  */
589 void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
590 {
591 	bool distribute_desc = false;
592 	struct ol_tx_flow_pool_t *pool = tx_desc->pool;
593 
594 	qdf_spin_lock_bh(&pool->flow_pool_lock);
595 
596 	ol_tx_desc_free_common(pdev, tx_desc);
597 	distribute_desc = ol_tx_update_free_desc_to_pool(pdev, tx_desc);
598 
599 	switch (pool->status) {
600 	case FLOW_POOL_ACTIVE_PAUSED:
601 		if (pool->avail_desc > pool->start_priority_th) {
602 			/* unpause priority queue */
603 			pdev->pause_cb(pool->member_flow_id,
604 			       WLAN_NETIF_PRIORITY_QUEUE_ON,
605 			       WLAN_DATA_FLOW_CONTROL_PRIORITY);
606 			pool->status = FLOW_POOL_NON_PRIO_PAUSED;
607 		}
608 		break;
609 	case FLOW_POOL_NON_PRIO_PAUSED:
610 		if (pool->avail_desc > pool->start_th) {
611 			pdev->pause_cb(pool->member_flow_id,
612 				       WLAN_WAKE_NON_PRIORITY_QUEUE,
613 				       WLAN_DATA_FLOW_CONTROL);
614 			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
615 		}
616 		break;
617 	case FLOW_POOL_INVALID:
618 		if (pool->avail_desc == pool->flow_pool_size) {
619 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
620 			ol_tx_free_invalid_flow_pool(pool);
621 			qdf_print("pool is INVALID State!!");
622 			return;
623 		}
624 		break;
625 	case FLOW_POOL_ACTIVE_UNPAUSED:
626 		break;
627 	default:
628 		qdf_print("pool is INACTIVE State!!");
629 		break;
630 	};
631 
632 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
633 
634 	if (unlikely(distribute_desc))
635 		ol_tx_distribute_descs_to_deficient_pools_from_global_pool();
636 
637 }
638 #endif
639 
640 const uint32_t htt_to_ce_pkt_type[] = {
641 	[htt_pkt_type_raw] = tx_pkt_type_raw,
642 	[htt_pkt_type_native_wifi] = tx_pkt_type_native_wifi,
643 	[htt_pkt_type_ethernet] = tx_pkt_type_802_3,
644 	[htt_pkt_type_mgmt] = tx_pkt_type_mgmt,
645 	[htt_pkt_type_eth2] = tx_pkt_type_eth2,
646 	[htt_pkt_num_types] = 0xffffffff
647 };
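/*
 * Usage sketch (illustrative): the table converts the HTT packet type used
 * on the host into the CE/target packet type, e.g.
 *
 *	uint32_t ce_type = htt_to_ce_pkt_type[htt_pkt_type_ethernet];
 *	// ce_type == tx_pkt_type_802_3
 *
 * The htt_pkt_num_types entry appears to serve as an out-of-range sentinel
 * (0xffffffff).
 */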
648 
649 #define WISA_DEST_PORT_6MBPS	50000
650 #define WISA_DEST_PORT_24MBPS	50001
651 
652 /**
653  * ol_tx_get_wisa_ext_hdr_type() - get header type for WiSA mode
654  * @netbuf: network buffer
655  *
656  * Return: extension header type
657  */
658 static enum extension_header_type
659 ol_tx_get_wisa_ext_hdr_type(qdf_nbuf_t netbuf)
660 {
661 	uint8_t *buf = qdf_nbuf_data(netbuf);
662 	uint16_t dport;
663 
664 	if (qdf_is_macaddr_group(
665 		(struct qdf_mac_addr *)(buf + QDF_NBUF_DEST_MAC_OFFSET))) {
666 
667 		dport = (uint16_t)(*(uint16_t *)(buf +
668 			QDF_NBUF_TRAC_IPV4_OFFSET +
669 			QDF_NBUF_TRAC_IPV4_HEADER_SIZE + sizeof(uint16_t)));
670 
671 		if (dport == QDF_SWAP_U16(WISA_DEST_PORT_6MBPS))
672 			return WISA_MODE_EXT_HEADER_6MBPS;
673 		else if (dport == QDF_SWAP_U16(WISA_DEST_PORT_24MBPS))
674 			return WISA_MODE_EXT_HEADER_24MBPS;
675 		else
676 			return EXT_HEADER_NOT_PRESENT;
677 	} else {
678 		return EXT_HEADER_NOT_PRESENT;
679 	}
680 }
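/*
 * Offset sketch (illustrative, assuming a fixed-size IPv4 header with no
 * options): the destination port read above is located at
 *
 *	QDF_NBUF_TRAC_IPV4_OFFSET		// start of the IPv4 header
 *	+ QDF_NBUF_TRAC_IPV4_HEADER_SIZE	// skip the IPv4 header
 *	+ sizeof(uint16_t)			// skip the UDP source port
 *
 * and the WISA_DEST_PORT_* constants are byte-swapped with QDF_SWAP_U16()
 * so that they match the network-byte-order field on little-endian hosts.
 */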
681 
682 /**
683  * ol_tx_get_ext_header_type() - check whether an extension header is required
684  * @vdev: vdev pointer
685  * @netbuf: network buffer
686  *
687  * This function returns the extension header type, or
688  * EXT_HEADER_NOT_PRESENT if no extension header is required.
689  *
690  * Return: extension header type
691  */
692 enum extension_header_type
693 ol_tx_get_ext_header_type(struct ol_txrx_vdev_t *vdev,
694 	qdf_nbuf_t netbuf)
695 {
696 	if (vdev->is_wisa_mode_enable == true)
697 		return ol_tx_get_wisa_ext_hdr_type(netbuf);
698 	else
699 		return EXT_HEADER_NOT_PRESENT;
700 }
701 
702 struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
703 				   struct ol_txrx_vdev_t *vdev,
704 				   qdf_nbuf_t netbuf,
705 				   struct ol_txrx_msdu_info_t *msdu_info)
706 {
707 	struct ol_tx_desc_t *tx_desc;
708 	unsigned int i;
709 	uint32_t num_frags;
710 	enum extension_header_type type;
711 
712 	msdu_info->htt.info.vdev_id = vdev->vdev_id;
713 	msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
714 	switch (qdf_nbuf_get_exemption_type(netbuf)) {
715 	case QDF_NBUF_EXEMPT_NO_EXEMPTION:
716 	case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
717 		/* We want to encrypt this frame */
718 		msdu_info->htt.action.do_encrypt = 1;
719 		break;
720 	case QDF_NBUF_EXEMPT_ALWAYS:
721 		/* We don't want to encrypt this frame */
722 		msdu_info->htt.action.do_encrypt = 0;
723 		break;
724 	default:
725 		qdf_assert(0);
726 		break;
727 	}
728 
729 	/* allocate the descriptor */
730 	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
731 	if (!tx_desc)
732 		return NULL;
733 
734 	/* initialize the SW tx descriptor */
735 	tx_desc->netbuf = netbuf;
736 
737 	if (msdu_info->tso_info.is_tso) {
738 		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
739 		tx_desc->tso_num_desc = msdu_info->tso_info.tso_num_seg_list;
740 		tx_desc->pkt_type = OL_TX_FRM_TSO;
741 		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, netbuf);
742 	} else {
743 		tx_desc->pkt_type = OL_TX_FRM_STD;
744 	}
745 
746 	type = ol_tx_get_ext_header_type(vdev, netbuf);
747 
748 	/* initialize the HW tx descriptor */
749 	if (qdf_unlikely(htt_tx_desc_init(pdev->htt_pdev, tx_desc->htt_tx_desc,
750 			 tx_desc->htt_tx_desc_paddr,
751 			 ol_tx_desc_id(pdev, tx_desc), netbuf, &msdu_info->htt,
752 			 &msdu_info->tso_info, NULL, type))) {
753 		/*
754 		 * HTT Tx descriptor initialization failed.
755 		 * Therefore, free the tx desc.
756 		 */
757 		ol_tx_desc_free(pdev, tx_desc);
758 		return NULL;
759 	}
760 
761 	/*
762 	 * Initialize the fragmentation descriptor.
763 	 * Skip the prefix fragment (HTT tx descriptor) that was added
764 	 * during the call to htt_tx_desc_init above.
765 	 */
766 	num_frags = qdf_nbuf_get_num_frags(netbuf);
767 	/* num_frags is expected to be at most 2 */
768 	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
769 		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
770 		: num_frags;
771 #if defined(HELIUMPLUS)
772 	/*
773 	 * Use num_frags - 1, since 1 frag is used to store
774 	 * the HTT/HTC descriptor
775 	 * Refer to htt_tx_desc_init()
776 	 */
777 	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
778 			      num_frags - 1);
779 #else /* ! defined(HELIUMPLUS) */
780 	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
781 			      num_frags - 1);
782 #endif /* defined(HELIUMPLUS) */
783 
784 	if (msdu_info->tso_info.is_tso) {
785 		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
786 			 tx_desc->htt_frag_desc, &msdu_info->tso_info);
787 		TXRX_STATS_TSO_SEG_UPDATE(pdev,
788 			 msdu_info->tso_info.msdu_stats_idx,
789 			 msdu_info->tso_info.curr_seg->seg);
790 	} else {
791 		for (i = 1; i < num_frags; i++) {
792 			qdf_size_t frag_len;
793 			qdf_dma_addr_t frag_paddr;
794 #ifdef HELIUMPLUS_DEBUG
795 			void *frag_vaddr;
796 
797 			frag_vaddr = qdf_nbuf_get_frag_vaddr(netbuf, i);
798 #endif
799 			frag_len = qdf_nbuf_get_frag_len(netbuf, i);
800 			frag_paddr = qdf_nbuf_get_frag_paddr(netbuf, i);
801 #if defined(HELIUMPLUS)
802 			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
803 					 i - 1, frag_paddr, frag_len);
804 #if defined(HELIUMPLUS_DEBUG)
805 			qdf_debug("htt_fdesc=%pK frag=%d frag_vaddr=0x%pK frag_paddr=0x%llx len=%zu\n",
806 				  tx_desc->htt_frag_desc,
807 				  i-1, frag_vaddr, frag_paddr, frag_len);
808 			ol_txrx_dump_pkt(netbuf, frag_paddr, 64);
809 #endif /* HELIUMPLUS_DEBUG */
810 #else /* ! defined(HELIUMPLUS) */
811 			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
812 					 i - 1, frag_paddr, frag_len);
813 #endif /* defined(HELIUMPLUS) */
814 		}
815 	}
816 
817 #if defined(HELIUMPLUS_DEBUG)
818 	ol_txrx_dump_frag_desc("ol_tx_desc_ll()", tx_desc);
819 #endif
820 	return tx_desc;
821 }
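/*
 * Usage sketch (illustrative, hypothetical caller): the LL data path is
 * expected to call this once per MSDU after priming msdu_info, e.g.
 *
 *	struct ol_txrx_msdu_info_t msdu_info = { 0 };
 *
 *	msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
 *	msdu_info.tso_info.is_tso = 0;
 *	tx_desc = ol_tx_desc_ll(pdev, vdev, netbuf, &msdu_info);
 *	if (!tx_desc)
 *		return netbuf;	// caller keeps the nbuf on failure
 */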
822 
823 struct ol_tx_desc_t *
824 ol_tx_desc_hl(
825 	struct ol_txrx_pdev_t *pdev,
826 	struct ol_txrx_vdev_t *vdev,
827 	qdf_nbuf_t netbuf,
828 	struct ol_txrx_msdu_info_t *msdu_info)
829 {
830 	struct ol_tx_desc_t *tx_desc;
831 
832 	/* FIX THIS: these inits should probably be done by tx classify */
833 	msdu_info->htt.info.vdev_id = vdev->vdev_id;
834 	msdu_info->htt.info.frame_type = pdev->htt_pkt_type;
835 	msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
836 	switch (qdf_nbuf_get_exemption_type(netbuf)) {
837 	case QDF_NBUF_EXEMPT_NO_EXEMPTION:
838 	case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
839 		/* We want to encrypt this frame */
840 		msdu_info->htt.action.do_encrypt = 1;
841 		break;
842 	case QDF_NBUF_EXEMPT_ALWAYS:
843 		/* We don't want to encrypt this frame */
844 		msdu_info->htt.action.do_encrypt = 0;
845 		break;
846 	default:
847 		qdf_assert(0);
848 		break;
849 	}
850 
851 	/* allocate the descriptor */
852 	tx_desc = ol_tx_desc_alloc_hl(pdev, vdev, msdu_info);
853 	if (!tx_desc)
854 		return NULL;
855 
856 	/* initialize the SW tx descriptor */
857 	tx_desc->netbuf = netbuf;
858 	/* fix this - get pkt_type from msdu_info */
859 	tx_desc->pkt_type = OL_TX_FRM_STD;
860 
861 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
862 	tx_desc->orig_l2_hdr_bytes = 0;
863 #endif
864 	/* the HW tx descriptor will be initialized later by the caller */
865 
866 	return tx_desc;
867 }
868 
869 void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev,
870 				ol_tx_desc_list *tx_descs, int had_error)
871 {
872 	struct ol_tx_desc_t *tx_desc, *tmp;
873 	qdf_nbuf_t msdus = NULL;
874 
875 	TAILQ_FOREACH_SAFE(tx_desc, tx_descs, tx_desc_list_elem, tmp) {
876 		qdf_nbuf_t msdu = tx_desc->netbuf;
877 
878 		qdf_atomic_init(&tx_desc->ref_cnt);   /* clear the ref cnt */
879 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
880 		/* restore original hdr offset */
881 		OL_TX_RESTORE_HDR(tx_desc, msdu);
882 #endif
883 
884 		/*
885 		 * In MCC IPA tx context, IPA driver provides skb with directly
886 		 * DMA mapped address. In such case, there's no need for WLAN
887 		 * driver to DMA unmap the skb.
888 		 */
889 		if ((qdf_nbuf_get_users(msdu) <= 1) &&
890 		    !qdf_nbuf_ipa_owned_get(msdu))
891 			qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_TO_DEVICE);
892 
893 		/* free the tx desc */
894 		ol_tx_desc_free(pdev, tx_desc);
895 		/* link the netbuf into a list to free as a batch */
896 		qdf_nbuf_set_next(msdu, msdus);
897 		msdus = msdu;
898 	}
899 	/* free the netbufs as a batch */
900 	qdf_nbuf_tx_free(msdus, had_error);
901 }
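/*
 * Usage sketch (illustrative, hypothetical caller): completion handling
 * typically collects descriptors on a local list and releases them in one
 * batch, e.g.
 *
 *	ol_tx_desc_list tx_descs;
 *
 *	TAILQ_INIT(&tx_descs);
 *	TAILQ_INSERT_TAIL(&tx_descs, tx_desc, tx_desc_list_elem);
 *	// ... append further descriptors ...
 *	ol_tx_desc_frame_list_free(pdev, &tx_descs, 0);	// had_error = 0
 */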
902 
903 void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
904 				  struct ol_tx_desc_t *tx_desc, int had_error)
905 {
906 	int mgmt_type;
907 	ol_txrx_mgmt_tx_cb ota_ack_cb;
908 
909 	qdf_atomic_init(&tx_desc->ref_cnt);     /* clear the ref cnt */
910 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
911 	/* restore original hdr offset */
912 	OL_TX_RESTORE_HDR(tx_desc, (tx_desc->netbuf));
913 #endif
914 	if (tx_desc->pkt_type == OL_TX_FRM_NO_FREE) {
915 
916 		/* free the tx desc but don't unmap or free the frame */
917 		if (pdev->tx_data_callback.func) {
918 			qdf_nbuf_set_next(tx_desc->netbuf, NULL);
919 			pdev->tx_data_callback.func(pdev->tx_data_callback.ctxt,
920 						    tx_desc->netbuf, had_error);
921 			goto free_tx_desc;
922 		}
923 		/* let the code below unmap and free the frame */
924 	}
925 	if (tx_desc->pkt_type == OL_TX_FRM_TSO)
926 		ol_tso_unmap_tso_segment(pdev, tx_desc);
927 	else
928 		qdf_nbuf_unmap(pdev->osdev, tx_desc->netbuf, QDF_DMA_TO_DEVICE);
929 	/* check the frame type to see what kind of special steps are needed */
930 	if ((tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) &&
931 		   (tx_desc->pkt_type != ol_tx_frm_freed)) {
932 		qdf_dma_addr_t frag_desc_paddr = 0;
933 
934 #if defined(HELIUMPLUS)
935 		frag_desc_paddr = tx_desc->htt_frag_desc_paddr;
936 		/* FIX THIS -
937 		 * The FW currently has trouble using the host's fragments
938 		 * table for management frames.  Until this is fixed,
939 		 * rather than specifying the fragment table to the FW,
940 		 * the host SW will specify just the address of the initial
941 		 * fragment.
942 		 * Now that the mgmt frame is done, the HTT tx desc's frags
943 		 * table pointer needs to be reset.
944 		 */
945 #if defined(HELIUMPLUS_DEBUG)
946 		qdf_print("Frag Descriptor Reset [%d] to 0x%x\n",
947 			  tx_desc->id,
948 			  frag_desc_paddr);
949 #endif /* HELIUMPLUS_DEBUG */
950 #endif /* HELIUMPLUS */
951 		htt_tx_desc_frags_table_set(pdev->htt_pdev,
952 					    tx_desc->htt_tx_desc, 0,
953 					    frag_desc_paddr, 1);
954 
955 		mgmt_type = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
956 		/*
957 		 * We already checked the value when the mgmt frame was
958 		 * provided to the txrx layer, so there is no need to check
959 		 * it a second time.
960 		 */
961 		ota_ack_cb = pdev->tx_mgmt_cb.ota_ack_cb;
962 		if (ota_ack_cb) {
963 			void *ctxt;
964 			ctxt = pdev->tx_mgmt_cb.ctxt;
965 			ota_ack_cb(ctxt, tx_desc->netbuf, had_error);
966 		}
967 	} else if (had_error == htt_tx_status_download_fail) {
968 		/* Failed to send to target */
969 		goto free_tx_desc;
970 	} else {
971 		/* single regular frame, called from completion path */
972 		qdf_nbuf_set_next(tx_desc->netbuf, NULL);
973 		qdf_nbuf_tx_free(tx_desc->netbuf, had_error);
974 	}
975 free_tx_desc:
976 	/* free the tx desc */
977 	ol_tx_desc_free(pdev, tx_desc);
978 }
979 
980 #if defined(FEATURE_TSO)
981 #ifdef TSOSEG_DEBUG
982 static int
983 ol_tso_seg_dbg_sanitize(struct qdf_tso_seg_elem_t *tsoseg)
984 {
985 	int rc = -1;
986 	struct ol_tx_desc_t *txdesc;
987 
988 	if (tsoseg) {
989 		txdesc = tsoseg->dbg.txdesc;
990 		/* Don't validate if TX desc is NULL */
991 		if (!txdesc)
992 			return 0;
993 		if (txdesc->tso_desc != tsoseg)
994 			qdf_tso_seg_dbg_bug("Owner sanity failed");
995 		else
996 			rc = 0;
997 	}
998 	return rc;
999 
1000 }
1001 #else
1002 static int
1003 ol_tso_seg_dbg_sanitize(struct qdf_tso_seg_elem_t *tsoseg)
1004 {
1005 	return 0;
1006 }
1007 #endif /* TSOSEG_DEBUG */
1008 
1009 /**
1010  * ol_tso_alloc_segment() - function to allocate a TSO segment
1011  * element
1012  * @pdev: the data physical device sending the data
1013  *
1014  * Allocates a TSO segment element from the free list held in
1015  * the pdev
1016  *
1017  * Return: tso_seg
1018  */
1019 struct qdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
1020 {
1021 	struct qdf_tso_seg_elem_t *tso_seg = NULL;
1022 
1023 	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
1024 	if (pdev->tso_seg_pool.freelist) {
1025 		pdev->tso_seg_pool.num_free--;
1026 		tso_seg = pdev->tso_seg_pool.freelist;
1027 		if (tso_seg->on_freelist != 1) {
1028 			qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1029 			qdf_print("tso seg alloc failed: not in freelist");
1030 			QDF_BUG(0);
1031 			return NULL;
1032 		} else if (tso_seg->cookie != TSO_SEG_MAGIC_COOKIE) {
1033 			qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1034 			qdf_print("tso seg alloc failed: bad cookie");
1035 			QDF_BUG(0);
1036 			return NULL;
1037 		}
1038 		/* this tso seg is no longer part of the freelist */
1039 		tso_seg->on_freelist = 0;
1040 		tso_seg->sent_to_target = 0;
1041 		tso_seg->force_free = 0;
1042 		pdev->tso_seg_pool.freelist = pdev->tso_seg_pool.freelist->next;
1043 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_ALLOC);
1044 	}
1045 	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1046 
1047 	return tso_seg;
1048 }
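/*
 * Pairing sketch (illustrative): a segment taken here must eventually go
 * back through ol_tso_free_segment() below, which enforces the on_freelist,
 * cookie and sent_to_target/force_free invariants checked in both routines:
 *
 *	struct qdf_tso_seg_elem_t *seg = ol_tso_alloc_segment(pdev);
 *
 *	if (seg) {
 *		// on_freelist == 0, cookie == TSO_SEG_MAGIC_COOKIE here
 *		seg->force_free = 1;	// freeing without sending to target
 *		ol_tso_free_segment(pdev, seg);
 *	}
 */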
1049 
1050 /**
1051  * ol_tso_free_segment() - function to free a TSO segment
1052  * element
1053  * @pdev: the data physical device sending the data
1054  * @tso_seg: The TSO segment element to be freed
1055  *
1056  * Returns a TSO segment element to the free list held in the
1057  * pdev
1058  *
1059  * Return: none
1060  */
1061 void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
1062 	 struct qdf_tso_seg_elem_t *tso_seg)
1063 {
1064 	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
1065 	if (tso_seg->on_freelist != 0) {
1066 		qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1067 		qdf_print("Do not free tso seg, already freed");
1068 		QDF_BUG(0);
1069 		return;
1070 	} else if (tso_seg->cookie != TSO_SEG_MAGIC_COOKIE) {
1071 		qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1072 		qdf_print("Do not free tso seg: cookie is not good.");
1073 		QDF_BUG(0);
1074 		return;
1075 	} else if ((tso_seg->sent_to_target != 1) &&
1076 		   (tso_seg->force_free != 1)) {
1077 		qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1078 		qdf_print("Do not free tso seg:  yet to be sent to target");
1079 		QDF_BUG(0);
1080 		return;
1081 	}
1082 	/* sanitize before free */
1083 	ol_tso_seg_dbg_sanitize(tso_seg);
1084 	qdf_tso_seg_dbg_setowner(tso_seg, NULL);
1085 	/* this tso seg is now part of the freelist */
1086 	/* retain segment history, if debug is enabled */
1087 	qdf_tso_seg_dbg_zero(tso_seg);
1088 	tso_seg->next = pdev->tso_seg_pool.freelist;
1089 	tso_seg->on_freelist = 1;
1090 	tso_seg->sent_to_target = 0;
1091 	tso_seg->cookie = TSO_SEG_MAGIC_COOKIE;
1092 	pdev->tso_seg_pool.freelist = tso_seg;
1093 	pdev->tso_seg_pool.num_free++;
1094 	qdf_tso_seg_dbg_record(tso_seg, tso_seg->force_free
1095 			       ? TSOSEG_LOC_FORCE_FREE
1096 			       : TSOSEG_LOC_FREE);
1097 	tso_seg->force_free = 0;
1098 	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1099 }
1100 
1101 /**
1102  * ol_tso_num_seg_alloc() - allocate an element to count the TSO segments
1103  *			    in a jumbo skb packet.
1104  * @pdev: the data physical device sending the data
1105  *
1106  * Allocates an element used to count TSO segments from the free list held
1107  * in the pdev
1108  *
1109  * Return: tso_num_seg
1110  */
1111 struct qdf_tso_num_seg_elem_t *ol_tso_num_seg_alloc(struct ol_txrx_pdev_t *pdev)
1112 {
1113 	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;
1114 
1115 	qdf_spin_lock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
1116 	if (pdev->tso_num_seg_pool.freelist) {
1117 		pdev->tso_num_seg_pool.num_free--;
1118 		tso_num_seg = pdev->tso_num_seg_pool.freelist;
1119 		pdev->tso_num_seg_pool.freelist =
1120 				pdev->tso_num_seg_pool.freelist->next;
1121 	}
1122 	qdf_spin_unlock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
1123 
1124 	return tso_num_seg;
1125 }
1126 
1127 /**
1128  * ol_tso_num_seg_free() - free an element used to count the TSO segments
1129  * in a jumbo skb packet
1130  * @pdev: the data physical device sending the data
1131  * @tso_num_seg: the TSO num-seg element to be freed
1132  *
1133  * Returns the element to the free list held in the pdev
1134  *
1135  * Return: none
1136  */
1137 void ol_tso_num_seg_free(struct ol_txrx_pdev_t *pdev,
1138 	 struct qdf_tso_num_seg_elem_t *tso_num_seg)
1139 {
1140 	qdf_spin_lock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
1141 	tso_num_seg->next = pdev->tso_num_seg_pool.freelist;
1142 	pdev->tso_num_seg_pool.freelist = tso_num_seg;
1143 	pdev->tso_num_seg_pool.num_free++;
1144 	qdf_spin_unlock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
1145 }
1146 #endif
1147