xref: /wlan-dirver/qcacld-3.0/core/dp/txrx/ol_tx.c (revision f65bd4cf8fca8a30dcc78601a42879626d6bc7ee)
1 /*
2  * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 /* OS abstraction libraries */
29 #include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
30 #include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
31 #include <qdf_util.h>           /* qdf_unlikely */
32 
33 /* APIs for other modules */
34 #include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
35 #include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */
36 
37 /* internal header files relevant for all systems */
38 #include <ol_txrx_internal.h>   /* TXRX_ASSERT1 */
39 #include <ol_tx_desc.h>         /* ol_tx_desc */
40 #include <ol_tx_send.h>         /* ol_tx_send */
41 #include <ol_txrx.h>
42 
43 /* internal header files relevant only for HL systems */
44 #include <ol_tx_classify.h>   /* ol_tx_classify, ol_tx_classify_mgmt */
45 #include <ol_tx_queue.h>        /* ol_tx_enqueue */
46 #include <ol_tx_sched.h>      /* ol_tx_sched */
47 
48 
49 /* internal header files relevant only for specific systems (Pronto) */
50 #include <ol_txrx_encap.h>      /* OL_TX_ENCAP, etc */
51 #include <ol_tx.h>
52 
53 #ifdef WLAN_FEATURE_FASTPATH
54 #include <hif.h>              /* HIF_DEVICE */
55 #include <htc_api.h>    /* Layering violation, but required for fast path */
56 #include <htt_internal.h>
57 #include <htt_types.h>        /* htc_endpoint */
58 #include <cdp_txrx_peer_ops.h>
59 
60 int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
61 		 unsigned int transfer_id, uint32_t download_len);
62 #endif  /* WLAN_FEATURE_FASTPATH */
63 
64 /*
65  * The TXRX module doesn't accept tx frames unless the target has
66  * enough descriptors for them.
67  * For LL, the TXRX descriptor pool is sized to match the target's
68  * descriptor pool.  Hence, if the descriptor allocation in TXRX
69  * succeeds, that guarantees that the target has room to accept
70  * the new tx frame.
71  */
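/*
 * Note: on tx descriptor allocation failure, this macro executes a
 * "return msdu" from the function that invokes it, handing the list of
 * not-yet-accepted MSDUs back to that function's caller.
 */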
72 #define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info)		\
73 	do {								\
74 		struct ol_txrx_pdev_t *pdev = vdev->pdev;		\
75 		(msdu_info)->htt.info.frame_type = pdev->htt_pkt_type;	\
76 		tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info);	\
77 		if (qdf_unlikely(!tx_desc)) {				\
78 			TXRX_STATS_MSDU_LIST_INCR(			\
79 				pdev, tx.dropped.host_reject, msdu);	\
80 			return msdu; /* the list of unaccepted MSDUs */	\
81 		}							\
82 	} while (0)
83 
84 #if defined(FEATURE_TSO)
85 /**
86  * ol_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO-related
87  * information in the msdu_info metadata
88  * @vdev: virtual device handle
89  * @msdu: network buffer
90  * @msdu_info: meta data associated with the msdu
91  *
92  * Return: 0 - success, >0 - error
93  */
94 static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
95 	 qdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
96 {
97 	msdu_info->tso_info.curr_seg = NULL;
98 	if (qdf_nbuf_is_tso(msdu)) {
99 		int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
100 		msdu_info->tso_info.tso_seg_list = NULL;
101 		msdu_info->tso_info.num_segs = num_seg;
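		/*
		 * Build a list with one TSO segment element per segment of
		 * the jumbo frame; if any allocation fails, free the elements
		 * allocated so far and reject the frame.
		 */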
102 		while (num_seg) {
103 			struct qdf_tso_seg_elem_t *tso_seg =
104 				ol_tso_alloc_segment(vdev->pdev);
105 			if (tso_seg) {
106 				tso_seg->next =
107 					msdu_info->tso_info.tso_seg_list;
108 				msdu_info->tso_info.tso_seg_list
109 					= tso_seg;
110 				num_seg--;
111 			} else {
112 				struct qdf_tso_seg_elem_t *next_seg;
113 				struct qdf_tso_seg_elem_t *free_seg =
114 					msdu_info->tso_info.tso_seg_list;
115 				qdf_print("TSO seg alloc failed!\n");
116 				while (free_seg) {
117 					next_seg = free_seg->next;
118 					ol_tso_free_segment(vdev->pdev,
119 						 free_seg);
120 					free_seg = next_seg;
121 				}
122 				return 1;
123 			}
124 		}
125 		qdf_nbuf_get_tso_info(vdev->pdev->osdev,
126 			msdu, &(msdu_info->tso_info));
127 		msdu_info->tso_info.curr_seg =
128 			msdu_info->tso_info.tso_seg_list;
129 		num_seg = msdu_info->tso_info.num_segs;
130 	} else {
131 		msdu_info->tso_info.is_tso = 0;
132 		msdu_info->tso_info.num_segs = 1;
133 	}
134 	return 0;
135 }
136 #endif
137 
138 /**
139  * ol_tx_data() - send data frame
140  * @data_vdev: virtual device handle
141  * @skb: network buffer to transmit
142  *
143  * Return: NULL on success, skb on failure
144  */
145 qdf_nbuf_t ol_tx_data(void *data_vdev, qdf_nbuf_t skb)
146 {
147 	struct ol_txrx_pdev_t *pdev;
148 	qdf_nbuf_t ret;
149 	ol_txrx_vdev_handle vdev = data_vdev;
150 
151 	if (qdf_unlikely(!vdev)) {
152 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
153 			"%s:vdev is null", __func__);
154 		return skb;
155 	} else {
156 		pdev = vdev->pdev;
157 	}
158 
159 	if (qdf_unlikely(!pdev)) {
160 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
161 			"%s:pdev is null", __func__);
162 		return skb;
163 	}
164 
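	/*
	 * When the target's IP/TCP/UDP checksum offload is enabled, adjust
	 * the checksum status of IPv4 frames that the stack marked
	 * CHECKSUM_PARTIAL, since the checksum is expected to be handled
	 * by the target.
	 */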
165 	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
166 		&& (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
167 		&& (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
168 		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);
169 
170 	/* Terminate the (single-element) list of tx frames */
171 	qdf_nbuf_set_next(skb, NULL);
172 	ret = OL_TX_SEND(vdev, skb);
173 	if (ret) {
174 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
175 			"%s: Failed to tx", __func__);
176 		return ret;
177 	}
178 
179 	return NULL;
180 }
181 
182 #ifdef IPA_OFFLOAD
183 /**
184  * ol_tx_send_ipa_data_frame() - send IPA data frame
185  * @vdev: virtual device handle
186  * @skb: network buffer to transmit
187  *
188  * Return: NULL on success, skb on failure
189  */
190 qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
191 			qdf_nbuf_t skb)
192 {
193 	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
194 	qdf_nbuf_t ret;
195 
196 	if (qdf_unlikely(!pdev)) {
197 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
198 			"%s: pdev is NULL", __func__);
199 		return skb;
200 	}
201 
202 	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
203 		&& (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
204 		&& (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
205 		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);
206 
207 	/* Terminate the (single-element) list of tx frames */
208 	qdf_nbuf_set_next(skb, NULL);
209 	ret = OL_TX_SEND((struct ol_txrx_vdev_t *)vdev, skb);
210 	if (ret) {
211 		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
212 			"%s: Failed to tx", __func__);
213 		return ret;
214 	}
215 
216 	return NULL;
217 }
218 #endif
219 
220 
221 #if defined(FEATURE_TSO)
222 qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
223 {
224 	qdf_nbuf_t msdu = msdu_list;
225 	struct ol_txrx_msdu_info_t msdu_info;
226 
227 	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
228 	msdu_info.htt.action.tx_comp_req = 0;
229 	/*
230 	 * The msdu_list variable could be used instead of the msdu var,
231 	 * but just to clarify which operations are done on a single MSDU
232 	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
233 	 * within the list.
234 	 */
235 	while (msdu) {
236 		qdf_nbuf_t next;
237 		struct ol_tx_desc_t *tx_desc;
238 		int segments = 1;
239 
240 		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
241 		msdu_info.peer = NULL;
242 
243 		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
244 			qdf_print("ol_tx_prepare_tso failed\n");
245 			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
246 				 tx.dropped.host_reject, msdu);
247 			return msdu;
248 		}
249 
250 		segments = msdu_info.tso_info.num_segs;
251 		TXRX_STATS_TSO_HISTOGRAM(vdev->pdev, segments);
252 		TXRX_STATS_TSO_GSO_SIZE_UPDATE(vdev->pdev,
253 					 qdf_nbuf_tcp_tso_size(msdu));
254 		TXRX_STATS_TSO_TOTAL_LEN_UPDATE(vdev->pdev,
255 					 qdf_nbuf_len(msdu));
256 		TXRX_STATS_TSO_NUM_FRAGS_UPDATE(vdev->pdev,
257 					 qdf_nbuf_get_nr_frags(msdu));
258 
259 
260 		/*
261 		 * The netbuf may get linked into a different list inside the
262 		 * ol_tx_send function, so store the next pointer before the
263 		 * tx_send call.
264 		 */
265 		next = qdf_nbuf_next(msdu);
266 		/* init the current segment to the 1st segment in the list */
267 		while (segments) {
268 
269 			if (msdu_info.tso_info.curr_seg)
270 				QDF_NBUF_CB_PADDR(msdu) =
271 					msdu_info.tso_info.curr_seg->
272 					seg.tso_frags[0].paddr;
273 
274 			segments--;
275 
276 			/*
277 			 * If this is a jumbo nbuf, increment the number of
278 			 * nbuf users for each additional segment of the msdu.
279 			 * This ensures the skb is freed only after tx
280 			 * completions are received for all segments of the nbuf.
281 			 */
282 			if (segments)
283 				qdf_nbuf_inc_users(msdu);
284 
285 			ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
286 
287 			TXRX_STATS_MSDU_INCR(vdev->pdev, tx.from_stack, msdu);
288 
289 			/*
290 			 * If debug display is enabled, show the meta-data being
291 			 * downloaded to the target via the HTT tx descriptor.
292 			 */
293 			htt_tx_desc_display(tx_desc->htt_tx_desc);
294 
295 			ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
296 
297 			if (msdu_info.tso_info.curr_seg) {
298 				msdu_info.tso_info.curr_seg =
299 					 msdu_info.tso_info.curr_seg->next;
300 			}
301 
302 			qdf_nbuf_reset_num_frags(msdu);
303 
304 			if (msdu_info.tso_info.is_tso) {
305 				TXRX_STATS_TSO_INC_SEG(vdev->pdev);
306 				TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
307 			}
308 		} /* while segments */
309 
310 		msdu = next;
311 		if (msdu_info.tso_info.is_tso) {
312 			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
313 			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
314 		}
315 	} /* while msdus */
316 	return NULL;            /* all MSDUs were accepted */
317 }
318 #else /* TSO */
319 
320 qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
321 {
322 	qdf_nbuf_t msdu = msdu_list;
323 	struct ol_txrx_msdu_info_t msdu_info;
324 
325 	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
326 	msdu_info.htt.action.tx_comp_req = 0;
327 	msdu_info.tso_info.is_tso = 0;
328 	/*
329 	 * The msdu_list variable could be used instead of the msdu var,
330 	 * but just to clarify which operations are done on a single MSDU
331 	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
332 	 * within the list.
333 	 */
334 	while (msdu) {
335 		qdf_nbuf_t next;
336 		struct ol_tx_desc_t *tx_desc;
337 
338 		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
339 		msdu_info.peer = NULL;
340 		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
341 
342 		TXRX_STATS_MSDU_INCR(vdev->pdev, tx.from_stack, msdu);
343 
344 		/*
345 		 * If debug display is enabled, show the meta-data being
346 		 * downloaded to the target via the HTT tx descriptor.
347 		 */
348 		htt_tx_desc_display(tx_desc->htt_tx_desc);
349 		/*
350 		 * The netbuf may get linked into a different list inside the
351 		 * ol_tx_send function, so store the next pointer before the
352 		 * tx_send call.
353 		 */
354 		next = qdf_nbuf_next(msdu);
355 		ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
356 		msdu = next;
357 	}
358 	return NULL;            /* all MSDUs were accepted */
359 }
360 #endif /* TSO */
361 
362 #ifdef WLAN_FEATURE_FASTPATH
363 /**
364  * ol_tx_prepare_ll_fast() - Allocate and prepare a Tx descriptor
365  *
366  * Allocate and prepare a Tx descriptor with msdu and fragment descriptor
367  * information.
368  *
369  * @pdev: pointer to ol pdev handle
370  * @vdev: pointer to ol vdev handle
371  * @msdu: linked list of msdu packets
372  * @pkt_download_len: packet download length
373  * @ep_id: endpoint ID
374  * @msdu_info: Handle to msdu_info
375  *
376  * Return: Pointer to Tx descriptor
377  */
378 static inline struct ol_tx_desc_t *
379 ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
380 		      ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu,
381 		      uint32_t pkt_download_len, uint32_t ep_id,
382 		      struct ol_txrx_msdu_info_t *msdu_info)
383 {
384 	struct ol_tx_desc_t *tx_desc = NULL;
385 	uint32_t *htt_tx_desc;
386 	void *htc_hdr_vaddr;
387 	u_int32_t num_frags, i;
388 	enum extension_header_type type;
389 
390 	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
391 	if (qdf_unlikely(!tx_desc))
392 		return NULL;
393 
394 	tx_desc->netbuf = msdu;
395 	if (msdu_info->tso_info.is_tso) {
396 		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
397 		tx_desc->pkt_type = OL_TX_FRM_TSO;
398 		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
399 	} else {
400 		tx_desc->pkt_type = OL_TX_FRM_STD;
401 	}
402 
403 	htt_tx_desc = tx_desc->htt_tx_desc;
404 
405 	/* Make sure frags num is set to 0 */
406 	/*
407 	 * Do this here rather than in hardstart, so
408 	 * that we can hopefully take only one cache-miss while
409 	 * accessing skb->cb.
410 	 */
411 
412 	/* HTT Header */
413 	/* TODO : Take care of multiple fragments */
414 
415 	type = ol_tx_get_ext_header_type(vdev, msdu);
416 
417 	/* TODO: Precompute and store paddr in ol_tx_desc_t */
418 	/* Virtual address of the HTT/HTC header, added by driver */
419 	htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
420 	htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
421 			 tx_desc->htt_tx_desc_paddr, tx_desc->id, msdu,
422 			 &msdu_info->htt, &msdu_info->tso_info,
423 			 NULL, type);
424 
425 	num_frags = qdf_nbuf_get_num_frags(msdu);
426 	/* num_frags is expected to be at most 2 */
427 	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
428 		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
429 		: num_frags;
430 #if defined(HELIUMPLUS_PADDR64)
431 	/*
432 	 * Use num_frags - 1, since 1 frag is used to store
433 	 * the HTT/HTC descriptor
434 	 * Refer to htt_tx_desc_init()
435 	 */
436 	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
437 			      num_frags - 1);
438 #else /* ! defined(HELIUMPLUSPADDR64) */
439 	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
440 			      num_frags-1);
441 #endif /* defined(HELIUMPLUS_PADDR64) */
442 	if (msdu_info->tso_info.is_tso) {
443 		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
444 			 tx_desc->htt_frag_desc, &msdu_info->tso_info);
445 		TXRX_STATS_TSO_SEG_UPDATE(pdev,
446 			 msdu_info->tso_info.curr_seg->seg);
447 	} else {
448 		for (i = 1; i < num_frags; i++) {
449 			qdf_size_t frag_len;
450 			qdf_dma_addr_t frag_paddr;
451 
452 			frag_len = qdf_nbuf_get_frag_len(msdu, i);
453 			frag_paddr = qdf_nbuf_get_frag_paddr(msdu, i);
454 			if (type != EXT_HEADER_NOT_PRESENT) {
455 				frag_paddr +=
456 				    sizeof(struct htt_tx_msdu_desc_ext_t);
457 				frag_len -=
458 				    sizeof(struct htt_tx_msdu_desc_ext_t);
459 			}
460 #if defined(HELIUMPLUS_PADDR64)
461 			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
462 					 i - 1, frag_paddr, frag_len);
463 #if defined(HELIUMPLUS_DEBUG)
464 			qdf_print("%s:%d: htt_fdesc=%p frag=%d frag_paddr=0x%0llx len=%zu",
465 				  __func__, __LINE__, tx_desc->htt_frag_desc,
466 				  i-1, frag_paddr, frag_len);
467 			ol_txrx_dump_pkt(netbuf, frag_paddr, 64);
468 #endif /* HELIUMPLUS_DEBUG */
469 #else /* ! defined(HELIUMPLUSPADDR64) */
470 			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
471 					 i - 1, frag_paddr, frag_len);
472 #endif /* defined(HELIUMPLUS_PADDR64) */
473 		}
474 	}
475 
476 	/*
477 	 *  Do we want to turn on word_stream bit-map here ? For linux, non-TSO
478 	 *  this is not required. We still have to mark the swap bit correctly,
479 	 *  when posting to the ring
480 	 */
481 	/* Check to make sure the data download length is correct */
482 
483 	/*
484 	 * TODO : Can we remove this check and always download a fixed length ?
485 	 */
486 
487 
488 	if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
489 		pkt_download_len += sizeof(struct htt_tx_msdu_desc_ext_t);
490 
491 	if (qdf_unlikely(qdf_nbuf_len(msdu) < pkt_download_len))
492 		pkt_download_len = qdf_nbuf_len(msdu);
493 
494 	/* Fill the HTC header information */
495 	/*
496 	 * Passing 0 as the seq_no field, we can probably get away
497 	 * with it for the time being, since this is not checked in f/w
498 	 */
499 	/* TODO : Prefill this, look at multi-fragment case */
500 	HTC_TX_DESC_FILL(htc_hdr_vaddr, pkt_download_len, ep_id, 0);
501 
502 	return tx_desc;
503 }
504 #if defined(FEATURE_TSO)
505 /**
506  * ol_tx_ll_fast() - Update metadata information and send msdus to HIF/CE
507  *
508  * @vdev: handle to ol_txrx_vdev_t
509  * @msdu_list: msdu list to be sent out.
510  *
511  * Return: NULL on success, pointer to the unsent nbuf(s) on failure.
512  */
513 qdf_nbuf_t
514 ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
515 {
516 	qdf_nbuf_t msdu = msdu_list;
517 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
518 	uint32_t pkt_download_len =
519 		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
520 	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
521 	struct ol_txrx_msdu_info_t msdu_info;
522 
523 	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
524 	msdu_info.htt.action.tx_comp_req = 0;
525 	/*
526 	 * The msdu_list variable could be used instead of the msdu var,
527 	 * but just to clarify which operations are done on a single MSDU
528 	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
529 	 * within the list.
530 	 */
531 	while (msdu) {
532 		qdf_nbuf_t next;
533 		struct ol_tx_desc_t *tx_desc;
534 		int segments = 1;
535 
536 		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
537 		msdu_info.peer = NULL;
538 
539 		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
540 			qdf_print("ol_tx_prepare_tso failed\n");
541 			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
542 				 tx.dropped.host_reject, msdu);
543 			return msdu;
544 		}
545 
546 		segments = msdu_info.tso_info.num_segs;
547 		TXRX_STATS_TSO_HISTOGRAM(vdev->pdev, segments);
548 		TXRX_STATS_TSO_GSO_SIZE_UPDATE(vdev->pdev,
549 				 qdf_nbuf_tcp_tso_size(msdu));
550 		TXRX_STATS_TSO_TOTAL_LEN_UPDATE(vdev->pdev,
551 				 qdf_nbuf_len(msdu));
552 		TXRX_STATS_TSO_NUM_FRAGS_UPDATE(vdev->pdev,
553 				 qdf_nbuf_get_nr_frags(msdu));
554 
555 		/*
556 		 * The netbuf may get linked into a different list
557 		 * inside the ce_send_fast function, so store the next
558 		 * pointer before the ce_send call.
559 		 */
560 		next = qdf_nbuf_next(msdu);
561 		/* init the current segment to the 1st segment in the list */
562 		while (segments) {
563 
564 			if (msdu_info.tso_info.curr_seg)
565 				QDF_NBUF_CB_PADDR(msdu) = msdu_info.tso_info.
566 					curr_seg->seg.tso_frags[0].paddr;
567 
568 			segments--;
569 
570 			/*
571 			 * If this is a jumbo nbuf, increment the number of
572 			 * nbuf users for each additional segment of the msdu.
573 			 * This ensures the skb is freed only after tx
574 			 * completions are received for all segments of the nbuf.
575 			 */
576 			if (segments)
577 				qdf_nbuf_inc_users(msdu);
578 
579 			msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
580 			msdu_info.htt.info.vdev_id = vdev->vdev_id;
581 			msdu_info.htt.action.cksum_offload =
582 				qdf_nbuf_get_tx_cksum(msdu);
583 			switch (qdf_nbuf_get_exemption_type(msdu)) {
584 			case QDF_NBUF_EXEMPT_NO_EXEMPTION:
585 			case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
586 				/* We want to encrypt this frame */
587 				msdu_info.htt.action.do_encrypt = 1;
588 				break;
589 			case QDF_NBUF_EXEMPT_ALWAYS:
590 				/* We don't want to encrypt this frame */
591 				msdu_info.htt.action.do_encrypt = 0;
592 				break;
593 			default:
594 				msdu_info.htt.action.do_encrypt = 1;
595 				qdf_assert(0);
596 				break;
597 			}
598 
599 			tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
600 						  pkt_download_len, ep_id,
601 						  &msdu_info);
602 
603 			TXRX_STATS_MSDU_INCR(pdev, tx.from_stack, msdu);
604 
605 			if (qdf_likely(tx_desc)) {
606 				DPTRACE(qdf_dp_trace_ptr(msdu,
607 				    QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD,
608 				    qdf_nbuf_data_addr(msdu),
609 				    sizeof(qdf_nbuf_data(msdu)),
610 				     tx_desc->id, vdev->vdev_id));
611 				/*
612 				 * If debug display is enabled, show the meta
613 				 * data being downloaded to the target via the
614 				 * HTT tx descriptor.
615 				 */
616 				if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER
617 									 (msdu))
618 					pkt_download_len +=
619 					  sizeof(struct htt_tx_msdu_desc_ext_t);
620 
621 				htt_tx_desc_display(tx_desc->htt_tx_desc);
622 				if ((0 == ce_send_fast(pdev->ce_tx_hdl, msdu,
623 						ep_id, pkt_download_len))) {
624 					/*
625 					 * The packet could not be sent.
626 					 * Free the descriptor, return the
627 					 * packet to the caller.
628 					 */
629 					ol_tx_desc_frame_free_nonstd(pdev,
630 								tx_desc, 1);
631 					return msdu;
632 				}
633 				if (msdu_info.tso_info.curr_seg) {
634 					msdu_info.tso_info.curr_seg =
635 					msdu_info.tso_info.curr_seg->next;
636 				}
637 
638 				if (msdu_info.tso_info.is_tso) {
639 					qdf_nbuf_reset_num_frags(msdu);
640 					TXRX_STATS_TSO_INC_SEG(vdev->pdev);
641 					TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
642 				}
643 			} else {
644 				TXRX_STATS_MSDU_LIST_INCR(
645 					pdev, tx.dropped.host_reject, msdu);
646 				/* the list of unaccepted MSDUs */
647 				return msdu;
648 			}
649 		} /* while segments */
650 
651 		msdu = next;
652 		if (msdu_info.tso_info.is_tso) {
653 			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
654 			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
655 		}
656 	} /* while msdus */
657 	return NULL; /* all MSDUs were accepted */
658 }
659 #else
660 qdf_nbuf_t
661 ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
662 {
663 	qdf_nbuf_t msdu = msdu_list;
664 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
665 	uint32_t pkt_download_len =
666 		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
667 	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
668 	struct ol_txrx_msdu_info_t msdu_info;
669 
670 	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
671 	msdu_info.htt.action.tx_comp_req = 0;
672 	msdu_info.tso_info.is_tso = 0;
673 	/*
674 	 * The msdu_list variable could be used instead of the msdu var,
675 	 * but just to clarify which operations are done on a single MSDU
676 	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
677 	 * within the list.
678 	 */
679 	while (msdu) {
680 		qdf_nbuf_t next;
681 		struct ol_tx_desc_t *tx_desc;
682 
683 		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
684 		msdu_info.peer = NULL;
685 
686 		msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
687 		msdu_info.htt.info.vdev_id = vdev->vdev_id;
688 		msdu_info.htt.action.cksum_offload =
689 			qdf_nbuf_get_tx_cksum(msdu);
690 		switch (qdf_nbuf_get_exemption_type(msdu)) {
691 		case QDF_NBUF_EXEMPT_NO_EXEMPTION:
692 		case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
693 			/* We want to encrypt this frame */
694 			msdu_info.htt.action.do_encrypt = 1;
695 			break;
696 		case QDF_NBUF_EXEMPT_ALWAYS:
697 			/* We don't want to encrypt this frame */
698 			msdu_info.htt.action.do_encrypt = 0;
699 			break;
700 		default:
701 			msdu_info.htt.action.do_encrypt = 1;
702 			qdf_assert(0);
703 			break;
704 		}
705 
706 		tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
707 					  pkt_download_len, ep_id,
708 					  &msdu_info);
709 
710 		TXRX_STATS_MSDU_INCR(pdev, tx.from_stack, msdu);
711 
712 		if (qdf_likely(tx_desc)) {
713 			DPTRACE(qdf_dp_trace_ptr(msdu,
714 				QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD,
715 				qdf_nbuf_data_addr(msdu),
716 				sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
717 				vdev->vdev_id));
718 			/*
719 			 * If debug display is enabled, show the meta-data being
720 			 * downloaded to the target via the HTT tx descriptor.
721 			 */
722 			if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
723 				pkt_download_len +=
724 				   sizeof(struct htt_tx_msdu_desc_ext_t);
725 
726 			htt_tx_desc_display(tx_desc->htt_tx_desc);
727 			/*
728 			 * The netbuf may get linked into a different list
729 			 * inside the ce_send_fast function, so store the next
730 			 * pointer before the ce_send call.
731 			 */
732 			next = qdf_nbuf_next(msdu);
733 			if ((0 == ce_send_fast(pdev->ce_tx_hdl, msdu,
734 					       ep_id, pkt_download_len))) {
735 				/* The packet could not be sent */
736 				/* Free the descriptor, return the packet to the
737 				 * caller */
738 				ol_tx_desc_free(pdev, tx_desc);
739 				return msdu;
740 			}
741 			msdu = next;
742 		} else {
743 			TXRX_STATS_MSDU_LIST_INCR(
744 				pdev, tx.dropped.host_reject, msdu);
745 			return msdu; /* the list of unaccepted MSDUs */
746 		}
747 	}
748 
749 	return NULL; /* all MSDUs were accepted */
750 }
751 #endif /* FEATURE_TSO */
752 #endif /* WLAN_FEATURE_FASTPATH */
753 
754 #ifdef WLAN_FEATURE_FASTPATH
755 /**
756  * ol_tx_ll_wrapper() - send tx frames via ol_tx_ll_fast() when HIF
757  *			 fastpath mode is enabled, else via ol_tx_ll()
758  */
759 qdf_nbuf_t
760 ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
761 {
762 	struct hif_opaque_softc *hif_device =
763 		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);
764 
765 	if (qdf_likely(hif_device && hif_is_fastpath_mode_enabled(hif_device)))
766 		msdu_list = ol_tx_ll_fast(vdev, msdu_list);
767 	else
768 		msdu_list = ol_tx_ll(vdev, msdu_list);
769 
770 	return msdu_list;
771 }
772 #else
773 qdf_nbuf_t
774 ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
775 {
776 	return ol_tx_ll(vdev, msdu_list);
777 }
778 #endif  /* WLAN_FEATURE_FASTPATH */
779 
780 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
781 
782 #define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
783 #define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
784 static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
785 {
786 	int max_to_accept;
787 
788 	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
789 	if (vdev->ll_pause.paused_reason) {
790 		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
791 		return;
792 	}
793 
794 	/*
795 	 * Send as much of the backlog as possible, but leave some margin
796 	 * of unallocated tx descriptors that can be used for new frames
797 	 * being transmitted by other vdevs.
798 	 * Ideally there would be a scheduler, which would not only leave
799 	 * some margin for new frames for other vdevs, but also would
800 	 * fairly apportion the tx descriptors between multiple vdevs that
801 	 * have backlogs in their pause queues.
802 	 * However, the fairness benefit of having a scheduler for frames
803 	 * from multiple vdev's pause queues is not sufficient to outweigh
804 	 * the extra complexity.
805 	 */
806 	max_to_accept = vdev->pdev->tx_desc.num_free -
807 		OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
808 	while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
809 		qdf_nbuf_t tx_msdu;
810 		max_to_accept--;
811 		vdev->ll_pause.txq.depth--;
812 		tx_msdu = vdev->ll_pause.txq.head;
813 		if (tx_msdu) {
814 			vdev->ll_pause.txq.head = qdf_nbuf_next(tx_msdu);
815 			if (NULL == vdev->ll_pause.txq.head)
816 				vdev->ll_pause.txq.tail = NULL;
817 			qdf_nbuf_set_next(tx_msdu, NULL);
818 			QDF_NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
819 						QDF_NBUF_TX_PKT_TXRX_DEQUEUE);
820 			tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
821 			/*
822 			 * It is unexpected that ol_tx_ll would reject the frame
823 			 * since we checked that there's room for it, though
824 			 * there's an infinitesimal possibility that between the
825 			 * time we checked the room available and now, a
826 			 * concurrent batch of tx frames used up all the room.
827 			 * For simplicity, just drop the frame.
828 			 */
829 			if (tx_msdu) {
830 				qdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
831 					       QDF_DMA_TO_DEVICE);
832 				qdf_nbuf_tx_free(tx_msdu, QDF_NBUF_PKT_ERROR);
833 			}
834 		}
835 	}
836 	if (vdev->ll_pause.txq.depth) {
837 		qdf_timer_stop(&vdev->ll_pause.timer);
838 		qdf_timer_start(&vdev->ll_pause.timer,
839 					OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
840 		vdev->ll_pause.is_q_timer_on = true;
841 		if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
842 			vdev->ll_pause.q_overflow_cnt++;
843 	}
844 
845 	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
846 }
847 
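/**
 * ol_tx_vdev_pause_queue_append() - append tx frames to the vdev pause queue
 * @vdev: vdev whose pause queue the frames are added to
 * @msdu_list: list of tx frames to enqueue
 * @start_timer: whether to (re)start the pause queue flush timer
 *
 * Frames are queued until the pause queue reaches its maximum depth.
 *
 * Return: NULL if all frames were queued, else the list of frames that
 * did not fit within the queue depth limit
 */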
848 static qdf_nbuf_t
849 ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
850 			      qdf_nbuf_t msdu_list, uint8_t start_timer)
851 {
852 	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
853 	while (msdu_list &&
854 	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
855 		qdf_nbuf_t next = qdf_nbuf_next(msdu_list);
856 		QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
857 					     QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
858 		DPTRACE(qdf_dp_trace(msdu_list,
859 				QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
860 				qdf_nbuf_data_addr(msdu_list),
861 				sizeof(qdf_nbuf_data(msdu_list)), QDF_TX));
862 
863 		vdev->ll_pause.txq.depth++;
864 		if (!vdev->ll_pause.txq.head) {
865 			vdev->ll_pause.txq.head = msdu_list;
866 			vdev->ll_pause.txq.tail = msdu_list;
867 		} else {
868 			qdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
869 		}
870 		vdev->ll_pause.txq.tail = msdu_list;
871 
872 		msdu_list = next;
873 	}
874 	if (vdev->ll_pause.txq.tail)
875 		qdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);
876 
877 	if (start_timer) {
878 		qdf_timer_stop(&vdev->ll_pause.timer);
879 		qdf_timer_start(&vdev->ll_pause.timer,
880 					OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
881 		vdev->ll_pause.is_q_timer_on = true;
882 	}
883 	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
884 
885 	return msdu_list;
886 }
887 
888 /*
889  * Store up the tx frame in the vdev's tx queue if the vdev is paused.
890  * If there are too many frames in the tx queue, reject it.
891  */
892 qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
893 {
894 	uint16_t eth_type;
895 	uint32_t paused_reason;
896 
897 	if (msdu_list == NULL)
898 		return NULL;
899 
900 	paused_reason = vdev->ll_pause.paused_reason;
901 	if (paused_reason) {
902 		if (qdf_unlikely((paused_reason &
903 				  OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
904 				 paused_reason)) {
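			/*
			 * The queue is paused only because the peer is not
			 * yet authorized, so still transmit EAPOL/WAPI frames
			 * immediately to let the security handshake complete.
			 */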
905 			eth_type = (((struct ethernet_hdr_t *)
906 				     qdf_nbuf_data(msdu_list))->
907 				    ethertype[0] << 8) |
908 				   (((struct ethernet_hdr_t *)
909 				     qdf_nbuf_data(msdu_list))->ethertype[1]);
910 			if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
911 				msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
912 				return msdu_list;
913 			}
914 		}
915 		msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
916 	} else {
917 		if (vdev->ll_pause.txq.depth > 0 ||
918 		    vdev->pdev->tx_throttle.current_throttle_level !=
919 		    THROTTLE_LEVEL_0) {
920 			/* not paused, but there is a backlog of frames
921 			   from a prior pause or throttle-off phase */
922 			msdu_list = ol_tx_vdev_pause_queue_append(
923 				vdev, msdu_list, 0);
924 			/* if throttle is disabled or phase is "on",
925 			   send the frame */
926 			if (vdev->pdev->tx_throttle.current_throttle_level ==
927 			    THROTTLE_LEVEL_0 ||
928 			    vdev->pdev->tx_throttle.current_throttle_phase ==
929 			    THROTTLE_PHASE_ON) {
930 				/* send as many frames as possible
931 				   from the vdev's backlog */
932 				ol_tx_vdev_ll_pause_queue_send_base(vdev);
933 			}
934 		} else {
935 			/* not paused, no throttle and no backlog -
936 			   send the new frames */
937 			msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
938 		}
939 	}
940 	return msdu_list;
941 }
942 
943 /*
944  * Run through the transmit queues for all the vdevs and
945  * send the pending frames
946  */
947 void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
948 {
949 	int max_to_send;        /* remaining budget of frames to send */
950 	qdf_nbuf_t tx_msdu;
951 	struct ol_txrx_vdev_t *vdev = NULL;
952 	uint8_t more;
953 
954 	if (NULL == pdev)
955 		return;
956 
957 	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
958 		return;
959 
960 	/* ensure that we send no more than tx_threshold frames at once */
961 	max_to_send = pdev->tx_throttle.tx_threshold;
962 
963 	/* round robin through the vdev queues for the given pdev */
964 
965 	/* Potential improvement: download several frames from the same vdev
966 	   at a time, since it is more likely that those frames could be
967 	   aggregated together.  Also, remember which vdev was serviced last,
968 	   so the next call to this function can resume the round-robin
969 	   traversal where the current invocation left off */
970 	do {
971 		more = 0;
972 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
973 
974 			qdf_spin_lock_bh(&vdev->ll_pause.mutex);
975 			if (vdev->ll_pause.txq.depth) {
976 				if (vdev->ll_pause.paused_reason) {
977 					qdf_spin_unlock_bh(&vdev->ll_pause.
978 							   mutex);
979 					continue;
980 				}
981 
982 				tx_msdu = vdev->ll_pause.txq.head;
983 				if (NULL == tx_msdu) {
984 					qdf_spin_unlock_bh(&vdev->ll_pause.
985 							   mutex);
986 					continue;
987 				}
988 
989 				max_to_send--;
990 				vdev->ll_pause.txq.depth--;
991 
992 				vdev->ll_pause.txq.head =
993 					qdf_nbuf_next(tx_msdu);
994 
995 				if (NULL == vdev->ll_pause.txq.head)
996 					vdev->ll_pause.txq.tail = NULL;
997 
998 				qdf_nbuf_set_next(tx_msdu, NULL);
999 				tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
1000 				/*
1001 				 * It is unexpected that ol_tx_ll would reject
1002 				 * the frame, since we checked that there's
1003 				 * room for it, though there's an infinitesimal
1004 				 * possibility that between the time we checked
1005 				 * the room available and now, a concurrent
1006 				 * batch of tx frames used up all the room.
1007 				 * For simplicity, just drop the frame.
1008 				 */
1009 				if (tx_msdu) {
1010 					qdf_nbuf_unmap(pdev->osdev, tx_msdu,
1011 						       QDF_DMA_TO_DEVICE);
1012 					qdf_nbuf_tx_free(tx_msdu,
1013 							 QDF_NBUF_PKT_ERROR);
1014 				}
1015 			}
1016 			/* check if there are more msdus to transmit */
1017 			if (vdev->ll_pause.txq.depth)
1018 				more = 1;
1019 			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
1020 		}
1021 	} while (more && max_to_send);
1022 
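	/*
	 * If any vdev still has a backlog after the send budget was used up,
	 * restart the throttle timer so the remaining frames get flushed in
	 * a later pass.
	 */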
1023 	vdev = NULL;
1024 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1025 		qdf_spin_lock_bh(&vdev->ll_pause.mutex);
1026 		if (vdev->ll_pause.txq.depth) {
1027 			qdf_timer_stop(&pdev->tx_throttle.tx_timer);
1028 			qdf_timer_start(
1029 				&pdev->tx_throttle.tx_timer,
1030 				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
1031 			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
1032 			return;
1033 		}
1034 		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
1035 	}
1036 }
1037 
1038 void ol_tx_vdev_ll_pause_queue_send(void *context)
1039 {
1040 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
1041 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1042 
1043 	if (pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
1044 	    pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
1045 		return;
1046 	ol_tx_vdev_ll_pause_queue_send_base(vdev);
1047 }
1048 #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
1049 
1050 static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
1051 {
1052 	return
1053 		tx_spec &
1054 		(OL_TX_SPEC_RAW | OL_TX_SPEC_NO_AGGR | OL_TX_SPEC_NO_ENCRYPT);
1055 }
1056 
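/*
 * Build the HTT raw-frame sub-type bitmap: bit 0 indicates that an 802.11
 * MAC header is already present, and the no-aggregation / no-encryption
 * bits are set according to the requested tx_spec.
 */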
1057 static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
1058 {
1059 	uint8_t sub_type = 0x1; /* 802.11 MAC header present */
1060 
1061 	if (tx_spec & OL_TX_SPEC_NO_AGGR)
1062 		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
1063 	if (tx_spec & OL_TX_SPEC_NO_ENCRYPT)
1064 		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
1065 	if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT)
1066 		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
1067 	return sub_type;
1068 }
1069 
1070 qdf_nbuf_t
1071 ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
1072 		 enum ol_tx_spec tx_spec,
1073 		 qdf_nbuf_t msdu_list)
1074 {
1075 	qdf_nbuf_t msdu = msdu_list;
1076 	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
1077 	struct ol_txrx_msdu_info_t msdu_info;
1078 
1079 	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
1080 	msdu_info.htt.action.tx_comp_req = 0;
1081 
1082 	/*
1083 	 * The msdu_list variable could be used instead of the msdu var,
1084 	 * but just to clarify which operations are done on a single MSDU
1085 	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
1086 	 * within the list.
1087 	 */
1088 	while (msdu) {
1089 		qdf_nbuf_t next;
1090 		struct ol_tx_desc_t *tx_desc;
1091 
1092 		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
1093 		msdu_info.peer = NULL;
1094 		msdu_info.tso_info.is_tso = 0;
1095 
1096 		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
1097 
1098 		/*
1099 		 * The netbuf may get linked into a different list inside the
1100 		 * ol_tx_send function, so store the next pointer before the
1101 		 * tx_send call.
1102 		 */
1103 		next = qdf_nbuf_next(msdu);
1104 
1105 		if (tx_spec != OL_TX_SPEC_STD) {
1106 			if (tx_spec & OL_TX_SPEC_NO_FREE) {
1107 				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
1108 			} else if (tx_spec & OL_TX_SPEC_TSO) {
1109 				tx_desc->pkt_type = OL_TX_FRM_TSO;
1110 			} else if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT) {
1111 				uint8_t sub_type =
1112 					ol_txrx_tx_raw_subtype(tx_spec);
1113 				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
1114 						htt_pkt_type_native_wifi,
1115 						sub_type);
1116 			} else if (ol_txrx_tx_is_raw(tx_spec)) {
1117 				/* different types of raw frames */
1118 				uint8_t sub_type =
1119 					ol_txrx_tx_raw_subtype(tx_spec);
1120 				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
1121 						htt_pkt_type_raw, sub_type);
1122 			}
1123 		}
1124 		/*
1125 		 * If debug display is enabled, show the meta-data being
1126 		 * downloaded to the target via the HTT tx descriptor.
1127 		 */
1128 		htt_tx_desc_display(tx_desc->htt_tx_desc);
1129 		ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
1130 		msdu = next;
1131 	}
1132 	return NULL;            /* all MSDUs were accepted */
1133 }
1134 
1135 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
1136 #define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) \
1137 	do { \
1138 		if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
1139 			qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt); \
1140 			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);	\
1141 			if (tx_msdu_info.peer) { \
1142 				/* remove the peer reference added above */ \
1143 				ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
1144 			} \
1145 			goto MSDU_LOOP_BOTTOM; \
1146 		} \
1147 	} while (0)
1148 #else
1149 #define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
1150 #endif
1151 
1152 /* tx filtering is handled within the target FW */
1153 #define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */
1154 
1155 /**
1156  * parse_ocb_tx_header() - check for an OCB TX control header on a packet
1157  * @msdu:   Pointer to OS packet (qdf_nbuf_t)
1158  * @tx_ctrl: Pointer to the TX control header, filled in if one is present
1159  *
1160  * Return: true if ocb parsing is successful
1161  */
1162 #define OCB_HEADER_VERSION     1
1163 bool parse_ocb_tx_header(qdf_nbuf_t msdu,
1164 			struct ocb_tx_ctrl_hdr_t *tx_ctrl)
1165 {
1166 	struct ether_header *eth_hdr_p;
1167 	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;
1168 
1169 	/* Check if TX control header is present */
1170 	eth_hdr_p = (struct ether_header *)qdf_nbuf_data(msdu);
1171 	if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
1172 		/* TX control header is not present. Nothing to do. */
1173 		return true;
1174 
1175 	/* Remove the ethernet header */
1176 	qdf_nbuf_pull_head(msdu, sizeof(struct ether_header));
1177 
1178 	/* Parse the TX control header */
1179 	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);
1180 
1181 	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
1182 		if (tx_ctrl)
1183 			qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
1184 				     sizeof(*tx_ctrl_hdr));
1185 	} else {
1186 		/* The TX control header is invalid. */
1187 		return false;
1188 	}
1189 
1190 	/* Remove the TX control header */
1191 	qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
1192 	return true;
1193 }
1194 
1195 
1196 #if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_TX_DESC_HI_PRIO_RESERVE)
1197 
1198 /**
1199  * ol_tx_hl_desc_alloc() - Allocate and initialize a tx descriptor
1200  *			   for a HL system.
1201  * @pdev: the data physical device sending the data
1202  * @vdev: the virtual device sending the data
1203  * @msdu: the tx frame
1204  * @msdu_info: the tx meta data
1205  *
1206  * Return: the tx descriptor
1207  */
1208 static inline
1209 struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
1210 	struct ol_txrx_vdev_t *vdev,
1211 	qdf_nbuf_t msdu,
1212 	struct ol_txrx_msdu_info_t *msdu_info)
1213 {
1214 	struct ol_tx_desc_t *tx_desc = NULL;
1215 
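	/*
	 * Keep TXRX_HL_TX_DESC_HI_PRIO_RESERVED descriptors in reserve;
	 * once the free count drops to that level, only high-priority
	 * frames (DHCP/EAPOL over IPv4) may use the reserved descriptors.
	 */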
1216 	if (qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) >
1217 			TXRX_HL_TX_DESC_HI_PRIO_RESERVED) {
1218 		tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
1219 	} else if (qdf_nbuf_is_ipv4_pkt(msdu) == true) {
1220 		if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
1221 				QDF_NBUF_CB_PACKET_TYPE_DHCP) ||
1222 		    (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
1223 			QDF_NBUF_CB_PACKET_TYPE_EAPOL)) {
1224 			tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
1225 			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
1226 				   "Provided tx descriptor from reserve pool for DHCP/EAPOL\n");
1227 		}
1228 	}
1229 	return tx_desc;
1230 }
1231 #else
1232 
1233 static inline
1234 struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
1235 	struct ol_txrx_vdev_t *vdev,
1236 	qdf_nbuf_t msdu,
1237 	struct ol_txrx_msdu_info_t *msdu_info)
1238 {
1239 	struct ol_tx_desc_t *tx_desc = NULL;
1240 	tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
1241 	return tx_desc;
1242 }
1243 #endif
1244 
1245 #if defined(CONFIG_HL_SUPPORT)
1246 
1247 /**
1248  * ol_txrx_mgmt_tx_desc_alloc() - Allocate and initialize a tx descriptor
1249  *				 for management frame
1250  * @pdev: the data physical device sending the data
1251  * @vdev: the virtual device sending the data
1252  * @tx_mgmt_frm: the tx management frame
1253  * @tx_msdu_info: the tx meta data
1254  *
1255  * Return: the tx descriptor
1256  */
1257 static inline
1258 struct ol_tx_desc_t *
1259 ol_txrx_mgmt_tx_desc_alloc(
1260 	struct ol_txrx_pdev_t *pdev,
1261 	struct ol_txrx_vdev_t *vdev,
1262 	qdf_nbuf_t tx_mgmt_frm,
1263 	struct ol_txrx_msdu_info_t *tx_msdu_info)
1264 {
1265 	struct ol_tx_desc_t *tx_desc;
1266 	tx_msdu_info->htt.action.tx_comp_req = 1;
1267 	tx_desc = ol_tx_desc_hl(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
1268 	return tx_desc;
1269 }
1270 
1271 /**
1272  * ol_txrx_mgmt_send_frame() - send a management frame
1273  * @vdev: virtual device sending the frame
1274  * @tx_desc: tx desc
1275  * @tx_mgmt_frm: management frame to send
1276  * @tx_msdu_info: the tx meta data
1277  * @chanfreq: channel frequency on which to transmit the frame
1278  *
1279  * Return:
1280  *      0 -> the frame is accepted for transmission, -OR-
1281  *      1 -> the frame was not accepted
1282  */
1283 static inline
1284 int ol_txrx_mgmt_send_frame(
1285 	struct ol_txrx_vdev_t *vdev,
1286 	struct ol_tx_desc_t *tx_desc,
1287 	qdf_nbuf_t tx_mgmt_frm,
1288 	struct ol_txrx_msdu_info_t *tx_msdu_info,
1289 	uint16_t chanfreq)
1290 {
1291 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1292 	struct ol_tx_frms_queue_t *txq;
1293 	/*
1294 	 * 1.  Look up the peer and queue the frame in the peer's mgmt queue.
1295 	 * 2.  Invoke the download scheduler.
1296 	 */
1297 	txq = ol_tx_classify_mgmt(vdev, tx_desc, tx_mgmt_frm, tx_msdu_info);
1298 	if (!txq) {
1299 		/*TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
1300 								msdu);*/
1301 		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
1302 		ol_tx_desc_frame_free_nonstd(vdev->pdev, tx_desc,
1303 					     1 /* error */);
1304 		if (tx_msdu_info->peer) {
1305 			/* remove the peer reference added above */
1306 			ol_txrx_peer_unref_delete(tx_msdu_info->peer);
1307 		}
1308 		return 1; /* can't accept the tx mgmt frame */
1309 	}
1310 	/* Initialize the HTT tx desc l2 header offset field.
1311 	 * Even though tx encap does not apply to mgmt frames,
1312 	 * htt_tx_desc_mpdu_header still needs to be called,
1313 	 * to specify that there was no L2 header added by tx encap,
1314 	 * so the frame's length does not need to be adjusted to account for
1315 	 * an added L2 header.
1316 	 */
1317 	htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
1318 	htt_tx_desc_init(
1319 			pdev->htt_pdev, tx_desc->htt_tx_desc,
1320 			tx_desc->htt_tx_desc_paddr,
1321 			ol_tx_desc_id(pdev, tx_desc),
1322 			tx_mgmt_frm,
1323 			&tx_msdu_info->htt, &tx_msdu_info->tso_info, NULL, 0);
1324 	htt_tx_desc_display(tx_desc->htt_tx_desc);
1325 	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
1326 
1327 	ol_tx_enqueue(vdev->pdev, txq, tx_desc, tx_msdu_info);
1328 	if (tx_msdu_info->peer) {
1329 		/* remove the peer reference added above */
1330 		ol_txrx_peer_unref_delete(tx_msdu_info->peer);
1331 	}
1332 	ol_tx_sched(vdev->pdev);
1333 
1334 	return 0;
1335 }
1336 
1337 #else
1338 
1339 static inline
1340 struct ol_tx_desc_t *
1341 ol_txrx_mgmt_tx_desc_alloc(
1342 	struct ol_txrx_pdev_t *pdev,
1343 	struct ol_txrx_vdev_t *vdev,
1344 	qdf_nbuf_t tx_mgmt_frm,
1345 	struct ol_txrx_msdu_info_t *tx_msdu_info)
1346 {
1347 	struct ol_tx_desc_t *tx_desc;
1348 	/* For LL tx_comp_req is not used so initialized to 0 */
1349 	tx_msdu_info->htt.action.tx_comp_req = 0;
1350 	tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
1351 	/* FIX THIS -
1352 	 * The FW currently has trouble using the host's fragments table
1353 	 * for management frames.  Until this is fixed, rather than
1354 	 * specifying the fragment table to the FW, specify just the
1355 	 * address of the initial fragment.
1356 	 */
1357 #if defined(HELIUMPLUS_PADDR64)
1358 	/* ol_txrx_dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
1359 	   tx_desc); */
1360 #endif /* defined(HELIUMPLUS_PADDR64) */
1361 	if (tx_desc) {
1362 		/*
1363 		 * Following the call to ol_tx_desc_ll, frag 0 is the
1364 		 * HTT tx HW descriptor, and the frame payload is in
1365 		 * frag 1.
1366 		 */
1367 		htt_tx_desc_frags_table_set(
1368 				pdev->htt_pdev,
1369 				tx_desc->htt_tx_desc,
1370 				qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
1371 				0, 0);
1372 #if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
1373 		ol_txrx_dump_frag_desc(
1374 				"after htt_tx_desc_frags_table_set",
1375 				tx_desc);
1376 #endif /* defined(HELIUMPLUS_PADDR64) */
1377 	}
1378 
1379 	return tx_desc;
1380 }
1381 
1382 static inline
1383 int ol_txrx_mgmt_send_frame(
1384 	struct ol_txrx_vdev_t *vdev,
1385 	struct ol_tx_desc_t *tx_desc,
1386 	qdf_nbuf_t tx_mgmt_frm,
1387 	struct ol_txrx_msdu_info_t *tx_msdu_info,
1388 	uint16_t chanfreq)
1389 {
1390 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1391 	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
1392 	QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
1393 					QDF_NBUF_TX_PKT_MGMT_TRACK;
1394 	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
1395 			  htt_pkt_type_mgmt);
1396 
1397 	return 0;
1398 }
1399 #endif
1400 
1401 /**
1402  * ol_tx_hl_base() - send tx frames for a HL system.
1403  * @vdev: the virtual device sending the data
1404  * @tx_spec: indicate what non-standard transmission actions to apply
1405  * @msdu_list: the tx frames to send
1406  * @tx_comp_req: tx completion req
1407  *
1408  * Return: NULL if all MSDUs are accepted, else the list of unaccepted MSDUs
1409  */
1410 static inline qdf_nbuf_t
1411 ol_tx_hl_base(
1412 	ol_txrx_vdev_handle vdev,
1413 	enum ol_tx_spec tx_spec,
1414 	qdf_nbuf_t msdu_list,
1415 	int tx_comp_req)
1416 {
1417 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1418 	qdf_nbuf_t msdu = msdu_list;
1419 	struct ol_txrx_msdu_info_t tx_msdu_info;
1420 	struct ocb_tx_ctrl_hdr_t tx_ctrl;
1421 
1422 	htt_pdev_handle htt_pdev = pdev->htt_pdev;
1423 	tx_msdu_info.peer = NULL;
1424 	tx_msdu_info.tso_info.is_tso = 0;
1425 
1426 	/*
1427 	 * The msdu_list variable could be used instead of the msdu var,
1428 	 * but just to clarify which operations are done on a single MSDU
1429 	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
1430 	 * within the list.
1431 	 */
1432 	while (msdu) {
1433 		qdf_nbuf_t next;
1434 		struct ol_tx_frms_queue_t *txq;
1435 		struct ol_tx_desc_t *tx_desc = NULL;
1436 
1437 		qdf_mem_zero(&tx_ctrl, sizeof(tx_ctrl));
1438 
1439 		/*
1440 		 * The netbuf will get stored into a (peer-TID) tx queue list
1441 		 * inside the ol_tx_classify_store function or else dropped,
1442 		 * so store the next pointer immediately.
1443 		 */
1444 		next = qdf_nbuf_next(msdu);
1445 
1446 		tx_desc = ol_tx_hl_desc_alloc(pdev, vdev, msdu, &tx_msdu_info);
1447 
1448 		if (!tx_desc) {
1449 			/*
1450 			 * If we're out of tx descs, there's no need to try
1451 			 * to allocate tx descs for the remaining MSDUs.
1452 			 */
1453 			TXRX_STATS_MSDU_LIST_INCR(pdev, tx.dropped.host_reject,
1454 						  msdu);
1455 			return msdu; /* the list of unaccepted MSDUs */
1456 		}
1457 
1458 		/* OL_TXRX_PROT_AN_LOG(pdev->prot_an_tx_sent, msdu);*/
1459 
1460 		if (tx_spec != OL_TX_SPEC_STD) {
1461 #if defined(FEATURE_WLAN_TDLS)
1462 			if (tx_spec & OL_TX_SPEC_NO_FREE) {
1463 				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
1464 			} else if (tx_spec & OL_TX_SPEC_TSO) {
1465 #else
1466 				if (tx_spec & OL_TX_SPEC_TSO) {
1467 #endif
1468 					tx_desc->pkt_type = OL_TX_FRM_TSO;
1469 				}
1470 				if (ol_txrx_tx_is_raw(tx_spec)) {
1471 					/* CHECK THIS: does this need
1472 					 * to happen after htt_tx_desc_init?
1473 					 */
1474 					/* different types of raw frames */
1475 					u_int8_t sub_type =
1476 						ol_txrx_tx_raw_subtype(
1477 								tx_spec);
1478 					htt_tx_desc_type(htt_pdev,
1479 							 tx_desc->htt_tx_desc,
1480 							 htt_pkt_type_raw,
1481 							 sub_type);
1482 				}
1483 			}
1484 
1485 			tx_msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
1486 			tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
1487 			tx_msdu_info.htt.info.frame_type = htt_frm_type_data;
1488 			tx_msdu_info.htt.info.l2_hdr_type = pdev->htt_pkt_type;
1489 			tx_msdu_info.htt.action.tx_comp_req = tx_comp_req;
1490 
1491 			/* If the vdev is in OCB mode,
1492 			 * parse the tx control header.
1493 			 */
1494 			if (vdev->opmode == wlan_op_mode_ocb) {
1495 				if (!parse_ocb_tx_header(msdu, &tx_ctrl)) {
1496 					/* There was an error parsing
1497 					 * the header. Skip this packet.
1498 					 */
1499 					goto MSDU_LOOP_BOTTOM;
1500 				}
1501 			}
1502 
1503 			txq = ol_tx_classify(vdev, tx_desc, msdu,
1504 							&tx_msdu_info);
1505 
1506 			if ((!txq) || TX_FILTER_CHECK(&tx_msdu_info)) {
1507 				/* drop this frame,
1508 				 * but try sending subsequent frames
1509 				 */
1510 				/*TXRX_STATS_MSDU_LIST_INCR(pdev,
1511 							tx.dropped.no_txq,
1512 							msdu);*/
1513 				qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
1514 				ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
1515 				if (tx_msdu_info.peer) {
1516 					/* remove the peer reference
1517 					 * added above */
1518 					ol_txrx_peer_unref_delete(
1519 							tx_msdu_info.peer);
1520 				}
1521 				goto MSDU_LOOP_BOTTOM;
1522 			}
1523 
1524 			if (tx_msdu_info.peer) {
1525 				/* If the state is not associated then drop all
1526 				 * the data packets received for that peer */
1527 				if (tx_msdu_info.peer->state ==
1528 						OL_TXRX_PEER_STATE_DISC) {
1529 					qdf_atomic_inc(
1530 						&pdev->tx_queue.rsrc_cnt);
1531 					ol_tx_desc_frame_free_nonstd(pdev,
1532 								     tx_desc,
1533 								     1);
1534 					ol_txrx_peer_unref_delete(
1535 							tx_msdu_info.peer);
1536 					msdu = next;
1537 					continue;
1538 				} else if (tx_msdu_info.peer->state !=
1539 						OL_TXRX_PEER_STATE_AUTH) {
1540 					if (tx_msdu_info.htt.info.ethertype !=
1541 						ETHERTYPE_PAE &&
1542 						tx_msdu_info.htt.info.ethertype
1543 							!= ETHERTYPE_WAI) {
1544 						qdf_atomic_inc(
1545 							&pdev->tx_queue.
1546 								rsrc_cnt);
1547 						ol_tx_desc_frame_free_nonstd(
1548 								pdev,
1549 								tx_desc, 1);
1550 						ol_txrx_peer_unref_delete(
1551 							tx_msdu_info.peer);
1552 						msdu = next;
1553 						continue;
1554 					}
1555 				}
1556 			}
1557 			/*
1558 			 * Initialize the HTT tx desc l2 header offset field.
1559 			 * htt_tx_desc_mpdu_header  needs to be called to
1560 			 * make sure, the l2 header size is initialized
1561 			 * correctly to handle cases where TX ENCAP is disabled
1562 			 * or Tx Encap fails to perform Encap
1563 			 */
1564 			htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
1565 
1566 			/*
1567 			 * Note: when the driver is built without support for
1568 			 * SW tx encap, the following macro is a no-op.
1569 			 * When the driver is built with support for SW tx
1570 			 * encap, it performs encap, and if an error is
1571 			 * encountered, jumps to the MSDU_LOOP_BOTTOM label.
1572 			 */
1573 			OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu,
1574 					    tx_msdu_info);
1575 
1576 			/* initialize the HW tx descriptor */
1577 			htt_tx_desc_init(
1578 					pdev->htt_pdev, tx_desc->htt_tx_desc,
1579 					tx_desc->htt_tx_desc_paddr,
1580 					ol_tx_desc_id(pdev, tx_desc),
1581 					msdu,
1582 					&tx_msdu_info.htt,
1583 					&tx_msdu_info.tso_info,
1584 					&tx_ctrl,
1585 					vdev->opmode == wlan_op_mode_ocb);
1586 			/*
1587 			 * If debug display is enabled, show the meta-data
1588 			 * being downloaded to the target via the
1589 			 * HTT tx descriptor.
1590 			 */
1591 			htt_tx_desc_display(tx_desc->htt_tx_desc);
1592 
1593 			ol_tx_enqueue(pdev, txq, tx_desc, &tx_msdu_info);
1594 			if (tx_msdu_info.peer) {
1595 				OL_TX_PEER_STATS_UPDATE(tx_msdu_info.peer,
1596 							msdu);
1597 				/* remove the peer reference added above */
1598 				ol_txrx_peer_unref_delete(tx_msdu_info.peer);
1599 			}
1600 MSDU_LOOP_BOTTOM:
1601 			msdu = next;
1602 		}
1603 		ol_tx_sched(pdev);
1604 		return NULL; /* all MSDUs were accepted */
1605 }
1606 
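/**
 * ol_tx_hl() - transmit a list of standard tx frames on a HL system
 * @vdev: the virtual device sending the data
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * Return: NULL if all MSDUs were accepted, else the list of unaccepted MSDUs
 */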
1607 qdf_nbuf_t
1608 ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
1609 {
1610 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1611 	int tx_comp_req = pdev->cfg.default_tx_comp_req;
1612 	return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list, tx_comp_req);
1613 }
1614 
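/**
 * ol_tx_non_std_hl() - send non-standard tx frames on a HL system
 * @vdev: the virtual device sending the data
 * @tx_spec: what non-standard handling to apply to the tx frames
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * If the caller asked that the frames not be freed (OL_TX_SPEC_NO_FREE)
 * and a tx data callback is registered, request tx completions so the
 * callback can be invoked when transmission finishes.
 *
 * Return: NULL if all MSDUs were accepted, else the list of unaccepted MSDUs
 */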
1615 qdf_nbuf_t
1616 ol_tx_non_std_hl(ol_txrx_vdev_handle vdev,
1617 		 enum ol_tx_spec tx_spec,
1618 		 qdf_nbuf_t msdu_list)
1619 {
1620 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1621 	int tx_comp_req = pdev->cfg.default_tx_comp_req;
1622 
1623 	if (!tx_comp_req) {
1624 		if ((tx_spec == OL_TX_SPEC_NO_FREE) &&
1625 		    (pdev->tx_data_callback.func))
1626 			tx_comp_req = 1;
1627 	}
1628 	return ol_tx_hl_base(vdev, tx_spec, msdu_list, tx_comp_req);
1629 }
1630 
1631 /**
1632  * ol_tx_non_std - Allow the control-path SW to send data frames
1633  *
1634  * @pvdev - which vdev should transmit the tx data frames
1635  * @tx_spec - what non-standard handling to apply to the tx data frames
1636  * @msdu_list - NULL-terminated list of tx MSDUs
1637  *
1638  * Generally, all tx data frames come from the OS shim into the txrx layer.
1639  * However, there are rare cases such as TDLS messaging where the UMAC
1640  * control-path SW creates tx data frames.
1641  *  This UMAC SW can call this function to provide the tx data frames to
1642  *  the txrx layer.
1643  *  The UMAC SW can request a callback for these data frames after their
1644  *  transmission completes, by using the ol_txrx_data_tx_cb_set function
1645  *  to register a tx completion callback, and by specifying
1646  *  ol_tx_spec_no_free as the tx_spec arg when giving the frames to
1647  *  ol_tx_non_std.
1648  *  The MSDUs need to have the appropriate L2 header type (802.3 vs. 802.11),
1649  *  as specified by ol_cfg_frame_type().
1650  *
1651  *  Return: NULL on success, the list of unaccepted MSDUs on failure
1652  */
1653 qdf_nbuf_t
1654 ol_tx_non_std(void *pvdev,
1655 	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
1656 {
1657 	ol_txrx_vdev_handle vdev = pvdev;
1658 
1659 	if (vdev->pdev->cfg.is_high_latency)
1660 		return ol_tx_non_std_hl(vdev, tx_spec, msdu_list);
1661 	else
1662 		return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
1663 }
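
/*
 * Illustrative usage sketch (not part of the driver) for the no-free
 * path described above.  The callback, context and frame names are
 * hypothetical, and the ol_txrx_data_tx_cb prototype shown
 * (ctxt, msdu_list, had_error) is an assumption; only
 * ol_txrx_data_tx_cb_set(), OL_TX_SPEC_NO_FREE and ol_tx_non_std()
 * are taken from this file.
 *
 *	static void my_tdls_tx_complete(void *ctxt, qdf_nbuf_t msdu_list,
 *					int had_error)
 *	{
 *		... reclaim or free the returned frames ...
 *	}
 *
 *	Once, after the vdev is created:
 *	ol_txrx_data_tx_cb_set(vdev, my_tdls_tx_complete, my_ctxt);
 *
 *	Per frame; txrx will not free the nbuf, it is handed back through
 *	my_tdls_tx_complete instead:
 *	rejected = ol_tx_non_std(vdev, OL_TX_SPEC_NO_FREE, tdls_frame);
 */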
1664 
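/**
 * ol_txrx_data_tx_cb_set() - Register a tx completion callback for
 * non-standard ("no free") tx data frames
 * @pvdev - which vdev the callback is registered against (the callback
 * and context are stored in the vdev's pdev)
 * @callback - function to invoke for those frames on tx completion
 * @ctxt - opaque context passed back to the callback
 */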
1665 void
1666 ol_txrx_data_tx_cb_set(void *pvdev,
1667 		       ol_txrx_data_tx_cb callback, void *ctxt)
1668 {
1669 	ol_txrx_vdev_handle vdev = pvdev;
1670 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1671 	pdev->tx_data_callback.func = callback;
1672 	pdev->tx_data_callback.ctxt = ctxt;
1673 }
1674 
1675 /**
1676  * ol_txrx_mgmt_tx_cb_set() - Store a callback for delivery
1677  * notifications for management frames.
1678  *
1679  * @ppdev - the data physical device object
1680  * @type - the type of mgmt frame the callback is used for
1681  * @download_cb - the callback for notification of delivery to the target
1682  * @ota_ack_cb - the callback for notification of delivery to the peer
1683  * @ctxt - context to use with the callback
1684  *
1685  * When the txrx SW receives notifications from the target that a tx frame
1686  * has been delivered to its recipient, it will check if the tx frame
1687  * is a management frame.  If so, the txrx SW will check the management
1688  * frame type specified when the frame was submitted for transmission.
1689  * If there is a callback function registered for the type of management
1690  * frame in question, the txrx code will invoke the callback to inform
1691  * the management + control SW that the mgmt frame was delivered.
1692  * This function is used by the control SW to store a callback pointer
1693  * for a given type of management frame (a usage sketch follows the function).
1694  */
1695 void
1696 ol_txrx_mgmt_tx_cb_set(void *ppdev,
1697 		       uint8_t type,
1698 		       ol_txrx_mgmt_tx_cb download_cb,
1699 		       ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
1700 {
1701 	ol_txrx_pdev_handle pdev = ppdev;
1702 	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
1703 	pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
1704 	pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;
1705 	pdev->tx_mgmt.callbacks[type].ctxt = ctxt;
1706 }
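
/*
 * Illustrative usage sketch (not part of the driver): registering
 * delivery callbacks for one management frame type.  The callback
 * names, context and MY_MGMT_TYPE index are hypothetical, and the
 * ol_txrx_mgmt_tx_cb prototype is assumed; the type index must be
 * below OL_TXRX_MGMT_NUM_TYPES.
 *
 *	static void my_mgmt_download_cb(void *ctxt, qdf_nbuf_t frm,
 *					int had_error) { ... }
 *	static void my_mgmt_ota_ack_cb(void *ctxt, qdf_nbuf_t frm,
 *				       int had_error) { ... }
 *
 *	ol_txrx_mgmt_tx_cb_set(pdev, MY_MGMT_TYPE,
 *			       my_mgmt_download_cb, my_mgmt_ota_ack_cb,
 *			       my_ctxt);
 */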
1707 
1708 #if defined(HELIUMPLUS_PADDR64)
1709 void ol_txrx_dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
1710 {
1711 	uint32_t                *frag_ptr_i_p;
1712 	int                     i;
1713 
1714 	qdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
1715 		 tx_desc, tx_desc->id);
1716 	qdf_print("HTT TX Descriptor vaddr: 0x%p paddr: %pad",
1717 		 tx_desc->htt_tx_desc, &tx_desc->htt_tx_desc_paddr);
1718 	qdf_print("%s %d: Fragment Descriptor 0x%p (paddr=%pad)",
1719 		 __func__, __LINE__, tx_desc->htt_frag_desc,
1720 		 &tx_desc->htt_frag_desc_paddr);
1721 
1722 	/* It looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
1723 	 * is already dereferenceable (i.e. in virtual address space). */
1724 	frag_ptr_i_p = tx_desc->htt_frag_desc;
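
	/*
	 * Layout assumed by the dump below: the fragment descriptor starts
	 * with 6 words of TSO flags, followed by up to 6 (frag_ptr,
	 * frag_len) word pairs, with a zero pointer terminating the list.
	 */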
1725 
1726 	/* Dump 6 words of TSO flags */
1727 	print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags:  ",
1728 		       DUMP_PREFIX_NONE, 8, 4,
1729 		       frag_ptr_i_p, 24, true);
1730 
1731 	frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */
1732 
1733 	i = 0;
1734 	while (*frag_ptr_i_p) {
1735 		print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr:  ",
1736 			       DUMP_PREFIX_NONE, 8, 4,
1737 			       frag_ptr_i_p, 8, true);
1738 		i++;
1739 		if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
1740 			break;
1741 		else  /* jump to the next pointer, skipping the length word */
1742 			frag_ptr_i_p += 2;
1743 	}
1744 	return;
1745 }
1746 #endif /* HELIUMPLUS_PADDR64 */
1747 
1748 /**
1749  * ol_txrx_mgmt_send_ext() - Transmit a management frame
1750  *
1751  * @vdev - virtual device transmitting the frame
1752  * @tx_mgmt_frm - management frame to transmit
1753  * @type - the type of management frame (determines what callback to use)
1754  * @use_6mbps - specify whether the management frame should be transmitted
1755  * at 6 Mbps rather than the 1 Mbps minimum rate (for the 5 GHz band or P2P)
1756  * @chanfreq - channel to transmit the frame on
1757  *
1758  * Send the specified management frame from the specified virtual device.
1759  * The type is used for determining whether to invoke a callback to inform
1760  * the sender that the tx mgmt frame was delivered, and if so, which
1761  * callback to use.  (A usage sketch follows this function.)
1762  *
1763  * Return: 0 - the frame is accepted for transmission
1764  *         -EINVAL - the frame was not accepted
1765  */
1766 int
1767 ol_txrx_mgmt_send_ext(ol_txrx_vdev_handle vdev,
1768 		  qdf_nbuf_t tx_mgmt_frm,
1769 		  uint8_t type, uint8_t use_6mbps, uint16_t chanfreq)
1770 {
1771 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1772 	struct ol_tx_desc_t *tx_desc;
1773 	struct ol_txrx_msdu_info_t tx_msdu_info;
1774 	int result = 0;
1775 
1776 	tx_msdu_info.tso_info.is_tso = 0;
1777 	tx_msdu_info.htt.action.use_6mbps = use_6mbps;
1778 	tx_msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
1779 	tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
1780 	tx_msdu_info.htt.action.do_tx_complete =
1781 		pdev->tx_mgmt.callbacks[type].ota_ack_cb ? 1 : 0;
1782 
1783 	/*
1784 	 * FIX THIS: l2_hdr_type should only specify L2 header type
1785 	 * The Peregrine/Rome HTT layer provides the FW with a "pkt type"
1786 	 * that is a combination of L2 header type and 802.11 frame type.
1787 	 * If the 802.11 frame type is "mgmt", then the HTT pkt type is "mgmt".
1788 	 * But if the 802.11 frame type is "data", then the HTT pkt type is
1789 	 * the L2 header type (more or less): 802.3 vs. Native WiFi
1790 	 * (basic 802.11).
1791 	 * (Or the header type can be "raw", which is any version of the 802.11
1792 	 * header, and also implies that some of the offloaded tx data
1793 	 * processing steps may not apply.)
1794 	 * For efficiency, the Peregrine/Rome HTT uses the msdu_info's
1795 	 * l2_hdr_type field to program the HTT pkt type.  Thus, this txrx SW
1796 	 * needs to overload the l2_hdr_type to indicate whether the frame is
1797 	 * data vs. mgmt, as well as 802.3 L2 header vs. 802.11 L2 header.
1798 	 * To fix this, the msdu_info's l2_hdr_type should be left specifying
1799 	 * just the L2 header type.  For mgmt frames, there should be a
1800 	 * separate function to patch the HTT pkt type to store a "mgmt" value
1801 	 * rather than the L2 header type.  Then the HTT pkt type can be
1802 	 * programmed efficiently for data frames, and the msdu_info's
1803 	 * l2_hdr_type field won't be confusingly overloaded to hold the 802.11
1804 	 * frame type rather than the L2 header type.
1805 	 */
1806 	/*
1807 	 * FIX THIS: remove duplication of htt_frm_type_mgmt and
1808 	 * htt_pkt_type_mgmt
1809 	 * The htt module expects a "enum htt_pkt_type" value.
1810 	 * The htt_dxe module expects a "enum htt_frm_type" value.
1811 	 * This needs to be cleaned up, so both versions of htt use a
1812 	 * consistent method of specifying the frame type.
1813 	 */
1814 #ifdef QCA_SUPPORT_INTEGRATED_SOC
1815 	/* tx mgmt frames always come with a 802.11 header */
1816 	tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
1817 	tx_msdu_info.htt.info.frame_type = htt_frm_type_mgmt;
1818 #else
1819 	tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_mgmt;
1820 	tx_msdu_info.htt.info.frame_type = htt_pkt_type_mgmt;
1821 #endif
1822 
1823 	tx_msdu_info.peer = NULL;
1824 
1825 	tx_desc = ol_txrx_mgmt_tx_desc_alloc(pdev, vdev, tx_mgmt_frm,
1826 							&tx_msdu_info);
1827 	if (!tx_desc)
1828 		return -EINVAL;       /* can't accept the tx mgmt frame */
1829 
1830 	TXRX_STATS_MSDU_INCR(pdev, tx.mgmt, tx_mgmt_frm);
1831 	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
1832 	tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;
1833 
1834 	result = ol_txrx_mgmt_send_frame(vdev, tx_desc, tx_mgmt_frm,
1835 						&tx_msdu_info, chanfreq);
1836 
1837 	return 0;               /* accepted the tx mgmt frame */
1838 }
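
/*
 * Illustrative usage sketch (not part of the driver): submitting a fully
 * formed 802.11 management frame.  The mgmt_frm nbuf, MY_MGMT_TYPE index
 * and error handling are hypothetical; MY_MGMT_TYPE should normally be a
 * type for which callbacks were registered via ol_txrx_mgmt_tx_cb_set()
 * and must be below OL_TXRX_MGMT_NUM_TYPES.
 *
 *	if (ol_txrx_mgmt_send_ext(vdev, mgmt_frm, MY_MGMT_TYPE,
 *				  use_6mbps, chanfreq) != 0) {
 *		... the frame was not accepted; handle or free mgmt_frm ...
 *	}
 */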
1839 
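/**
 * ol_txrx_sync() - send an HTT host-to-target sync message
 * @pdev - the data physical device
 * @sync_cnt - sync count value carried in the sync message
 *
 * Thin wrapper around htt_h2t_sync_msg().
 */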
1840 void ol_txrx_sync(ol_txrx_pdev_handle pdev, uint8_t sync_cnt)
1841 {
1842 	htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt);
1843 }
1844 
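/**
 * ol_tx_reinject() - re-send an MSDU to a specific peer
 * @vdev - virtual device to transmit from
 * @msdu - the frame to (re)transmit
 * @peer_id - peer id programmed into the HTT tx descriptor
 *
 * Prepares an LL tx descriptor for the MSDU, marks the HTT descriptor as
 * postponed, sets the destination peer id and sends the frame.  No tx
 * completion is requested and TSO is not used.
 *
 * Return: NULL if the frame was accepted for transmission
 */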
1845 qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
1846 			  qdf_nbuf_t msdu, uint16_t peer_id)
1847 {
1848 	struct ol_tx_desc_t *tx_desc;
1849 	struct ol_txrx_msdu_info_t msdu_info;
1850 
1851 	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
1852 	msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
1853 	msdu_info.peer = NULL;
1854 	msdu_info.htt.action.tx_comp_req = 0;
1855 	msdu_info.tso_info.is_tso = 0;
1856 
1857 	ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
1858 	HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true);
1859 
1860 	htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);
1861 
1862 	ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
1863 
1864 	return NULL;
1865 }
1866 
1867 #if defined(FEATURE_TSO)
1868 void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
1869 {
1870 	int i;
1871 	struct qdf_tso_seg_elem_t *c_element;
1872 
1873 	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
1874 	pdev->tso_seg_pool.freelist = c_element;
1875 	for (i = 0; i < (num_seg - 1); i++) {
1876 		if (qdf_unlikely(!c_element)) {
1877 			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
1878 				   "%s: ERROR: c_element NULL for seg %d",
1879 				   __func__, i);
1880 			QDF_BUG(0);
1881 			pdev->tso_seg_pool.pool_size = i;
1882 			qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
1883 			return;
1884 		}
1885 
1886 		c_element->next =
1887 			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
1888 		c_element = c_element->next;
1889 		/* an alloc failure is caught by the NULL check at the top
		 * of the next iteration; avoid a NULL dereference here */
		if (qdf_likely(c_element))
			c_element->next = NULL;
1890 	}
1891 	pdev->tso_seg_pool.pool_size = num_seg;
1892 	qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
1893 }
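
/*
 * Resulting pool layout (illustrative): a singly linked freelist of
 * num_seg elements protected by tso_mutex, e.g. for num_seg = 3:
 *
 *	tso_seg_pool.freelist -> elem0 -> elem1 -> elem2 -> NULL
 *
 * ol_tso_seg_list_deinit() below detaches this list under tso_mutex and
 * then frees the elements.
 */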
1894 
1895 void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
1896 {
1897 	int i;
1898 	struct qdf_tso_seg_elem_t *c_element;
1899 	struct qdf_tso_seg_elem_t *temp;
1900 
1901 	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
1902 	c_element = pdev->tso_seg_pool.freelist;
1903 	i = pdev->tso_seg_pool.pool_size;
1904 
1905 	pdev->tso_seg_pool.freelist = NULL;
1906 	pdev->tso_seg_pool.num_free = 0;
1907 	pdev->tso_seg_pool.pool_size = 0;
1908 
1909 	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1910 	qdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
1911 
1912 	while (i-- > 0 && c_element) {
1913 		temp = c_element->next;
1914 		qdf_mem_free(c_element);
1915 		c_element = temp;
1916 	}
1917 }
1918 #endif /* FEATURE_TSO */
1919