xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision c8039e3fa439b838b525783fb76d6bdc0259257c)
1 /*
2  * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_tx.h"
21 #include "dp_tx_desc.h"
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "hal_tx.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include <wlan_cfg.h>
28 #ifdef MESH_MODE_SUPPORT
29 #include "if_meta_hdr.h"
30 #endif
31 
32 #ifdef TX_PER_PDEV_DESC_POOL
33 	#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
34 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
35 #else
36 	#ifdef TX_PER_VDEV_DESC_POOL
37 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
38 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
39 	#else
40 		#define DP_TX_GET_DESC_POOL_ID(vdev) qdf_get_cpu()
41 		#define DP_TX_GET_RING_ID(vdev) qdf_get_cpu()
42 	#endif /* TX_PER_VDEV_DESC_POOL */
43 #endif /* TX_PER_PDEV_DESC_POOL */
44 
45 /* TODO: Add support for TSO */
46 #define DP_DESC_NUM_FRAG(x) 0
47 
48 /* disable TQM_BYPASS */
49 #define TQM_BYPASS_WAR 0
50 
51 /**
52  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
53  * @vdev: DP Virtual device handle
54  * @nbuf: Buffer pointer
55  * @queue: queue ids container for nbuf
56  *
57  * The Tx packet queue uses two IDs: a software descriptor pool ID and a DMA
58  * ring ID. Based on the Tx feature set and hardware configuration, the ID
59  * combination can differ.
60  * For example -
61  * With XPS enabled, all Tx descriptor pools and DMA rings are assigned per CPU ID.
62  * With no XPS (lock-based resource protection), descriptor pool IDs differ
63  * per vdev, while the DMA ring ID is the same as the single pdev ID.
64  *
65  * Return: None
66  */
67 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
68 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
69 {
70 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
71 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
72 
73 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
74 			"%s, pool_id:%d ring_id: %d\n",
75 			__func__, queue->desc_pool_id, queue->ring_id);
76 
77 	return;
78 }
79 
80 /**
81  * dp_tx_desc_release() - Release Tx Descriptor
82  * @tx_desc : Tx Descriptor
83  * @desc_pool_id: Descriptor Pool ID
84  *
85  * Deallocate all resources attached to Tx descriptor and free the Tx
86  * descriptor.
87  *
88  * Return: None
89  */
90 static void
91 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
92 {
93 	struct dp_pdev *pdev = tx_desc->pdev;
94 	struct dp_soc *soc;
95 	uint8_t comp_status = 0;
96 
97 	qdf_assert(pdev);
98 
99 	soc = pdev->soc;
100 
101 	DP_STATS_INC(tx_desc->vdev, tx_i.freed.num, 1);
102 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
103 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
104 
105 	qdf_atomic_dec(&pdev->num_tx_outstanding);
106 
107 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
108 		qdf_atomic_dec(&pdev->num_tx_exception);
109 
110 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
111 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
112 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
113 	else
114 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
115 
116 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
117 			"Tx Completion Release desc %d status %d outstanding %d\n",
118 			tx_desc->id, comp_status,
119 			qdf_atomic_read(&pdev->num_tx_outstanding));
120 
121 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
122 	return;
123 }
124 
125 /**
126  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
127  * @vdev: DP vdev Handle
128  * @nbuf: skb
 * @meta_data: HTT metadata to be placed in the frame pre-header
129  *
130  * Prepares and fills HTT metadata in the frame pre-header for special frames
131  * that should be transmitted using varying transmit parameters.
132  * There are 2 VDEV modes that currently need this special metadata -
133  *  1) Mesh Mode
134  *  2) DSRC Mode
135  *
136  * Return: HTT metadata size
137  *
138  */
139 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
140 		uint32_t *meta_data)
141 {
142 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
143 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
144 
145 	uint8_t htt_desc_size;
146 
147 	/* Size rounded up to a multiple of 8 bytes */
148 	uint8_t htt_desc_size_aligned;
149 
150 	uint8_t *hdr = NULL;
151 
152 	qdf_nbuf_unshare(nbuf);
153 
154 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);
155 
156 	/*
157 	 * Metadata - HTT MSDU Extension header
158 	 */
159 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
160 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
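	/* e.g. a 26-byte descriptor rounds up to 32: (26 + 7) & ~0x7 == 32 */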
161 
162 	if (vdev->mesh_vdev) {
163 
164 		/* Fill and add HTT metaheader */
165 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
166 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
167 
168 	} else if (vdev->opmode == wlan_op_mode_ocb) {
169 		/* Todo - Add support for DSRC */
170 	}
171 
172 	return htt_desc_size_aligned;
173 }
174 
175 /**
176  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
177  * @tso_seg: TSO segment to process
178  * @ext_desc: Pointer to MSDU extension descriptor
179  *
180  * Return: void
181  */
182 #if defined(FEATURE_TSO)
183 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
184 		void *ext_desc)
185 {
186 	uint8_t num_frag;
187 	uint32_t tso_flags;
188 
189 	/*
190 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
191 	 * tcp_flag_mask
192 	 *
193 	 * Checksum enable flags are set in TCL descriptor and not in Extension
194 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
195 	 */
196 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
197 
198 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
199 
200 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
201 		tso_seg->tso_flags.ip_len);
202 
203 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
204 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
205 
206 
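	/* Program each TSO fragment's DMA address (as lo/hi 32-bit words) and length */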
207 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
208 		uint32_t lo = 0;
209 		uint32_t hi = 0;
210 
211 		qdf_dmaaddr_to_32s(
212 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
213 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
214 			tso_seg->tso_frags[num_frag].length);
215 	}
216 
217 	return;
218 }
219 #else
220 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
221 		void *ext_desc)
222 {
223 	return;
224 }
225 #endif
226 
227 /**
228  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
229  * @vdev: virtual device handle
230  * @msdu: network buffer
231  * @msdu_info: meta data associated with the msdu
232  *
233  * Return: QDF_STATUS_SUCCESS success
234  */
235 #if defined(FEATURE_TSO)
236 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
237 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
238 {
239 	struct qdf_tso_seg_elem_t *tso_seg;
240 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
241 	struct dp_soc *soc = vdev->pdev->soc;
242 	struct qdf_tso_info_t *tso_info;
243 
244 	tso_info = &msdu_info->u.tso_info;
245 	tso_info->curr_seg = NULL;
246 	tso_info->tso_seg_list = NULL;
247 	tso_info->num_segs = num_seg;
248 	msdu_info->frm_type = dp_tx_frm_tso;
249 
250 	while (num_seg) {
251 		tso_seg = dp_tx_tso_desc_alloc(
252 				soc, msdu_info->tx_queue.desc_pool_id);
253 		if (tso_seg) {
254 			tso_seg->next = tso_info->tso_seg_list;
255 			tso_info->tso_seg_list = tso_seg;
256 			num_seg--;
257 		} else {
258 			struct qdf_tso_seg_elem_t *next_seg;
259 			struct qdf_tso_seg_elem_t *free_seg =
260 				tso_info->tso_seg_list;
261 
262 			while (free_seg) {
263 				next_seg = free_seg->next;
264 				dp_tx_tso_desc_free(soc,
265 					msdu_info->tx_queue.desc_pool_id,
266 					free_seg);
267 				free_seg = next_seg;
268 			}
269 			return QDF_STATUS_E_NOMEM;
270 		}
271 	}
272 
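	/*
	 * qdf_nbuf_get_tso_info() walks the jumbo nbuf and fills the
	 * pre-allocated segment list above with per-segment fragment info.
	 */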
273 	msdu_info->num_seg =
274 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
275 
276 	tso_info->curr_seg = tso_info->tso_seg_list;
277 
278 	return QDF_STATUS_SUCCESS;
279 }
280 #else
281 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
282 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
283 {
284 	return QDF_STATUS_E_NOMEM;
285 }
286 #endif
287 
288 /**
289  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
290  * @vdev: DP Vdev handle
291  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
292  * @desc_pool_id: Descriptor Pool ID
293  *
294  * Return: Pointer to the MSDU extension descriptor on success, NULL on failure
295  */
296 static
297 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
298 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
299 {
300 	uint8_t i;
301 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
302 	struct dp_tx_seg_info_s *seg_info;
303 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
304 	struct dp_soc *soc = vdev->pdev->soc;
305 
306 	/* Allocate an extension descriptor */
307 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
308 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
309 
310 	if (!msdu_ext_desc)
311 		return NULL;
312 
313 	if (qdf_unlikely(vdev->mesh_vdev)) {
314 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
315 				&msdu_info->meta_data[0],
316 				sizeof(struct htt_tx_msdu_desc_ext2_t));
317 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
318 		HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);
319 	}
320 
321 	switch (msdu_info->frm_type) {
322 	case dp_tx_frm_sg:
323 	case dp_tx_frm_me:
324 	case dp_tx_frm_raw:
325 		seg_info = msdu_info->u.sg_info.curr_seg;
326 		/* Update the buffer pointers in MSDU Extension Descriptor */
327 		for (i = 0; i < seg_info->frag_cnt; i++) {
328 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
329 				seg_info->frags[i].paddr_lo,
330 				seg_info->frags[i].paddr_hi,
331 				seg_info->frags[i].len);
332 		}
333 
334 		break;
335 
336 	case dp_tx_frm_tso:
337 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
338 				&cached_ext_desc[0]);
339 		break;
340 
341 
342 	default:
343 		break;
344 	}
345 
346 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
347 			cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
348 
349 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
350 			msdu_ext_desc->vaddr);
351 
352 	return msdu_ext_desc;
353 }
354 
355 /**
356  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
357  * @vdev: DP vdev handle
358  * @nbuf: skb
359  * @desc_pool_id: Descriptor pool ID
 * @meta_data: HTT metadata for special (mesh/OCB) frames
 *
360  * Allocate and prepare a Tx descriptor with MSDU information.
361  *
362  * Return: Pointer to Tx Descriptor on success,
363  *         NULL on failure
364  */
365 static
366 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
367 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
368 		uint32_t *meta_data)
369 {
370 	QDF_STATUS status;
371 	uint8_t align_pad;
372 	uint8_t is_exception = 0;
373 	uint8_t htt_hdr_size;
374 	struct ether_header *eh;
375 	struct dp_tx_desc_s *tx_desc;
376 	struct dp_pdev *pdev = vdev->pdev;
377 	struct dp_soc *soc = pdev->soc;
378 
379 	/* Flow control/Congestion Control processing */
380 	status = dp_tx_flow_control(vdev);
381 	if (QDF_STATUS_E_RESOURCES == status) {
382 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
383 				"%s Tx Resource Full\n", __func__);
384 		/* TODO Stop Tx Queues */
385 	}
386 
387 	/* Allocate software Tx descriptor */
388 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
389 
390 	if (qdf_unlikely(!tx_desc)) {
391 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
392 			"%s Tx Desc Alloc Failed\n", __func__);
393 		return NULL;
394 	}
395 
396 	/* Flow control/Congestion Control counters */
397 	qdf_atomic_inc(&pdev->num_tx_outstanding);
398 
399 	/* Initialize the SW tx descriptor */
400 	tx_desc->nbuf = nbuf;
401 	tx_desc->frm_type = dp_tx_frm_std;
402 	tx_desc->tx_encap_type = vdev->tx_encap_type;
403 	tx_desc->vdev = vdev;
404 	tx_desc->pdev = pdev;
405 	tx_desc->msdu_ext_desc = NULL;
406 
407 	/**
408 	 * For non-scatter regular frames, buffer pointer is directly
409 	 * programmed in TCL input descriptor instead of using an MSDU
410 	 * extension descriptor. For this case, the HW requires that the
411 	 * buffer pointer always be an 8-byte aligned address.
412 	 *
413 	 * So we add alignment pad to start of buffer, and specify the actual
414 	 * start of data through pkt_offset
415 	 */
416 	align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
417 	qdf_nbuf_push_head(nbuf, align_pad);
418 	tx_desc->pkt_offset = align_pad;
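	/*
	 * Example: a payload starting at ...0x0B has low bits 0x3, so 3 pad
	 * bytes are pushed; the HW is given the aligned address ...0x08 and
	 * pkt_offset = 3 (before any HTT metadata is added) points it back
	 * at the real data.
	 */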
419 
420 	/*
421 	 * For special modes (vdev_type == ocb or mesh), data frames should be
422 	 * transmitted using varying transmit parameters (tx spec) which include
423 	 * transmit rate, power, priority, channel, channel bandwidth, NSS, etc.
424 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
425 	 * These frames are sent as exception packets to firmware.
426 	 *
427 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
428 	 *  to get 8-byte aligned start address along with align_pad added above
429 	 *
430 	 *  |-----------------------------|
431 	 *  |                             |
432 	 *  |-----------------------------| <-----Buffer Pointer Address given
433 	 *  |                             |  ^    in HW descriptor (aligned)
434 	 *  |       HTT Metadata          |  |
435 	 *  |                             |  |
436 	 *  |                             |  | Packet Offset given in descriptor
437 	 *  |                             |  |
438 	 *  |-----------------------------|  |
439 	 *  |       Alignment Pad         |  v
440 	 *  |-----------------------------| <----- Actual buffer start address
441 	 *  |        SKB Data             |           (Unaligned)
442 	 *  |                             |
443 	 *  |                             |
444 	 *  |                             |
445 	 *  |                             |
446 	 *  |                             |
447 	 *  |-----------------------------|
448 	 */
449 	if (qdf_unlikely(vdev->mesh_vdev ||
450 				(vdev->opmode == wlan_op_mode_ocb))) {
451 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
452 				meta_data);
453 		tx_desc->pkt_offset += htt_hdr_size;
454 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
455 		is_exception = 1;
456 	}
457 
458 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
459 				qdf_nbuf_map(soc->osdev, nbuf,
460 					QDF_DMA_TO_DEVICE))) {
461 		/* Handle failure */
462 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
463 				"qdf_nbuf_map failed\n");
464 		goto failure;
465 	}
466 
467 	if (qdf_unlikely(vdev->nawds_enabled)) {
468 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
469 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
470 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
471 			is_exception = 1;
472 		}
473 	}
474 
475 #if !TQM_BYPASS_WAR
476 	if (is_exception)
477 #endif
478 	{
479 		/* Temporary WAR due to TQM VP issues */
480 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
481 		qdf_atomic_inc(&pdev->num_tx_exception);
482 	}
483 
484 	return tx_desc;
485 
486 failure:
487 	DP_STATS_INC_PKT(vdev, tx_i.dropped.dropped_pkt, 1,
488 			qdf_nbuf_len(nbuf));
489 	DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
490 	dp_tx_desc_release(tx_desc, desc_pool_id);
491 	return NULL;
492 }
493 
494 /**
495  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
496  * @vdev: DP vdev handle
497  * @nbuf: skb
498  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
499  * @desc_pool_id: Descriptor Pool ID
500  *
501  * Allocate and prepare Tx descriptor with MSDU and fragment descriptor
502  * information. For frames with fragments, allocate and prepare
503  * an MSDU extension descriptor
504  *
505  * Return: Pointer to Tx Descriptor on success,
506  *         NULL on failure
507  */
508 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
509 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
510 		uint8_t desc_pool_id)
511 {
512 	struct dp_tx_desc_s *tx_desc;
513 	QDF_STATUS status;
514 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
515 	struct dp_pdev *pdev = vdev->pdev;
516 	struct dp_soc *soc = pdev->soc;
517 
518 	/* Flow control/Congestion Control processing */
519 	status = dp_tx_flow_control(vdev);
520 	if (QDF_STATUS_E_RESOURCES == status) {
521 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
522 				"%s Tx Resource Full\n", __func__);
523 		/* TODO Stop Tx Queues */
524 	}
525 
526 	/* Allocate software Tx descriptor */
527 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
528 
529 	if (!tx_desc)
530 		return NULL;
531 
532 	/* Flow control/Congestion Control counters */
533 	qdf_atomic_inc(&pdev->num_tx_outstanding);
534 
535 	/* Initialize the SW tx descriptor */
536 	tx_desc->nbuf = nbuf;
537 	tx_desc->frm_type = msdu_info->frm_type;
538 	tx_desc->tx_encap_type = vdev->tx_encap_type;
539 	tx_desc->vdev = vdev;
540 	tx_desc->pdev = pdev;
541 	tx_desc->pkt_offset = 0;
542 
543 	/* Handle scattered frames - TSO/SG/ME */
544 	/* Allocate and prepare an extension descriptor for scattered frames */
545 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
546 	if (!msdu_ext_desc) {
547 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
548 				"%s Tx Extension Descriptor Alloc Fail\n",
549 				__func__);
550 		goto failure;
551 	}
552 
553 #if TQM_BYPASS_WAR
554 	/* Temporary WAR due to TQM VP issues */
555 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
556 	qdf_atomic_inc(&pdev->num_tx_exception);
557 #endif
558 	if (qdf_unlikely(vdev->mesh_vdev))
559 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
560 
561 	tx_desc->msdu_ext_desc = msdu_ext_desc;
562 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
563 
564 	return tx_desc;
565 failure:
566 	DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
567 	DP_STATS_INC_PKT(vdev, tx_i.dropped.dropped_pkt, 1,
568 			qdf_nbuf_len(nbuf));
569 	if (qdf_unlikely(tx_desc->flags & DP_TX_DESC_FLAG_ME))
570 		dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
571 	dp_tx_desc_release(tx_desc, desc_pool_id);
572 	return NULL;
573 }
574 
575 /**
576  * dp_tx_prepare_raw() - Prepare RAW packet TX
577  * @vdev: DP vdev handle
578  * @nbuf: buffer pointer
579  * @seg_info: Pointer to Segment info Descriptor to be prepared
580  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
581  *     descriptor
582  *
583  * Return: nbuf on success, NULL on DMA mapping failure
584  */
585 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
586 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
587 {
588 	qdf_nbuf_t curr_nbuf = NULL;
589 	uint16_t total_len = 0;
590 	int32_t i;
591 
592 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
593 
594 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
595 				QDF_DMA_TO_DEVICE)) {
596 		qdf_print("dma map error\n");
597 		qdf_nbuf_free(nbuf);
598 		return NULL;
599 	}
600 
601 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
602 				curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
603 		seg_info->frags[i].paddr_lo =
604 			qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
605 		seg_info->frags[i].paddr_hi = 0x0;
606 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
607 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
608 		total_len += qdf_nbuf_len(curr_nbuf);
609 	}
610 
611 	seg_info->frag_cnt = i;
612 	seg_info->total_len = total_len;
613 	seg_info->next = NULL;
614 
615 	sg_info->curr_seg = seg_info;
616 
617 	msdu_info->frm_type = dp_tx_frm_raw;
618 	msdu_info->num_seg = 1;
619 
620 	return nbuf;
621 }
622 
623 /**
624  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
625  * @soc: DP Soc Handle
626  * @vdev: DP vdev handle
627  * @tx_desc: Tx Descriptor Handle
628  * @tid: TID from HLOS for overriding default DSCP-TID mapping
629  * @fw_metadata: Metadata to send to Target Firmware along with frame
630  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
631  *
632  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
633  *  from software Tx descriptor
634  *
635  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_RESOURCES when the TCL ring is full
636  */
637 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
638 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
639 				   uint16_t fw_metadata, uint8_t ring_id)
640 {
641 	uint8_t type;
642 	uint16_t length;
643 	void *hal_tx_desc, *hal_tx_desc_cached;
644 	qdf_dma_addr_t dma_addr;
645 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
646 
647 	/* Return Buffer Manager ID */
648 	uint8_t bm_id = ring_id;
649 	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
650 
651 	hal_tx_desc_cached = (void *) cached_desc;
652 	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
653 
654 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
655 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
656 		type = HAL_TX_BUF_TYPE_EXT_DESC;
657 		dma_addr = tx_desc->msdu_ext_desc->paddr;
658 	} else {
659 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
660 		type = HAL_TX_BUF_TYPE_BUFFER;
661 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
662 	}
663 
664 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
665 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
666 			dma_addr , bm_id, tx_desc->id, type);
667 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
668 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
669 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
670 	hal_tx_desc_set_dscp_tid_table_id(hal_tx_desc_cached,
671 			vdev->dscp_tid_map_id);
672 
673 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
674 			"%s length:%d , type = %d, dma_addr %llx, offset %d\n",
675 			__func__, length, type, (uint64_t)dma_addr,
676 			tx_desc->pkt_offset);
677 
678 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
679 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
680 
681 	/*
682 	 * TODO
683 	 * For AP mode, enable AddrX flag only
684 	 * For all other modes, enable both AddrX and AddrY
685 	 * flags for now
686 	 */
687 	if (vdev->opmode == wlan_op_mode_ap)
688 		hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
689 			HAL_TX_DESC_ADDRX_EN);
690 	else
691 		hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
692 			HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
693 
694 	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
695 		|| qdf_nbuf_is_tso(tx_desc->nbuf))  {
696 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
697 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
698 	}
699 
700 	if (tid != HTT_TX_EXT_TID_INVALID)
701 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
702 
703 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
704 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
705 
706 
707 	/* Sync cached descriptor with HW */
708 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
709 
710 	if (!hal_tx_desc) {
711 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
712 			  "%s TCL ring full ring_id:%d\n", __func__, ring_id);
713 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
714 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
715 		DP_STATS_INC_PKT(vdev, tx_i.dropped.dropped_pkt, 1,
716 				length);
717 		hal_srng_access_end(soc->hal_soc,
718 				soc->tcl_data_ring[ring_id].hal_srng);
719 		return QDF_STATUS_E_RESOURCES;
720 	}
721 
722 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
723 
724 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
725 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
726 
727 	return QDF_STATUS_SUCCESS;
728 }
729 
730 /**
731  * dp_tx_classify_tid() - Obtain TID to be used for this frame
732  * @vdev: DP vdev handle
733  * @nbuf: skb
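 * @msdu_info: MSDU info in which the classified TID is stored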
734  *
735  * Extract the DSCP or PCP information from frame and map into TID value.
736  * Software based TID classification is required when more than 2 DSCP-TID
737  * mapping tables are needed.
738  * Hardware supports 2 DSCP-TID mapping tables
739  *
740  * Return: void
741  */
742 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
743 		struct dp_tx_msdu_info_s *msdu_info)
744 {
745 	uint8_t tos = 0, dscp_tid_override = 0;
746 	uint8_t *hdr_ptr, *L3datap;
747 	uint8_t is_mcast = 0;
748 	struct ether_header *eh = NULL;
749 	qdf_ethervlan_header_t *evh = NULL;
750 	uint16_t   ether_type;
751 	qdf_llc_t *llcHdr;
752 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
753 
754 	/* for mesh packets don't do any classification */
755 	if (qdf_unlikely(vdev->mesh_vdev))
756 		return;
757 
758 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
759 		eh = (struct ether_header *) nbuf->data;
760 		hdr_ptr = eh->ether_dhost;
761 		L3datap = hdr_ptr + sizeof(struct ether_header);
762 	} else {
763 		qdf_dot3_qosframe_t *qos_wh =
764 			(qdf_dot3_qosframe_t *) nbuf->data;
765 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
766 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
767 		return;
768 	}
769 
770 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
771 	ether_type = eh->ether_type;
772 	/*
773 	 * Check if packet is dot3 or eth2 type.
774 	 */
775 	if (IS_LLC_PRESENT(ether_type)) {
776 		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
777 				sizeof(*llcHdr));
778 
779 		if (ether_type == htons(ETHERTYPE_8021Q)) {
780 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
781 				sizeof(*llcHdr);
782 			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
783 					+ sizeof(*llcHdr) +
784 					sizeof(qdf_net_vlanhdr_t));
785 		} else {
786 			L3datap = hdr_ptr + sizeof(struct ether_header) +
787 				sizeof(*llcHdr);
788 		}
789 
790 	} else {
791 		if (ether_type == htons(ETHERTYPE_8021Q)) {
792 			evh = (qdf_ethervlan_header_t *) eh;
793 			ether_type = evh->ether_type;
794 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
795 		}
796 	}
797 	/*
798 	 * Find priority from IP TOS DSCP field
799 	 */
800 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
801 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
802 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
803 			/* Only for unicast frames */
804 			if (!is_mcast) {
805 				/* send it on VO queue */
806 				msdu_info->tid = DP_VO_TID;
807 			}
808 		} else {
809 			/*
810 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
811 			 * from TOS byte.
812 			 */
813 			tos = ip->ip_tos;
814 			dscp_tid_override = 1;
815 
816 		}
817 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
818 		/* TODO
819 		 * use flowlabel
820 		 *igmpmld cases to be handled in phase 2
821 		 */
822 		unsigned long ver_pri_flowlabel;
823 		unsigned long pri;
824 		ver_pri_flowlabel = *(unsigned long *) L3datap;
825 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
826 			DP_IPV6_PRIORITY_SHIFT;
827 		tos = pri;
828 		dscp_tid_override = 1;
829 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
830 		msdu_info->tid = DP_VO_TID;
831 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
832 		/* Only for unicast frames */
833 		if (!is_mcast) {
834 			/* send ucast arp on VO queue */
835 			msdu_info->tid = DP_VO_TID;
836 		}
837 	}
838 
839 	/*
840 	 * Assign all MCAST packets to BE
841 	 */
842 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
843 		if (is_mcast) {
844 			tos = 0;
845 			dscp_tid_override = 1;
846 		}
847 	}
848 
849 	if (dscp_tid_override == 1) {
850 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
851 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
852 	}
853 	return;
854 }
855 
856 /**
857  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
858  * @vdev: DP vdev handle
859  * @nbuf: skb
860  * @tid: TID from HLOS for overriding default DSCP-TID mapping
861  * @tx_q: Tx queue to be used for this Tx frame
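 * @meta_data: HTT metadata for special (mesh/OCB) frames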
862  * @peer_id: peer_id of the peer in case of NAWDS frames
863  *
864  * Return: NULL on success,
865  *         nbuf when it fails to send
866  */
867 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
868 		uint8_t tid, struct dp_tx_queue *tx_q,
869 		uint32_t *meta_data, uint16_t peer_id)
870 {
871 	struct dp_pdev *pdev = vdev->pdev;
872 	struct dp_soc *soc = pdev->soc;
873 	struct dp_tx_desc_s *tx_desc;
874 	QDF_STATUS status;
875 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
876 	uint16_t htt_tcl_metadata = 0;
877 
878 	HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 0);
879 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
880 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id, meta_data);
881 	if (!tx_desc) {
882 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
883 			  "%s Tx_desc prepare Fail vdev %p queue %d\n",
884 			  __func__, vdev, tx_q->desc_pool_id);
885 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
886 		goto fail_return;
887 	}
888 
889 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
890 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
891 				"%s %d : HAL RING Access Failed -- %p\n",
892 				__func__, __LINE__, hal_srng);
893 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
894 		goto fail_return;
895 	}
896 
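	/*
	 * NAWDS frames carry a valid peer_id, so build peer-based TCL
	 * metadata; all other frames reuse the vdev's precomputed metadata.
	 */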
897 	if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
898 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
899 				HTT_TCL_METADATA_TYPE_PEER_BASED);
900 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
901 				peer_id);
902 	} else
903 		htt_tcl_metadata = vdev->htt_tcl_metadata;
904 
905 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
906 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
907 			htt_tcl_metadata, tx_q->ring_id);
908 
909 	if (status != QDF_STATUS_SUCCESS) {
910 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
911 			  "%s Tx_hw_enqueue Fail tx_desc %p queue %d\n",
912 			  __func__, tx_desc, tx_q->ring_id);
913 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
914 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
915 		goto fail_return;
916 	}
917 
918 	hal_srng_access_end(soc->hal_soc, hal_srng);
919 
920 	return NULL;
921 
922 fail_return:
923 	DP_STATS_INC_PKT(pdev, tx_i.dropped.dropped_pkt, 1,
924 			qdf_nbuf_len(nbuf));
925 	return nbuf;
926 }
927 
928 /**
929  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
930  * @vdev: DP vdev handle
931  * @nbuf: skb
932  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
933  *
934  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
935  *
936  * Return: NULL on success,
937  *         nbuf when it fails to send
938  */
939 #if QDF_LOCK_STATS
940 static noinline
941 #else
942 static
943 #endif
944 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
945 				    struct dp_tx_msdu_info_s *msdu_info)
946 {
947 	uint8_t i;
948 	struct dp_pdev *pdev = vdev->pdev;
949 	struct dp_soc *soc = pdev->soc;
950 	struct dp_tx_desc_s *tx_desc;
951 	QDF_STATUS status;
952 
953 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
954 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
955 
956 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
957 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
958 				"%s %d : HAL RING Access Failed -- %p\n",
959 				__func__, __LINE__, hal_srng);
960 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
961 		DP_STATS_INC_PKT(vdev,
962 				tx_i.dropped.dropped_pkt, 1,
963 				qdf_nbuf_len(nbuf));
964 		return nbuf;
965 	}
966 
967 	if (msdu_info->frm_type == dp_tx_frm_me)
968 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
969 
970 	i = 0;
971 
972 	/*
973 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
974 	 * descriptors using information in msdu_info
975 	 */
976 	while (i < msdu_info->num_seg) {
977 		/*
978 		 * Setup Tx descriptor for an MSDU, and MSDU extension
979 		 * descriptor
980 		 */
981 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
982 				tx_q->desc_pool_id);
983 
984 		if (!tx_desc) {
985 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
986 				  "%s Tx_desc prepare Fail vdev %p queue %d\n",
987 				  __func__, vdev, tx_q->desc_pool_id);
988 			DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
989 			DP_STATS_INC_PKT(vdev,
990 					tx_i.dropped.dropped_pkt, 1,
991 					qdf_nbuf_len(nbuf));
992 			goto done;
993 		}
994 
995 		/*
996 		 * For ME frames, record the per-client ME buffer in the
997 		 * descriptor so it can be freed on Tx completion.
998 		 */
999 		if (msdu_info->frm_type == dp_tx_frm_me) {
1000 			tx_desc->me_buffer =
1001 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1002 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1003 		}
1004 
1005 		/*
1006 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1007 		 */
1008 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1009 			vdev->htt_tcl_metadata, tx_q->ring_id);
1010 
1011 		if (status != QDF_STATUS_SUCCESS) {
1012 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1013 				  "%s Tx_hw_enqueue Fail tx_desc %p queue %d\n",
1014 				  __func__, tx_desc, tx_q->ring_id);
1015 
1016 			DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1017 			DP_STATS_INC_PKT(pdev,
1018 					tx_i.dropped.dropped_pkt, 1,
1019 					qdf_nbuf_len(nbuf));
1020 
1021 			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
1022 				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
1023 
1024 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1025 			goto done;
1026 		}
1027 
1028 		/*
1029 		 * TODO
1030 		 * if tso_info structure can be modified to have curr_seg
1031 		 * as first element, following 2 blocks of code (for TSO and SG)
1032 		 * can be combined into 1
1033 		 */
1034 
1035 		/*
1036 		 * For frames with multiple segments (TSO, ME), jump to next
1037 		 * segment.
1038 		 */
1039 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1040 			if (msdu_info->u.tso_info.curr_seg->next) {
1041 				msdu_info->u.tso_info.curr_seg =
1042 					msdu_info->u.tso_info.curr_seg->next;
1043 
1044 				/*
1045 				 * If this is a jumbo nbuf, then increment the number of
1046 				 * nbuf users for each additional segment of the msdu.
1047 				 * This will ensure that the skb is freed only after
1048 				 * receiving tx completion for all segments of an nbuf
1049 				 */
1050 				qdf_nbuf_inc_users(nbuf);
1051 
1052 				/* Check with MCL if this is needed */
1053 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1054 			}
1055 		}
1056 
1057 		/*
1058 		 * For Multicast-Unicast converted packets,
1059 		 * each converted frame (for a client) is represented as
1060 		 * 1 segment
1061 		 */
1062 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1063 				(msdu_info->frm_type == dp_tx_frm_me)) {
1064 			if (msdu_info->u.sg_info.curr_seg->next) {
1065 				msdu_info->u.sg_info.curr_seg =
1066 					msdu_info->u.sg_info.curr_seg->next;
1067 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1068 			}
1069 		}
1070 		i++;
1071 	}
1072 
1073 	nbuf = NULL;
1074 
1075 done:
1076 	hal_srng_access_end(soc->hal_soc, hal_srng);
1077 
1078 	return nbuf;
1079 }
1080 
1081 /**
1082  * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
1083  *                     for SG frames
1084  * @vdev: DP vdev handle
1085  * @nbuf: skb
1086  * @seg_info: Pointer to Segment info Descriptor to be prepared
1087  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1088  *
1089  * Return: nbuf on success,
1090  *         NULL on DMA mapping failure
1091  */
1092 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1093 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1094 {
1095 	uint32_t cur_frag, nr_frags;
1096 	qdf_dma_addr_t paddr;
1097 	struct dp_tx_sg_info_s *sg_info;
1098 
1099 	sg_info = &msdu_info->u.sg_info;
1100 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1101 
1102 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1103 				QDF_DMA_TO_DEVICE)) {
1104 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1105 				"dma map error\n");
1106 
1107 		qdf_nbuf_free(nbuf);
1108 		return NULL;
1109 	}
1110 
1111 	seg_info->frags[0].paddr_lo = qdf_nbuf_get_frag_paddr(nbuf, 0);
1112 	seg_info->frags[0].paddr_hi = 0;
1113 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1114 	seg_info->frags[0].vaddr = (void *) nbuf;
1115 
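	/* frags[0] describes the linear (head) data; paged frags fill in from index 1 */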
1116 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1117 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1118 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1119 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1120 					"frag dma map error\n");
1121 			qdf_nbuf_free(nbuf);
1122 			return NULL;
1123 		}
1124 
1125 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1126 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1127 		seg_info->frags[cur_frag + 1].paddr_hi =
1128 			((uint64_t) paddr) >> 32;
1129 		seg_info->frags[cur_frag + 1].len =
1130 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1131 	}
1132 
1133 	seg_info->frag_cnt = (cur_frag + 1);
1134 	seg_info->total_len = qdf_nbuf_len(nbuf);
1135 	seg_info->next = NULL;
1136 
1137 	sg_info->curr_seg = seg_info;
1138 
1139 	msdu_info->frm_type = dp_tx_frm_sg;
1140 	msdu_info->num_seg = 1;
1141 
1142 	return nbuf;
1143 }
1144 
1145 #ifdef MESH_MODE_SUPPORT
1146 
1147 /**
1148  * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
1149 				and prepare msdu_info for mesh frames.
1150  * @vdev: DP vdev handle
1151  * @nbuf: skb
1152  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1153  *
1154  * Return: void
1155  */
1156 static
1157 void dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1158 				struct dp_tx_msdu_info_s *msdu_info)
1159 {
1160 	struct meta_hdr_s *mhdr;
1161 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1162 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1163 
1164 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1165 
1166 	qdf_mem_set(meta_data, 0, sizeof(struct htt_tx_msdu_desc_ext2_t));
1167 
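	/*
	 * When a fixed rate is requested (METAHDR_FLAG_AUTO_RATE not set),
	 * rates[0] packs the MCS mask (bits 0-3), NSS mask (bits 4-5) and
	 * preamble type (bits 6-7); copy each field into the HTT descriptor.
	 */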
1168 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1169 		meta_data->power = mhdr->power;
1170 		meta_data->mcs_mask = mhdr->rates[0] & 0xF;
1171 		meta_data->nss_mask = (mhdr->rates[0] >> 4) & 0x3;
1172 		meta_data->pream_type = (mhdr->rates[0] >> 6) & 0x3;
1173 
1174 		meta_data->retry_limit = mhdr->max_tries[0];
1175 		meta_data->dyn_bw = 1;
1176 
1177 		meta_data->valid_pwr = 1;
1178 		meta_data->valid_mcs_mask = 1;
1179 		meta_data->valid_nss_mask = 1;
1180 		meta_data->valid_preamble_type  = 1;
1181 		meta_data->valid_retries = 1;
1182 		meta_data->valid_bw_info = 1;
1183 	}
1184 
1185 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1186 		meta_data->encrypt_type = 0;
1187 		meta_data->valid_encrypt_type = 1;
1188 	}
1189 
1190 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
1191 		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1192 	else
1193 		msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1194 
1195 	meta_data->valid_key_flags = 1;
1196 	meta_data->key_flags = (mhdr->keyix & 0x3);
1197 
1198 	qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s));
1199 
1200 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1201 			"%s , Meta hdr %0x %0x %0x %0x %0x\n",
1202 			__func__, msdu_info->meta_data[0],
1203 			msdu_info->meta_data[1],
1204 			msdu_info->meta_data[2],
1205 			msdu_info->meta_data[3],
1206 			msdu_info->meta_data[4]);
1207 
1208 	return;
1209 }
1210 #else
1211 static
1212 void dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1213 				struct dp_tx_msdu_info_s *msdu_info)
1214 {
1215 }
1216 
1217 #endif
1218 
1219 /**
1220  * dp_tx_prepare_nawds() - Transmit NAWDS frames
1221  * @vdev: dp_vdev handle
1222  * @nbuf: skb
1223  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1224  * @tx_q: Tx queue to be used for this Tx frame
1225  * @meta_data: Meta data for mesh
1226  * @peer_id: peer_id of the peer in case of NAWDS frames
1227  *
1228  * Return: NULL on success, nbuf on failure
1229  */
1230 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1231 		uint8_t tid, struct dp_tx_queue *tx_q, uint32_t *meta_data,
1232 		uint32_t peer_id)
1233 {
1234 	struct dp_peer *peer = NULL;
1235 	qdf_nbuf_t nbuf_copy;
1236 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1237 		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1238 				(peer->nawds_enabled || peer->bss_peer)) {
1239 			nbuf_copy = qdf_nbuf_copy(nbuf);
1240 			if (!nbuf_copy) {
1241 				QDF_TRACE(QDF_MODULE_ID_DP,
1242 						QDF_TRACE_LEVEL_ERROR,
1243 						"nbuf copy failed");
				break;
1244 			}
1245 
1246 			peer_id = peer->peer_ids[0];
1247 			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy, tid,
1248 					tx_q, meta_data, peer_id);
1249 			if (nbuf_copy != NULL) {
1250 				qdf_nbuf_free(nbuf);
1251 				return nbuf_copy;
1252 			}
1253 		}
1254 	}
1255 	if (peer_id == HTT_INVALID_PEER)
1256 		return nbuf;
1257 
1258 	qdf_nbuf_free(nbuf);
1259 	return NULL;
1260 }
1261 
1262 /**
1263  * dp_tx_send() - Transmit a frame on a given VAP
1264  * @vap_dev: DP vdev handle
1265  * @nbuf: skb
1266  *
1267  * Entry point for Core Tx layer (DP_TX) invoked from
1268  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-VAP forwarding
1269  * cases
1270  *
1271  * Return: NULL on success,
1272  *         nbuf when it fails to send
1273  */
1274 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
1275 {
1276 	struct ether_header *eh = NULL;
1277 	struct dp_tx_msdu_info_s msdu_info;
1278 	struct dp_tx_seg_info_s seg_info;
1279 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1280 	uint16_t peer_id = HTT_INVALID_PEER;
1281 
1282 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1283 	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);
1284 
1285 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1286 			"%s , skb %0x:%0x:%0x:%0x:%0x:%0x\n",
1287 			__func__, nbuf->data[0], nbuf->data[1], nbuf->data[2],
1288 			nbuf->data[3], nbuf->data[4], nbuf->data[5]);
1289 	/*
1290 	 * Set Default Host TID value to invalid TID
1291 	 * (TID override disabled)
1292 	 */
1293 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
1294 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1295 
1296 	if (qdf_unlikely(vdev->mesh_vdev))
1297 		dp_tx_extract_mesh_meta_data(vdev, nbuf, &msdu_info);
1298 
1299 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1300 			"%s , skb %0x:%0x:%0x:%0x:%0x:%0x\n",
1301 			__func__, nbuf->data[0], nbuf->data[1], nbuf->data[2],
1302 			nbuf->data[3], nbuf->data[4], nbuf->data[5]);
1303 	/*
1304 	 * Get HW Queue to use for this frame.
1305 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1306 	 * dedicated for data and 1 for command.
1307 	 * "queue_id" maps to one hardware ring.
1308 	 *  With each ring, we also associate a unique Tx descriptor pool
1309 	 *  to minimize lock contention for these resources.
1310 	 */
1311 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1312 
1313 	/*
1314 	 * TCL H/W supports 2 DSCP-TID mapping tables.
1315 	 *  Table 1 - Default DSCP-TID mapping table
1316 	 *  Table 2 - 1 DSCP-TID override table
1317 	 *
1318 	 * If we need a different DSCP-TID mapping for this vap,
1319 	 * call tid_classify to extract DSCP/ToS from frame and
1320 	 * map to a TID and store in msdu_info. This is later used
1321 	 * to fill in TCL Input descriptor (per-packet TID override).
1322 	 */
1323 	if (vdev->dscp_tid_map_id > 1)
1324 		dp_tx_classify_tid(vdev, nbuf, &msdu_info);
1325 
1326 	/* Reset the control block */
1327 	qdf_nbuf_reset_ctxt(nbuf);
1328 
1329 	/*
1330 	 * Classify the frame and call corresponding
1331 	 * "prepare" function which extracts the segment (TSO)
1332 	 * and fragmentation information (for TSO , SG, ME, or Raw)
1333 	 * into MSDU_INFO structure which is later used to fill
1334 	 * SW and HW descriptors.
1335 	 */
1336 	if (qdf_nbuf_is_tso(nbuf)) {
1337 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1338 			  "%s TSO frame %p\n", __func__, vdev);
1339 		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
1340 				qdf_nbuf_len(nbuf));
1341 
1342 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
1343 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1344 					"%s tso_prepare fail vdev_id:%d\n",
1345 					__func__, vdev->vdev_id);
1346 			return nbuf;
1347 		}
1348 
1349 		goto send_multiple;
1350 	}
1351 
1352 	/* SG */
1353 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1354 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
		if (nbuf == NULL)
			return NULL;
1355 
1356 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1357 			 "%s non-TSO SG frame %p\n", __func__, vdev);
1358 
1359 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
1360 				qdf_nbuf_len(nbuf));
1361 
1362 		goto send_multiple;
1363 	}
1364 
1365 #ifdef ATH_SUPPORT_IQUE
1366 	/* Mcast to Ucast Conversion*/
1367 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1368 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1369 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
1370 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1371 				  "%s Mcast frm for ME %p\n", __func__, vdev);
1372 
1373 			DP_STATS_INC_PKT(vdev,
1374 					tx_i.mcast_en.mcast_pkt, 1,
1375 					qdf_nbuf_len(nbuf));
1376 			if (dp_tx_prepare_send_me(vdev, nbuf)) {
1377 				qdf_nbuf_free(nbuf);
1378 				return NULL;
1379 			}
1380 			return nbuf;
1381 		}
1382 	}
1383 #endif
1384 
1385 	/* RAW */
1386 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
1387 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
1388 		if (nbuf == NULL)
1389 			return NULL;
1390 
1391 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1392 			  "%s Raw frame %p\n", __func__, vdev);
1393 
1394 		DP_STATS_INC_PKT(vdev, tx_i.raw_pkt, 1,
1395 				qdf_nbuf_len(nbuf));
1396 
1397 		goto send_multiple;
1398 
1399 	}
1400 
1401 	if (vdev->nawds_enabled) {
1402 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1403 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
1404 			nbuf = dp_tx_prepare_nawds(vdev, nbuf, msdu_info.tid,
1405 					&msdu_info.tx_queue,
1406 					msdu_info.meta_data, peer_id);
1407 			return nbuf;
1408 		}
1409 	}
1410 
1411 	/*  Single linear frame */
1412 	/*
1413 	 * If nbuf is a simple linear frame, use send_single function to
1414 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
1415 	 * SRNG. There is no need to setup a MSDU extension descriptor.
1416 	 */
1417 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info.tid,
1418 			&msdu_info.tx_queue, msdu_info.meta_data, peer_id);
1419 
1420 	return nbuf;
1421 
1422 send_multiple:
1423 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
1424 
1425 	return nbuf;
1426 }
1427 
1428 /**
1429  * dp_tx_reinject_handler() - Tx Reinject Handler
1430  * @tx_desc: software descriptor head pointer
1431  * @status : Tx completion status from HTT descriptor
1432  *
1433  * This function reinjects frames back to Target.
1434  * Todo - Host queue needs to be added
1435  *
1436  * Return: none
1437  */
1438 static
1439 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
1440 {
1441 	struct dp_vdev *vdev;
1442 
1443 	vdev = tx_desc->vdev;
1444 
1445 	qdf_assert(vdev);
1446 
1447 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1448 			"%s Tx reinject path\n", __func__);
1449 
1450 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
1451 			qdf_nbuf_len(tx_desc->nbuf));
1452 
1453 	if (qdf_unlikely(vdev->mesh_vdev)) {
1454 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
1455 	} else
1456 		dp_tx_send(vdev, tx_desc->nbuf);
1457 
1458 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
1459 }
1460 
1461 /**
1462  * dp_tx_inspect_handler() - Tx Inspect Handler
1463  * @tx_desc: software descriptor head pointer
1464  * @status : Tx completion status from HTT descriptor
1465  *
1466  * Handles Tx frames sent back to Host for inspection
1467  * (ProxyARP)
1468  *
1469  * Return: none
1470  */
1471 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
1472 {
1473 
1474 	struct dp_soc *soc;
1475 	struct dp_pdev *pdev = tx_desc->pdev;
1476 
1477 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1478 			"%s Tx inspect path\n",
1479 			__func__);
1480 
1481 	qdf_assert(pdev);
1482 
1483 	soc = pdev->soc;
1484 
1485 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
1486 			qdf_nbuf_len(tx_desc->nbuf));
1487 
1488 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
1489 }
1490 
1491 /**
1492  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
1493  * @tx_desc: software descriptor head pointer
1494  * @status : Tx completion status from HTT descriptor
1495  *
1496  * This function will process HTT Tx indication messages from Target
1497  *
1498  * Return: none
1499  */
1500 static
1501 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
1502 {
1503 	uint8_t tx_status;
1504 	struct dp_pdev *pdev;
1505 	struct dp_soc *soc;
1506 	uint32_t *htt_status_word = (uint32_t *) status;
1507 
1508 	qdf_assert(tx_desc->pdev);
1509 
1510 	pdev = tx_desc->pdev;
1511 	soc = pdev->soc;
1512 
1513 	tx_status = HTT_TX_WBM_COMPLETION_TX_STATUS_GET(htt_status_word[0]);
1514 
1515 	switch (tx_status) {
1516 	case HTT_TX_FW2WBM_TX_STATUS_OK:
1517 	{
1518 		qdf_atomic_dec(&pdev->num_tx_exception);
1519 		DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
1520 		break;
1521 	}
1522 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
1523 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
1524 	{
1525 		qdf_atomic_dec(&pdev->num_tx_exception);
1526 		DP_STATS_INC_PKT(tx_desc->vdev, tx_i.dropped.dropped_pkt,
1527 				1, qdf_nbuf_len(tx_desc->nbuf));
1528 		DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
1529 		break;
1530 	}
1531 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
1532 	{
1533 		dp_tx_reinject_handler(tx_desc, status);
1534 		break;
1535 	}
1536 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
1537 	{
1538 		dp_tx_inspect_handler(tx_desc, status);
1539 		break;
1540 	}
1541 	default:
1542 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1543 				"%s Invalid HTT tx_status %d\n",
1544 				__func__, tx_status);
1545 		break;
1546 	}
1547 }
1548 
1549 #ifdef MESH_MODE_SUPPORT
1550 /**
1551  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
1552  *                                         in mesh meta header
1553  * @tx_desc: software descriptor head pointer
1554  * @ts: pointer to tx completion stats
1555  * Return: none
1556  */
1557 static
1558 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
1559 		struct hal_tx_completion_status *ts)
1560 {
1561 	struct meta_hdr_s *mhdr;
1562 	qdf_nbuf_t netbuf = tx_desc->nbuf;
1563 
1564 	if (!tx_desc->msdu_ext_desc) {
1565 		qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset);
1566 	}
1567 	qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s));
1568 
1569 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
1570 	mhdr->rssi = ts->ack_frame_rssi;
1571 }
1572 
1573 #else
1574 static
1575 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
1576 		struct hal_tx_completion_status *ts)
1577 {
1578 }
1579 
1580 #endif
1581 
1582 
1583 /**
1584  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
1585  * @tx_desc: software descriptor head pointer
1586  * @length: packet length
1587  *
1588  * Return: none
1589  */
1590 static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
1591 		uint32_t length)
1592 {
1593 	struct hal_tx_completion_status ts;
1594 	struct dp_soc *soc = NULL;
1595 	struct dp_vdev *vdev = tx_desc->vdev;
1596 	struct dp_peer *peer = NULL;
1597 	uint8_t comp_status = 0;
1598 	qdf_mem_zero(&ts, sizeof(struct hal_tx_completion_status));
1599 	hal_tx_comp_get_status(&tx_desc->comp, &ts);
1600 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1601 				"-------------------- \n"
1602 				"Tx Completion Stats: \n"
1603 				"-------------------- \n"
1604 				"ack_frame_rssi = %d \n"
1605 				"first_msdu = %d \n"
1606 				"last_msdu = %d \n"
1607 				"msdu_part_of_amsdu = %d \n"
1608 				"rate_stats valid = %d \n"
1609 				"bw = %d \n"
1610 				"pkt_type = %d \n"
1611 				"stbc = %d \n"
1612 				"ldpc = %d \n"
1613 				"sgi = %d \n"
1614 				"mcs = %d \n"
1615 				"ofdma = %d \n"
1616 				"tones_in_ru = %d \n"
1617 				"tsf = %d \n"
1618 				"ppdu_id = %d \n"
1619 				"transmit_cnt = %d \n"
1620 				"tid = %d \n"
1621 				"peer_id = %d \n",
1622 				ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
1623 				ts.msdu_part_of_amsdu, ts.valid, ts.bw,
1624 				ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
1625 				ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
1626 				ts.ppdu_id, ts.transmit_cnt, ts.tid,
1627 				ts.peer_id);
1628 
1629 	if (!vdev) {
1630 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1631 				"invalid vdev");
1632 		goto fail;
1633 	}
1634 
1635 	if (qdf_unlikely(vdev->mesh_vdev))
1636 		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);
1637 
1638 	soc = tx_desc->vdev->pdev->soc;
1639 	peer = dp_peer_find_by_id(soc, ts.peer_id);
1640 	if (!peer) {
1641 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1642 				"invalid peer");
1643 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
1644 		goto out;
1645 	}
1646 
1647 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
1648 
1649 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
1650 				hal_tx_comp_get_buffer_source(&tx_desc->comp)) {
1651 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
1652 
1653 		DP_STATS_INCC(peer, tx.dropped.mpdu_age_out, 1,
1654 				(comp_status == HAL_TX_TQM_RR_REM_CMD_AGED));
1655 		DP_STATS_INCC(peer, tx.dropped.fw_discard_reason1, 1,
1656 				(comp_status == HAL_TX_TQM_RR_FW_REASON1));
1657 		DP_STATS_INCC(peer, tx.dropped.fw_discard_reason2, 1,
1658 				(comp_status == HAL_TX_TQM_RR_FW_REASON2));
1659 		DP_STATS_INCC(peer, tx.dropped.fw_discard_reason3, 1,
1660 				(comp_status == HAL_TX_TQM_RR_FW_REASON3));
1661 		DP_STATS_INCC(peer, tx.tx_failed, 1,
1662 				comp_status != HAL_TX_TQM_RR_FRAME_ACKED);
1663 
1664 		if (comp_status == HAL_TX_TQM_RR_FRAME_ACKED) {
1665 			DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
1666 					mcs_count[MAX_MCS], 1,
1667 					((ts.mcs >= MAX_MCS_11A) && (ts.pkt_type
1668 						== DOT11_A)));
1669 			DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
1670 					mcs_count[ts.mcs], 1,
1671 					((ts.mcs <= MAX_MCS_11A) && (ts.pkt_type
1672 						== DOT11_A)));
1673 			DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
1674 					mcs_count[MAX_MCS], 1,
1675 					((ts.mcs >= MAX_MCS_11B)
1676 					 && (ts.pkt_type == DOT11_B)));
1677 			DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
1678 					mcs_count[ts.mcs], 1,
1679 					((ts.mcs <= MAX_MCS_11B)
1680 					 && (ts.pkt_type == DOT11_B)));
1681 			DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
1682 					mcs_count[MAX_MCS], 1,
1683 					((ts.mcs >= MAX_MCS_11A)
1684 					 && (ts.pkt_type == DOT11_N)));
1685 			DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
1686 					mcs_count[ts.mcs], 1,
1687 					((ts.mcs <= MAX_MCS_11A)
1688 					 && (ts.pkt_type == DOT11_N)));
1689 			DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
1690 					mcs_count[MAX_MCS], 1,
1691 					((ts.mcs >= MAX_MCS_11AC)
1692 					 && (ts.pkt_type == DOT11_AC)));
1693 			DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
1694 					mcs_count[ts.mcs], 1,
1695 					((ts.mcs <= MAX_MCS_11AC)
1696 					 && (ts.pkt_type == DOT11_AC)));
1697 			DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
1698 					mcs_count[MAX_MCS], 1,
1699 					((ts.mcs >= MAX_MCS)
1700 					 && (ts.pkt_type == DOT11_AX)));
1701 			DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
1702 					mcs_count[ts.mcs], 1,
1703 					((ts.mcs <= MAX_MCS)
1704 					 && (ts.pkt_type == DOT11_AX)));
1705 
1706 			DP_STATS_INC(peer, tx.sgi_count[ts.sgi], 1);
1707 			DP_STATS_INC(peer, tx.bw[ts.bw], 1);
1708 			DP_STATS_UPD(peer, tx.last_ack_rssi, ts.ack_frame_rssi);
1709 			DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts.tid)]
1710 					, 1);
1711 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
1712 			DP_STATS_INCC(peer, tx.stbc, 1, ts.stbc);
1713 			DP_STATS_INCC(peer, tx.ofdma, 1, ts.ofdma);
1714 			DP_STATS_INCC(peer, tx.ldpc, 1, ts.ldpc);
1715 			DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1,
1716 					(ts.first_msdu && ts.last_msdu));
1717 			DP_STATS_INCC(peer, tx.amsdu_cnt, 1,
1718 					!(ts.first_msdu && ts.last_msdu));
1719 			DP_STATS_INCC(peer, tx.retries, 1, ts.transmit_cnt > 1);
1720 		}
1721 	}
1722 
1723 	/* TODO: This call is temporary.
1724 	 * Stats update has to be attached to the HTT PPDU message
1725 	 */
1726 	if (soc->cdp_soc.ol_ops->update_dp_stats)
1727 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
1728 				&peer->stats, ts.peer_id, UPDATE_PEER_STATS);
1729 
1730 out:
1731 	dp_aggregate_vdev_stats(tx_desc->vdev);
1732 	if (soc->cdp_soc.ol_ops->update_dp_stats)
1733 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
1734 				&vdev->stats, vdev->vdev_id, UPDATE_VDEV_STATS);
1735 fail:
1736 	return;
1737 }
1738 
1739 
1740 /**
1741  * dp_tx_comp_process_desc() - Tx complete software descriptor handler
1742  * @soc: core txrx main context
1743  * @comp_head: software descriptor head pointer
1744  *
1745  * This function will process batch of descriptors reaped by dp_tx_comp_handler
1746  * and release the software descriptors after processing is complete
1747  *
1748  * Return: none
1749  */
1750 static void dp_tx_comp_process_desc(struct dp_soc *soc,
1751 				    struct dp_tx_desc_s *comp_head)
1752 {
1753 	struct dp_tx_desc_s *desc;
1754 	struct dp_tx_desc_s *next;
1755 	struct hal_tx_completion_status ts = {0};
1756 	uint32_t length;
1757 	struct dp_peer *peer;
1758 
1759 	DP_HIST_INIT();
1760 	desc = comp_head;
1761 
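	/* Walk the singly linked list of descriptors reaped by the caller */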
1762 	while (desc) {
1763 
1764 		hal_tx_comp_get_status(&desc->comp, &ts);
1765 		peer = dp_peer_find_by_id(soc, ts.peer_id);
1766 		length = qdf_nbuf_len(desc->nbuf);
1767 		/* Error Handling */
1768 		if (hal_tx_comp_get_buffer_source(&desc->comp) ==
1769 				HAL_TX_COMP_RELEASE_SOURCE_FW) {
1770 			dp_tx_comp_process_exception(desc);
1771 			desc = desc->next;
1772 			continue;
1773 		}
1774 
1775 		/* Process Tx status in descriptor */
1776 		if (soc->process_tx_status ||
1777 				(desc->vdev && desc->vdev->mesh_vdev))
1778 			dp_tx_comp_process_tx_status(desc, length);
1779 
		/* 0 : MSDU buffer, 1 : MSDU link extension descriptor */
1781 		if (desc->msdu_ext_desc) {
1782 			/* TSO free */
1783 			if (hal_tx_ext_desc_get_tso_enable(
1784 				desc->msdu_ext_desc->vaddr)) {
				/* If the remaining number of segments is 0,
				 * the actual TSO buffer can be unmapped and
				 * freed here */
1787 				if (!DP_DESC_NUM_FRAG(desc)) {
1788 					qdf_nbuf_unmap(soc->osdev, desc->nbuf,
1789 							QDF_DMA_TO_DEVICE);
1790 					qdf_nbuf_free(desc->nbuf);
1791 				}
1792 			} else {
1793 				/* SG free */
1794 				/* Free buffer */
1795 				DP_TX_FREE_DMA_TO_DEVICE(soc, desc->vdev,
1796 								desc->nbuf);
1797 			}
1798 		} else {
1799 			/* Free buffer */
1800 			DP_TX_FREE_DMA_TO_DEVICE(soc, desc->vdev, desc->nbuf);
1801 		}
1802 
1803 		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
1804 		DP_TRACE(NONE, "pdev_id: %u", desc->pdev->pdev_id);
1805 		next = desc->next;
1806 
1807 		if (desc->flags & DP_TX_DESC_FLAG_ME)
1808 			dp_tx_me_free_buf(desc->pdev, desc->me_buffer);
1809 
1810 		dp_tx_desc_release(desc, desc->pool_id);
1811 		desc = next;
1812 	}
1813 	DP_TX_HIST_STATS_PER_PDEV();
1814 }
1815 
1816 /**
1817  * dp_tx_comp_handler() - Tx completion handler
1818  * @soc: core txrx main context
1819  * @ring_id: completion ring id
1820  * @budget: No. of packets/descriptors that can be serviced in one loop
1821  *
1822  * This function will collect hardware release ring element contents and
1823  * handle descriptor contents. Based on contents, free packet or handle error
1824  * conditions
1825  *
 * Return: Number of TX completion descriptors processed
1827  */
1828 uint32_t dp_tx_comp_handler(struct dp_soc *soc, uint32_t ring_id,
1829 			uint32_t budget)
1830 {
1831 	void *tx_comp_hal_desc;
1832 	uint8_t buffer_src;
1833 	uint8_t pool_id;
1834 	uint32_t tx_desc_id;
1835 	struct dp_tx_desc_s *tx_desc = NULL;
1836 	struct dp_tx_desc_s *head_desc = NULL;
1837 	struct dp_tx_desc_s *tail_desc = NULL;
1838 	uint32_t num_processed;
1839 	void *hal_srng = soc->tx_comp_ring[ring_id].hal_srng;
1840 
1841 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1842 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1843 				"%s %d : HAL RING Access Failed -- %p\n",
1844 				__func__, __LINE__, hal_srng);
1845 		return 0;
1846 	}
1847 
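	/*
	 * Reap completion ring entries up to the given budget: TQM-released
	 * descriptors are chained into a local list and processed after ring
	 * access is released; FW-released ones are handled via HTT status.
	 */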
1848 	num_processed = 0;
1849 
1850 	/* Find head descriptor from completion ring */
1851 	while (qdf_likely(tx_comp_hal_desc =
1852 			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {
1853 
1854 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
1855 
		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication; skip to next descriptor */
1858 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
1859 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
1860 
1861 			QDF_TRACE(QDF_MODULE_ID_DP,
1862 					QDF_TRACE_LEVEL_ERROR,
1863 					"Tx comp release_src != TQM | FW");
1864 
1865 			/* TODO Handle Freeing of the buffer in descriptor */
1866 			continue;
1867 		}
1868 
		/*
		 * Get the descriptor id: the SW cookie returned by hardware
		 * encodes the pool id, page id and offset within the page
		 */
1870 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
1871 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
1872 			DP_TX_DESC_ID_POOL_OS;
1873 
		/* Pool ID is out of range. Error */
		if (pool_id >= wlan_cfg_get_num_tx_desc_pool(
					soc->wlan_cfg_ctx)) {
1877 			QDF_TRACE(QDF_MODULE_ID_DP,
1878 					QDF_TRACE_LEVEL_FATAL,
1879 					"TX COMP pool id %d not valid",
1880 					pool_id);
1881 
			/* Check if the assert aborts execution; if not,
			 * handle the error return here */
1884 			QDF_ASSERT(0);
1885 		}
1886 
1887 		/* Find Tx descriptor */
1888 		tx_desc = dp_tx_desc_find(soc, pool_id,
1889 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
1890 				DP_TX_DESC_ID_PAGE_OS,
1891 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
1892 				DP_TX_DESC_ID_OFFSET_OS);
1893 
1894 		/* Pool id is not matching. Error */
1895 		if (tx_desc && (tx_desc->pool_id != pool_id)) {
1896 			QDF_TRACE(QDF_MODULE_ID_DP,
1897 					QDF_TRACE_LEVEL_FATAL,
1898 					"Tx Comp pool id %d not matched %d",
1899 					pool_id, tx_desc->pool_id);
1900 
			/* Check if the assert aborts execution; if not,
			 * handle the error return here */
1903 			QDF_ASSERT(0);
1904 		}
1905 
1906 		if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
1907 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
1908 			QDF_TRACE(QDF_MODULE_ID_DP,
1909 					QDF_TRACE_LEVEL_FATAL,
					"Txdesc invalid, flags = %x, id = %d",
1911 					tx_desc->flags,	tx_desc_id);
1912 
1913 			/* TODO Handle Freeing of the buffer in this invalid
1914 			 * descriptor */
1915 			continue;
1916 		}
1917 
1918 		/*
1919 		 * If the release source is FW, process the HTT
1920 		 * status
1921 		 */
1922 		if (qdf_unlikely(buffer_src ==
1923 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
1924 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
1925 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
1926 					htt_tx_status);
1927 			dp_tx_process_htt_completion(tx_desc,
1928 					htt_tx_status);
1929 		} else {
1930 			tx_desc->next = NULL;
1931 
			/* First descriptor reaped in this cycle */
1933 			if (!head_desc) {
1934 				head_desc = tx_desc;
1935 			} else {
1936 				tail_desc->next = tx_desc;
1937 			}
1938 
1939 			tail_desc = tx_desc;
1940 
1941 			/* Collect hw completion contents */
1942 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
1943 					&tx_desc->comp, soc->process_tx_status);
1944 
1945 		}
1946 
1947 		num_processed++;
1948 
		/*
		 * Stop processing once the number of reaped descriptors
		 * reaches the given quota
		 */
1953 		if (num_processed >= budget)
1954 			break;
1955 
1956 	}
1957 
1958 	hal_srng_access_end(soc->hal_soc, hal_srng);
1959 
1960 	/* Process the reaped descriptors */
1961 	if (head_desc)
1962 		dp_tx_comp_process_desc(soc, head_desc);
1963 
1964 	return num_processed;
1965 }
1966 
1967 /**
1968  * dp_tx_vdev_attach() - attach vdev to dp tx
1969  * @vdev: virtual device instance
1970  *
1971  * Return: QDF_STATUS_SUCCESS: success
1972  *         QDF_STATUS_E_RESOURCES: Error return
1973  */
1974 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
1975 {
1976 
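	/*
	 * The TCL metadata programmed here is carried in the TCL data
	 * command of every frame queued on this vdev, so that the target
	 * can associate each frame with its vdev and MAC.
	 */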
1977 	/*
1978 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
1979 	 */
1980 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
1981 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
1982 
1983 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
1984 			vdev->vdev_id);
1985 
1986 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
1987 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
1988 
1989 	/*
1990 	 * Set HTT Extension Valid bit to 0 by default
1991 	 */
1992 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
1993 
1994 	return QDF_STATUS_SUCCESS;
1995 }
1996 
1997 /**
1998  * dp_tx_vdev_detach() - detach vdev from dp tx
1999  * @vdev: virtual device instance
2000  *
2001  * Return: QDF_STATUS_SUCCESS: success
2002  *         QDF_STATUS_E_RESOURCES: Error return
2003  */
2004 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
2005 {
2006 	return QDF_STATUS_SUCCESS;
2007 }
2008 
2009 /**
2010  * dp_tx_pdev_attach() - attach pdev to dp tx
2011  * @pdev: physical device instance
2012  *
2013  * Return: QDF_STATUS_SUCCESS: success
2014  *         QDF_STATUS_E_RESOURCES: Error return
2015  */
2016 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
2017 {
2018 	struct dp_soc *soc = pdev->soc;
2019 
2020 	/* Initialize Flow control counters */
2021 	qdf_atomic_init(&pdev->num_tx_exception);
2022 	qdf_atomic_init(&pdev->num_tx_outstanding);
2023 
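	/*
	 * When TCL data rings are configured per pdev, initialize this
	 * pdev's ring here; otherwise all rings are initialized in
	 * dp_tx_soc_attach().
	 */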
2024 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2025 		/* Initialize descriptors in TCL Ring */
2026 		hal_tx_init_data_ring(soc->hal_soc,
2027 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
2028 	}
2029 
2030 	return QDF_STATUS_SUCCESS;
2031 }
2032 
2033 /**
2034  * dp_tx_pdev_detach() - detach pdev from dp tx
2035  * @pdev: physical device instance
2036  *
2037  * Return: QDF_STATUS_SUCCESS: success
2038  *         QDF_STATUS_E_RESOURCES: Error return
2039  */
2040 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
2041 {
	/* TODO: What should be done here? */
2043 	return QDF_STATUS_SUCCESS;
2044 }
2045 
2046 /**
2047  * dp_tx_soc_detach() - detach soc from dp tx
2048  * @soc: core txrx main context
2049  *
 * This function detaches dp tx from the main device context and
 * frees the dp tx descriptor pool resources
2052  *
2053  * Return: QDF_STATUS_SUCCESS: success
2054  *         QDF_STATUS_E_RESOURCES: Error return
2055  */
2056 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
2057 {
2058 	uint8_t num_pool;
2059 	uint16_t num_desc;
2060 	uint16_t num_ext_desc;
2061 	uint8_t i;
2062 
2063 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
2064 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
2065 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
2066 
2067 	for (i = 0; i < num_pool; i++) {
2068 		if (dp_tx_desc_pool_free(soc, i)) {
2069 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2070 					"%s Tx Desc Pool Free failed\n",
2071 					__func__);
2072 			return QDF_STATUS_E_RESOURCES;
2073 		}
2074 	}
2075 
2076 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2077 			"%s Tx Desc Pool Free num_pool = %d, descs = %d\n",
2078 			__func__, num_pool, num_desc);
2079 
2080 	for (i = 0; i < num_pool; i++) {
2081 		if (dp_tx_ext_desc_pool_free(soc, i)) {
2082 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2083 					"%s Tx Ext Desc Pool Free failed\n",
2084 					__func__);
2085 			return QDF_STATUS_E_RESOURCES;
2086 		}
2087 	}
2088 
2089 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2090 			"%s MSDU Ext Desc Pool %d Free descs = %d\n",
2091 			__func__, num_pool, num_ext_desc);
2092 
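	/* Free TSO descriptor pools */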
2093 	for (i = 0; i < num_pool; i++) {
2094 		dp_tx_tso_desc_pool_free(soc, i);
2095 	}
2096 
2097 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2098 			"%s TSO Desc Pool %d Free descs = %d\n",
2099 			__func__, num_pool, num_desc);
2100 
2101 	return QDF_STATUS_SUCCESS;
2102 }
2103 
2104 /**
2105  * dp_tx_soc_attach() - attach soc to dp tx
2106  * @soc: core txrx main context
2107  *
 * This function attaches dp tx to the main device context,
 * allocating the dp tx descriptor pool resources and initializing them
2110  *
2111  * Return: QDF_STATUS_SUCCESS: success
2112  *         QDF_STATUS_E_RESOURCES: Error return
2113  */
2114 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
2115 {
2116 	uint8_t num_pool;
2117 	uint32_t num_desc;
2118 	uint32_t num_ext_desc;
2119 	uint8_t i;
2120 
2121 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
2122 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
2123 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
2124 
2125 	/* Allocate software Tx descriptor pools */
2126 	for (i = 0; i < num_pool; i++) {
2127 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
2128 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2129 					"%s Tx Desc Pool alloc %d failed %p\n",
2130 					__func__, i, soc);
2131 			goto fail;
2132 		}
2133 	}
2134 
2135 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2136 			"%s Tx Desc Alloc num_pool = %d, descs = %d\n",
2137 			__func__, num_pool, num_desc);
2138 
2139 	/* Allocate extension tx descriptor pools */
2140 	for (i = 0; i < num_pool; i++) {
2141 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
2142 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2143 				"MSDU Ext Desc Pool alloc %d failed %p\n",
2144 				i, soc);
2145 
2146 			goto fail;
2147 		}
2148 	}
2149 
2150 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2151 			"%s MSDU Ext Desc Alloc %d, descs = %d\n",
2152 			__func__, num_pool, num_ext_desc);
2153 
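	/* Allocate TSO descriptor pools */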
2154 	for (i = 0; i < num_pool; i++) {
2155 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
2156 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2157 				"TSO Desc Pool alloc %d failed %p\n",
2158 				i, soc);
2159 
2160 			goto fail;
2161 		}
2162 	}
2163 
2164 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2165 			"%s TSO Desc Alloc %d, descs = %d\n",
2166 			__func__, num_pool, num_desc);
2167 
2168 	/* Initialize descriptors in TCL Rings */
2169 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2170 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2171 			hal_tx_init_data_ring(soc->hal_soc,
2172 					soc->tcl_data_ring[i].hal_srng);
2173 		}
2174 	}
2175 
	/*
	 * TODO: Add a runtime config option to enable this.
	 *
	 * Due to multiple issues on NPR EMU, this is enabled selectively
	 * only for NPR EMU; it should be removed once NPR platforms are
	 * stable.
	 */
2184 	soc->process_tx_status = 1;
2185 
2186 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2187 			"%s HAL Tx init Success\n", __func__);
2188 
2189 	return QDF_STATUS_SUCCESS;
2190 
2191 fail:
2192 	/* Detach will take care of freeing only allocated resources */
2193 	dp_tx_soc_detach(soc);
2194 	return QDF_STATUS_E_RESOURCES;
2195 }
2196 
/**
 * dp_tx_me_mem_free() - Free memory allocated for mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: Pointer to the head of the segment info list
 *
 * Return: void
 */
2204 static inline void dp_tx_me_mem_free(struct dp_pdev *pdev,
2205 		struct dp_tx_seg_info_s *seg_info_head)
2206 {
2207 	struct dp_tx_me_buf_t *mc_uc_buf;
2208 	struct dp_tx_seg_info_s *seg_info_new = NULL;
2209 	qdf_nbuf_t nbuf = NULL;
2210 	uint64_t phy_addr;
2211 
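	/*
	 * For each segment: unmap the DMA-mapped MAC buffer, return it to
	 * the ME buffer pool, free the associated nbuf and finally the
	 * segment descriptor itself.
	 */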
2212 	while (seg_info_head) {
2213 		nbuf = seg_info_head->nbuf;
		mc_uc_buf = (struct dp_tx_me_buf_t *)
			seg_info_head->frags[0].vaddr;
2216 		phy_addr = seg_info_head->frags[0].paddr_hi;
2217 		phy_addr =  (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
2218 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
2219 				phy_addr,
				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
2221 		dp_tx_me_free_buf(pdev, mc_uc_buf);
2222 		qdf_nbuf_free(nbuf);
2223 		seg_info_new = seg_info_head;
2224 		seg_info_head = seg_info_head->next;
2225 		qdf_mem_free(seg_info_new);
2226 	}
2227 }
2228 
2229 /**
 * dp_tx_me_send_convert_ucast() - function to convert multicast to unicast
 * @vdev_handle: DP VDEV handle
 * @nbuf: Multicast nbuf
 * @newmac: Table of the clients to which packets have to be sent
 * @new_mac_cnt: No of clients
 *
 * Return: no of converted packets
2237  */
2238 uint16_t
2239 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
2240 		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
2241 {
2242 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
2243 	struct dp_pdev *pdev = vdev->pdev;
2244 	struct ether_header *eh;
2245 	uint8_t *data;
2246 	uint16_t len;
2247 
2248 	/* reference to frame dst addr */
2249 	uint8_t *dstmac;
2250 	/* copy of original frame src addr */
2251 	uint8_t srcmac[DP_MAC_ADDR_LEN];
2252 
2253 	/* local index into newmac */
2254 	uint8_t new_mac_idx = 0;
2255 	struct dp_tx_me_buf_t *mc_uc_buf;
2256 	qdf_nbuf_t  nbuf_clone;
2257 	struct dp_tx_msdu_info_s msdu_info;
2258 	struct dp_tx_seg_info_s *seg_info_head = NULL;
2259 	struct dp_tx_seg_info_s *seg_info_tail = NULL;
2260 	struct dp_tx_seg_info_s *seg_info_new;
2261 	struct dp_tx_frag_info_s data_frag;
2262 	qdf_dma_addr_t paddr_data;
2263 	qdf_dma_addr_t paddr_mcbuf = 0;
2264 	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
2265 	QDF_STATUS status;
2266 
2267 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2268 
	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2270 	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
2271 
2272 	len = qdf_nbuf_len(nbuf);
2273 
2274 	data = qdf_nbuf_data(nbuf);
2275 
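	/*
	 * Map the original frame once; all converted copies reference this
	 * single DMA mapping for the payload portion of the frame.
	 */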
2276 	status = qdf_nbuf_map(vdev->osdev, nbuf,
2277 			QDF_DMA_TO_DEVICE);
2278 
2279 	if (status) {
2280 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2281 				"Mapping failure Error:%d", status);
2282 		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
2283 		return 0;
2284 	}
2285 
2286 	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
2287 
2288 	/*preparing data fragment*/
2289 	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
2290 	data_frag.paddr_lo = (uint32_t)paddr_data;
2291 	data_frag.paddr_hi = ((uint64_t)paddr_data & 0xffffffff00000000) >> 32;
2292 	data_frag.len = len - DP_MAC_ADDR_LEN;
2293 
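	/*
	 * For each client, build a two-fragment frame: fragment 0 holds the
	 * new destination MAC in a small ME buffer, fragment 1 reuses the
	 * original frame contents following the destination MAC and is
	 * shared by all copies.
	 */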
2294 	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
2295 		dstmac = newmac[new_mac_idx];
2296 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2297 				"added mac addr (%pM)", dstmac);
2298 
2299 		/* Check for NULL Mac Address */
2300 		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
2301 			continue;
2302 
2303 		/* frame to self mac. skip */
2304 		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
2305 			continue;
2306 
2307 		/*
2308 		 * TODO: optimize to avoid malloc in per-packet path
2309 		 * For eg. seg_pool can be made part of vdev structure
2310 		 */
2311 		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
2312 
2313 		if (!seg_info_new) {
2314 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2315 					"alloc failed");
2316 			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
2317 			goto fail_seg_alloc;
2318 		}
2319 
2320 		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
2321 		if (mc_uc_buf == NULL)
2322 			goto fail_buf_alloc;
2323 
2324 		/*
2325 		 * TODO: Check if we need to clone the nbuf
2326 		 * Or can we just use the reference for all cases
2327 		 */
2328 		if (new_mac_idx < (new_mac_cnt - 1)) {
2329 			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
2330 			if (nbuf_clone == NULL) {
2331 				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
2332 				goto fail_clone;
2333 			}
2334 		} else {
2335 			/*
2336 			 * Update the ref
2337 			 * to account for frame sent without cloning
2338 			 */
2339 			qdf_nbuf_ref(nbuf);
2340 			nbuf_clone = nbuf;
2341 		}
2342 
2343 		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
2344 
2345 		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
2346 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
2347 				&paddr_mcbuf);
2348 
2349 		if (status) {
2350 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2351 					"Mapping failure Error:%d", status);
2352 			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
2353 			goto fail_map;
2354 		}
2355 
2356 		seg_info_new->frags[0].vaddr =  (uint8_t *)mc_uc_buf;
2357 		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
2358 		seg_info_new->frags[0].paddr_hi =
			((uint64_t)paddr_mcbuf & 0xffffffff00000000) >> 32;
2360 		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
2361 
2362 		seg_info_new->frags[1] = data_frag;
2363 		seg_info_new->nbuf = nbuf_clone;
2364 		seg_info_new->frag_cnt = 2;
2365 		seg_info_new->total_len = len;
2366 
2367 		seg_info_new->next = NULL;
2368 
2369 		if (seg_info_head == NULL)
2370 			seg_info_head = seg_info_new;
2371 		else
2372 			seg_info_tail->next = seg_info_new;
2373 
2374 		seg_info_tail = seg_info_new;
2375 	}
2376 
2377 	if (!seg_info_head)
2378 		return 0;
2379 
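	/* Hand the accumulated segment list to the multi-segment Tx path */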
2380 	msdu_info.u.sg_info.curr_seg = seg_info_head;
2381 	msdu_info.num_seg = new_mac_cnt;
2382 	msdu_info.frm_type = dp_tx_frm_me;
2383 
2384 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
2385 	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2386 
2387 	while (seg_info_head->next) {
2388 		seg_info_new = seg_info_head;
2389 		seg_info_head = seg_info_head->next;
2390 		qdf_mem_free(seg_info_new);
2391 	}
2392 	qdf_mem_free(seg_info_head);
2393 
2394 	return new_mac_cnt;
2395 
2396 fail_map:
2397 	qdf_nbuf_free(nbuf_clone);
2398 
2399 fail_clone:
2400 	dp_tx_me_free_buf(pdev, mc_uc_buf);
2401 
2402 fail_buf_alloc:
2403 	qdf_mem_free(seg_info_new);
2404 
2405 fail_seg_alloc:
2406 	dp_tx_me_mem_free(pdev, seg_info_head);
2407 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2408 	return 0;
2409 }
2410