xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 1b9674e21e24478fba4530f5ae7396b9555e9c6a)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_htt.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx_mon.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
30 #include "cdp_txrx_cmn_struct.h"
31 
32 #ifdef FEATURE_PERPKT_INFO
33 #include "dp_ratetable.h"
34 #endif
35 
/* Length of the TLV header used in HTT T2H extended-stats messages */
#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE

/* Number of HTC packet containers pre-allocated for the HTT pool */
#define HTT_HTC_PKT_POOL_INIT_SIZE 64
/* Upper bound on a single target-to-host HTT message, in bytes */
#define HTT_T2H_MAX_MSG_SIZE 2048

/* Total buffer size needed to carry an HTT message: the payload plus the
 * HTC header and its alignment padding.
 */
#define HTT_MSG_BUF_SIZE(msg_bytes) \
	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)

/* 2-bit PID mask — presumably masks the profile/PID field of a stats
 * message; usage is not visible in this chunk (NOTE(review): confirm).
 */
#define HTT_PID_BIT_MASK 0x3

/* Fragment length for extended (multi-buffer) HTT T2H messages */
#define DP_EXT_MSG_LENGTH 2048
/* Submit an HTT packet over HTC; on successful submission the packet is
 * tracked on the misc list so its buffers can be reclaimed later.
 */
#define DP_HTT_SEND_HTC_PKT(soc, pkt)                            \
do {                                                             \
	if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==         \
					QDF_STATUS_SUCCESS)      \
		htt_htc_misc_pkt_list_add(soc, pkt);             \
} while (0)

/* Reserved bytes at the head of the mgmt-ctrl TLV payload */
#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
55 
56 /**
57  * Bitmap of HTT PPDU TLV types for Default mode
58  */
59 #define HTT_PPDU_DEFAULT_TLV_BITMAP \
60 	(1 << HTT_PPDU_STATS_COMMON_TLV) | \
61 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
62 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
63 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
64 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
65 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)
66 
67 /**
68  * Bitmap of HTT PPDU TLV types for Sniffer mode
69  */
70 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP \
71 	(1 << HTT_PPDU_STATS_COMMON_TLV) | \
72 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
73 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
74 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
75 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
76 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
77 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
78 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
79 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV)
80 
81 #define HTT_FRAMECTRL_DATATYPE 0x08
82 #define HTT_PPDU_DESC_MAX_DEPTH 16
83 
84 /*
85  * dp_tx_stats_update() - Update per-peer statistics
86  * @soc: Datapath soc handle
87  * @peer: Datapath peer handle
88  * @ppdu: PPDU Descriptor
89  * @ack_rssi: RSSI of last ack received
90  *
91  * Return: None
92  */
93 #ifdef FEATURE_PERPKT_INFO
94 static inline void
95 dp_tx_rate_stats_update(struct dp_peer *peer,
96 			struct cdp_tx_completion_ppdu_user *ppdu)
97 {
98 	uint32_t ratekbps = 0;
99 	uint32_t ppdu_tx_rate = 0;
100 
101 	if (!peer || !ppdu)
102 		return;
103 
104 	dp_peer_stats_notify(peer);
105 
106 	ratekbps = dp_getrateindex(ppdu->mcs,
107 				   ppdu->nss,
108 				   ppdu->preamble,
109 				   ppdu->bw);
110 
111 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
112 
113 	if (!ratekbps)
114 		return;
115 
116 	dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
117 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
118 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
119 
120 	if (peer->vdev) {
121 		peer->vdev->stats.tx.last_tx_rate = ratekbps;
122 		peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
123 	}
124 }
125 
static void dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer,
		struct cdp_tx_completion_ppdu_user *ppdu, uint32_t ack_rssi)
{
	/* caller must guarantee peer and peer->vdev are valid here */
	struct dp_pdev *pdev = peer->vdev->pdev;
	uint8_t preamble, mcs;
	uint16_t num_msdu;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;

	/* If the peer statistics are already processed as part of
	 * per-MSDU completion handler, do not process these again in per-PPDU
	 * indications */
	if (soc->process_tx_status)
		return;

	/* aggregate completion counters: MSDU count plus all bytes moved
	 * (successful + retried + failed) */
	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			num_msdu, (ppdu->success_bytes +
				ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	/* per-feature histograms keyed by this PPDU's parameters */
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	/* ack RSSI is only meaningful for unicast PPDUs */
	if (!(ppdu->is_mcast))
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	DP_STATS_INC(peer, tx.retries,
			(ppdu->long_retries + ppdu->short_retries));
	/* Per-preamble MCS histograms: for each preamble type, an MCS at or
	 * above the legal maximum is folded into the overflow bucket
	 * (MAX_MCS - 1), otherwise the exact MCS bucket is bumped. */
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));

	dp_tx_rate_stats_update(peer, ppdu);

	/* PER (%) = (unicast attempted - successful) / attempted; guarded
	 * against divide-by-zero when no unicast frames were attempted */
	if (peer->stats.tx.ucast.num)
		peer->stats.tx.last_per = ((peer->stats.tx.ucast.num -
					peer->stats.tx.tx_success.num) * 100) /
					peer->stats.tx.ucast.num;

	/* hand the refreshed peer stats to the control path, if registered */
	if (soc->cdp_soc.ol_ops->update_dp_stats) {
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
				&peer->stats, ppdu->peer_id,
				UPDATE_PEER_STATS);

	}
}
204 #endif
205 
206 /*
207  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
208  * @htt_soc:	HTT SOC handle
209  *
210  * Return: Pointer to htc packet buffer
211  */
212 static struct dp_htt_htc_pkt *
213 htt_htc_pkt_alloc(struct htt_soc *soc)
214 {
215 	struct dp_htt_htc_pkt_union *pkt = NULL;
216 
217 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
218 	if (soc->htt_htc_pkt_freelist) {
219 		pkt = soc->htt_htc_pkt_freelist;
220 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
221 	}
222 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
223 
224 	if (pkt == NULL)
225 		pkt = qdf_mem_malloc(sizeof(*pkt));
226 	return &pkt->u.pkt; /* not actually a dereference */
227 }
228 
229 /*
230  * htt_htc_pkt_free() - Free HTC packet buffer
231  * @htt_soc:	HTT SOC handle
232  */
233 static void
234 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
235 {
236 	struct dp_htt_htc_pkt_union *u_pkt =
237 		(struct dp_htt_htc_pkt_union *)pkt;
238 
239 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
240 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
241 	soc->htt_htc_pkt_freelist = u_pkt;
242 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
243 }
244 
245 /*
246  * htt_htc_pkt_pool_free() - Free HTC packet pool
247  * @htt_soc:	HTT SOC handle
248  */
249 static void
250 htt_htc_pkt_pool_free(struct htt_soc *soc)
251 {
252 	struct dp_htt_htc_pkt_union *pkt, *next;
253 	pkt = soc->htt_htc_pkt_freelist;
254 	while (pkt) {
255 		next = pkt->u.next;
256 		qdf_mem_free(pkt);
257 		pkt = next;
258 	}
259 	soc->htt_htc_pkt_freelist = NULL;
260 }
261 
262 /*
263  * htt_htc_misc_pkt_list_trim() - trim misc list
264  * @htt_soc: HTT SOC handle
265  * @level: max no. of pkts in list
266  */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	/* Keep the first 'level' entries; unmap/free everything beyond,
	 * and NULL-terminate the list at the node before the cut point.
	 */
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			/* pkt is set to NULL so 'prev' stays NULL for all
			 * subsequent freed nodes — the list was already
			 * terminated on the first trimmed node */
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
			/* NOTE(review): if level < 1 the head node is freed
			 * but soc->htt_htc_pkt_misclist would still point at
			 * it; the caller in this file always passes
			 * level >= DP_HTT_HTC_PKT_MISCLIST_SIZE — confirm
			 * before reusing with smaller levels. */
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
294 
295 /*
296  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
297  * @htt_soc:	HTT SOC handle
298  * @dp_htt_htc_pkt: pkt to be added to list
299  */
300 static void
301 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
302 {
303 	struct dp_htt_htc_pkt_union *u_pkt =
304 				(struct dp_htt_htc_pkt_union *)pkt;
305 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
306 							pkt->htc_pkt.Endpoint)
307 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
308 
309 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
310 	if (soc->htt_htc_pkt_misclist) {
311 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
312 		soc->htt_htc_pkt_misclist = u_pkt;
313 	} else {
314 		soc->htt_htc_pkt_misclist = u_pkt;
315 	}
316 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
317 
318 	/* only ce pipe size + tx_queue_depth could possibly be in use
319 	 * free older packets in the misclist
320 	 */
321 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
322 }
323 
324 /*
325  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
326  * @htt_soc:	HTT SOC handle
327  */
328 static void
329 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
330 {
331 	struct dp_htt_htc_pkt_union *pkt, *next;
332 	qdf_nbuf_t netbuf;
333 
334 	pkt = soc->htt_htc_pkt_misclist;
335 
336 	while (pkt) {
337 		next = pkt->u.next;
338 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
339 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
340 
341 		soc->stats.htc_pkt_free++;
342 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
343 			 "%s: Pkt free count %d",
344 			 __func__, soc->stats.htc_pkt_free);
345 
346 		qdf_nbuf_free(netbuf);
347 		qdf_mem_free(pkt);
348 		pkt = next;
349 	}
350 	soc->htt_htc_pkt_misclist = NULL;
351 }
352 
353 /*
354  * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
355  * @tgt_mac_addr:	Target MAC
356  * @buffer:		Output buffer
357  */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The target uploads the message as u_int32_t words and swizzles
	 * each word's bytes for the big-endian host.  That swizzle scrambles
	 * byte-array fields such as the MAC address, so undo it here by
	 * reversing the byte order within each 4-byte word (the 6 MAC bytes
	 * span the first one and a half words).
	 */
	int i;

	for (i = 0; i < 6; i++)
		buffer[i] = tgt_mac_addr[(i & ~0x3) + (3 - (i & 0x3))];
	return buffer;
#else
	/*
	 * Host and target endianness match — the MAC address can be used
	 * straight out of the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
386 
387 /*
388  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
389  * @soc:	SOC handle
390  * @status:	Completion status
391  * @netbuf:	HTT buffer
392  */
/* Default send-done "part 2" callback: simply release the message buffer.
 * Installed via SET_HTC_PACKET_INFO_TX for messages that need no further
 * processing after transmission.
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
399 
400 /*
401  * dp_htt_h2t_send_complete() - H2T completion handler
402  * @context:	Opaque context (HTT SOC handle)
403  * @htc_pkt:	HTC packet
404  */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	/* optional "part 2" completion hook, stashed in pPktContext at
	 * send time (e.g. dp_htt_h2t_send_complete_free_netbuf) */
	void (*send_complete_part2)(
		void *soc, A_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	send_complete_part2 = htc_pkt->pPktContext;

	/* recover the enclosing dp_htt_htc_pkt from its embedded HTC_PACKET */
	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2 != NULL) {
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object — the container is
	 * recycled onto the freelist, not released to the allocator */
	htt_htc_pkt_free(soc, htt_pkt);
}
431 
432 /*
433  * htt_h2t_ver_req_msg() - Send HTT version request message to target
434  * @htt_soc:	HTT SOC handle
435  *
436  * Return: 0 on success; error code on failure
437  */
438 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
439 {
440 	struct dp_htt_htc_pkt *pkt;
441 	qdf_nbuf_t msg;
442 	uint32_t *msg_word;
443 
444 	msg = qdf_nbuf_alloc(
445 		soc->osdev,
446 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
447 		/* reserve room for the HTC header */
448 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
449 	if (!msg)
450 		return QDF_STATUS_E_NOMEM;
451 
452 	/*
453 	 * Set the length of the message.
454 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
455 	 * separately during the below call to qdf_nbuf_push_head.
456 	 * The contribution from the HTC header is added separately inside HTC.
457 	 */
458 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
459 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
460 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
461 			__func__);
462 		return QDF_STATUS_E_FAILURE;
463 	}
464 
465 	/* fill in the message contents */
466 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
467 
468 	/* rewind beyond alignment pad to get to the HTC header reserved area */
469 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
470 
471 	*msg_word = 0;
472 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
473 
474 	pkt = htt_htc_pkt_alloc(soc);
475 	if (!pkt) {
476 		qdf_nbuf_free(msg);
477 		return QDF_STATUS_E_FAILURE;
478 	}
479 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
480 
481 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
482 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
483 		qdf_nbuf_len(msg), soc->htc_endpoint,
484 		1); /* tag - not relevant here */
485 
486 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
487 	DP_HTT_SEND_HTC_PKT(soc, pkt);
488 	return 0;
489 }
490 
491 /*
492  * htt_srng_setup() - Send SRNG setup message to target
493  * @htt_soc:	HTT SOC handle
494  * @mac_id:	MAC Id
495  * @hal_srng:	Opaque HAL SRNG pointer
496  * @hal_ring_type:	SRNG ring type
497  *
498  * Return: 0 on success; error code on failure
499  */
500 int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
501 	int hal_ring_type)
502 {
503 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
504 	struct dp_htt_htc_pkt *pkt;
505 	qdf_nbuf_t htt_msg;
506 	uint32_t *msg_word;
507 	struct hal_srng_params srng_params;
508 	qdf_dma_addr_t hp_addr, tp_addr;
509 	uint32_t ring_entry_size =
510 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
511 	int htt_ring_type, htt_ring_id;
512 
513 	/* Sizes should be set in 4-byte words */
514 	ring_entry_size = ring_entry_size >> 2;
515 
516 	htt_msg = qdf_nbuf_alloc(soc->osdev,
517 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
518 		/* reserve room for the HTC header */
519 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
520 	if (!htt_msg)
521 		goto fail0;
522 
523 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
524 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
525 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
526 
527 	switch (hal_ring_type) {
528 	case RXDMA_BUF:
529 #ifdef QCA_HOST2FW_RXBUF_RING
530 		if (srng_params.ring_id ==
531 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
532 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
533 			htt_ring_type = HTT_SW_TO_SW_RING;
534 #ifdef IPA_OFFLOAD
535 		} else if (srng_params.ring_id ==
536 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
537 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
538 			htt_ring_type = HTT_SW_TO_SW_RING;
539 #endif
540 #else
541 		if (srng_params.ring_id ==
542 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
543 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
544 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
545 			htt_ring_type = HTT_SW_TO_HW_RING;
546 #endif
547 		} else if (srng_params.ring_id ==
548 #ifdef IPA_OFFLOAD
549 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
550 #else
551 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
552 #endif
553 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
554 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
555 			htt_ring_type = HTT_SW_TO_HW_RING;
556 		} else {
557 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
558 				   "%s: Ring %d currently not supported",
559 				   __func__, srng_params.ring_id);
560 			goto fail1;
561 		}
562 
563 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
564 			  "%s: ring_type %d ring_id %d",
565 			  __func__, hal_ring_type, srng_params.ring_id);
566 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
567 			  "%s: hp_addr 0x%llx tp_addr 0x%llx",
568 			  __func__, (uint64_t)hp_addr, (uint64_t)tp_addr);
569 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
570 			  "%s: htt_ring_id %d", __func__, htt_ring_id);
571 		break;
572 	case RXDMA_MONITOR_BUF:
573 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
574 		htt_ring_type = HTT_SW_TO_HW_RING;
575 		break;
576 	case RXDMA_MONITOR_STATUS:
577 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
578 		htt_ring_type = HTT_SW_TO_HW_RING;
579 		break;
580 	case RXDMA_MONITOR_DST:
581 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
582 		htt_ring_type = HTT_HW_TO_SW_RING;
583 		break;
584 	case RXDMA_MONITOR_DESC:
585 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
586 		htt_ring_type = HTT_SW_TO_HW_RING;
587 		break;
588 	case RXDMA_DST:
589 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
590 		htt_ring_type = HTT_HW_TO_SW_RING;
591 		break;
592 
593 	default:
594 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
595 			"%s: Ring currently not supported", __func__);
596 			goto fail1;
597 	}
598 
599 	/*
600 	 * Set the length of the message.
601 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
602 	 * separately during the below call to qdf_nbuf_push_head.
603 	 * The contribution from the HTC header is added separately inside HTC.
604 	 */
605 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
606 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
607 			"%s: Failed to expand head for SRING_SETUP msg",
608 			__func__);
609 		return QDF_STATUS_E_FAILURE;
610 	}
611 
612 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
613 
614 	/* rewind beyond alignment pad to get to the HTC header reserved area */
615 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
616 
617 	/* word 0 */
618 	*msg_word = 0;
619 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
620 
621 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
622 			(htt_ring_type == HTT_HW_TO_SW_RING))
623 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
624 			 DP_SW2HW_MACID(mac_id));
625 	else
626 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
627 
628 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
629 		  "%s: mac_id %d", __func__, mac_id);
630 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
631 	/* TODO: Discuss with FW on changing this to unique ID and using
632 	 * htt_ring_type to send the type of ring
633 	 */
634 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
635 
636 	/* word 1 */
637 	msg_word++;
638 	*msg_word = 0;
639 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
640 		srng_params.ring_base_paddr & 0xffffffff);
641 
642 	/* word 2 */
643 	msg_word++;
644 	*msg_word = 0;
645 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
646 		(uint64_t)srng_params.ring_base_paddr >> 32);
647 
648 	/* word 3 */
649 	msg_word++;
650 	*msg_word = 0;
651 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
652 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
653 		(ring_entry_size * srng_params.num_entries));
654 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
655 		  "%s: entry_size %d", __func__,
656 			 ring_entry_size);
657 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
658 		  "%s: num_entries %d", __func__,
659 			 srng_params.num_entries);
660 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
661 		  "%s: ring_size %d", __func__,
662 			 (ring_entry_size * srng_params.num_entries));
663 	if (htt_ring_type == HTT_SW_TO_HW_RING)
664 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
665 						*msg_word, 1);
666 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
667 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
668 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
669 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
670 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
671 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
672 
673 	/* word 4 */
674 	msg_word++;
675 	*msg_word = 0;
676 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
677 		hp_addr & 0xffffffff);
678 
679 	/* word 5 */
680 	msg_word++;
681 	*msg_word = 0;
682 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
683 		(uint64_t)hp_addr >> 32);
684 
685 	/* word 6 */
686 	msg_word++;
687 	*msg_word = 0;
688 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
689 		tp_addr & 0xffffffff);
690 
691 	/* word 7 */
692 	msg_word++;
693 	*msg_word = 0;
694 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
695 		(uint64_t)tp_addr >> 32);
696 
697 	/* word 8 */
698 	msg_word++;
699 	*msg_word = 0;
700 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
701 		srng_params.msi_addr & 0xffffffff);
702 
703 	/* word 9 */
704 	msg_word++;
705 	*msg_word = 0;
706 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
707 		(uint64_t)(srng_params.msi_addr) >> 32);
708 
709 	/* word 10 */
710 	msg_word++;
711 	*msg_word = 0;
712 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
713 		srng_params.msi_data);
714 
715 	/* word 11 */
716 	msg_word++;
717 	*msg_word = 0;
718 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
719 		srng_params.intr_batch_cntr_thres_entries *
720 		ring_entry_size);
721 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
722 		srng_params.intr_timer_thres_us >> 3);
723 
724 	/* word 12 */
725 	msg_word++;
726 	*msg_word = 0;
727 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
728 		/* TODO: Setting low threshold to 1/8th of ring size - see
729 		 * if this needs to be configurable
730 		 */
731 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
732 			srng_params.low_threshold);
733 	}
734 	/* "response_required" field should be set if a HTT response message is
735 	 * required after setting up the ring.
736 	 */
737 	pkt = htt_htc_pkt_alloc(soc);
738 	if (!pkt)
739 		goto fail1;
740 
741 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
742 
743 	SET_HTC_PACKET_INFO_TX(
744 		&pkt->htc_pkt,
745 		dp_htt_h2t_send_complete_free_netbuf,
746 		qdf_nbuf_data(htt_msg),
747 		qdf_nbuf_len(htt_msg),
748 		soc->htc_endpoint,
749 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
750 
751 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
752 	DP_HTT_SEND_HTC_PKT(soc, pkt);
753 
754 	return QDF_STATUS_SUCCESS;
755 
756 fail1:
757 	qdf_nbuf_free(htt_msg);
758 fail0:
759 	return QDF_STATUS_E_FAILURE;
760 }
761 
762 /*
763  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
764  * config message to target
765  * @htt_soc:	HTT SOC handle
766  * @pdev_id:	PDEV Id
767  * @hal_srng:	Opaque HAL SRNG pointer
768  * @hal_ring_type:	SRNG ring type
769  * @ring_buf_size:	SRNG buffer size
770  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
771  * Return: 0 on success; error code on failure
772  */
773 int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
774 	int hal_ring_type, int ring_buf_size,
775 	struct htt_rx_ring_tlv_filter *htt_tlv_filter)
776 {
777 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
778 	struct dp_htt_htc_pkt *pkt;
779 	qdf_nbuf_t htt_msg;
780 	uint32_t *msg_word;
781 	struct hal_srng_params srng_params;
782 	uint32_t htt_ring_type, htt_ring_id;
783 	uint32_t tlv_filter;
784 
785 	htt_msg = qdf_nbuf_alloc(soc->osdev,
786 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
787 	/* reserve room for the HTC header */
788 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
789 	if (!htt_msg)
790 		goto fail0;
791 
792 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
793 
794 	switch (hal_ring_type) {
795 	case RXDMA_BUF:
796 #if QCA_HOST2FW_RXBUF_RING
797 		htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
798 		htt_ring_type = HTT_SW_TO_SW_RING;
799 #else
800 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
801 		htt_ring_type = HTT_SW_TO_HW_RING;
802 #endif
803 		break;
804 	case RXDMA_MONITOR_BUF:
805 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
806 		htt_ring_type = HTT_SW_TO_HW_RING;
807 		break;
808 	case RXDMA_MONITOR_STATUS:
809 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
810 		htt_ring_type = HTT_SW_TO_HW_RING;
811 		break;
812 	case RXDMA_MONITOR_DST:
813 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
814 		htt_ring_type = HTT_HW_TO_SW_RING;
815 		break;
816 	case RXDMA_MONITOR_DESC:
817 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
818 		htt_ring_type = HTT_SW_TO_HW_RING;
819 		break;
820 	case RXDMA_DST:
821 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
822 		htt_ring_type = HTT_HW_TO_SW_RING;
823 		break;
824 
825 	default:
826 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
827 			"%s: Ring currently not supported", __func__);
828 		goto fail1;
829 	}
830 
831 	/*
832 	 * Set the length of the message.
833 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
834 	 * separately during the below call to qdf_nbuf_push_head.
835 	 * The contribution from the HTC header is added separately inside HTC.
836 	 */
837 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
838 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
839 			"%s: Failed to expand head for RX Ring Cfg msg",
840 			__func__);
841 		goto fail1; /* failure */
842 	}
843 
844 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
845 
846 	/* rewind beyond alignment pad to get to the HTC header reserved area */
847 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
848 
849 	/* word 0 */
850 	*msg_word = 0;
851 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
852 
853 	/*
854 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
855 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
856 	 */
857 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
858 			htt_ring_type == HTT_SW_TO_HW_RING)
859 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
860 						DP_SW2HW_MACID(pdev_id));
861 
862 	/* TODO: Discuss with FW on changing this to unique ID and using
863 	 * htt_ring_type to send the type of ring
864 	 */
865 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
866 
867 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
868 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
869 
870 	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
871 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
872 
873 	/* word 1 */
874 	msg_word++;
875 	*msg_word = 0;
876 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
877 		ring_buf_size);
878 
879 	/* word 2 */
880 	msg_word++;
881 	*msg_word = 0;
882 
883 	if (htt_tlv_filter->enable_fp) {
884 		/* TYPE: MGMT */
885 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
886 			FP, MGMT, 0000,
887 			(htt_tlv_filter->fp_mgmt_filter &
888 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
889 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
890 			FP, MGMT, 0001,
891 			(htt_tlv_filter->fp_mgmt_filter &
892 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
893 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
894 			FP, MGMT, 0010,
895 			(htt_tlv_filter->fp_mgmt_filter &
896 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
897 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
898 			FP, MGMT, 0011,
899 			(htt_tlv_filter->fp_mgmt_filter &
900 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
901 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
902 			FP, MGMT, 0100,
903 			(htt_tlv_filter->fp_mgmt_filter &
904 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
905 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
906 			FP, MGMT, 0101,
907 			(htt_tlv_filter->fp_mgmt_filter &
908 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
909 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
910 			FP, MGMT, 0110,
911 			(htt_tlv_filter->fp_mgmt_filter &
912 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
913 		/* reserved */
914 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
915 			MGMT, 0111,
916 			(htt_tlv_filter->fp_mgmt_filter &
917 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
918 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
919 			FP, MGMT, 1000,
920 			(htt_tlv_filter->fp_mgmt_filter &
921 			FILTER_MGMT_BEACON) ? 1 : 0);
922 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
923 			FP, MGMT, 1001,
924 			(htt_tlv_filter->fp_mgmt_filter &
925 			FILTER_MGMT_ATIM) ? 1 : 0);
926 	}
927 
928 	if (htt_tlv_filter->enable_md) {
929 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
930 				MGMT, 0000, 1);
931 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
932 				MGMT, 0001, 1);
933 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
934 				MGMT, 0010, 1);
935 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
936 				MGMT, 0011, 1);
937 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
938 				MGMT, 0100, 1);
939 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
940 				MGMT, 0101, 1);
941 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
942 				MGMT, 0110, 1);
943 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
944 				MGMT, 0111, 1);
945 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
946 				MGMT, 1000, 1);
947 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
948 				MGMT, 1001, 1);
949 	}
950 
951 	if (htt_tlv_filter->enable_mo) {
952 		/* TYPE: MGMT */
953 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
954 			MO, MGMT, 0000,
955 			(htt_tlv_filter->mo_mgmt_filter &
956 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
957 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
958 			MO, MGMT, 0001,
959 			(htt_tlv_filter->mo_mgmt_filter &
960 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
961 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
962 			MO, MGMT, 0010,
963 			(htt_tlv_filter->mo_mgmt_filter &
964 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
965 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
966 			MO, MGMT, 0011,
967 			(htt_tlv_filter->mo_mgmt_filter &
968 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
969 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
970 			MO, MGMT, 0100,
971 			(htt_tlv_filter->mo_mgmt_filter &
972 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
973 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
974 			MO, MGMT, 0101,
975 			(htt_tlv_filter->mo_mgmt_filter &
976 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
977 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
978 			MO, MGMT, 0110,
979 			(htt_tlv_filter->mo_mgmt_filter &
980 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
981 		/* reserved */
982 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
983 			MGMT, 0111,
984 			(htt_tlv_filter->mo_mgmt_filter &
985 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
986 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
987 			MO, MGMT, 1000,
988 			(htt_tlv_filter->mo_mgmt_filter &
989 			FILTER_MGMT_BEACON) ? 1 : 0);
990 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
991 			MO, MGMT, 1001,
992 			(htt_tlv_filter->mo_mgmt_filter &
993 			FILTER_MGMT_ATIM) ? 1 : 0);
994 	}
995 
996 	/* word 3 */
997 	msg_word++;
998 	*msg_word = 0;
999 
1000 	if (htt_tlv_filter->enable_fp) {
1001 		/* TYPE: MGMT */
1002 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1003 			FP, MGMT, 1010,
1004 			(htt_tlv_filter->fp_mgmt_filter &
1005 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1006 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1007 			FP, MGMT, 1011,
1008 			(htt_tlv_filter->fp_mgmt_filter &
1009 			FILTER_MGMT_AUTH) ? 1 : 0);
1010 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1011 			FP, MGMT, 1100,
1012 			(htt_tlv_filter->fp_mgmt_filter &
1013 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1014 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1015 			FP, MGMT, 1101,
1016 			(htt_tlv_filter->fp_mgmt_filter &
1017 			FILTER_MGMT_ACTION) ? 1 : 0);
1018 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1019 			FP, MGMT, 1110,
1020 			(htt_tlv_filter->fp_mgmt_filter &
1021 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1022 		/* reserved*/
1023 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1024 			MGMT, 1111,
1025 			(htt_tlv_filter->fp_mgmt_filter &
1026 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1027 	}
1028 
1029 	if (htt_tlv_filter->enable_md) {
1030 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1031 				MGMT, 1010, 1);
1032 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1033 				MGMT, 1011, 1);
1034 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1035 				MGMT, 1100, 1);
1036 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1037 				MGMT, 1101, 1);
1038 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1039 				MGMT, 1110, 1);
1040 	}
1041 
1042 	if (htt_tlv_filter->enable_mo) {
1043 		/* TYPE: MGMT */
1044 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1045 			MO, MGMT, 1010,
1046 			(htt_tlv_filter->mo_mgmt_filter &
1047 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1048 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1049 			MO, MGMT, 1011,
1050 			(htt_tlv_filter->mo_mgmt_filter &
1051 			FILTER_MGMT_AUTH) ? 1 : 0);
1052 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1053 			MO, MGMT, 1100,
1054 			(htt_tlv_filter->mo_mgmt_filter &
1055 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1056 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1057 			MO, MGMT, 1101,
1058 			(htt_tlv_filter->mo_mgmt_filter &
1059 			FILTER_MGMT_ACTION) ? 1 : 0);
1060 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1061 			MO, MGMT, 1110,
1062 			(htt_tlv_filter->mo_mgmt_filter &
1063 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1064 		/* reserved*/
1065 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1066 			MGMT, 1111,
1067 			(htt_tlv_filter->mo_mgmt_filter &
1068 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1069 	}
1070 
1071 	/* word 4 */
1072 	msg_word++;
1073 	*msg_word = 0;
1074 
1075 	if (htt_tlv_filter->enable_fp) {
1076 		/* TYPE: CTRL */
1077 		/* reserved */
1078 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1079 			CTRL, 0000,
1080 			(htt_tlv_filter->fp_ctrl_filter &
1081 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1082 		/* reserved */
1083 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1084 			CTRL, 0001,
1085 			(htt_tlv_filter->fp_ctrl_filter &
1086 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1087 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1088 			CTRL, 0010,
1089 			(htt_tlv_filter->fp_ctrl_filter &
1090 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1091 		/* reserved */
1092 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1093 			CTRL, 0011,
1094 			(htt_tlv_filter->fp_ctrl_filter &
1095 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1096 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1097 			CTRL, 0100,
1098 			(htt_tlv_filter->fp_ctrl_filter &
1099 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1100 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1101 			CTRL, 0101,
1102 			(htt_tlv_filter->fp_ctrl_filter &
1103 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1104 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1105 			CTRL, 0110,
1106 			(htt_tlv_filter->fp_ctrl_filter &
1107 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1108 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1109 			CTRL, 0111,
1110 			(htt_tlv_filter->fp_ctrl_filter &
1111 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1112 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1113 			CTRL, 1000,
1114 			(htt_tlv_filter->fp_ctrl_filter &
1115 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1116 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1117 			CTRL, 1001,
1118 			(htt_tlv_filter->fp_ctrl_filter &
1119 			FILTER_CTRL_BA) ? 1 : 0);
1120 	}
1121 
1122 	if (htt_tlv_filter->enable_md) {
1123 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1124 				CTRL, 0000, 1);
1125 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1126 				CTRL, 0001, 1);
1127 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1128 				CTRL, 0010, 1);
1129 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1130 				CTRL, 0011, 1);
1131 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1132 				CTRL, 0100, 1);
1133 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1134 				CTRL, 0101, 1);
1135 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1136 				CTRL, 0110, 1);
1137 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1138 				CTRL, 0111, 1);
1139 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1140 				CTRL, 1000, 1);
1141 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1142 				CTRL, 1001, 1);
1143 	}
1144 
1145 	if (htt_tlv_filter->enable_mo) {
1146 		/* TYPE: CTRL */
1147 		/* reserved */
1148 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1149 			CTRL, 0000,
1150 			(htt_tlv_filter->mo_ctrl_filter &
1151 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1152 		/* reserved */
1153 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1154 			CTRL, 0001,
1155 			(htt_tlv_filter->mo_ctrl_filter &
1156 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1157 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1158 			CTRL, 0010,
1159 			(htt_tlv_filter->mo_ctrl_filter &
1160 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1161 		/* reserved */
1162 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1163 			CTRL, 0011,
1164 			(htt_tlv_filter->mo_ctrl_filter &
1165 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1166 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1167 			CTRL, 0100,
1168 			(htt_tlv_filter->mo_ctrl_filter &
1169 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1170 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1171 			CTRL, 0101,
1172 			(htt_tlv_filter->mo_ctrl_filter &
1173 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1174 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1175 			CTRL, 0110,
1176 			(htt_tlv_filter->mo_ctrl_filter &
1177 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1178 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1179 			CTRL, 0111,
1180 			(htt_tlv_filter->mo_ctrl_filter &
1181 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1182 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1183 			CTRL, 1000,
1184 			(htt_tlv_filter->mo_ctrl_filter &
1185 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1186 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1187 			CTRL, 1001,
1188 			(htt_tlv_filter->mo_ctrl_filter &
1189 			FILTER_CTRL_BA) ? 1 : 0);
1190 	}
1191 
1192 	/* word 5 */
1193 	msg_word++;
1194 	*msg_word = 0;
1195 	if (htt_tlv_filter->enable_fp) {
1196 		/* TYPE: CTRL */
1197 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1198 			CTRL, 1010,
1199 			(htt_tlv_filter->fp_ctrl_filter &
1200 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1201 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1202 			CTRL, 1011,
1203 			(htt_tlv_filter->fp_ctrl_filter &
1204 			FILTER_CTRL_RTS) ? 1 : 0);
1205 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1206 			CTRL, 1100,
1207 			(htt_tlv_filter->fp_ctrl_filter &
1208 			FILTER_CTRL_CTS) ? 1 : 0);
1209 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1210 			CTRL, 1101,
1211 			(htt_tlv_filter->fp_ctrl_filter &
1212 			FILTER_CTRL_ACK) ? 1 : 0);
1213 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1214 			CTRL, 1110,
1215 			(htt_tlv_filter->fp_ctrl_filter &
1216 			FILTER_CTRL_CFEND) ? 1 : 0);
1217 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1218 			CTRL, 1111,
1219 			(htt_tlv_filter->fp_ctrl_filter &
1220 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1221 		/* TYPE: DATA */
1222 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1223 			DATA, MCAST,
1224 			(htt_tlv_filter->fp_data_filter &
1225 			FILTER_DATA_MCAST) ? 1 : 0);
1226 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1227 			DATA, UCAST,
1228 			(htt_tlv_filter->fp_data_filter &
1229 			FILTER_DATA_UCAST) ? 1 : 0);
1230 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1231 			DATA, NULL,
1232 			(htt_tlv_filter->fp_data_filter &
1233 			FILTER_DATA_NULL) ? 1 : 0);
1234 	}
1235 
1236 	if (htt_tlv_filter->enable_md) {
1237 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1238 				CTRL, 1010, 1);
1239 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1240 				CTRL, 1011, 1);
1241 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1242 				CTRL, 1100, 1);
1243 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1244 				CTRL, 1101, 1);
1245 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1246 				CTRL, 1110, 1);
1247 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1248 				CTRL, 1111, 1);
1249 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1250 				DATA, MCAST, 1);
1251 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1252 				DATA, UCAST, 1);
1253 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1254 				DATA, NULL, 1);
1255 	}
1256 
1257 	if (htt_tlv_filter->enable_mo) {
1258 		/* TYPE: CTRL */
1259 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1260 			CTRL, 1010,
1261 			(htt_tlv_filter->mo_ctrl_filter &
1262 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1263 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1264 			CTRL, 1011,
1265 			(htt_tlv_filter->mo_ctrl_filter &
1266 			FILTER_CTRL_RTS) ? 1 : 0);
1267 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1268 			CTRL, 1100,
1269 			(htt_tlv_filter->mo_ctrl_filter &
1270 			FILTER_CTRL_CTS) ? 1 : 0);
1271 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1272 			CTRL, 1101,
1273 			(htt_tlv_filter->mo_ctrl_filter &
1274 			FILTER_CTRL_ACK) ? 1 : 0);
1275 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1276 			CTRL, 1110,
1277 			(htt_tlv_filter->mo_ctrl_filter &
1278 			FILTER_CTRL_CFEND) ? 1 : 0);
1279 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1280 			CTRL, 1111,
1281 			(htt_tlv_filter->mo_ctrl_filter &
1282 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1283 		/* TYPE: DATA */
1284 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1285 			DATA, MCAST,
1286 			(htt_tlv_filter->mo_data_filter &
1287 			FILTER_DATA_MCAST) ? 1 : 0);
1288 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1289 			DATA, UCAST,
1290 			(htt_tlv_filter->mo_data_filter &
1291 			FILTER_DATA_UCAST) ? 1 : 0);
1292 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1293 			DATA, NULL,
1294 			(htt_tlv_filter->mo_data_filter &
1295 			FILTER_DATA_NULL) ? 1 : 0);
1296 	}
1297 
1298 	/* word 6 */
1299 	msg_word++;
1300 	*msg_word = 0;
1301 	tlv_filter = 0;
1302 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1303 		htt_tlv_filter->mpdu_start);
1304 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1305 		htt_tlv_filter->msdu_start);
1306 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1307 		htt_tlv_filter->packet);
1308 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1309 		htt_tlv_filter->msdu_end);
1310 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1311 		htt_tlv_filter->mpdu_end);
1312 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1313 		htt_tlv_filter->packet_header);
1314 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1315 		htt_tlv_filter->attention);
1316 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1317 		htt_tlv_filter->ppdu_start);
1318 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1319 		htt_tlv_filter->ppdu_end);
1320 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1321 		htt_tlv_filter->ppdu_end_user_stats);
1322 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1323 		PPDU_END_USER_STATS_EXT,
1324 		htt_tlv_filter->ppdu_end_user_stats_ext);
1325 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1326 		htt_tlv_filter->ppdu_end_status_done);
1327 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1328 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1329 		 htt_tlv_filter->header_per_msdu);
1330 
1331 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1332 
1333 	/* "response_required" field should be set if a HTT response message is
1334 	 * required after setting up the ring.
1335 	 */
1336 	pkt = htt_htc_pkt_alloc(soc);
1337 	if (!pkt)
1338 		goto fail1;
1339 
1340 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1341 
1342 	SET_HTC_PACKET_INFO_TX(
1343 		&pkt->htc_pkt,
1344 		dp_htt_h2t_send_complete_free_netbuf,
1345 		qdf_nbuf_data(htt_msg),
1346 		qdf_nbuf_len(htt_msg),
1347 		soc->htc_endpoint,
1348 		1); /* tag - not relevant here */
1349 
1350 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1351 	DP_HTT_SEND_HTC_PKT(soc, pkt);
1352 	return QDF_STATUS_SUCCESS;
1353 
1354 fail1:
1355 	qdf_nbuf_free(htt_msg);
1356 fail0:
1357 	return QDF_STATUS_E_FAILURE;
1358 }
1359 
1360 #if defined(CONFIG_WIN) && WDI_EVENT_ENABLE
1361 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1362 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1363 
1364 {
1365 	uint32_t pdev_id;
1366 	uint32_t *msg_word = NULL;
1367 	uint32_t msg_remain_len = 0;
1368 
1369 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1370 
1371 	/*COOKIE MSB*/
1372 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1373 
1374 	/* stats message length + 16 size of HTT header*/
1375 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1376 				(uint32_t)DP_EXT_MSG_LENGTH);
1377 
1378 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1379 			msg_word,  msg_remain_len,
1380 			WDI_NO_VAL, pdev_id);
1381 
1382 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1383 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1384 	}
1385 	/* Need to be freed here as WDI handler will
1386 	 * make a copy of pkt to send data to application
1387 	 */
1388 	qdf_nbuf_free(htt_msg);
1389 	return QDF_STATUS_SUCCESS;
1390 }
1391 #else
static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
					struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	/* WDI event delivery is compiled out in this build; the caller falls
	 * back to processing the stats buffer locally.
	 */
	return QDF_STATUS_E_NOSUPPORT;
}
1397 #endif
1398 
1399 /**
1400  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1401  * @htt_stats: htt stats info
1402  *
1403  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1404  * contains sub messages which are identified by a TLV header.
1405  * In this function we will process the stream of T2H messages and read all the
1406  * TLV contained in the message.
1407  *
 * The following cases have been taken care of
1409  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1410  *		In this case the buffer will contain multiple tlvs.
1411  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1412  *		Only one tlv will be contained in the HTT message and this tag
1413  *		will extend onto the next buffer.
1414  * Case 3: When the buffer is the continuation of the previous message
1415  * Case 4: tlv length is 0. which will indicate the end of message
1416  *
1417  * return: void
1418  */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	/* Scratch buffer for reassembling a TLV split across two buffers */
	uint8_t *tlv_buf_head = NULL;
	/* Write cursor within tlv_buf_head */
	uint8_t *tlv_buf_tail = NULL;
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val;
	int cookie_msb;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		/* Non-zero cookie LSB: try handing the segment to the WDI
		 * path; on success it owns (and frees) the buffer.
		 */
		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		/* Cookie MSB word: low bits carry the pdev id */
		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		/* Upper cookie bits request copy (accumulate) instead of
		 * print. NOTE(review): once set, copy_stats stays true for
		 * all later nodes in this stream — confirm that is intended.
		 */
		if (cookie_msb >> 2) {
			copy_stats = true;
		}

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero TLV length marks end of the stream;
			 * free any partial reassembly and bail out.
			 */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* Header length is only added once, when the TLV is
			 * first seen (not on continuation fragments).
			 */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3: tail of a TLV started in the
				 * previous buffer — finish the reassembly.
				 */
				if (tlv_buf_head) {
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev, tlv_type, tlv_start);
				else
					dp_htt_stats_print_tag(tlv_type, tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2: TLV extends into the next buffer —
				 * buffer what we have and continue there.
				 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		/* Track how much of the overall stats payload remains for
		 * the following nodes.
		 */
		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* Drop the current node and drain any queued remainder */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
1560 
1561 void htt_t2h_stats_handler(void *context)
1562 {
1563 	struct dp_soc *soc = (struct dp_soc *)context;
1564 	struct htt_stats_context htt_stats;
1565 	uint32_t *msg_word;
1566 	qdf_nbuf_t htt_msg = NULL;
1567 	uint8_t done;
1568 	uint8_t rem_stats;
1569 
1570 	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
1571 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1572 			"soc: 0x%pK, init_done: %d", soc,
1573 			qdf_atomic_read(&soc->cmn_init_done));
1574 		return;
1575 	}
1576 
1577 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1578 	qdf_nbuf_queue_init(&htt_stats.msg);
1579 
1580 	/* pull one completed stats from soc->htt_stats_msg and process */
1581 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1582 	if (!soc->htt_stats.num_stats) {
1583 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1584 		return;
1585 	}
1586 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1587 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1588 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1589 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1590 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1591 		/*
1592 		 * Done bit signifies that this is the last T2H buffer in the
1593 		 * stream of HTT EXT STATS message
1594 		 */
1595 		if (done)
1596 			break;
1597 	}
1598 	rem_stats = --soc->htt_stats.num_stats;
1599 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1600 
1601 	dp_process_htt_stat_msg(&htt_stats, soc);
1602 	/* If there are more stats to process, schedule stats work again */
1603 	if (rem_stats)
1604 		qdf_sched_work(0, &soc->htt_stats.work);
1605 }
1606 
1607 /*
1608  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1609  * if a new peer id arrives in a PPDU
1610  * pdev: DP pdev handle
1611  * @peer_id : peer unique identifier
1612  * @ppdu_info: per ppdu tlv structure
1613  *
1614  * return:user index to be populated
1615  */
1616 #ifdef FEATURE_PERPKT_INFO
1617 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
1618 						uint16_t peer_id,
1619 						struct ppdu_info *ppdu_info)
1620 {
1621 	uint8_t user_index = 0;
1622 	struct cdp_tx_completion_ppdu *ppdu_desc;
1623 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1624 
1625 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1626 
1627 	while ((user_index + 1) <= ppdu_info->last_user) {
1628 		ppdu_user_desc = &ppdu_desc->user[user_index];
1629 		if (ppdu_user_desc->peer_id != peer_id) {
1630 			user_index++;
1631 			continue;
1632 		} else {
1633 			/* Max users possible is 8 so user array index should
1634 			 * not exceed 7
1635 			 */
1636 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
1637 			return user_index;
1638 		}
1639 	}
1640 
1641 	ppdu_info->last_user++;
1642 	/* Max users possible is 8 so last user should not exceed 8 */
1643 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
1644 	return ppdu_info->last_user - 1;
1645 }
1646 
1647 /*
1648  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
1649  * pdev: DP pdev handle
1650  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
1651  * @ppdu_info: per ppdu tlv structure
1652  *
1653  * return:void
1654  */
1655 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
1656 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
1657 {
1658 	uint16_t frame_type;
1659 	uint16_t freq;
1660 	struct dp_soc *soc = NULL;
1661 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
1662 
1663 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1664 
1665 	tag_buf += 2;
1666 	ppdu_desc->num_users =
1667 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
1668 	tag_buf++;
1669 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
1670 
1671 	if ((frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) ||
1672 			(frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU))
1673 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
1674 	else if ((frame_type == HTT_STATS_FTYPE_SGEN_MU_BAR) ||
1675 		 (frame_type == HTT_STATS_FTYPE_SGEN_BAR))
1676 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
1677 	else
1678 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
1679 
1680 	tag_buf += 2;
1681 	ppdu_desc->tx_duration = *tag_buf;
1682 	tag_buf += 3;
1683 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
1684 
1685 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
1686 					ppdu_desc->tx_duration;
1687 	/* Ack time stamp is same as end time stamp*/
1688 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
1689 
1690 	tag_buf++;
1691 
1692 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
1693 	if (freq != ppdu_desc->channel) {
1694 		soc = pdev->soc;
1695 		ppdu_desc->channel = freq;
1696 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
1697 			pdev->operating_channel =
1698 		soc->cdp_soc.ol_ops->freq_to_channel(pdev->ctrl_pdev, freq);
1699 	}
1700 
1701 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
1702 }
1703 
1704 /*
1705  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
1706  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
1707  * @ppdu_info: per ppdu tlv structure
1708  *
1709  * return:void
1710  */
1711 static void dp_process_ppdu_stats_user_common_tlv(
1712 		struct dp_pdev *pdev, uint32_t *tag_buf,
1713 		struct ppdu_info *ppdu_info)
1714 {
1715 	uint16_t peer_id;
1716 	struct dp_peer *peer;
1717 	struct cdp_tx_completion_ppdu *ppdu_desc;
1718 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1719 	uint8_t curr_user_index = 0;
1720 
1721 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1722 
1723 	tag_buf++;
1724 	peer_id = HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
1725 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1726 
1727 	if (!peer)
1728 		return;
1729 
1730 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1731 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1732 
1733 	ppdu_user_desc->peer_id = peer_id;
1734 
1735 	tag_buf++;
1736 
1737 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
1738 		ppdu_user_desc->delayed_ba = 1;
1739 	}
1740 
1741 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
1742 		ppdu_user_desc->is_mcast = true;
1743 		ppdu_user_desc->mpdu_tried_mcast =
1744 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1745 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
1746 	} else {
1747 		ppdu_user_desc->mpdu_tried_ucast =
1748 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1749 	}
1750 
1751 	tag_buf++;
1752 
1753 	ppdu_user_desc->qos_ctrl =
1754 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
1755 	ppdu_user_desc->frame_ctrl =
1756 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
1757 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
1758 
1759 	if (ppdu_user_desc->delayed_ba) {
1760 		ppdu_user_desc->mpdu_success = 0;
1761 		ppdu_user_desc->mpdu_tried_mcast = 0;
1762 		ppdu_user_desc->mpdu_tried_ucast = 0;
1763 	}
1764 }
1765 
1766 
1767 /**
1768  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
1769  * @pdev: DP pdev handle
1770  * @tag_buf: T2H message buffer carrying the user rate TLV
1771  * @ppdu_info: per ppdu tlv structure
1772  *
1773  * return:void
1774  */
1775 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
1776 		uint32_t *tag_buf,
1777 		struct ppdu_info *ppdu_info)
1778 {
1779 	uint16_t peer_id;
1780 	struct dp_peer *peer;
1781 	struct cdp_tx_completion_ppdu *ppdu_desc;
1782 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1783 	uint8_t curr_user_index = 0;
1784 
1785 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1786 
1787 	tag_buf++;
1788 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
1789 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1790 
1791 	if (!peer)
1792 		return;
1793 
1794 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1795 
1796 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1797 	ppdu_user_desc->peer_id = peer_id;
1798 
1799 	ppdu_user_desc->tid =
1800 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
1801 
1802 	qdf_mem_copy(ppdu_user_desc->mac_addr, peer->mac_addr.raw,
1803 			DP_MAC_ADDR_LEN);
1804 
1805 	tag_buf += 2;
1806 
1807 	ppdu_user_desc->ru_tones = (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
1808 			HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
1809 
1810 	tag_buf += 2;
1811 
1812 	ppdu_user_desc->ppdu_type =
1813 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
1814 
1815 	tag_buf++;
1816 	ppdu_user_desc->tx_rate = *tag_buf;
1817 
1818 	ppdu_user_desc->ltf_size =
1819 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
1820 	ppdu_user_desc->stbc =
1821 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
1822 	ppdu_user_desc->he_re =
1823 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
1824 	ppdu_user_desc->txbf =
1825 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
1826 	ppdu_user_desc->bw =
1827 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
1828 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
1829 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
1830 	ppdu_user_desc->preamble =
1831 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
1832 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
1833 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
1834 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
1835 }
1836 
1837 /*
1838  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
1839  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1840  * pdev: DP PDEV handle
1841  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1842  * @ppdu_info: per ppdu tlv structure
1843  *
1844  * return:void
1845  */
1846 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
1847 		struct dp_pdev *pdev, uint32_t *tag_buf,
1848 		struct ppdu_info *ppdu_info)
1849 {
1850 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
1851 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
1852 
1853 	struct cdp_tx_completion_ppdu *ppdu_desc;
1854 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1855 	uint8_t curr_user_index = 0;
1856 	uint16_t peer_id;
1857 	struct dp_peer *peer;
1858 
1859 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1860 
1861 	tag_buf++;
1862 
1863 	peer_id =
1864 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1865 
1866 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1867 
1868 	if (!peer)
1869 		return;
1870 
1871 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1872 
1873 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1874 	ppdu_user_desc->peer_id = peer_id;
1875 
1876 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1877 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1878 					CDP_BA_64_BIT_MAP_SIZE_DWORDS);
1879 }
1880 
1881 /*
1882  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
1883  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * @pdev: DP PDEV handle
1885  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1886  * @ppdu_info: per ppdu tlv structure
1887  *
1888  * return:void
1889  */
1890 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
1891 		struct dp_pdev *pdev, uint32_t *tag_buf,
1892 		struct ppdu_info *ppdu_info)
1893 {
1894 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
1895 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
1896 
1897 	struct cdp_tx_completion_ppdu *ppdu_desc;
1898 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1899 	uint8_t curr_user_index = 0;
1900 	uint16_t peer_id;
1901 	struct dp_peer *peer;
1902 
1903 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1904 
1905 	tag_buf++;
1906 
1907 	peer_id =
1908 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1909 
1910 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1911 
1912 	if (!peer)
1913 		return;
1914 
1915 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1916 
1917 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1918 	ppdu_user_desc->peer_id = peer_id;
1919 
1920 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1921 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1922 					CDP_BA_256_BIT_MAP_SIZE_DWORDS);
1923 }
1924 
1925 /*
1926  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
1927  * htt_ppdu_stats_user_cmpltn_common_tlv
 * @pdev: DP PDEV handle
1929  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
1930  * @ppdu_info: per ppdu tlv structure
1931  *
1932  * return:void
1933  */
1934 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
1935 		struct dp_pdev *pdev, uint32_t *tag_buf,
1936 		struct ppdu_info *ppdu_info)
1937 {
1938 	uint16_t peer_id;
1939 	struct dp_peer *peer;
1940 	struct cdp_tx_completion_ppdu *ppdu_desc;
1941 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1942 	uint8_t curr_user_index = 0;
1943 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
1944 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
1945 
1946 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1947 
1948 	tag_buf++;
1949 	peer_id =
1950 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
1951 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1952 
1953 	if (!peer)
1954 		return;
1955 
1956 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1957 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1958 	ppdu_user_desc->peer_id = peer_id;
1959 
1960 	ppdu_user_desc->completion_status =
1961 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
1962 				*tag_buf);
1963 
1964 	ppdu_user_desc->tid =
1965 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
1966 
1967 
1968 	tag_buf++;
1969 	ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
1970 
1971 	tag_buf++;
1972 
1973 	ppdu_user_desc->mpdu_success =
1974 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
1975 
1976 	tag_buf++;
1977 
1978 	ppdu_user_desc->long_retries =
1979 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
1980 
1981 	ppdu_user_desc->short_retries =
1982 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
1983 	ppdu_user_desc->retry_msdus =
1984 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
1985 
1986 	ppdu_user_desc->is_ampdu =
1987 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
1988 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
1989 
1990 }
1991 
1992 /*
1993  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
1994  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
1995  * pdev: DP PDEV handle
1996  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
1997  * @ppdu_info: per ppdu tlv structure
1998  *
1999  * return:void
2000  */
2001 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2002 		struct dp_pdev *pdev, uint32_t *tag_buf,
2003 		struct ppdu_info *ppdu_info)
2004 {
2005 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2006 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2007 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2008 	struct cdp_tx_completion_ppdu *ppdu_desc;
2009 	uint8_t curr_user_index = 0;
2010 	uint16_t peer_id;
2011 	struct dp_peer *peer;
2012 
2013 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2014 
2015 	tag_buf++;
2016 
2017 	peer_id =
2018 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2019 
2020 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2021 
2022 	if (!peer)
2023 		return;
2024 
2025 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2026 
2027 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2028 	ppdu_user_desc->peer_id = peer_id;
2029 
2030 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2031 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2032 			CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2033 }
2034 
2035 /*
2036  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2037  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2038  * pdev: DP PDEV handle
2039  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2040  * @ppdu_info: per ppdu tlv structure
2041  *
2042  * return:void
2043  */
2044 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2045 		struct dp_pdev *pdev, uint32_t *tag_buf,
2046 		struct ppdu_info *ppdu_info)
2047 {
2048 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2049 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2050 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2051 	struct cdp_tx_completion_ppdu *ppdu_desc;
2052 	uint8_t curr_user_index = 0;
2053 	uint16_t peer_id;
2054 	struct dp_peer *peer;
2055 
2056 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2057 
2058 	tag_buf++;
2059 
2060 	peer_id =
2061 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2062 
2063 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2064 
2065 	if (!peer)
2066 		return;
2067 
2068 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2069 
2070 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2071 	ppdu_user_desc->peer_id = peer_id;
2072 
2073 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2074 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2075 			CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2076 }
2077 
2078 /*
2079  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2080  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @pdev: DP PDEV handle
2082  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2083  * @ppdu_info: per ppdu tlv structure
2084  *
2085  * return:void
2086  */
2087 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2088 		struct dp_pdev *pdev, uint32_t *tag_buf,
2089 		struct ppdu_info *ppdu_info)
2090 {
2091 	uint16_t peer_id;
2092 	struct dp_peer *peer;
2093 	struct cdp_tx_completion_ppdu *ppdu_desc;
2094 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2095 	uint8_t curr_user_index = 0;
2096 
2097 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2098 
2099 	tag_buf += 2;
2100 	peer_id =
2101 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2102 
2103 
2104 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2105 
2106 	if (!peer)
2107 		return;
2108 
2109 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2110 
2111 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2112 	ppdu_user_desc->peer_id = peer_id;
2113 
2114 	tag_buf++;
2115 	ppdu_user_desc->tid =
2116 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(*tag_buf);
2117 	ppdu_user_desc->num_mpdu =
2118 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2119 
2120 	ppdu_user_desc->num_msdu =
2121 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2122 
2123 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2124 
2125 	tag_buf += 2;
2126 	ppdu_user_desc->success_bytes = *tag_buf;
2127 
2128 }
2129 
2130 /*
2131  * dp_process_ppdu_stats_user_common_array_tlv: Process
2132  * htt_ppdu_stats_user_common_array_tlv
2133  * pdev: DP PDEV handle
2134  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2135  * @ppdu_info: per ppdu tlv structure
2136  *
2137  * return:void
2138  */
2139 static void dp_process_ppdu_stats_user_common_array_tlv(
2140 		struct dp_pdev *pdev, uint32_t *tag_buf,
2141 		struct ppdu_info *ppdu_info)
2142 {
2143 	uint32_t peer_id;
2144 	struct dp_peer *peer;
2145 	struct cdp_tx_completion_ppdu *ppdu_desc;
2146 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2147 	uint8_t curr_user_index = 0;
2148 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2149 
2150 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2151 
2152 	tag_buf++;
2153 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2154 	tag_buf += 3;
2155 	peer_id =
2156 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2157 
2158 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2159 
2160 	if (!peer) {
2161 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2162 			"Invalid peer");
2163 		return;
2164 	}
2165 
2166 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2167 
2168 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2169 
2170 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2171 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2172 
2173 	tag_buf++;
2174 
2175 	ppdu_user_desc->success_msdus =
2176 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2177 	ppdu_user_desc->retry_bytes =
2178 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2179 	tag_buf++;
2180 	ppdu_user_desc->failed_msdus =
2181 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2182 }
2183 
2184 /*
2185  * dp_process_ppdu_stats_flush_tlv: Process
2186  * htt_ppdu_stats_flush_tlv
2187  * @pdev: DP PDEV handle
2188  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2189  *
2190  * return:void
2191  */
2192 static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2193 						uint32_t *tag_buf)
2194 {
2195 	uint32_t peer_id;
2196 	uint32_t drop_reason;
2197 	uint8_t tid;
2198 	uint32_t num_msdu;
2199 	struct dp_peer *peer;
2200 
2201 	tag_buf++;
2202 	drop_reason = *tag_buf;
2203 
2204 	tag_buf++;
2205 	num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2206 
2207 	tag_buf++;
2208 	peer_id =
2209 		HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2210 
2211 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2212 	if (!peer)
2213 		return;
2214 
2215 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2216 
2217 	if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2218 		DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2219 					num_msdu);
2220 	}
2221 }
2222 
2223 /*
2224  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2225  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2226  * @pdev: DP PDEV handle
2227  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2228  * @length: tlv_length
2229  *
2230  * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller
2231  */
2232 static QDF_STATUS
2233 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2234 					      qdf_nbuf_t tag_buf,
2235 					      uint32_t ppdu_id)
2236 {
2237 	uint32_t *nbuf_ptr;
2238 	uint8_t trim_size;
2239 
2240 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
2241 	    (!pdev->bpr_enable))
2242 		return QDF_STATUS_SUCCESS;
2243 
2244 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
2245 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2246 		      qdf_nbuf_data(tag_buf));
2247 
2248 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2249 		return QDF_STATUS_SUCCESS;
2250 
2251 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2252 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
2253 
2254 	nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(
2255 				tag_buf, sizeof(ppdu_id));
2256 	*nbuf_ptr = ppdu_id;
2257 
2258 	if (pdev->bpr_enable) {
2259 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2260 				     tag_buf, HTT_INVALID_PEER,
2261 				     WDI_NO_VAL, pdev->pdev_id);
2262 	}
2263 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2264 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2265 				     tag_buf, HTT_INVALID_PEER,
2266 				     WDI_NO_VAL, pdev->pdev_id);
2267 	}
2268 
2269 	return QDF_STATUS_E_ALREADY;
2270 }
2271 
2272 /**
2273  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
2274  * @pdev: DP pdev handle
2275  * @tag_buf: TLV buffer
2276  * @tlv_len: length of tlv
2277  * @ppdu_info: per ppdu tlv structure
2278  *
2279  * return: void
2280  */
static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
		uint32_t tlv_len, struct ppdu_info *ppdu_info)
{
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	/*
	 * Dispatch on the TLV tag. Every case first asserts that the
	 * received length matches the host's structure definition (a
	 * mismatch indicates host/firmware HTT definition skew), then
	 * hands the buffer to the tag-specific parser. Unknown tags are
	 * silently ignored.
	 */
	switch (tlv_type) {
	case HTT_PPDU_STATS_COMMON_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_common_tlv));
		dp_process_ppdu_stats_common_tlv(pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_user_common_tlv));
		dp_process_ppdu_stats_user_common_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_RATE_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_user_rate_tlv));
		dp_process_ppdu_stats_user_rate_tlv(pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv));
		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv));
		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_user_cmpltn_common_tlv));
		dp_process_ppdu_stats_user_cmpltn_common_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
		qdf_assert_always(tlv_len ==
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv));
		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
		qdf_assert_always(tlv_len ==
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv));
		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
		qdf_assert_always(tlv_len ==
			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv));
		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
		qdf_assert_always(tlv_len ==
			sizeof(htt_ppdu_stats_usr_common_array_tlv_v));
		dp_process_ppdu_stats_user_common_array_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
		qdf_assert_always(tlv_len ==
			sizeof(htt_ppdu_stats_flush_tlv));
		/* flush TLV is per-peer accounting only; no ppdu_info use */
		dp_process_ppdu_stats_user_compltn_flush_tlv(
				pdev, tag_buf);
		break;
	default:
		break;
	}
}
2355 
2356 /**
2357  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
2358  * to upper layer
2359  * @pdev: DP pdev handle
2360  * @ppdu_info: per PPDU TLV descriptor
2361  *
2362  * return: void
2363  */
static
void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
			      struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	struct dp_peer *peer = NULL;
	qdf_nbuf_t nbuf;
	uint16_t i;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(ppdu_info->nbuf);

	ppdu_desc->num_users = ppdu_info->last_user;
	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;

	/* Aggregate per-user MPDU/MSDU counts and update per-peer tx stats */
	for (i = 0; i < ppdu_desc->num_users; i++) {


		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;

		/* Stats are only tracked for data TIDs */
		if (ppdu_desc->user[i].tid < CDP_DATA_TID_MAX) {
			peer = dp_peer_find_by_id(pdev->soc,
					ppdu_desc->user[i].peer_id);
			/**
			 * This check is to make sure peer is not deleted
			 * after processing the TLVs.
			 */
			if (!peer)
				continue;

			dp_tx_stats_update(pdev->soc, peer,
					&ppdu_desc->user[i],
					ppdu_desc->ack_rssi);
		}
	}

	/*
	 * Remove from the list
	 */
	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
	nbuf = ppdu_info->nbuf;
	pdev->list_depth--;
	/* ppdu_info is only the tracking node; nbuf lives on past this free */
	qdf_mem_free(ppdu_info);

	qdf_assert_always(nbuf);

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(nbuf);

	/**
	 * Deliver PPDU stats only for valid (acked) data frames if
	 * sniffer mode is not enabled.
	 * If sniffer mode is enabled, PPDU stats for all frames
	 * including mgmt/control frames should be delivered to upper layer
	 */
	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
				nbuf, HTT_INVALID_PEER,
				WDI_NO_VAL, pdev->pdev_id);
	} else {
		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {

			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
					pdev->soc, nbuf, HTT_INVALID_PEER,
					WDI_NO_VAL, pdev->pdev_id);
		} else
			/* nbuf is freed here only when NOT delivered via WDI */
			qdf_nbuf_free(nbuf);
	}
	return;
}
2436 
2437 /**
2438  * dp_get_ppdu_desc(): Function to allocate new PPDU status
2439  * desc for new ppdu id
2440  * @pdev: DP pdev handle
2441  * @ppdu_id: PPDU unique identifier
2442  * @tlv_type: TLV type received
2443  *
2444  * return: ppdu_info per ppdu tlv structure
2445  */
static
struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
			uint8_t tlv_type)
{
	struct ppdu_info *ppdu_info = NULL;

	/*
	 * Find ppdu_id node exists or not
	 */
	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {

		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
			break;
		}
	}

	if (ppdu_info) {
		/**
		 * if we get tlv_type that is already been processed for ppdu,
		 * that means we got a new ppdu with same ppdu id.
		 * Hence Flush the older ppdu
		 */
		if (ppdu_info->tlv_bitmap & (1 << tlv_type))
			dp_ppdu_desc_deliver(pdev, ppdu_info);
		else
			return ppdu_info;
	}

	/**
	 * Flush the head ppdu descriptor if ppdu desc list reaches max
	 * threshold
	 */
	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
		dp_ppdu_desc_deliver(pdev, ppdu_info);
	}

	/*
	 * Allocate new ppdu_info node
	 */
	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
	if (!ppdu_info)
		return NULL;

	/* nbuf backs the cdp_tx_completion_ppdu descriptor for this PPDU */
	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
			TRUE);
	if (!ppdu_info->nbuf) {
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	/* Descriptor fields are accumulated TLV-by-TLV; start from zero */
	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
			sizeof(struct cdp_tx_completion_ppdu));

	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"No tailroom for HTT PPDU");
		/* Unwind both allocations on failure */
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		ppdu_info->last_user = 0;
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	/**
	 * No lock is needed because all PPDU TLVs are processed in
	 * same context and this list is updated in same context
	 */
	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
			ppdu_info_list_elem);
	pdev->list_depth++;
	return ppdu_info;
}
2521 
2522 /**
2523  * dp_htt_process_tlv(): Function to process each PPDU TLVs
2524  * @pdev: DP pdev handle
2525  * @htt_t2h_msg: HTT target to host message
2526  *
2527  * return: ppdu_info per ppdu tlv structure
2528  */
2529 
static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
		qdf_nbuf_t htt_t2h_msg)
{
	uint32_t length;
	uint32_t ppdu_id;
	uint8_t tlv_type;
	uint32_t tlv_length, tlv_bitmap_expected;
	uint8_t *tlv_buf;
	struct ppdu_info *ppdu_info = NULL;

	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);

	/* Word 0 carries the total TLV payload size */
	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);

	/* Word 1 carries the PPDU id for this batch of TLVs */
	msg_word = msg_word + 1;
	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);


	/* TLV stream starts after the 4-word indication header */
	msg_word = msg_word + 3;
	while (length > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
			pdev->stats.ppdu_stats_counter[tlv_type]++;

		/* Zero-length TLV would make the walk loop forever; stop */
		if (tlv_length == 0)
			break;

		/* TLV length field excludes its own header */
		tlv_length += HTT_TLV_HDR_LEN;

		/**
		 * Not allocating separate ppdu descriptor for MGMT Payload
		 * TLV as this is sent as separate WDI indication and it
		 * doesn't contain any ppdu information
		 */
		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
			pdev->mgmtctrl_frm_info.mgmt_buf_len = tlv_length;
			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
			msg_word =
				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
			length -= (tlv_length);
			continue;
		}

		/* Look up (or allocate) the descriptor for this ppdu_id */
		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
		if (!ppdu_info)
			return NULL;
		ppdu_info->ppdu_id = ppdu_id;
		/* Track which TLV types have been seen for this PPDU */
		ppdu_info->tlv_bitmap |= (1 << tlv_type);

		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);

		/**
		 * Increment pdev level tlv count to monitor
		 * missing TLVs
		 */
		pdev->tlv_count++;
		ppdu_info->last_tlv_cnt = pdev->tlv_count;
		/* Advance to the next TLV in the stream */
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		length -= (tlv_length);
	}

	if (!ppdu_info)
		return NULL;

	pdev->last_ppdu_id = ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;

	/* Sniffer/mcopy A-MPDU PPDUs carry additional TLVs */
	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected = HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP;
	}

	/**
	 * Once all the TLVs for a given PPDU has been processed,
	 * return PPDU status to be delivered to higher layer
	 */
	if (ppdu_info->tlv_bitmap == tlv_bitmap_expected)
		return ppdu_info;

	return NULL;
}
2615 #endif /* FEATURE_PERPKT_INFO */
2616 
2617 /**
2618  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
2619  * @soc: DP SOC handle
2620  * @pdev_id: pdev id
2621  * @htt_t2h_msg: HTT message nbuf
2622  *
2623  * return:void
2624  */
2625 #if defined(WDI_EVENT_ENABLE)
2626 #ifdef FEATURE_PERPKT_INFO
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
	struct ppdu_info *ppdu_info = NULL;
	/* true = caller owns (and frees) htt_t2h_msg */
	bool free_buf = true;

	if (!pdev)
		return true;

	/* No consumer of PPDU stats is enabled; nothing to parse */
	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
	    !pdev->mcopy_mode && !pdev->bpr_enable)
		return free_buf;

	/* Non-NULL only when all expected TLVs for a PPDU have arrived */
	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);

	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
		/*
		 * Mgmt payload TLV is delivered via its own WDI event; if
		 * the nbuf was handed off (E_ALREADY), caller must not
		 * free it.
		 */
		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
		    QDF_STATUS_SUCCESS)
			free_buf = false;

		/* Reset per-message mgmt frame bookkeeping */
		pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
		pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
		pdev->mgmtctrl_frm_info.ppdu_id = 0;
	}

	if (ppdu_info)
		dp_ppdu_desc_deliver(pdev, ppdu_info);

	return free_buf;
}
2659 #else
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	/* FEATURE_PERPKT_INFO disabled: nothing to parse, caller frees nbuf */
	return true;
}
2665 #endif
2666 #endif
2667 
2668 /**
2669  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2670  * @soc: DP SOC handle
2671  * @htt_t2h_msg: HTT message nbuf
2672  *
2673  * return:void
2674  */
2675 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2676 		qdf_nbuf_t htt_t2h_msg)
2677 {
2678 	uint8_t done;
2679 	qdf_nbuf_t msg_copy;
2680 	uint32_t *msg_word;
2681 
2682 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2683 	msg_word = msg_word + 3;
2684 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2685 
2686 	/*
2687 	 * HTT EXT stats response comes as stream of TLVs which span over
2688 	 * multiple T2H messages.
2689 	 * The first message will carry length of the response.
2690 	 * For rest of the messages length will be zero.
2691 	 *
2692 	 * Clone the T2H message buffer and store it in a list to process
2693 	 * it later.
2694 	 *
2695 	 * The original T2H message buffers gets freed in the T2H HTT event
2696 	 * handler
2697 	 */
2698 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2699 
2700 	if (!msg_copy) {
2701 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2702 				"T2H messge clone failed for HTT EXT STATS");
2703 		goto error;
2704 	}
2705 
2706 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2707 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2708 	/*
2709 	 * Done bit signifies that this is the last T2H buffer in the stream of
2710 	 * HTT EXT STATS message
2711 	 */
2712 	if (done) {
2713 		soc->htt_stats.num_stats++;
2714 		qdf_sched_work(0, &soc->htt_stats.work);
2715 	}
2716 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2717 
2718 	return;
2719 
2720 error:
2721 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2722 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2723 			!= NULL) {
2724 		qdf_nbuf_free(msg_copy);
2725 	}
2726 	soc->htt_stats.num_stats = 0;
2727 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2728 	return;
2729 
2730 }
2731 
2732 /*
2733  * htt_soc_attach_target() - SOC level HTT setup
2734  * @htt_soc:	HTT SOC handle
2735  *
2736  * Return: 0 on success; error code on failure
2737  */
int htt_soc_attach_target(void *htt_soc)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;

	/* SOC-level HTT setup is just the H2T version-request handshake */
	return htt_h2t_ver_req_msg(soc);
}
2744 
2745 
2746 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
2747 /*
2748  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
2749  * @htt_soc:	 HTT SOC handle
2750  * @msg_word:    Pointer to payload
2751  * @htt_t2h_msg: HTT msg nbuf
2752  *
2753  * Return: True if buffer should be freed by caller.
2754  */
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
				uint32_t *msg_word,
				qdf_nbuf_t htt_t2h_msg)
{
	u_int8_t pdev_id;
	bool free_buf;
	/* Extend nbuf length to the max T2H message size so the TLV
	 * walker can read the full payload */
	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"received HTT_T2H_MSG_TYPE_PPDU_STATS_IND");
	/* Firmware reports HW mac ids; convert to the SW pdev index */
	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	pdev_id = DP_HW2SW_MACID(pdev_id);
	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
					      htt_t2h_msg);
	/* Also mirror the raw message to WDI LITE_T2H subscribers */
	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
		pdev_id);
	return free_buf;
}
2774 #else
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
				uint32_t *msg_word,
				qdf_nbuf_t htt_t2h_msg)
{
	/* PPDU stats path compiled out: caller should free the buffer */
	return true;
}
2782 #endif
2783 
2784 #if defined(WDI_EVENT_ENABLE) && \
2785 		!defined(REMOVE_PKT_LOG) && defined(CONFIG_WIN)
2786 /*
2787  * dp_pktlog_msg_handler() - Pktlog msg handler
2788  * @htt_soc:	 HTT SOC handle
2789  * @msg_word:    Pointer to payload
2790  *
2791  * Return: None
2792  */
2793 static void
2794 dp_pktlog_msg_handler(struct htt_soc *soc,
2795 				uint32_t *msg_word)
2796 {
2797 	uint8_t pdev_id;
2798 	uint32_t *pl_hdr;
2799 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2800 		"received HTT_T2H_MSG_TYPE_PKTLOG");
2801 	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2802 	pdev_id = DP_HW2SW_MACID(pdev_id);
2803 	pl_hdr = (msg_word + 1);
2804 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2805 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2806 		pdev_id);
2807 }
2808 #else
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
				uint32_t *msg_word)
{
	/* Pktlog support compiled out: silently drop the message */
}
2814 #endif
2815 
2816 /*
2817  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
2818  * @context:	Opaque context (HTT SOC handle)
2819  * @pkt:	HTC packet
2820  */
2821 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
2822 {
2823 	struct htt_soc *soc = (struct htt_soc *) context;
2824 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
2825 	u_int32_t *msg_word;
2826 	enum htt_t2h_msg_type msg_type;
2827 	bool free_buf = true;
2828 
2829 	/* check for successful message reception */
2830 	if (pkt->Status != QDF_STATUS_SUCCESS) {
2831 		if (pkt->Status != QDF_STATUS_E_CANCELED)
2832 			soc->stats.htc_err_cnt++;
2833 
2834 		qdf_nbuf_free(htt_t2h_msg);
2835 		return;
2836 	}
2837 
2838 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
2839 
2840 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
2841 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2842 	switch (msg_type) {
2843 	case HTT_T2H_MSG_TYPE_PEER_MAP:
2844 		{
2845 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
2846 			u_int8_t *peer_mac_addr;
2847 			u_int16_t peer_id;
2848 			u_int16_t hw_peer_id;
2849 			u_int8_t vdev_id;
2850 
2851 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
2852 			hw_peer_id =
2853 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
2854 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
2855 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
2856 				(u_int8_t *) (msg_word+1),
2857 				&mac_addr_deswizzle_buf[0]);
2858 			QDF_TRACE(QDF_MODULE_ID_TXRX,
2859 				QDF_TRACE_LEVEL_INFO,
2860 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
2861 				peer_id, vdev_id);
2862 
2863 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
2864 						vdev_id, peer_mac_addr);
2865 			break;
2866 		}
2867 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
2868 		{
2869 			u_int16_t peer_id;
2870 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
2871 
2872 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id);
2873 			break;
2874 		}
2875 	case HTT_T2H_MSG_TYPE_SEC_IND:
2876 		{
2877 			u_int16_t peer_id;
2878 			enum htt_sec_type sec_type;
2879 			int is_unicast;
2880 
2881 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
2882 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
2883 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
2884 			/* point to the first part of the Michael key */
2885 			msg_word++;
2886 			dp_rx_sec_ind_handler(
2887 				soc->dp_soc, peer_id, sec_type, is_unicast,
2888 				msg_word, msg_word + 2);
2889 			break;
2890 		}
2891 
2892 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
2893 		{
2894 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
2895 							     htt_t2h_msg);
2896 			break;
2897 		}
2898 
2899 	case HTT_T2H_MSG_TYPE_PKTLOG:
2900 		{
2901 			dp_pktlog_msg_handler(soc, msg_word);
2902 			break;
2903 		}
2904 
2905 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
2906 		{
2907 			htc_pm_runtime_put(soc->htc_soc);
2908 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
2909 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
2910 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2911 				"target uses HTT version %d.%d; host uses %d.%d",
2912 				soc->tgt_ver.major, soc->tgt_ver.minor,
2913 				HTT_CURRENT_VERSION_MAJOR,
2914 				HTT_CURRENT_VERSION_MINOR);
2915 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
2916 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2917 					QDF_TRACE_LEVEL_ERROR,
2918 					"*** Incompatible host/target HTT versions!");
2919 			}
2920 			/* abort if the target is incompatible with the host */
2921 			qdf_assert(soc->tgt_ver.major ==
2922 				HTT_CURRENT_VERSION_MAJOR);
2923 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
2924 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2925 					QDF_TRACE_LEVEL_WARN,
2926 					"*** Warning: host/target HTT versions"
2927 					" are different, though compatible!");
2928 			}
2929 			break;
2930 		}
2931 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
2932 		{
2933 			uint16_t peer_id;
2934 			uint8_t tid;
2935 			uint8_t win_sz;
2936 			uint16_t status;
2937 			struct dp_peer *peer;
2938 
2939 			/*
2940 			 * Update REO Queue Desc with new values
2941 			 */
2942 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
2943 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
2944 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
2945 			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);
2946 
2947 			/*
2948 			 * Window size needs to be incremented by 1
2949 			 * since fw needs to represent a value of 256
2950 			 * using just 8 bits
2951 			 */
2952 			if (peer) {
2953 				status = dp_addba_requestprocess_wifi3(peer,
2954 						0, tid, 0, win_sz + 1, 0xffff);
2955 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2956 					QDF_TRACE_LEVEL_INFO,
2957 					FL("PeerID %d BAW %d TID %d stat %d"),
2958 					peer_id, win_sz, tid, status);
2959 
2960 			} else {
2961 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2962 					QDF_TRACE_LEVEL_ERROR,
2963 					FL("Peer not found peer id %d"),
2964 					peer_id);
2965 			}
2966 			break;
2967 		}
2968 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
2969 		{
2970 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
2971 			break;
2972 		}
2973 	default:
2974 		break;
2975 	};
2976 
2977 	/* Free the indication buffer */
2978 	if (free_buf)
2979 		qdf_nbuf_free(htt_t2h_msg);
2980 }
2981 
2982 /*
2983  * dp_htt_h2t_full() - Send full handler (called from HTC)
2984  * @context:	Opaque context (HTT SOC handle)
2985  * @pkt:	HTC packet
2986  *
2987  * Return: enum htc_send_full_action
2988  */
2989 static enum htc_send_full_action
2990 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
2991 {
2992 	return HTC_SEND_FULL_KEEP;
2993 }
2994 
2995 /*
2996  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
2997  * @context:	Opaque context (HTT SOC handle)
2998  * @nbuf:	nbuf containing T2H message
2999  * @pipe_id:	HIF pipe ID
3000  *
3001  * Return: QDF_STATUS
3002  *
3003  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3004  * will be used for packet log and other high-priority HTT messages. Proper
3005  * HTC connection to be added later once required FW changes are available
3006  */
3007 static QDF_STATUS
3008 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3009 {
3010 	A_STATUS rc = QDF_STATUS_SUCCESS;
3011 	HTC_PACKET htc_pkt;
3012 
3013 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3014 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3015 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3016 	htc_pkt.pPktContext = (void *)nbuf;
3017 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3018 
3019 	return rc;
3020 }
3021 
3022 /*
3023  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3024  * @htt_soc:	HTT SOC handle
3025  *
3026  * Return: 0 on success; error code on failure
3027  */
3028 static int
3029 htt_htc_soc_attach(struct htt_soc *soc)
3030 {
3031 	struct htc_service_connect_req connect;
3032 	struct htc_service_connect_resp response;
3033 	A_STATUS status;
3034 	struct dp_soc *dpsoc = soc->dp_soc;
3035 
3036 	qdf_mem_set(&connect, sizeof(connect), 0);
3037 	qdf_mem_set(&response, sizeof(response), 0);
3038 
3039 	connect.pMetaData = NULL;
3040 	connect.MetaDataLength = 0;
3041 	connect.EpCallbacks.pContext = soc;
3042 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3043 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3044 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3045 
3046 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3047 	connect.EpCallbacks.EpRecvRefill = NULL;
3048 
3049 	/* N/A, fill is done by HIF */
3050 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3051 
3052 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3053 	/*
3054 	 * Specify how deep to let a queue get before htc_send_pkt will
3055 	 * call the EpSendFull function due to excessive send queue depth.
3056 	 */
3057 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3058 
3059 	/* disable flow control for HTT data message service */
3060 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3061 
3062 	/* connect to control service */
3063 	connect.service_id = HTT_DATA_MSG_SVC;
3064 
3065 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3066 
3067 	if (status != A_OK)
3068 		return QDF_STATUS_E_FAILURE;
3069 
3070 	soc->htc_endpoint = response.Endpoint;
3071 
3072 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3073 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3074 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3075 
3076 	return 0; /* success */
3077 }
3078 
3079 /*
3080  * htt_soc_attach() - SOC level HTT initialization
3081  * @dp_soc:	Opaque Data path SOC handle
3082  * @ctrl_psoc:	Opaque ctrl SOC handle
3083  * @htc_soc:	SOC level HTC handle
3084  * @hal_soc:	Opaque HAL SOC handle
3085  * @osdev:	QDF device
3086  *
3087  * Return: HTT handle on success; NULL on failure
3088  */
3089 void *
3090 htt_soc_attach(void *dp_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
3091 	void *hal_soc, qdf_device_t osdev)
3092 {
3093 	struct htt_soc *soc;
3094 	int i;
3095 
3096 	soc = qdf_mem_malloc(sizeof(*soc));
3097 
3098 	if (!soc)
3099 		goto fail1;
3100 
3101 	soc->osdev = osdev;
3102 	soc->ctrl_psoc = ctrl_psoc;
3103 	soc->dp_soc = dp_soc;
3104 	soc->htc_soc = htc_soc;
3105 	soc->hal_soc = hal_soc;
3106 
3107 	/* TODO: See if any NSS related context is required in htt_soc */
3108 
3109 	soc->htt_htc_pkt_freelist = NULL;
3110 
3111 	if (htt_htc_soc_attach(soc))
3112 		goto fail2;
3113 
3114 	/* TODO: See if any Rx data specific intialization is required. For
3115 	 * MCL use cases, the data will be received as single packet and
3116 	 * should not required any descriptor or reorder handling
3117 	 */
3118 
3119 	HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);
3120 
3121 	/* pre-allocate some HTC_PACKET objects */
3122 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3123 		struct dp_htt_htc_pkt_union *pkt;
3124 		pkt = qdf_mem_malloc(sizeof(*pkt));
3125 		if (!pkt)
3126 			break;
3127 
3128 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3129 	}
3130 
3131 	return soc;
3132 
3133 fail2:
3134 	qdf_mem_free(soc);
3135 
3136 fail1:
3137 	return NULL;
3138 }
3139 
3140 
3141 /*
3142  * htt_soc_detach() - Detach SOC level HTT
3143  * @htt_soc:	HTT SOC handle
3144  */
3145 void
3146 htt_soc_detach(void *htt_soc)
3147 {
3148 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3149 
3150 	htt_htc_misc_pkt_pool_free(soc);
3151 	htt_htc_pkt_pool_free(soc);
3152 	HTT_TX_MUTEX_DESTROY(&soc->htt_tx_mutex);
3153 	qdf_mem_free(soc);
3154 }
3155 
3156 /**
3157  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
3158  * @pdev: DP PDEV handle
3159  * @stats_type_upload_mask: stats type requested by user
3160  * @config_param_0: extra configuration parameters
3161  * @config_param_1: extra configuration parameters
3162  * @config_param_2: extra configuration parameters
3163  * @config_param_3: extra configuration parameters
3164  * @mac_id: mac number
3165  *
3166  * return: QDF STATUS
3167  */
3168 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3169 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3170 		uint32_t config_param_1, uint32_t config_param_2,
3171 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3172 		uint8_t mac_id)
3173 {
3174 	struct htt_soc *soc = pdev->soc->htt_handle;
3175 	struct dp_htt_htc_pkt *pkt;
3176 	qdf_nbuf_t msg;
3177 	uint32_t *msg_word;
3178 	uint8_t pdev_mask = 0;
3179 
3180 	msg = qdf_nbuf_alloc(
3181 			soc->osdev,
3182 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3183 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3184 
3185 	if (!msg)
3186 		return QDF_STATUS_E_NOMEM;
3187 
3188 	/*TODO:Add support for SOC stats
3189 	 * Bit 0: SOC Stats
3190 	 * Bit 1: Pdev stats for pdev id 0
3191 	 * Bit 2: Pdev stats for pdev id 1
3192 	 * Bit 3: Pdev stats for pdev id 2
3193 	 */
3194 	mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3195 
3196 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
3197 	/*
3198 	 * Set the length of the message.
3199 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3200 	 * separately during the below call to qdf_nbuf_push_head.
3201 	 * The contribution from the HTC header is added separately inside HTC.
3202 	 */
3203 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3204 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3205 				"Failed to expand head for HTT_EXT_STATS");
3206 		qdf_nbuf_free(msg);
3207 		return QDF_STATUS_E_FAILURE;
3208 	}
3209 
3210 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3211 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
3212 		"config_param_1 %u\n config_param_2 %u\n"
3213 		"config_param_4 %u\n -------------",
3214 		__func__, __LINE__, cookie_val, config_param_0,
3215 		config_param_1, config_param_2,	config_param_3);
3216 
3217 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3218 
3219 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3220 	*msg_word = 0;
3221 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
3222 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
3223 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
3224 
3225 	/* word 1 */
3226 	msg_word++;
3227 	*msg_word = 0;
3228 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
3229 
3230 	/* word 2 */
3231 	msg_word++;
3232 	*msg_word = 0;
3233 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
3234 
3235 	/* word 3 */
3236 	msg_word++;
3237 	*msg_word = 0;
3238 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
3239 
3240 	/* word 4 */
3241 	msg_word++;
3242 	*msg_word = 0;
3243 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
3244 
3245 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
3246 
3247 	/* word 5 */
3248 	msg_word++;
3249 
3250 	/* word 6 */
3251 	msg_word++;
3252 	*msg_word = 0;
3253 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
3254 
3255 	/* word 7 */
3256 	msg_word++;
3257 	*msg_word = 0;
3258 	/*Using last 2 bits for pdev_id */
3259 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
3260 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
3261 
3262 	pkt = htt_htc_pkt_alloc(soc);
3263 	if (!pkt) {
3264 		qdf_nbuf_free(msg);
3265 		return QDF_STATUS_E_NOMEM;
3266 	}
3267 
3268 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3269 
3270 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3271 			dp_htt_h2t_send_complete_free_netbuf,
3272 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3273 			soc->htc_endpoint,
3274 			1); /* tag - not relevant here */
3275 
3276 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3277 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3278 	return 0;
3279 }
3280 
3281 /* This macro will revert once proper HTT header will define for
3282  * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in htt.h file
3283  * */
#if defined(WDI_EVENT_ENABLE)
/**
 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @stats_type_upload_mask: stats type requested by user
 * @mac_id: Mac id number
 *
 * Builds an HTT_H2T_MSG_TYPE_PPDU_STATS_CFG message selecting which PPDU
 * stats TLVs the target should upload for the given mac, and queues it
 * over the HTT HTC endpoint.
 *
 * return: QDF STATUS
 */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *htt_hdl = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t nbuf;
	uint32_t *word;
	uint8_t hw_mac_mask;

	nbuf = qdf_nbuf_alloc(
			htt_hdl->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
	if (!nbuf) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	hw_mac_mask = 1 << DP_SW2HW_MACID(mac_id);

	/*
	 * Reserve room for the message body.  The HTC_HDR_ALIGNMENT_PADDING
	 * contribution is added by the qdf_nbuf_push_head() below; the HTC
	 * header itself is added inside HTC.
	 */
	if (!qdf_nbuf_put_tail(nbuf, HTT_H2T_PPDU_STATS_CFG_MSG_SZ)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand head for HTT_CFG_STATS");
		qdf_nbuf_free(nbuf);
		return QDF_STATUS_E_FAILURE;
	}

	word = (uint32_t *)qdf_nbuf_data(nbuf);

	qdf_nbuf_push_head(nbuf, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0: msg type, hw mac mask and requested TLV bitmap */
	*word = 0;
	HTT_H2T_MSG_TYPE_SET(*word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*word, hw_mac_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*word,
			stats_type_upload_mask);

	htt_pkt = htt_htc_pkt_alloc(htt_hdl);
	if (!htt_pkt) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(nbuf);
		return QDF_STATUS_E_NOMEM;
	}

	htt_pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&htt_pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(nbuf), qdf_nbuf_len(nbuf),
			htt_hdl->htc_endpoint,
			1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&htt_pkt->htc_pkt, nbuf);
	DP_HTT_SEND_HTC_PKT(htt_hdl, htt_pkt);
	return 0;
}
#endif
3366