xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 4865edfd190c086bbe2c69aae12a8226f877b91e)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_api.h>
21 #include "dp_htt.h"
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_rx_mon.h"
26 #include "htt_stats.h"
27 #include "htt_ppdu_stats.h"
28 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
29 #include "cdp_txrx_cmn_struct.h"
30 
#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE

#define HTT_HTC_PKT_POOL_INIT_SIZE 64
#define HTT_T2H_MAX_MSG_SIZE 2048

#define HTT_MSG_BUF_SIZE(msg_bytes) \
	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)

#define HTT_PID_BIT_MASK 0x3

#define DP_EXT_MSG_LENGTH 2048

/*
 * Send an HTT message over HTC; when HTC accepts the packet, track it on
 * the misclist so its buffer can be reclaimed after completion.
 */
#define DP_HTT_SEND_HTC_PKT(soc, pkt)                            \
do {                                                             \
	if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==         \
					QDF_STATUS_SUCCESS)      \
		htt_htc_misc_pkt_list_add(soc, pkt);             \
} while (0)

#define HTT_MGMT_CTRL_TLV_RESERVERD_LEN 12

/**
 * Bitmap of HTT PPDU TLV types for Default mode
 *
 * Fully parenthesized so the expansion is safe inside larger expressions
 * (e.g. `x & HTT_PPDU_DEFAULT_TLV_BITMAP`) without precedence surprises.
 */
#define HTT_PPDU_DEFAULT_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/**
 * Bitmap of HTT PPDU TLV types for Sniffer mode
 *
 * Superset of the default bitmap that additionally requests the BA and
 * MPDU-enqueue bitmap TLVs. Fully parenthesized (see above).
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV))

#define HTT_FRAMECTRL_DATATYPE 0x08
#define HTT_PPDU_DESC_MAX_DEPTH 16
77 
/*
 * dp_tx_stats_update() - Update per-peer statistics
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor (per-user completion fields)
 * @ack_rssi: RSSI of last ack received
 *
 * Accumulates the per-user PPDU completion data (MSDU/byte counts, rate,
 * GI/BW/NSS/AC buckets, retries, per-preamble MCS histogram) into the
 * peer's TX stats, then forwards the updated stats through the registered
 * offload callback if one exists.
 *
 * Return: None
 */
#ifdef FEATURE_PERPKT_INFO
static void dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer,
		struct cdp_tx_completion_ppdu_user *ppdu, uint32_t ack_rssi)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	uint8_t preamble, mcs;
	uint16_t num_msdu;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;

	/* If the peer statistics are already processed as part of
	 * per-MSDU completion handler, do not process these again in per-PPDU
	 * indications */
	if (soc->process_tx_status)
		return;

	/* Completed-packet count/bytes include successful, retried and
	 * failed bytes for this user's share of the PPDU.
	 */
	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			num_msdu, (ppdu->success_bytes +
				ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	/* Ack RSSI is only meaningful for unicast traffic */
	if (!(ppdu->is_mcast))
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	DP_STATS_INC(peer, tx.retries,
			(ppdu->long_retries + ppdu->short_retries));
	/*
	 * Per-preamble MCS histogram: for each preamble type exactly one of
	 * the paired INCC calls fires — an in-range MCS lands in its own
	 * bucket, an out-of-range MCS is folded into the last bucket
	 * (MAX_MCS - 1).
	 */
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));

	/* Forward the updated peer stats to the offload layer, if it
	 * registered a callback for this.
	 */
	if (soc->cdp_soc.ol_ops->update_dp_stats) {
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
				&peer->stats, ppdu->peer_id,
				UPDATE_PEER_STATS);

	}
}
#endif
160 
161 /*
162  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
163  * @htt_soc:	HTT SOC handle
164  *
165  * Return: Pointer to htc packet buffer
166  */
167 static struct dp_htt_htc_pkt *
168 htt_htc_pkt_alloc(struct htt_soc *soc)
169 {
170 	struct dp_htt_htc_pkt_union *pkt = NULL;
171 
172 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
173 	if (soc->htt_htc_pkt_freelist) {
174 		pkt = soc->htt_htc_pkt_freelist;
175 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
176 	}
177 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
178 
179 	if (pkt == NULL)
180 		pkt = qdf_mem_malloc(sizeof(*pkt));
181 	return &pkt->u.pkt; /* not actually a dereference */
182 }
183 
184 /*
185  * htt_htc_pkt_free() - Free HTC packet buffer
186  * @htt_soc:	HTT SOC handle
187  */
188 static void
189 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
190 {
191 	struct dp_htt_htc_pkt_union *u_pkt =
192 		(struct dp_htt_htc_pkt_union *)pkt;
193 
194 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
195 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
196 	soc->htt_htc_pkt_freelist = u_pkt;
197 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
198 }
199 
200 /*
201  * htt_htc_pkt_pool_free() - Free HTC packet pool
202  * @htt_soc:	HTT SOC handle
203  */
204 static void
205 htt_htc_pkt_pool_free(struct htt_soc *soc)
206 {
207 	struct dp_htt_htc_pkt_union *pkt, *next;
208 	pkt = soc->htt_htc_pkt_freelist;
209 	while (pkt) {
210 		next = pkt->u.next;
211 		qdf_mem_free(pkt);
212 		pkt = next;
213 	}
214 	soc->htt_htc_pkt_freelist = NULL;
215 }
216 
/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no. of pkts in list
 *
 * Walks the misclist and, once more than @level packets have been
 * visited, frees every remaining packet (network buffer included) and
 * terminates the list at the last retained node. Runs under the HTT TX
 * mutex.
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			/* Detach the freed tail from the retained portion.
			 * prev is set to NULL below once trimming starts, so
			 * this relink happens only for the first trimmed
			 * node; the retained list is already terminated for
			 * all subsequent iterations.
			 */
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
249 
250 /*
251  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
252  * @htt_soc:	HTT SOC handle
253  * @dp_htt_htc_pkt: pkt to be added to list
254  */
255 static void
256 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
257 {
258 	struct dp_htt_htc_pkt_union *u_pkt =
259 				(struct dp_htt_htc_pkt_union *)pkt;
260 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
261 							pkt->htc_pkt.Endpoint)
262 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
263 
264 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
265 	if (soc->htt_htc_pkt_misclist) {
266 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
267 		soc->htt_htc_pkt_misclist = u_pkt;
268 	} else {
269 		soc->htt_htc_pkt_misclist = u_pkt;
270 	}
271 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
272 
273 	/* only ce pipe size + tx_queue_depth could possibly be in use
274 	 * free older packets in the misclist
275 	 */
276 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
277 }
278 
279 /*
280  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
281  * @htt_soc:	HTT SOC handle
282  */
283 static void
284 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
285 {
286 	struct dp_htt_htc_pkt_union *pkt, *next;
287 	qdf_nbuf_t netbuf;
288 
289 	pkt = soc->htt_htc_pkt_misclist;
290 
291 	while (pkt) {
292 		next = pkt->u.next;
293 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
294 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
295 
296 		soc->stats.htc_pkt_free++;
297 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
298 			 "%s: Pkt free count %d\n",
299 			 __func__, soc->stats.htc_pkt_free);
300 
301 		qdf_nbuf_free(netbuf);
302 		qdf_mem_free(pkt);
303 		pkt = next;
304 	}
305 	soc->htt_htc_pkt_misclist = NULL;
306 }
307 
/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differs
 * @tgt_mac_addr:	Target MAC
 * @buffer:		Output buffer
 *
 * Return: pointer to the MAC address in host byte order — @buffer on
 * big-endian hosts, @tgt_mac_addr itself otherwise.
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The target->host upload byte-swaps each u_int32_t element of the
	 * message so 32-bit fields read correctly on a big-endian host. For
	 * a byte array like the MAC address this swizzling scrambles the
	 * byte order, so undo it: within each 4-byte group, original byte i
	 * was uploaded to position (3 - i).
	 */
	int idx;

	for (idx = 0; idx < 6; idx++)
		buffer[idx] = tgt_mac_addr[(idx & ~0x3) + (3 - (idx & 0x3))];
	return buffer;
#else
	/*
	 * Host and target endianness match; the MAC address bytes are
	 * already in the right order in the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
341 
/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc:	SOC handle (unused here)
 * @status:	Completion status (unused; the buffer is freed regardless)
 * @netbuf:	HTT buffer
 *
 * Default "part 2" send-complete handler: releases the network buffer
 * that carried an H2T message. Installed via SET_HTC_PACKET_INFO_TX in
 * the H2T senders; presumably invoked through the pPktContext callback
 * in dp_htt_h2t_send_complete() — confirm against the HTC macro.
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
354 
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context:	Opaque context (HTT SOC handle)
 * @htc_pkt:	HTC packet
 *
 * Runs when HTC finishes transmitting an H2T message: invokes the
 * per-packet "part 2" callback (stored in pPktContext) with the netbuf
 * that carried the message, then recycles the packet wrapper onto the
 * freelist.
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*send_complete_part2)(
		void *soc, A_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	send_complete_part2 = htc_pkt->pPktContext;

	/* Recover the enclosing dp_htt_htc_pkt from the embedded HTC_PACKET */
	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2 != NULL) {
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}
386 
387 /*
388  * htt_h2t_ver_req_msg() - Send HTT version request message to target
389  * @htt_soc:	HTT SOC handle
390  *
391  * Return: 0 on success; error code on failure
392  */
393 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
394 {
395 	struct dp_htt_htc_pkt *pkt;
396 	qdf_nbuf_t msg;
397 	uint32_t *msg_word;
398 
399 	msg = qdf_nbuf_alloc(
400 		soc->osdev,
401 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
402 		/* reserve room for the HTC header */
403 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
404 	if (!msg)
405 		return QDF_STATUS_E_NOMEM;
406 
407 	/*
408 	 * Set the length of the message.
409 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
410 	 * separately during the below call to qdf_nbuf_push_head.
411 	 * The contribution from the HTC header is added separately inside HTC.
412 	 */
413 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
414 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
415 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg\n",
416 			__func__);
417 		return QDF_STATUS_E_FAILURE;
418 	}
419 
420 	/* fill in the message contents */
421 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
422 
423 	/* rewind beyond alignment pad to get to the HTC header reserved area */
424 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
425 
426 	*msg_word = 0;
427 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
428 
429 	pkt = htt_htc_pkt_alloc(soc);
430 	if (!pkt) {
431 		qdf_nbuf_free(msg);
432 		return QDF_STATUS_E_FAILURE;
433 	}
434 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
435 
436 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
437 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
438 		qdf_nbuf_len(msg), soc->htc_endpoint,
439 		1); /* tag - not relevant here */
440 
441 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
442 	DP_HTT_SEND_HTC_PKT(soc, pkt);
443 	return 0;
444 }
445 
446 /*
447  * htt_srng_setup() - Send SRNG setup message to target
448  * @htt_soc:	HTT SOC handle
449  * @mac_id:	MAC Id
450  * @hal_srng:	Opaque HAL SRNG pointer
451  * @hal_ring_type:	SRNG ring type
452  *
453  * Return: 0 on success; error code on failure
454  */
455 int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
456 	int hal_ring_type)
457 {
458 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
459 	struct dp_htt_htc_pkt *pkt;
460 	qdf_nbuf_t htt_msg;
461 	uint32_t *msg_word;
462 	struct hal_srng_params srng_params;
463 	qdf_dma_addr_t hp_addr, tp_addr;
464 	uint32_t ring_entry_size =
465 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
466 	int htt_ring_type, htt_ring_id;
467 
468 	/* Sizes should be set in 4-byte words */
469 	ring_entry_size = ring_entry_size >> 2;
470 
471 	htt_msg = qdf_nbuf_alloc(soc->osdev,
472 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
473 		/* reserve room for the HTC header */
474 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
475 	if (!htt_msg)
476 		goto fail0;
477 
478 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
479 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
480 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
481 
482 	switch (hal_ring_type) {
483 	case RXDMA_BUF:
484 #ifdef QCA_HOST2FW_RXBUF_RING
485 		if (srng_params.ring_id ==
486 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
487 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
488 			htt_ring_type = HTT_SW_TO_SW_RING;
489 #ifdef IPA_OFFLOAD
490 		} else if (srng_params.ring_id ==
491 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
492 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
493 			htt_ring_type = HTT_SW_TO_SW_RING;
494 #endif
495 #else
496 		if (srng_params.ring_id ==
497 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
498 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
499 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
500 			htt_ring_type = HTT_SW_TO_HW_RING;
501 #endif
502 		} else if (srng_params.ring_id ==
503 #ifdef IPA_OFFLOAD
504 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
505 #else
506 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
507 #endif
508 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
509 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
510 			htt_ring_type = HTT_SW_TO_HW_RING;
511 		} else {
512 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
513 				   "%s: Ring %d currently not supported\n",
514 				   __func__, srng_params.ring_id);
515 			goto fail1;
516 		}
517 
518 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
519 			 "%s: ring_type %d ring_id %d\n",
520 			 __func__, hal_ring_type, srng_params.ring_id);
521 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
522 			 "%s: hp_addr 0x%llx tp_addr 0x%llx\n",
523 			 __func__, (uint64_t)hp_addr, (uint64_t)tp_addr);
524 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
525 			 "%s: htt_ring_id %d\n", __func__, htt_ring_id);
526 		break;
527 	case RXDMA_MONITOR_BUF:
528 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
529 		htt_ring_type = HTT_SW_TO_HW_RING;
530 		break;
531 	case RXDMA_MONITOR_STATUS:
532 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
533 		htt_ring_type = HTT_SW_TO_HW_RING;
534 		break;
535 	case RXDMA_MONITOR_DST:
536 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
537 		htt_ring_type = HTT_HW_TO_SW_RING;
538 		break;
539 	case RXDMA_MONITOR_DESC:
540 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
541 		htt_ring_type = HTT_SW_TO_HW_RING;
542 		break;
543 	case RXDMA_DST:
544 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
545 		htt_ring_type = HTT_HW_TO_SW_RING;
546 		break;
547 
548 	default:
549 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
550 			"%s: Ring currently not supported\n", __func__);
551 			goto fail1;
552 	}
553 
554 	/*
555 	 * Set the length of the message.
556 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
557 	 * separately during the below call to qdf_nbuf_push_head.
558 	 * The contribution from the HTC header is added separately inside HTC.
559 	 */
560 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
561 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
562 			"%s: Failed to expand head for SRING_SETUP msg\n",
563 			__func__);
564 		return QDF_STATUS_E_FAILURE;
565 	}
566 
567 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
568 
569 	/* rewind beyond alignment pad to get to the HTC header reserved area */
570 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
571 
572 	/* word 0 */
573 	*msg_word = 0;
574 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
575 
576 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
577 			(htt_ring_type == HTT_HW_TO_SW_RING))
578 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
579 			 DP_SW2HW_MACID(mac_id));
580 	else
581 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
582 
583 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
584 			 "%s: mac_id %d\n", __func__, mac_id);
585 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
586 	/* TODO: Discuss with FW on changing this to unique ID and using
587 	 * htt_ring_type to send the type of ring
588 	 */
589 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
590 
591 	/* word 1 */
592 	msg_word++;
593 	*msg_word = 0;
594 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
595 		srng_params.ring_base_paddr & 0xffffffff);
596 
597 	/* word 2 */
598 	msg_word++;
599 	*msg_word = 0;
600 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
601 		(uint64_t)srng_params.ring_base_paddr >> 32);
602 
603 	/* word 3 */
604 	msg_word++;
605 	*msg_word = 0;
606 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
607 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
608 		(ring_entry_size * srng_params.num_entries));
609 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
610 			 "%s: entry_size %d\n", __func__,
611 			 ring_entry_size);
612 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
613 			 "%s: num_entries %d\n", __func__,
614 			 srng_params.num_entries);
615 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
616 			 "%s: ring_size %d\n", __func__,
617 			 (ring_entry_size * srng_params.num_entries));
618 	if (htt_ring_type == HTT_SW_TO_HW_RING)
619 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
620 						*msg_word, 1);
621 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
622 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
623 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
624 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
625 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
626 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
627 
628 	/* word 4 */
629 	msg_word++;
630 	*msg_word = 0;
631 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
632 		hp_addr & 0xffffffff);
633 
634 	/* word 5 */
635 	msg_word++;
636 	*msg_word = 0;
637 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
638 		(uint64_t)hp_addr >> 32);
639 
640 	/* word 6 */
641 	msg_word++;
642 	*msg_word = 0;
643 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
644 		tp_addr & 0xffffffff);
645 
646 	/* word 7 */
647 	msg_word++;
648 	*msg_word = 0;
649 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
650 		(uint64_t)tp_addr >> 32);
651 
652 	/* word 8 */
653 	msg_word++;
654 	*msg_word = 0;
655 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
656 		srng_params.msi_addr & 0xffffffff);
657 
658 	/* word 9 */
659 	msg_word++;
660 	*msg_word = 0;
661 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
662 		(uint64_t)(srng_params.msi_addr) >> 32);
663 
664 	/* word 10 */
665 	msg_word++;
666 	*msg_word = 0;
667 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
668 		srng_params.msi_data);
669 
670 	/* word 11 */
671 	msg_word++;
672 	*msg_word = 0;
673 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
674 		srng_params.intr_batch_cntr_thres_entries *
675 		ring_entry_size);
676 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
677 		srng_params.intr_timer_thres_us >> 3);
678 
679 	/* word 12 */
680 	msg_word++;
681 	*msg_word = 0;
682 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
683 		/* TODO: Setting low threshold to 1/8th of ring size - see
684 		 * if this needs to be configurable
685 		 */
686 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
687 			srng_params.low_threshold);
688 	}
689 	/* "response_required" field should be set if a HTT response message is
690 	 * required after setting up the ring.
691 	 */
692 	pkt = htt_htc_pkt_alloc(soc);
693 	if (!pkt)
694 		goto fail1;
695 
696 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
697 
698 	SET_HTC_PACKET_INFO_TX(
699 		&pkt->htc_pkt,
700 		dp_htt_h2t_send_complete_free_netbuf,
701 		qdf_nbuf_data(htt_msg),
702 		qdf_nbuf_len(htt_msg),
703 		soc->htc_endpoint,
704 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
705 
706 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
707 	DP_HTT_SEND_HTC_PKT(soc, pkt);
708 
709 	return QDF_STATUS_SUCCESS;
710 
711 fail1:
712 	qdf_nbuf_free(htt_msg);
713 fail0:
714 	return QDF_STATUS_E_FAILURE;
715 }
716 
717 /*
718  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
719  * config message to target
720  * @htt_soc:	HTT SOC handle
721  * @pdev_id:	PDEV Id
722  * @hal_srng:	Opaque HAL SRNG pointer
723  * @hal_ring_type:	SRNG ring type
724  * @ring_buf_size:	SRNG buffer size
725  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
726  * Return: 0 on success; error code on failure
727  */
728 int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
729 	int hal_ring_type, int ring_buf_size,
730 	struct htt_rx_ring_tlv_filter *htt_tlv_filter)
731 {
732 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
733 	struct dp_htt_htc_pkt *pkt;
734 	qdf_nbuf_t htt_msg;
735 	uint32_t *msg_word;
736 	struct hal_srng_params srng_params;
737 	uint32_t htt_ring_type, htt_ring_id;
738 	uint32_t tlv_filter;
739 
740 	htt_msg = qdf_nbuf_alloc(soc->osdev,
741 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
742 	/* reserve room for the HTC header */
743 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
744 	if (!htt_msg)
745 		goto fail0;
746 
747 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
748 
749 	switch (hal_ring_type) {
750 	case RXDMA_BUF:
751 #if QCA_HOST2FW_RXBUF_RING
752 		htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
753 		htt_ring_type = HTT_SW_TO_SW_RING;
754 #else
755 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
756 		htt_ring_type = HTT_SW_TO_HW_RING;
757 #endif
758 		break;
759 	case RXDMA_MONITOR_BUF:
760 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
761 		htt_ring_type = HTT_SW_TO_HW_RING;
762 		break;
763 	case RXDMA_MONITOR_STATUS:
764 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
765 		htt_ring_type = HTT_SW_TO_HW_RING;
766 		break;
767 	case RXDMA_MONITOR_DST:
768 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
769 		htt_ring_type = HTT_HW_TO_SW_RING;
770 		break;
771 	case RXDMA_MONITOR_DESC:
772 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
773 		htt_ring_type = HTT_SW_TO_HW_RING;
774 		break;
775 	case RXDMA_DST:
776 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
777 		htt_ring_type = HTT_HW_TO_SW_RING;
778 		break;
779 
780 	default:
781 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
782 			"%s: Ring currently not supported\n", __func__);
783 		goto fail1;
784 	}
785 
786 	/*
787 	 * Set the length of the message.
788 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
789 	 * separately during the below call to qdf_nbuf_push_head.
790 	 * The contribution from the HTC header is added separately inside HTC.
791 	 */
792 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
793 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
794 			"%s: Failed to expand head for RX Ring Cfg msg\n",
795 			__func__);
796 		goto fail1; /* failure */
797 	}
798 
799 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
800 
801 	/* rewind beyond alignment pad to get to the HTC header reserved area */
802 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
803 
804 	/* word 0 */
805 	*msg_word = 0;
806 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
807 
808 	/*
809 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
810 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
811 	 */
812 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
813 			htt_ring_type == HTT_SW_TO_HW_RING)
814 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
815 						DP_SW2HW_MACID(pdev_id));
816 
817 	/* TODO: Discuss with FW on changing this to unique ID and using
818 	 * htt_ring_type to send the type of ring
819 	 */
820 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
821 
822 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
823 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
824 
825 	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
826 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
827 
828 	/* word 1 */
829 	msg_word++;
830 	*msg_word = 0;
831 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
832 		ring_buf_size);
833 
834 	/* word 2 */
835 	msg_word++;
836 	*msg_word = 0;
837 
838 	if (htt_tlv_filter->enable_fp) {
839 		/* TYPE: MGMT */
840 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
841 			FP, MGMT, 0000,
842 			(htt_tlv_filter->fp_mgmt_filter &
843 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
844 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
845 			FP, MGMT, 0001,
846 			(htt_tlv_filter->fp_mgmt_filter &
847 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
848 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
849 			FP, MGMT, 0010,
850 			(htt_tlv_filter->fp_mgmt_filter &
851 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
852 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
853 			FP, MGMT, 0011,
854 			(htt_tlv_filter->fp_mgmt_filter &
855 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
856 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
857 			FP, MGMT, 0100,
858 			(htt_tlv_filter->fp_mgmt_filter &
859 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
860 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
861 			FP, MGMT, 0101,
862 			(htt_tlv_filter->fp_mgmt_filter &
863 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
864 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
865 			FP, MGMT, 0110,
866 			(htt_tlv_filter->fp_mgmt_filter &
867 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
868 		/* reserved */
869 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
870 			MGMT, 0111,
871 			(htt_tlv_filter->fp_mgmt_filter &
872 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
873 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
874 			FP, MGMT, 1000,
875 			(htt_tlv_filter->fp_mgmt_filter &
876 			FILTER_MGMT_BEACON) ? 1 : 0);
877 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
878 			FP, MGMT, 1001,
879 			(htt_tlv_filter->fp_mgmt_filter &
880 			FILTER_MGMT_ATIM) ? 1 : 0);
881 	}
882 
883 	if (htt_tlv_filter->enable_md) {
884 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
885 				MGMT, 0000, 1);
886 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
887 				MGMT, 0001, 1);
888 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
889 				MGMT, 0010, 1);
890 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
891 				MGMT, 0011, 1);
892 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
893 				MGMT, 0100, 1);
894 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
895 				MGMT, 0101, 1);
896 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
897 				MGMT, 0110, 1);
898 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
899 				MGMT, 0111, 1);
900 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
901 				MGMT, 1000, 1);
902 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
903 				MGMT, 1001, 1);
904 	}
905 
906 	if (htt_tlv_filter->enable_mo) {
907 		/* TYPE: MGMT */
908 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
909 			MO, MGMT, 0000,
910 			(htt_tlv_filter->mo_mgmt_filter &
911 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
912 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
913 			MO, MGMT, 0001,
914 			(htt_tlv_filter->mo_mgmt_filter &
915 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
916 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
917 			MO, MGMT, 0010,
918 			(htt_tlv_filter->mo_mgmt_filter &
919 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
920 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
921 			MO, MGMT, 0011,
922 			(htt_tlv_filter->mo_mgmt_filter &
923 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
924 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
925 			MO, MGMT, 0100,
926 			(htt_tlv_filter->mo_mgmt_filter &
927 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
928 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
929 			MO, MGMT, 0101,
930 			(htt_tlv_filter->mo_mgmt_filter &
931 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
932 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
933 			MO, MGMT, 0110,
934 			(htt_tlv_filter->mo_mgmt_filter &
935 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
936 		/* reserved */
937 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
938 			MGMT, 0111,
939 			(htt_tlv_filter->mo_mgmt_filter &
940 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
941 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
942 			MO, MGMT, 1000,
943 			(htt_tlv_filter->mo_mgmt_filter &
944 			FILTER_MGMT_BEACON) ? 1 : 0);
945 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
946 			MO, MGMT, 1001,
947 			(htt_tlv_filter->mo_mgmt_filter &
948 			FILTER_MGMT_ATIM) ? 1 : 0);
949 	}
950 
951 	/* word 3 */
952 	msg_word++;
953 	*msg_word = 0;
954 
955 	if (htt_tlv_filter->enable_fp) {
956 		/* TYPE: MGMT */
957 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
958 			FP, MGMT, 1010,
959 			(htt_tlv_filter->fp_mgmt_filter &
960 			FILTER_MGMT_DISASSOC) ? 1 : 0);
961 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
962 			FP, MGMT, 1011,
963 			(htt_tlv_filter->fp_mgmt_filter &
964 			FILTER_MGMT_AUTH) ? 1 : 0);
965 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
966 			FP, MGMT, 1100,
967 			(htt_tlv_filter->fp_mgmt_filter &
968 			FILTER_MGMT_DEAUTH) ? 1 : 0);
969 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
970 			FP, MGMT, 1101,
971 			(htt_tlv_filter->fp_mgmt_filter &
972 			FILTER_MGMT_ACTION) ? 1 : 0);
973 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
974 			FP, MGMT, 1110,
975 			(htt_tlv_filter->fp_mgmt_filter &
976 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
977 		/* reserved*/
978 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
979 			MGMT, 1111,
980 			(htt_tlv_filter->fp_mgmt_filter &
981 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
982 	}
983 
984 	if (htt_tlv_filter->enable_md) {
985 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
986 				MGMT, 1010, 1);
987 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
988 				MGMT, 1011, 1);
989 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
990 				MGMT, 1100, 1);
991 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
992 				MGMT, 1101, 1);
993 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
994 				MGMT, 1110, 1);
995 	}
996 
997 	if (htt_tlv_filter->enable_mo) {
998 		/* TYPE: MGMT */
999 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1000 			MO, MGMT, 1010,
1001 			(htt_tlv_filter->mo_mgmt_filter &
1002 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1003 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1004 			MO, MGMT, 1011,
1005 			(htt_tlv_filter->mo_mgmt_filter &
1006 			FILTER_MGMT_AUTH) ? 1 : 0);
1007 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1008 			MO, MGMT, 1100,
1009 			(htt_tlv_filter->mo_mgmt_filter &
1010 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1011 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1012 			MO, MGMT, 1101,
1013 			(htt_tlv_filter->mo_mgmt_filter &
1014 			FILTER_MGMT_ACTION) ? 1 : 0);
1015 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1016 			MO, MGMT, 1110,
1017 			(htt_tlv_filter->mo_mgmt_filter &
1018 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1019 		/* reserved*/
1020 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1021 			MGMT, 1111,
1022 			(htt_tlv_filter->mo_mgmt_filter &
1023 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1024 	}
1025 
1026 	/* word 4 */
1027 	msg_word++;
1028 	*msg_word = 0;
1029 
1030 	if (htt_tlv_filter->enable_fp) {
1031 		/* TYPE: CTRL */
1032 		/* reserved */
1033 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1034 			CTRL, 0000,
1035 			(htt_tlv_filter->fp_ctrl_filter &
1036 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1037 		/* reserved */
1038 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1039 			CTRL, 0001,
1040 			(htt_tlv_filter->fp_ctrl_filter &
1041 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1042 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1043 			CTRL, 0010,
1044 			(htt_tlv_filter->fp_ctrl_filter &
1045 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1046 		/* reserved */
1047 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1048 			CTRL, 0011,
1049 			(htt_tlv_filter->fp_ctrl_filter &
1050 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1051 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1052 			CTRL, 0100,
1053 			(htt_tlv_filter->fp_ctrl_filter &
1054 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1055 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1056 			CTRL, 0101,
1057 			(htt_tlv_filter->fp_ctrl_filter &
1058 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1059 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1060 			CTRL, 0110,
1061 			(htt_tlv_filter->fp_ctrl_filter &
1062 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1063 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1064 			CTRL, 0111,
1065 			(htt_tlv_filter->fp_ctrl_filter &
1066 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1067 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1068 			CTRL, 1000,
1069 			(htt_tlv_filter->fp_ctrl_filter &
1070 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1071 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1072 			CTRL, 1001,
1073 			(htt_tlv_filter->fp_ctrl_filter &
1074 			FILTER_CTRL_BA) ? 1 : 0);
1075 	}
1076 
1077 	if (htt_tlv_filter->enable_md) {
1078 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1079 				CTRL, 0000, 1);
1080 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1081 				CTRL, 0001, 1);
1082 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1083 				CTRL, 0010, 1);
1084 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1085 				CTRL, 0011, 1);
1086 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1087 				CTRL, 0100, 1);
1088 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1089 				CTRL, 0101, 1);
1090 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1091 				CTRL, 0110, 1);
1092 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1093 				CTRL, 0111, 1);
1094 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1095 				CTRL, 1000, 1);
1096 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1097 				CTRL, 1001, 1);
1098 	}
1099 
1100 	if (htt_tlv_filter->enable_mo) {
1101 		/* TYPE: CTRL */
1102 		/* reserved */
1103 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1104 			CTRL, 0000,
1105 			(htt_tlv_filter->mo_ctrl_filter &
1106 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1107 		/* reserved */
1108 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1109 			CTRL, 0001,
1110 			(htt_tlv_filter->mo_ctrl_filter &
1111 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1112 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1113 			CTRL, 0010,
1114 			(htt_tlv_filter->mo_ctrl_filter &
1115 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1116 		/* reserved */
1117 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1118 			CTRL, 0011,
1119 			(htt_tlv_filter->mo_ctrl_filter &
1120 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1121 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1122 			CTRL, 0100,
1123 			(htt_tlv_filter->mo_ctrl_filter &
1124 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1125 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1126 			CTRL, 0101,
1127 			(htt_tlv_filter->mo_ctrl_filter &
1128 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1129 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1130 			CTRL, 0110,
1131 			(htt_tlv_filter->mo_ctrl_filter &
1132 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1133 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1134 			CTRL, 0111,
1135 			(htt_tlv_filter->mo_ctrl_filter &
1136 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1137 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1138 			CTRL, 1000,
1139 			(htt_tlv_filter->mo_ctrl_filter &
1140 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1141 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1142 			CTRL, 1001,
1143 			(htt_tlv_filter->mo_ctrl_filter &
1144 			FILTER_CTRL_BA) ? 1 : 0);
1145 	}
1146 
1147 	/* word 5 */
1148 	msg_word++;
1149 	*msg_word = 0;
1150 	if (htt_tlv_filter->enable_fp) {
1151 		/* TYPE: CTRL */
1152 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1153 			CTRL, 1010,
1154 			(htt_tlv_filter->fp_ctrl_filter &
1155 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1156 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1157 			CTRL, 1011,
1158 			(htt_tlv_filter->fp_ctrl_filter &
1159 			FILTER_CTRL_RTS) ? 1 : 0);
1160 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1161 			CTRL, 1100,
1162 			(htt_tlv_filter->fp_ctrl_filter &
1163 			FILTER_CTRL_CTS) ? 1 : 0);
1164 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1165 			CTRL, 1101,
1166 			(htt_tlv_filter->fp_ctrl_filter &
1167 			FILTER_CTRL_ACK) ? 1 : 0);
1168 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1169 			CTRL, 1110,
1170 			(htt_tlv_filter->fp_ctrl_filter &
1171 			FILTER_CTRL_CFEND) ? 1 : 0);
1172 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1173 			CTRL, 1111,
1174 			(htt_tlv_filter->fp_ctrl_filter &
1175 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1176 		/* TYPE: DATA */
1177 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1178 			DATA, MCAST,
1179 			(htt_tlv_filter->fp_data_filter &
1180 			FILTER_DATA_MCAST) ? 1 : 0);
1181 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1182 			DATA, UCAST,
1183 			(htt_tlv_filter->fp_data_filter &
1184 			FILTER_DATA_UCAST) ? 1 : 0);
1185 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1186 			DATA, NULL,
1187 			(htt_tlv_filter->fp_data_filter &
1188 			FILTER_DATA_NULL) ? 1 : 0);
1189 	}
1190 
1191 	if (htt_tlv_filter->enable_md) {
1192 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1193 				CTRL, 1010, 1);
1194 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1195 				CTRL, 1011, 1);
1196 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1197 				CTRL, 1100, 1);
1198 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1199 				CTRL, 1101, 1);
1200 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1201 				CTRL, 1110, 1);
1202 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1203 				CTRL, 1111, 1);
1204 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1205 				DATA, MCAST, 1);
1206 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1207 				DATA, UCAST, 1);
1208 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1209 				DATA, NULL, 1);
1210 	}
1211 
1212 	if (htt_tlv_filter->enable_mo) {
1213 		/* TYPE: CTRL */
1214 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1215 			CTRL, 1010,
1216 			(htt_tlv_filter->mo_ctrl_filter &
1217 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1218 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1219 			CTRL, 1011,
1220 			(htt_tlv_filter->mo_ctrl_filter &
1221 			FILTER_CTRL_RTS) ? 1 : 0);
1222 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1223 			CTRL, 1100,
1224 			(htt_tlv_filter->mo_ctrl_filter &
1225 			FILTER_CTRL_CTS) ? 1 : 0);
1226 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1227 			CTRL, 1101,
1228 			(htt_tlv_filter->mo_ctrl_filter &
1229 			FILTER_CTRL_ACK) ? 1 : 0);
1230 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1231 			CTRL, 1110,
1232 			(htt_tlv_filter->mo_ctrl_filter &
1233 			FILTER_CTRL_CFEND) ? 1 : 0);
1234 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1235 			CTRL, 1111,
1236 			(htt_tlv_filter->mo_ctrl_filter &
1237 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1238 		/* TYPE: DATA */
1239 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1240 			DATA, MCAST,
1241 			(htt_tlv_filter->mo_data_filter &
1242 			FILTER_DATA_MCAST) ? 1 : 0);
1243 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1244 			DATA, UCAST,
1245 			(htt_tlv_filter->mo_data_filter &
1246 			FILTER_DATA_UCAST) ? 1 : 0);
1247 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1248 			DATA, NULL,
1249 			(htt_tlv_filter->mo_data_filter &
1250 			FILTER_DATA_NULL) ? 1 : 0);
1251 	}
1252 
1253 	/* word 6 */
1254 	msg_word++;
1255 	*msg_word = 0;
1256 	tlv_filter = 0;
1257 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1258 		htt_tlv_filter->mpdu_start);
1259 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1260 		htt_tlv_filter->msdu_start);
1261 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1262 		htt_tlv_filter->packet);
1263 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1264 		htt_tlv_filter->msdu_end);
1265 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1266 		htt_tlv_filter->mpdu_end);
1267 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1268 		htt_tlv_filter->packet_header);
1269 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1270 		htt_tlv_filter->attention);
1271 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1272 		htt_tlv_filter->ppdu_start);
1273 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1274 		htt_tlv_filter->ppdu_end);
1275 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1276 		htt_tlv_filter->ppdu_end_user_stats);
1277 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1278 		PPDU_END_USER_STATS_EXT,
1279 		htt_tlv_filter->ppdu_end_user_stats_ext);
1280 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1281 		htt_tlv_filter->ppdu_end_status_done);
1282 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1283 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1284 		 htt_tlv_filter->header_per_msdu);
1285 
1286 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1287 
1288 	/* "response_required" field should be set if a HTT response message is
1289 	 * required after setting up the ring.
1290 	 */
1291 	pkt = htt_htc_pkt_alloc(soc);
1292 	if (!pkt)
1293 		goto fail1;
1294 
1295 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1296 
1297 	SET_HTC_PACKET_INFO_TX(
1298 		&pkt->htc_pkt,
1299 		dp_htt_h2t_send_complete_free_netbuf,
1300 		qdf_nbuf_data(htt_msg),
1301 		qdf_nbuf_len(htt_msg),
1302 		soc->htc_endpoint,
1303 		1); /* tag - not relevant here */
1304 
1305 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1306 	DP_HTT_SEND_HTC_PKT(soc, pkt);
1307 	return QDF_STATUS_SUCCESS;
1308 
1309 fail1:
1310 	qdf_nbuf_free(htt_msg);
1311 fail0:
1312 	return QDF_STATUS_E_FAILURE;
1313 }
1314 
#if defined(CONFIG_WIN) && WDI_EVENT_ENABLE
/**
 * dp_send_htt_stat_resp() - forward one HTT EXT stats buffer to WDI listeners
 * @htt_stats: context tracking the remaining length of the stats stream
 * @soc: DP SoC handle
 * @htt_msg: network buffer holding the raw T2H stats message
 *
 * Hands the raw message (including the HTT header) to registered WDI event
 * handlers and deducts one extension-message worth of data from
 * htt_stats->msg_len. The buffer is consumed (freed) on return.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
					struct dp_soc *soc, qdf_nbuf_t htt_msg)

{
	uint32_t pdev_id;
	uint32_t *msg_word = NULL;
	uint32_t msg_remain_len = 0;

	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);

	/* pdev id lives in the low bits of the cookie MSB (word 2) */
	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;

	/* stats message length + 16 (size of HTT header), capped at one
	 * extension message (DP_EXT_MSG_LENGTH)
	 */
	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
				(uint32_t)DP_EXT_MSG_LENGTH);

	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
			msg_word,  msg_remain_len,
			WDI_NO_VAL, pdev_id);

	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
	}
	/* Need to be freed here as WDI handler will
	 * make a copy of pkt to send data to application
	 */
	qdf_nbuf_free(htt_msg);
	return QDF_STATUS_SUCCESS;
}
#else
/* WDI events disabled: report no-support so the caller keeps ownership of
 * the buffer and processes it through the normal stats path instead.
 */
static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
					struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	return QDF_STATUS_E_NOSUPPORT;
}
#endif
1353 
1354 /**
1355  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1356  * @htt_stats: htt stats info
1357  *
1358  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1359  * contains sub messages which are identified by a TLV header.
1360  * In this function we will process the stream of T2H messages and read all the
1361  * TLV contained in the message.
1362  *
1363  * THe following cases have been taken care of
1364  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1365  *		In this case the buffer will contain multiple tlvs.
1366  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1367  *		Only one tlv will be contained in the HTT message and this tag
1368  *		will extend onto the next buffer.
1369  * Case 3: When the buffer is the continuation of the previous message
1370  * Case 4: tlv length is 0. which will indicate the end of message
1371  *
1372  * return: void
1373  */
1374 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
1375 					struct dp_soc *soc)
1376 {
1377 	htt_tlv_tag_t tlv_type = 0xff;
1378 	qdf_nbuf_t htt_msg = NULL;
1379 	uint32_t *msg_word;
1380 	uint8_t *tlv_buf_head = NULL;
1381 	uint8_t *tlv_buf_tail = NULL;
1382 	uint32_t msg_remain_len = 0;
1383 	uint32_t tlv_remain_len = 0;
1384 	uint32_t *tlv_start;
1385 	int cookie_val;
1386 	int cookie_msb;
1387 	int pdev_id;
1388 	bool copy_stats = false;
1389 	struct dp_pdev *pdev;
1390 
1391 	/* Process node in the HTT message queue */
1392 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1393 		!= NULL) {
1394 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1395 		cookie_val = *(msg_word + 1);
1396 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
1397 					*(msg_word +
1398 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
1399 
1400 		if (cookie_val) {
1401 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
1402 					== QDF_STATUS_SUCCESS) {
1403 				continue;
1404 			}
1405 		}
1406 
1407 		cookie_msb = *(msg_word + 2);
1408 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1409 		pdev = soc->pdev_list[pdev_id];
1410 
1411 		if (cookie_msb >> 2) {
1412 			copy_stats = true;
1413 		}
1414 
1415 		/* read 5th word */
1416 		msg_word = msg_word + 4;
1417 		msg_remain_len = qdf_min(htt_stats->msg_len,
1418 				(uint32_t) DP_EXT_MSG_LENGTH);
1419 		/* Keep processing the node till node length is 0 */
1420 		while (msg_remain_len) {
1421 			/*
1422 			 * if message is not a continuation of previous message
1423 			 * read the tlv type and tlv length
1424 			 */
1425 			if (!tlv_buf_head) {
1426 				tlv_type = HTT_STATS_TLV_TAG_GET(
1427 						*msg_word);
1428 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
1429 						*msg_word);
1430 			}
1431 
1432 			if (tlv_remain_len == 0) {
1433 				msg_remain_len = 0;
1434 
1435 				if (tlv_buf_head) {
1436 					qdf_mem_free(tlv_buf_head);
1437 					tlv_buf_head = NULL;
1438 					tlv_buf_tail = NULL;
1439 				}
1440 
1441 				goto error;
1442 			}
1443 
1444 			if (!tlv_buf_head)
1445 				tlv_remain_len += HTT_TLV_HDR_LEN;
1446 
1447 			if ((tlv_remain_len <= msg_remain_len)) {
1448 				/* Case 3 */
1449 				if (tlv_buf_head) {
1450 					qdf_mem_copy(tlv_buf_tail,
1451 							(uint8_t *)msg_word,
1452 							tlv_remain_len);
1453 					tlv_start = (uint32_t *)tlv_buf_head;
1454 				} else {
1455 					/* Case 1 */
1456 					tlv_start = msg_word;
1457 				}
1458 
1459 				if (copy_stats)
1460 					dp_htt_stats_copy_tag(pdev, tlv_type, tlv_start);
1461 				else
1462 					dp_htt_stats_print_tag(tlv_type, tlv_start);
1463 
1464 				msg_remain_len -= tlv_remain_len;
1465 
1466 				msg_word = (uint32_t *)
1467 					(((uint8_t *)msg_word) +
1468 					tlv_remain_len);
1469 
1470 				tlv_remain_len = 0;
1471 
1472 				if (tlv_buf_head) {
1473 					qdf_mem_free(tlv_buf_head);
1474 					tlv_buf_head = NULL;
1475 					tlv_buf_tail = NULL;
1476 				}
1477 
1478 			} else { /* tlv_remain_len > msg_remain_len */
1479 				/* Case 2 & 3 */
1480 				if (!tlv_buf_head) {
1481 					tlv_buf_head = qdf_mem_malloc(
1482 							tlv_remain_len);
1483 
1484 					if (!tlv_buf_head) {
1485 						QDF_TRACE(QDF_MODULE_ID_TXRX,
1486 								QDF_TRACE_LEVEL_ERROR,
1487 								"Alloc failed");
1488 						goto error;
1489 					}
1490 
1491 					tlv_buf_tail = tlv_buf_head;
1492 				}
1493 
1494 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
1495 						msg_remain_len);
1496 				tlv_remain_len -= msg_remain_len;
1497 				tlv_buf_tail += msg_remain_len;
1498 			}
1499 		}
1500 
1501 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1502 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1503 		}
1504 
1505 		qdf_nbuf_free(htt_msg);
1506 	}
1507 	return;
1508 
1509 error:
1510 	qdf_nbuf_free(htt_msg);
1511 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1512 			!= NULL)
1513 		qdf_nbuf_free(htt_msg);
1514 }
1515 
/**
 * htt_t2h_stats_handler() - deferred-work handler that drains one completed
 * HTT EXT stats response from the soc queue and processes it
 * @context: opaque work context, actually a struct dp_soc pointer
 *
 * Under the stats lock, moves buffers from soc->htt_stats.msg into a local
 * queue until the buffer carrying the DONE bit is seen, then processes that
 * complete stream outside the lock. Reschedules itself while more completed
 * responses remain pending.
 */
void htt_t2h_stats_handler(void *context)
{
	struct dp_soc *soc = (struct dp_soc *)context;
	struct htt_stats_context htt_stats;
	uint32_t *msg_word;
	qdf_nbuf_t htt_msg = NULL;
	uint8_t done;
	uint8_t rem_stats;

	/* Bail out if the soc is gone or common init has not completed */
	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"soc: 0x%pK, init_done: %d", soc,
			qdf_atomic_read(&soc->cmn_init_done));
		return;
	}

	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
	qdf_nbuf_queue_init(&htt_stats.msg);

	/* pull one completed stats from soc->htt_stats_msg and process */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	if (!soc->htt_stats.num_stats) {
		qdf_spin_unlock_bh(&soc->htt_stats.lock);
		return;
	}
	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
		/*
		 * Done bit signifies that this is the last T2H buffer in the
		 * stream of HTT EXT STATS message
		 */
		if (done)
			break;
	}
	/* One complete response has been claimed from the shared queue */
	rem_stats = --soc->htt_stats.num_stats;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	/* Process outside the lock; the callee frees all queued buffers */
	dp_process_htt_stat_msg(&htt_stats, soc);
	/* If there are more stats to process, schedule stats work again */
	if (rem_stats)
		qdf_sched_work(0, &soc->htt_stats.work);
}
1561 
1562 /*
1563  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1564  * if a new peer id arrives in a PPDU
1565  * pdev: DP pdev handle
1566  * @peer_id : peer unique identifier
1567  * @ppdu_info: per ppdu tlv structure
1568  *
1569  * return:user index to be populated
1570  */
1571 #ifdef FEATURE_PERPKT_INFO
1572 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
1573 						uint16_t peer_id,
1574 						struct ppdu_info *ppdu_info)
1575 {
1576 	uint8_t user_index = 0;
1577 	struct cdp_tx_completion_ppdu *ppdu_desc;
1578 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1579 
1580 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1581 
1582 	while ((user_index + 1) <= ppdu_info->last_user) {
1583 		ppdu_user_desc = &ppdu_desc->user[user_index];
1584 		if (ppdu_user_desc->peer_id != peer_id) {
1585 			user_index++;
1586 			continue;
1587 		} else {
1588 			/* Max users possible is 8 so user array index should
1589 			 * not exceed 7
1590 			 */
1591 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
1592 			return user_index;
1593 		}
1594 	}
1595 
1596 	ppdu_info->last_user++;
1597 	/* Max users possible is 8 so last user should not exceed 8 */
1598 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
1599 	return ppdu_info->last_user - 1;
1600 }
1601 
1602 /*
1603  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
1604  * pdev: DP pdev handle
1605  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
1606  * @ppdu_info: per ppdu tlv structure
1607  *
1608  * return:void
1609  */
1610 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
1611 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
1612 {
1613 	uint16_t frame_type;
1614 	uint16_t freq;
1615 	struct dp_soc *soc = NULL;
1616 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
1617 
1618 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1619 
1620 	tag_buf += 2;
1621 	ppdu_desc->num_users =
1622 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
1623 	tag_buf++;
1624 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
1625 
1626 	if ((frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) ||
1627 			(frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU))
1628 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
1629 	else
1630 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
1631 
1632 	tag_buf += 2;
1633 	ppdu_desc->tx_duration = *tag_buf;
1634 	tag_buf += 3;
1635 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
1636 
1637 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
1638 					ppdu_desc->tx_duration;
1639 	/* Ack time stamp is same as end time stamp*/
1640 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
1641 
1642 	tag_buf++;
1643 
1644 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
1645 	if (freq != ppdu_desc->channel) {
1646 		soc = pdev->soc;
1647 		ppdu_desc->channel = freq;
1648 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
1649 			pdev->operating_channel =
1650 		soc->cdp_soc.ol_ops->freq_to_channel(pdev->osif_pdev, freq);
1651 	}
1652 
1653 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
1654 }
1655 
1656 /*
1657  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
1658  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
1659  * @ppdu_info: per ppdu tlv structure
1660  *
1661  * return:void
1662  */
1663 static void dp_process_ppdu_stats_user_common_tlv(
1664 		struct dp_pdev *pdev, uint32_t *tag_buf,
1665 		struct ppdu_info *ppdu_info)
1666 {
1667 	uint16_t peer_id;
1668 	struct dp_peer *peer;
1669 	struct cdp_tx_completion_ppdu *ppdu_desc;
1670 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1671 	uint8_t curr_user_index = 0;
1672 
1673 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1674 
1675 	tag_buf++;
1676 	peer_id = HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
1677 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1678 
1679 	if (!peer)
1680 		return;
1681 
1682 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1683 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1684 
1685 	ppdu_user_desc->peer_id = peer_id;
1686 
1687 	tag_buf++;
1688 
1689 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
1690 		ppdu_user_desc->is_mcast = true;
1691 		ppdu_user_desc->mpdu_tried_mcast =
1692 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1693 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
1694 	} else {
1695 		ppdu_user_desc->mpdu_tried_ucast =
1696 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1697 	}
1698 
1699 	tag_buf++;
1700 
1701 	ppdu_user_desc->qos_ctrl =
1702 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
1703 	ppdu_user_desc->frame_ctrl =
1704 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
1705 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
1706 }
1707 
1708 
1709 /**
1710  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
1711  * @pdev: DP pdev handle
1712  * @tag_buf: T2H message buffer carrying the user rate TLV
1713  * @ppdu_info: per ppdu tlv structure
1714  *
1715  * return:void
1716  */
1717 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
1718 		uint32_t *tag_buf,
1719 		struct ppdu_info *ppdu_info)
1720 {
1721 	uint16_t peer_id;
1722 	struct dp_peer *peer;
1723 	struct cdp_tx_completion_ppdu *ppdu_desc;
1724 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1725 	uint8_t curr_user_index = 0;
1726 
1727 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1728 
1729 	tag_buf++;
1730 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
1731 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1732 
1733 	if (!peer)
1734 		return;
1735 
1736 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1737 
1738 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1739 	ppdu_user_desc->peer_id = peer_id;
1740 
1741 	ppdu_user_desc->tid =
1742 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
1743 
1744 	qdf_mem_copy(ppdu_user_desc->mac_addr, peer->mac_addr.raw,
1745 			DP_MAC_ADDR_LEN);
1746 
1747 	tag_buf += 2;
1748 
1749 	ppdu_user_desc->ru_tones = (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
1750 			HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
1751 
1752 	tag_buf += 2;
1753 
1754 	ppdu_user_desc->ppdu_type =
1755 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
1756 
1757 	tag_buf++;
1758 	ppdu_user_desc->tx_rate = *tag_buf;
1759 
1760 	ppdu_user_desc->ltf_size =
1761 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
1762 	ppdu_user_desc->stbc =
1763 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
1764 	ppdu_user_desc->he_re =
1765 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
1766 	ppdu_user_desc->txbf =
1767 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
1768 	ppdu_user_desc->bw =
1769 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf);
1770 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
1771 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
1772 	ppdu_user_desc->preamble =
1773 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
1774 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
1775 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
1776 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
1777 }
1778 
1779 /*
1780  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
1781  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1782  * pdev: DP PDEV handle
1783  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1784  * @ppdu_info: per ppdu tlv structure
1785  *
1786  * return:void
1787  */
1788 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
1789 		struct dp_pdev *pdev, uint32_t *tag_buf,
1790 		struct ppdu_info *ppdu_info)
1791 {
1792 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
1793 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
1794 
1795 	struct cdp_tx_completion_ppdu *ppdu_desc;
1796 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1797 	uint8_t curr_user_index = 0;
1798 	uint16_t peer_id;
1799 	struct dp_peer *peer;
1800 
1801 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1802 
1803 	tag_buf++;
1804 
1805 	peer_id =
1806 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1807 
1808 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1809 
1810 	if (!peer)
1811 		return;
1812 
1813 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1814 
1815 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1816 	ppdu_user_desc->peer_id = peer_id;
1817 
1818 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1819 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1820 					CDP_BA_64_BIT_MAP_SIZE_DWORDS);
1821 }
1822 
1823 /*
1824  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
1825  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1826  * soc: DP SOC handle
1827  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1828  * @ppdu_info: per ppdu tlv structure
1829  *
1830  * return:void
1831  */
1832 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
1833 		struct dp_pdev *pdev, uint32_t *tag_buf,
1834 		struct ppdu_info *ppdu_info)
1835 {
1836 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
1837 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
1838 
1839 	struct cdp_tx_completion_ppdu *ppdu_desc;
1840 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1841 	uint8_t curr_user_index = 0;
1842 	uint16_t peer_id;
1843 	struct dp_peer *peer;
1844 
1845 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1846 
1847 	tag_buf++;
1848 
1849 	peer_id =
1850 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1851 
1852 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1853 
1854 	if (!peer)
1855 		return;
1856 
1857 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1858 
1859 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1860 	ppdu_user_desc->peer_id = peer_id;
1861 
1862 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1863 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1864 					CDP_BA_256_BIT_MAP_SIZE_DWORDS);
1865 }
1866 
1867 /*
1868  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
1869  * htt_ppdu_stats_user_cmpltn_common_tlv
1870  * soc: DP SOC handle
1871  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
1872  * @ppdu_info: per ppdu tlv structure
1873  *
1874  * return:void
1875  */
1876 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
1877 		struct dp_pdev *pdev, uint32_t *tag_buf,
1878 		struct ppdu_info *ppdu_info)
1879 {
1880 	uint16_t peer_id;
1881 	struct dp_peer *peer;
1882 	struct cdp_tx_completion_ppdu *ppdu_desc;
1883 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1884 	uint8_t curr_user_index = 0;
1885 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
1886 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
1887 
1888 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1889 
1890 	tag_buf++;
1891 	peer_id =
1892 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
1893 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1894 
1895 	if (!peer)
1896 		return;
1897 
1898 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1899 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1900 	ppdu_user_desc->peer_id = peer_id;
1901 
1902 	ppdu_user_desc->completion_status =
1903 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
1904 				*tag_buf);
1905 
1906 	ppdu_user_desc->tid =
1907 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
1908 
1909 
1910 	tag_buf++;
1911 	ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
1912 
1913 	tag_buf++;
1914 
1915 	ppdu_user_desc->mpdu_success =
1916 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
1917 
1918 	tag_buf++;
1919 
1920 	ppdu_user_desc->long_retries =
1921 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
1922 
1923 	ppdu_user_desc->short_retries =
1924 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
1925 	ppdu_user_desc->retry_msdus =
1926 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
1927 
1928 	ppdu_user_desc->is_ampdu =
1929 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
1930 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
1931 
1932 }
1933 
1934 /*
1935  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
1936  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
1937  * pdev: DP PDEV handle
1938  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
1939  * @ppdu_info: per ppdu tlv structure
1940  *
1941  * return:void
1942  */
1943 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
1944 		struct dp_pdev *pdev, uint32_t *tag_buf,
1945 		struct ppdu_info *ppdu_info)
1946 {
1947 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
1948 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
1949 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1950 	struct cdp_tx_completion_ppdu *ppdu_desc;
1951 	uint8_t curr_user_index = 0;
1952 	uint16_t peer_id;
1953 	struct dp_peer *peer;
1954 
1955 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1956 
1957 	tag_buf++;
1958 
1959 	peer_id =
1960 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1961 
1962 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1963 
1964 	if (!peer)
1965 		return;
1966 
1967 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1968 
1969 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1970 	ppdu_user_desc->peer_id = peer_id;
1971 
1972 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
1973 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
1974 			CDP_BA_64_BIT_MAP_SIZE_DWORDS);
1975 }
1976 
1977 /*
1978  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
1979  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
1980  * pdev: DP PDEV handle
1981  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
1982  * @ppdu_info: per ppdu tlv structure
1983  *
1984  * return:void
1985  */
1986 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
1987 		struct dp_pdev *pdev, uint32_t *tag_buf,
1988 		struct ppdu_info *ppdu_info)
1989 {
1990 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
1991 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
1992 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1993 	struct cdp_tx_completion_ppdu *ppdu_desc;
1994 	uint8_t curr_user_index = 0;
1995 	uint16_t peer_id;
1996 	struct dp_peer *peer;
1997 
1998 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1999 
2000 	tag_buf++;
2001 
2002 	peer_id =
2003 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2004 
2005 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2006 
2007 	if (!peer)
2008 		return;
2009 
2010 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2011 
2012 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2013 	ppdu_user_desc->peer_id = peer_id;
2014 
2015 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2016 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2017 			CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2018 }
2019 
2020 /*
2021  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2022  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2023  * pdev: DP PDE handle
2024  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2025  * @ppdu_info: per ppdu tlv structure
2026  *
2027  * return:void
2028  */
2029 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2030 		struct dp_pdev *pdev, uint32_t *tag_buf,
2031 		struct ppdu_info *ppdu_info)
2032 {
2033 	uint16_t peer_id;
2034 	struct dp_peer *peer;
2035 	struct cdp_tx_completion_ppdu *ppdu_desc;
2036 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2037 	uint8_t curr_user_index = 0;
2038 
2039 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2040 
2041 	tag_buf += 2;
2042 	peer_id =
2043 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2044 
2045 
2046 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2047 
2048 	if (!peer)
2049 		return;
2050 
2051 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2052 
2053 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2054 	ppdu_user_desc->peer_id = peer_id;
2055 
2056 	tag_buf++;
2057 	ppdu_user_desc->tid =
2058 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(*tag_buf);
2059 	ppdu_user_desc->num_mpdu =
2060 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2061 
2062 	ppdu_user_desc->num_msdu =
2063 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2064 
2065 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2066 
2067 	tag_buf += 2;
2068 	ppdu_user_desc->success_bytes = *tag_buf;
2069 
2070 }
2071 
2072 /*
2073  * dp_process_ppdu_stats_user_common_array_tlv: Process
2074  * htt_ppdu_stats_user_common_array_tlv
2075  * pdev: DP PDEV handle
2076  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2077  * @ppdu_info: per ppdu tlv structure
2078  *
2079  * return:void
2080  */
2081 static void dp_process_ppdu_stats_user_common_array_tlv(
2082 		struct dp_pdev *pdev, uint32_t *tag_buf,
2083 		struct ppdu_info *ppdu_info)
2084 {
2085 	uint32_t peer_id;
2086 	struct dp_peer *peer;
2087 	struct cdp_tx_completion_ppdu *ppdu_desc;
2088 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2089 	uint8_t curr_user_index = 0;
2090 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2091 
2092 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2093 
2094 	tag_buf++;
2095 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2096 	tag_buf += 3;
2097 	peer_id =
2098 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2099 
2100 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2101 
2102 	if (!peer) {
2103 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2104 			"Invalid peer");
2105 		return;
2106 	}
2107 
2108 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2109 
2110 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2111 
2112 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2113 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2114 
2115 	tag_buf++;
2116 
2117 	ppdu_user_desc->success_msdus =
2118 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2119 	ppdu_user_desc->retry_bytes =
2120 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2121 	tag_buf++;
2122 	ppdu_user_desc->failed_msdus =
2123 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2124 }
2125 
2126 /*
2127  * dp_process_ppdu_stats_flush_tlv: Process
2128  * htt_ppdu_stats_flush_tlv
2129  * @pdev: DP PDEV handle
2130  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2131  *
2132  * return:void
2133  */
2134 static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2135 						uint32_t *tag_buf)
2136 {
2137 	uint32_t peer_id;
2138 	uint32_t drop_reason;
2139 	uint8_t tid;
2140 	uint32_t num_msdu;
2141 	struct dp_peer *peer;
2142 
2143 	tag_buf++;
2144 	drop_reason = *tag_buf;
2145 
2146 	tag_buf++;
2147 	num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2148 
2149 	tag_buf++;
2150 	peer_id =
2151 		HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2152 
2153 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2154 	if (!peer)
2155 		return;
2156 
2157 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2158 
2159 	if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2160 		DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2161 					num_msdu);
2162 	}
2163 }
2164 
2165 /*
2166  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2167  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2168  * @pdev: DP PDEV handle
2169  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2170  * @length: tlv_length
2171  *
2172  * return:void
2173  */
2174 static void
2175 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2176 					      qdf_nbuf_t tag_buf,
2177 					      uint32_t length,
2178 					      uint32_t ppdu_id)
2179 {
2180 	uint32_t *nbuf_ptr;
2181 
2182 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode))
2183 		return;
2184 
2185 	if (qdf_nbuf_pull_head(tag_buf, HTT_MGMT_CTRL_TLV_RESERVERD_LEN + 4)
2186 			       == NULL)
2187 		return;
2188 
2189 	nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(
2190 				tag_buf, sizeof(ppdu_id));
2191 	*nbuf_ptr = ppdu_id;
2192 
2193 	dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2194 		tag_buf, HTT_INVALID_PEER,
2195 		WDI_NO_VAL, pdev->pdev_id);
2196 }
2197 
2198 /**
2199  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
2200  * @pdev: DP pdev handle
2201  * @tag_buf: TLV buffer
2202  * @tlv_len: length of tlv
2203  * @ppdu_info: per ppdu tlv structure
2204  *
2205  * return: void
2206  */
2207 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
2208 		uint32_t tlv_len, struct ppdu_info *ppdu_info)
2209 {
2210 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2211 
2212 	switch (tlv_type) {
2213 	case HTT_PPDU_STATS_COMMON_TLV:
2214 		qdf_assert_always(tlv_len ==
2215 				sizeof(htt_ppdu_stats_common_tlv));
2216 		dp_process_ppdu_stats_common_tlv(pdev, tag_buf, ppdu_info);
2217 		break;
2218 	case HTT_PPDU_STATS_USR_COMMON_TLV:
2219 		qdf_assert_always(tlv_len ==
2220 				sizeof(htt_ppdu_stats_user_common_tlv));
2221 		dp_process_ppdu_stats_user_common_tlv(
2222 				pdev, tag_buf, ppdu_info);
2223 		break;
2224 	case HTT_PPDU_STATS_USR_RATE_TLV:
2225 		qdf_assert_always(tlv_len ==
2226 				sizeof(htt_ppdu_stats_user_rate_tlv));
2227 		dp_process_ppdu_stats_user_rate_tlv(pdev, tag_buf, ppdu_info);
2228 		break;
2229 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
2230 		qdf_assert_always(tlv_len ==
2231 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv));
2232 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2233 				pdev, tag_buf, ppdu_info);
2234 		break;
2235 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
2236 		qdf_assert_always(tlv_len ==
2237 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv));
2238 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2239 				pdev, tag_buf, ppdu_info);
2240 		break;
2241 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
2242 		qdf_assert_always(tlv_len ==
2243 				sizeof(htt_ppdu_stats_user_cmpltn_common_tlv));
2244 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
2245 				pdev, tag_buf, ppdu_info);
2246 		break;
2247 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
2248 		qdf_assert_always(tlv_len ==
2249 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv));
2250 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2251 				pdev, tag_buf, ppdu_info);
2252 		break;
2253 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
2254 		qdf_assert_always(tlv_len ==
2255 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv));
2256 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2257 				pdev, tag_buf, ppdu_info);
2258 		break;
2259 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
2260 		qdf_assert_always(tlv_len ==
2261 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv));
2262 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2263 				pdev, tag_buf, ppdu_info);
2264 		break;
2265 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
2266 		qdf_assert_always(tlv_len ==
2267 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v));
2268 		dp_process_ppdu_stats_user_common_array_tlv(
2269 				pdev, tag_buf, ppdu_info);
2270 		break;
2271 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
2272 		qdf_assert_always(tlv_len ==
2273 			sizeof(htt_ppdu_stats_flush_tlv));
2274 		dp_process_ppdu_stats_user_compltn_flush_tlv(
2275 				pdev, tag_buf);
2276 		break;
2277 	default:
2278 		break;
2279 	}
2280 }
2281 
2282 /**
2283  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
2284  * to upper layer
2285  * @pdev: DP pdev handle
2286  * @ppdu_info: per PPDU TLV descriptor
2287  *
2288  * return: void
2289  */
2290 static
2291 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
2292 			      struct ppdu_info *ppdu_info)
2293 {
2294 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2295 	struct dp_peer *peer = NULL;
2296 	qdf_nbuf_t nbuf;
2297 	uint16_t i;
2298 
2299 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2300 		qdf_nbuf_data(ppdu_info->nbuf);
2301 
2302 	ppdu_desc->num_users = ppdu_info->last_user;
2303 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2304 
2305 	for (i = 0; i < ppdu_desc->num_users; i++) {
2306 
2307 
2308 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
2309 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
2310 
2311 		if (ppdu_desc->user[i].tid < CDP_DATA_TID_MAX) {
2312 			peer = dp_peer_find_by_id(pdev->soc,
2313 					ppdu_desc->user[i].peer_id);
2314 			/**
2315 			 * This check is to make sure peer is not deleted
2316 			 * after processing the TLVs.
2317 			 */
2318 			if (!peer)
2319 				continue;
2320 
2321 			dp_tx_stats_update(pdev->soc, peer,
2322 					&ppdu_desc->user[i],
2323 					ppdu_desc->ack_rssi);
2324 		}
2325 	}
2326 
2327 	/*
2328 	 * Remove from the list
2329 	 */
2330 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
2331 	nbuf = ppdu_info->nbuf;
2332 	pdev->list_depth--;
2333 	qdf_mem_free(ppdu_info);
2334 
2335 	qdf_assert_always(nbuf);
2336 
2337 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2338 		qdf_nbuf_data(nbuf);
2339 
2340 	/**
2341 	 * Deliver PPDU stats only for valid (acked) data frames if
2342 	 * sniffer mode is not enabled.
2343 	 * If sniffer mode is enabled, PPDU stats for all frames
2344 	 * including mgmt/control frames should be delivered to upper layer
2345 	 */
2346 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2347 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
2348 				nbuf, HTT_INVALID_PEER,
2349 				WDI_NO_VAL, pdev->pdev_id);
2350 	} else {
2351 		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
2352 				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
2353 
2354 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
2355 					pdev->soc, nbuf, HTT_INVALID_PEER,
2356 					WDI_NO_VAL, pdev->pdev_id);
2357 		} else
2358 			qdf_nbuf_free(nbuf);
2359 	}
2360 	return;
2361 }
2362 
2363 /**
2364  * dp_get_ppdu_desc(): Function to allocate new PPDU status
2365  * desc for new ppdu id
2366  * @pdev: DP pdev handle
2367  * @ppdu_id: PPDU unique identifier
2368  * @tlv_type: TLV type received
2369  *
2370  * return: ppdu_info per ppdu tlv structure
2371  */
2372 static
2373 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
2374 			uint8_t tlv_type)
2375 {
2376 	struct ppdu_info *ppdu_info = NULL;
2377 
2378 	/*
2379 	 * Find ppdu_id node exists or not
2380 	 */
2381 	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {
2382 
2383 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
2384 			break;
2385 		}
2386 	}
2387 
2388 	if (ppdu_info) {
2389 		/**
2390 		 * if we get tlv_type that is already been processed for ppdu,
2391 		 * that means we got a new ppdu with same ppdu id.
2392 		 * Hence Flush the older ppdu
2393 		 */
2394 		if (ppdu_info->tlv_bitmap & (1 << tlv_type))
2395 			dp_ppdu_desc_deliver(pdev, ppdu_info);
2396 		else
2397 			return ppdu_info;
2398 	}
2399 
2400 	/**
2401 	 * Flush the head ppdu descriptor if ppdu desc list reaches max
2402 	 * threshold
2403 	 */
2404 	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
2405 		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
2406 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2407 	}
2408 
2409 	/*
2410 	 * Allocate new ppdu_info node
2411 	 */
2412 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
2413 	if (!ppdu_info)
2414 		return NULL;
2415 
2416 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
2417 			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
2418 			TRUE);
2419 	if (!ppdu_info->nbuf) {
2420 		qdf_mem_free(ppdu_info);
2421 		return NULL;
2422 	}
2423 
2424 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
2425 			sizeof(struct cdp_tx_completion_ppdu));
2426 
2427 	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
2428 			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
2429 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2430 				"No tailroom for HTT PPDU");
2431 		qdf_nbuf_free(ppdu_info->nbuf);
2432 		ppdu_info->nbuf = NULL;
2433 		ppdu_info->last_user = 0;
2434 		qdf_mem_free(ppdu_info);
2435 		return NULL;
2436 	}
2437 
2438 	/**
2439 	 * No lock is needed because all PPDU TLVs are processed in
2440 	 * same context and this list is updated in same context
2441 	 */
2442 	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
2443 			ppdu_info_list_elem);
2444 	pdev->list_depth++;
2445 	return ppdu_info;
2446 }
2447 
2448 /**
2449  * dp_htt_process_tlv(): Function to process each PPDU TLVs
2450  * @pdev: DP pdev handle
2451  * @htt_t2h_msg: HTT target to host message
2452  *
2453  * return: ppdu_info per ppdu tlv structure
2454  */
2455 
2456 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
2457 		qdf_nbuf_t htt_t2h_msg, bool *free_buf)
2458 {
2459 	uint32_t length;
2460 	uint32_t ppdu_id;
2461 	uint8_t tlv_type;
2462 	uint32_t tlv_length, tlv_bitmap_expected;
2463 	uint8_t *tlv_buf;
2464 	struct ppdu_info *ppdu_info = NULL;
2465 
2466 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2467 
2468 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
2469 
2470 	msg_word = msg_word + 1;
2471 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
2472 
2473 
2474 	msg_word = msg_word + 3;
2475 	while (length > 0) {
2476 		tlv_buf = (uint8_t *)msg_word;
2477 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
2478 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
2479 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
2480 			pdev->stats.ppdu_stats_counter[tlv_type]++;
2481 
2482 		if (tlv_length == 0)
2483 			break;
2484 
2485 		tlv_length += HTT_TLV_HDR_LEN;
2486 
2487 		/**
2488 		 * Not allocating separate ppdu descriptor for MGMT Payload
2489 		 * TLV as this is sent as separate WDI indication and it
2490 		 * doesn't contain any ppdu information
2491 		 */
2492 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
2493 			dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(pdev,
2494 					htt_t2h_msg, tlv_length, ppdu_id);
2495 			msg_word =
2496 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2497 			length -= (tlv_length);
2498 			*free_buf = false;
2499 			return NULL;
2500 		}
2501 
2502 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
2503 		if (!ppdu_info)
2504 			return NULL;
2505 		ppdu_info->ppdu_id = ppdu_id;
2506 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
2507 
2508 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
2509 
2510 		/**
2511 		 * Increment pdev level tlv count to monitor
2512 		 * missing TLVs
2513 		 */
2514 		pdev->tlv_count++;
2515 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
2516 
2517 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2518 		length -= (tlv_length);
2519 	}
2520 
2521 	if (!ppdu_info)
2522 		return NULL;
2523 
2524 	pdev->last_ppdu_id = ppdu_id;
2525 
2526 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
2527 
2528 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2529 		if (ppdu_info->is_ampdu)
2530 			tlv_bitmap_expected = HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP;
2531 	}
2532 
2533 	/**
2534 	 * Once all the TLVs for a given PPDU has been processed,
2535 	 * return PPDU status to be delivered to higher layer
2536 	 */
2537 	if (ppdu_info->tlv_bitmap == tlv_bitmap_expected)
2538 		return ppdu_info;
2539 
2540 	return NULL;
2541 }
2542 #endif /* FEATURE_PERPKT_INFO */
2543 
2544 /**
2545  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
2546  * @soc: DP SOC handle
2547  * @pdev_id: pdev id
2548  * @htt_t2h_msg: HTT message nbuf
2549  *
2550  * return:void
2551  */
2552 #if defined(WDI_EVENT_ENABLE)
2553 #ifdef FEATURE_PERPKT_INFO
2554 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
2555 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
2556 {
2557 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
2558 	struct ppdu_info *ppdu_info = NULL;
2559 	bool free_buf = true;
2560 
2561 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
2562 			!pdev->mcopy_mode)
2563 		return free_buf;
2564 
2565 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg, &free_buf);
2566 	if (ppdu_info)
2567 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2568 
2569 	return free_buf;
2570 }
2571 #else
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	/* FEATURE_PERPKT_INFO disabled: nothing to process, caller frees */
	return true;
}
2577 #endif
2578 #endif
2579 
2580 /**
2581  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2582  * @soc: DP SOC handle
2583  * @htt_t2h_msg: HTT message nbuf
2584  *
2585  * return:void
2586  */
2587 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2588 		qdf_nbuf_t htt_t2h_msg)
2589 {
2590 	uint8_t done;
2591 	qdf_nbuf_t msg_copy;
2592 	uint32_t *msg_word;
2593 
2594 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2595 	msg_word = msg_word + 3;
2596 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2597 
2598 	/*
2599 	 * HTT EXT stats response comes as stream of TLVs which span over
2600 	 * multiple T2H messages.
2601 	 * The first message will carry length of the response.
2602 	 * For rest of the messages length will be zero.
2603 	 *
2604 	 * Clone the T2H message buffer and store it in a list to process
2605 	 * it later.
2606 	 *
2607 	 * The original T2H message buffers gets freed in the T2H HTT event
2608 	 * handler
2609 	 */
2610 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2611 
2612 	if (!msg_copy) {
2613 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2614 				"T2H messge clone failed for HTT EXT STATS");
2615 		goto error;
2616 	}
2617 
2618 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2619 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2620 	/*
2621 	 * Done bit signifies that this is the last T2H buffer in the stream of
2622 	 * HTT EXT STATS message
2623 	 */
2624 	if (done) {
2625 		soc->htt_stats.num_stats++;
2626 		qdf_sched_work(0, &soc->htt_stats.work);
2627 	}
2628 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2629 
2630 	return;
2631 
2632 error:
2633 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2634 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2635 			!= NULL) {
2636 		qdf_nbuf_free(msg_copy);
2637 	}
2638 	soc->htt_stats.num_stats = 0;
2639 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2640 	return;
2641 
2642 }
2643 
2644 /*
2645  * htt_soc_attach_target() - SOC level HTT setup
2646  * @htt_soc:	HTT SOC handle
2647  *
2648  * Return: 0 on success; error code on failure
2649  */
2650 int htt_soc_attach_target(void *htt_soc)
2651 {
2652 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
2653 
2654 	return htt_h2t_ver_req_msg(soc);
2655 }
2656 
2657 
2658 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
2659 /*
2660  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
2661  * @htt_soc:	 HTT SOC handle
2662  * @msg_word:    Pointer to payload
2663  * @htt_t2h_msg: HTT msg nbuf
2664  *
2665  * Return: True if buffer should be freed by caller.
2666  */
2667 static bool
2668 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
2669 				uint32_t *msg_word,
2670 				qdf_nbuf_t htt_t2h_msg)
2671 {
2672 	u_int8_t pdev_id;
2673 	bool free_buf;
2674 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
2675 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2676 		"received HTT_T2H_MSG_TYPE_PPDU_STATS_IND\n");
2677 	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
2678 	pdev_id = DP_HW2SW_MACID(pdev_id);
2679 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
2680 					      htt_t2h_msg);
2681 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
2682 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
2683 		pdev_id);
2684 	return free_buf;
2685 }
2686 #else
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
				uint32_t *msg_word,
				qdf_nbuf_t htt_t2h_msg)
{
	/* PPDU stats path compiled out: caller should free the buffer */
	return true;
}
2694 #endif
2695 
2696 #if defined(WDI_EVENT_ENABLE) && \
2697 		!defined(REMOVE_PKT_LOG) && defined(CONFIG_WIN)
2698 /*
2699  * dp_pktlog_msg_handler() - Pktlog msg handler
2700  * @htt_soc:	 HTT SOC handle
2701  * @msg_word:    Pointer to payload
2702  *
2703  * Return: None
2704  */
2705 static void
2706 dp_pktlog_msg_handler(struct htt_soc *soc,
2707 				uint32_t *msg_word)
2708 {
2709 	uint8_t pdev_id;
2710 	uint32_t *pl_hdr;
2711 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2712 		"received HTT_T2H_MSG_TYPE_PKTLOG\n");
2713 	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2714 	pdev_id = DP_HW2SW_MACID(pdev_id);
2715 	pl_hdr = (msg_word + 1);
2716 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2717 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2718 		pdev_id);
2719 }
2720 #else
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
				uint32_t *msg_word)
{
	/* Pktlog support compiled out: message is ignored */
}
2726 #endif
2727 
2728 /*
2729  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
2730  * @context:	Opaque context (HTT SOC handle)
2731  * @pkt:	HTC packet
2732  */
2733 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
2734 {
2735 	struct htt_soc *soc = (struct htt_soc *) context;
2736 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
2737 	u_int32_t *msg_word;
2738 	enum htt_t2h_msg_type msg_type;
2739 	bool free_buf = true;
2740 
2741 	/* check for successful message reception */
2742 	if (pkt->Status != QDF_STATUS_SUCCESS) {
2743 		if (pkt->Status != QDF_STATUS_E_CANCELED)
2744 			soc->stats.htc_err_cnt++;
2745 
2746 		qdf_nbuf_free(htt_t2h_msg);
2747 		return;
2748 	}
2749 
2750 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
2751 
2752 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
2753 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2754 	switch (msg_type) {
2755 	case HTT_T2H_MSG_TYPE_PEER_MAP:
2756 		{
2757 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
2758 			u_int8_t *peer_mac_addr;
2759 			u_int16_t peer_id;
2760 			u_int16_t hw_peer_id;
2761 			u_int8_t vdev_id;
2762 
2763 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
2764 			hw_peer_id =
2765 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
2766 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
2767 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
2768 				(u_int8_t *) (msg_word+1),
2769 				&mac_addr_deswizzle_buf[0]);
2770 			QDF_TRACE(QDF_MODULE_ID_TXRX,
2771 				QDF_TRACE_LEVEL_INFO,
2772 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
2773 				peer_id, vdev_id);
2774 
2775 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
2776 						vdev_id, peer_mac_addr);
2777 			break;
2778 		}
2779 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
2780 		{
2781 			u_int16_t peer_id;
2782 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
2783 
2784 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id);
2785 			break;
2786 		}
2787 	case HTT_T2H_MSG_TYPE_SEC_IND:
2788 		{
2789 			u_int16_t peer_id;
2790 			enum htt_sec_type sec_type;
2791 			int is_unicast;
2792 
2793 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
2794 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
2795 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
2796 			/* point to the first part of the Michael key */
2797 			msg_word++;
2798 			dp_rx_sec_ind_handler(
2799 				soc->dp_soc, peer_id, sec_type, is_unicast,
2800 				msg_word, msg_word + 2);
2801 			break;
2802 		}
2803 
2804 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
2805 		{
2806 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
2807 							     htt_t2h_msg);
2808 			break;
2809 		}
2810 
2811 	case HTT_T2H_MSG_TYPE_PKTLOG:
2812 		{
2813 			dp_pktlog_msg_handler(soc, msg_word);
2814 			break;
2815 		}
2816 
2817 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
2818 		{
2819 			htc_pm_runtime_put(soc->htc_soc);
2820 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
2821 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
2822 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2823 				"target uses HTT version %d.%d; host uses %d.%d\n",
2824 				soc->tgt_ver.major, soc->tgt_ver.minor,
2825 				HTT_CURRENT_VERSION_MAJOR,
2826 				HTT_CURRENT_VERSION_MINOR);
2827 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
2828 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2829 					QDF_TRACE_LEVEL_ERROR,
2830 					"*** Incompatible host/target HTT versions!\n");
2831 			}
2832 			/* abort if the target is incompatible with the host */
2833 			qdf_assert(soc->tgt_ver.major ==
2834 				HTT_CURRENT_VERSION_MAJOR);
2835 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
2836 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2837 					QDF_TRACE_LEVEL_WARN,
2838 					"*** Warning: host/target HTT versions"
2839 					" are different, though compatible!\n");
2840 			}
2841 			break;
2842 		}
2843 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
2844 		{
2845 			uint16_t peer_id;
2846 			uint8_t tid;
2847 			uint8_t win_sz;
2848 			uint16_t status;
2849 			struct dp_peer *peer;
2850 
2851 			/*
2852 			 * Update REO Queue Desc with new values
2853 			 */
2854 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
2855 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
2856 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
2857 			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);
2858 
2859 			/*
2860 			 * Window size needs to be incremented by 1
2861 			 * since fw needs to represent a value of 256
2862 			 * using just 8 bits
2863 			 */
2864 			if (peer) {
2865 				status = dp_addba_requestprocess_wifi3(peer,
2866 						0, tid, 0, win_sz + 1, 0xffff);
2867 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2868 					QDF_TRACE_LEVEL_INFO,
2869 					FL("PeerID %d BAW %d TID %d stat %d\n"),
2870 					peer_id, win_sz, tid, status);
2871 
2872 			} else {
2873 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2874 					QDF_TRACE_LEVEL_ERROR,
2875 					FL("Peer not found peer id %d\n"),
2876 					peer_id);
2877 			}
2878 			break;
2879 		}
2880 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
2881 		{
2882 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
2883 			break;
2884 		}
2885 	default:
2886 		break;
2887 	};
2888 
2889 	/* Free the indication buffer */
2890 	if (free_buf)
2891 		qdf_nbuf_free(htt_t2h_msg);
2892 }
2893 
/*
 * dp_htt_h2t_full() - EpSendFull callback registered with HTC
 * @context:	Opaque context (HTT SOC handle); unused
 * @pkt:	HTC packet that pushed the queue past the depth limit; unused
 *
 * Invoked by HTC when the send queue exceeds MaxSendQueueDepth (see
 * htt_htc_soc_attach). HTT never drops H2T messages, so the packet is
 * always kept on the queue.
 *
 * Return: HTC_SEND_FULL_KEEP, unconditionally
 */
static enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}
2906 
2907 /*
2908  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
2909  * @context:	Opaque context (HTT SOC handle)
2910  * @nbuf:	nbuf containing T2H message
2911  * @pipe_id:	HIF pipe ID
2912  *
2913  * Return: QDF_STATUS
2914  *
2915  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
2916  * will be used for packet log and other high-priority HTT messsages. Proper
2917  * HTC connection to be added later once required FW changes are available
2918  */
2919 static QDF_STATUS
2920 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
2921 {
2922 	A_STATUS rc = QDF_STATUS_SUCCESS;
2923 	HTC_PACKET htc_pkt;
2924 
2925 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
2926 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
2927 	htc_pkt.Status = QDF_STATUS_SUCCESS;
2928 	htc_pkt.pPktContext = (void *)nbuf;
2929 	dp_htt_t2h_msg_handler(context, &htc_pkt);
2930 
2931 	return rc;
2932 }
2933 
2934 /*
2935  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
2936  * @htt_soc:	HTT SOC handle
2937  *
2938  * Return: 0 on success; error code on failure
2939  */
2940 static int
2941 htt_htc_soc_attach(struct htt_soc *soc)
2942 {
2943 	struct htc_service_connect_req connect;
2944 	struct htc_service_connect_resp response;
2945 	A_STATUS status;
2946 	struct dp_soc *dpsoc = soc->dp_soc;
2947 
2948 	qdf_mem_set(&connect, sizeof(connect), 0);
2949 	qdf_mem_set(&response, sizeof(response), 0);
2950 
2951 	connect.pMetaData = NULL;
2952 	connect.MetaDataLength = 0;
2953 	connect.EpCallbacks.pContext = soc;
2954 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
2955 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
2956 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
2957 
2958 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
2959 	connect.EpCallbacks.EpRecvRefill = NULL;
2960 
2961 	/* N/A, fill is done by HIF */
2962 	connect.EpCallbacks.RecvRefillWaterMark = 1;
2963 
2964 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
2965 	/*
2966 	 * Specify how deep to let a queue get before htc_send_pkt will
2967 	 * call the EpSendFull function due to excessive send queue depth.
2968 	 */
2969 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
2970 
2971 	/* disable flow control for HTT data message service */
2972 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
2973 
2974 	/* connect to control service */
2975 	connect.service_id = HTT_DATA_MSG_SVC;
2976 
2977 	status = htc_connect_service(soc->htc_soc, &connect, &response);
2978 
2979 	if (status != A_OK)
2980 		return QDF_STATUS_E_FAILURE;
2981 
2982 	soc->htc_endpoint = response.Endpoint;
2983 
2984 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
2985 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
2986 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
2987 
2988 	return 0; /* success */
2989 }
2990 
2991 /*
2992  * htt_soc_attach() - SOC level HTT initialization
2993  * @dp_soc:	Opaque Data path SOC handle
2994  * @ctrl_psoc:	Opaque ctrl SOC handle
2995  * @htc_soc:	SOC level HTC handle
2996  * @hal_soc:	Opaque HAL SOC handle
2997  * @osdev:	QDF device
2998  *
2999  * Return: HTT handle on success; NULL on failure
3000  */
3001 void *
3002 htt_soc_attach(void *dp_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
3003 	void *hal_soc, qdf_device_t osdev)
3004 {
3005 	struct htt_soc *soc;
3006 	int i;
3007 
3008 	soc = qdf_mem_malloc(sizeof(*soc));
3009 
3010 	if (!soc)
3011 		goto fail1;
3012 
3013 	soc->osdev = osdev;
3014 	soc->ctrl_psoc = ctrl_psoc;
3015 	soc->dp_soc = dp_soc;
3016 	soc->htc_soc = htc_soc;
3017 	soc->hal_soc = hal_soc;
3018 
3019 	/* TODO: See if any NSS related context is requred in htt_soc */
3020 
3021 	soc->htt_htc_pkt_freelist = NULL;
3022 
3023 	if (htt_htc_soc_attach(soc))
3024 		goto fail2;
3025 
3026 	/* TODO: See if any Rx data specific intialization is required. For
3027 	 * MCL use cases, the data will be received as single packet and
3028 	 * should not required any descriptor or reorder handling
3029 	 */
3030 
3031 	HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);
3032 
3033 	/* pre-allocate some HTC_PACKET objects */
3034 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3035 		struct dp_htt_htc_pkt_union *pkt;
3036 		pkt = qdf_mem_malloc(sizeof(*pkt));
3037 		if (!pkt)
3038 			break;
3039 
3040 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3041 	}
3042 
3043 	return soc;
3044 
3045 fail2:
3046 	qdf_mem_free(soc);
3047 
3048 fail1:
3049 	return NULL;
3050 }
3051 
3052 
3053 /*
3054  * htt_soc_detach() - Detach SOC level HTT
3055  * @htt_soc:	HTT SOC handle
3056  */
3057 void
3058 htt_soc_detach(void *htt_soc)
3059 {
3060 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3061 
3062 	htt_htc_misc_pkt_pool_free(soc);
3063 	htt_htc_pkt_pool_free(soc);
3064 	HTT_TX_MUTEX_DESTROY(&soc->htt_tx_mutex);
3065 	qdf_mem_free(soc);
3066 }
3067 
3068 /**
3069  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
3070  * @pdev: DP PDEV handle
3071  * @stats_type_upload_mask: stats type requested by user
3072  * @config_param_0: extra configuration parameters
3073  * @config_param_1: extra configuration parameters
3074  * @config_param_2: extra configuration parameters
3075  * @config_param_3: extra configuration parameters
3076  * @mac_id: mac number
3077  *
3078  * return: QDF STATUS
3079  */
3080 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3081 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3082 		uint32_t config_param_1, uint32_t config_param_2,
3083 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3084 		uint8_t mac_id)
3085 {
3086 	struct htt_soc *soc = pdev->soc->htt_handle;
3087 	struct dp_htt_htc_pkt *pkt;
3088 	qdf_nbuf_t msg;
3089 	uint32_t *msg_word;
3090 	uint8_t pdev_mask = 0;
3091 
3092 	msg = qdf_nbuf_alloc(
3093 			soc->osdev,
3094 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3095 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3096 
3097 	if (!msg)
3098 		return QDF_STATUS_E_NOMEM;
3099 
3100 	/*TODO:Add support for SOC stats
3101 	 * Bit 0: SOC Stats
3102 	 * Bit 1: Pdev stats for pdev id 0
3103 	 * Bit 2: Pdev stats for pdev id 1
3104 	 * Bit 3: Pdev stats for pdev id 2
3105 	 */
3106 	mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3107 
3108 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
3109 	/*
3110 	 * Set the length of the message.
3111 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3112 	 * separately during the below call to qdf_nbuf_push_head.
3113 	 * The contribution from the HTC header is added separately inside HTC.
3114 	 */
3115 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3116 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3117 				"Failed to expand head for HTT_EXT_STATS");
3118 		qdf_nbuf_free(msg);
3119 		return QDF_STATUS_E_FAILURE;
3120 	}
3121 
3122 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3123 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
3124 		"config_param_1 %u\n config_param_2 %u\n"
3125 		"config_param_4 %u\n -------------\n",
3126 		__func__, __LINE__, cookie_val, config_param_0,
3127 		config_param_1, config_param_2,	config_param_3);
3128 
3129 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3130 
3131 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3132 	*msg_word = 0;
3133 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
3134 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
3135 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
3136 
3137 	/* word 1 */
3138 	msg_word++;
3139 	*msg_word = 0;
3140 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
3141 
3142 	/* word 2 */
3143 	msg_word++;
3144 	*msg_word = 0;
3145 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
3146 
3147 	/* word 3 */
3148 	msg_word++;
3149 	*msg_word = 0;
3150 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
3151 
3152 	/* word 4 */
3153 	msg_word++;
3154 	*msg_word = 0;
3155 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
3156 
3157 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
3158 
3159 	/* word 5 */
3160 	msg_word++;
3161 
3162 	/* word 6 */
3163 	msg_word++;
3164 	*msg_word = 0;
3165 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
3166 
3167 	/* word 7 */
3168 	msg_word++;
3169 	*msg_word = 0;
3170 	/*Using last 2 bits for pdev_id */
3171 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
3172 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
3173 
3174 	pkt = htt_htc_pkt_alloc(soc);
3175 	if (!pkt) {
3176 		qdf_nbuf_free(msg);
3177 		return QDF_STATUS_E_NOMEM;
3178 	}
3179 
3180 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3181 
3182 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3183 			dp_htt_h2t_send_complete_free_netbuf,
3184 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3185 			soc->htc_endpoint,
3186 			1); /* tag - not relevant here */
3187 
3188 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3189 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3190 	return 0;
3191 }
3192 
/* This workaround will be reverted once a proper definition for
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is available in the HTT header (htt.h)
 */
3196 #if defined(WDI_EVENT_ENABLE)
3197 /**
3198  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
3199  * @pdev: DP PDEV handle
3200  * @stats_type_upload_mask: stats type requested by user
3201  * @mac_id: Mac id number
3202  *
3203  * return: QDF STATUS
3204  */
3205 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
3206 		uint32_t stats_type_upload_mask, uint8_t mac_id)
3207 {
3208 	struct htt_soc *soc = pdev->soc->htt_handle;
3209 	struct dp_htt_htc_pkt *pkt;
3210 	qdf_nbuf_t msg;
3211 	uint32_t *msg_word;
3212 	uint8_t pdev_mask;
3213 
3214 	msg = qdf_nbuf_alloc(
3215 			soc->osdev,
3216 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
3217 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
3218 
3219 	if (!msg) {
3220 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3221 		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer\n");
3222 		qdf_assert(0);
3223 		return QDF_STATUS_E_NOMEM;
3224 	}
3225 
3226 	/*TODO:Add support for SOC stats
3227 	 * Bit 0: SOC Stats
3228 	 * Bit 1: Pdev stats for pdev id 0
3229 	 * Bit 2: Pdev stats for pdev id 1
3230 	 * Bit 3: Pdev stats for pdev id 2
3231 	 */
3232 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
3233 
3234 	/*
3235 	 * Set the length of the message.
3236 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3237 	 * separately during the below call to qdf_nbuf_push_head.
3238 	 * The contribution from the HTC header is added separately inside HTC.
3239 	 */
3240 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
3241 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3242 				"Failed to expand head for HTT_CFG_STATS\n");
3243 		qdf_nbuf_free(msg);
3244 		return QDF_STATUS_E_FAILURE;
3245 	}
3246 
3247 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3248 
3249 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3250 	*msg_word = 0;
3251 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
3252 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
3253 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
3254 			stats_type_upload_mask);
3255 
3256 	pkt = htt_htc_pkt_alloc(soc);
3257 	if (!pkt) {
3258 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3259 				"Fail to allocate dp_htt_htc_pkt buffer\n");
3260 		qdf_assert(0);
3261 		qdf_nbuf_free(msg);
3262 		return QDF_STATUS_E_NOMEM;
3263 	}
3264 
3265 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3266 
3267 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3268 			dp_htt_h2t_send_complete_free_netbuf,
3269 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3270 			soc->htc_endpoint,
3271 			1); /* tag - not relevant here */
3272 
3273 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3274 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3275 	return 0;
3276 }
3277 #endif
3278