xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 0987f2dcf7065bc3ba5fdd7e1f832009a3717fc4)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_htt.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx_mon.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
30 #include "cdp_txrx_cmn_struct.h"
31 
32 #ifdef FEATURE_PERPKT_INFO
33 #include "dp_ratetable.h"
34 #endif
35 
36 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
37 
38 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
39 #define HTT_T2H_MAX_MSG_SIZE 2048
40 
41 #define HTT_MSG_BUF_SIZE(msg_bytes) \
42 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
43 
44 #define HTT_PID_BIT_MASK 0x3
45 
46 #define DP_EXT_MSG_LENGTH 2048
47 #define DP_HTT_SEND_HTC_PKT(soc, pkt)                            \
48 do {                                                             \
49 	if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==         \
50 					QDF_STATUS_SUCCESS)      \
51 		htt_htc_misc_pkt_list_add(soc, pkt);             \
52 } while (0)
53 
54 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
55 
/**
 * Bitmap of HTT PPDU TLV types for Default mode
 *
 * Fully parenthesized so the macro can be used safely as an operand of a
 * larger expression (e.g. `bitmap & HTT_PPDU_DEFAULT_TLV_BITMAP`) without
 * operator-precedence surprises.
 */
#define HTT_PPDU_DEFAULT_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/**
 * Bitmap of HTT PPDU TLV types for Sniffer mode
 *
 * Superset of the default bitmap: additionally requests the BA/MPDU
 * enqueue bitmap TLVs needed in sniffer/monitor mode. Parenthesized as a
 * whole for the same reason as above.
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV))
80 
81 #define HTT_FRAMECTRL_DATATYPE 0x08
82 #define HTT_PPDU_DESC_MAX_DEPTH 16
83 #define DP_SCAN_PEER_ID 0xFFFF
84 
/*
 * dp_tx_stats_update() - Update per-peer statistics
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 * @ack_rssi: RSSI of last ack received
 *
 * Note: this documents dp_tx_stats_update() below; its helper
 * dp_tx_rate_stats_update() is defined first.
 *
 * Return: None
 */
94 #ifdef FEATURE_PERPKT_INFO
95 static inline void
96 dp_tx_rate_stats_update(struct dp_peer *peer,
97 			struct cdp_tx_completion_ppdu_user *ppdu)
98 {
99 	uint32_t ratekbps = 0;
100 	uint32_t ppdu_tx_rate = 0;
101 
102 	if (!peer || !ppdu)
103 		return;
104 
105 	dp_peer_stats_notify(peer);
106 
107 	ratekbps = dp_getrateindex(ppdu->mcs,
108 				   ppdu->nss,
109 				   ppdu->preamble,
110 				   ppdu->bw);
111 
112 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
113 
114 	if (!ratekbps)
115 		return;
116 
117 	dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
118 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
119 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
120 
121 	if (peer->vdev) {
122 		peer->vdev->stats.tx.last_tx_rate = ratekbps;
123 		peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
124 	}
125 }
126 
static void dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer,
		struct cdp_tx_completion_ppdu_user *ppdu, uint32_t ack_rssi)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	uint8_t preamble, mcs;
	uint16_t num_msdu;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;

	/* If the peer statistics are already processed as part of
	 * per-MSDU completion handler, do not process these again in per-PPDU
	 * indications */
	if (soc->process_tx_status)
		return;

	/* completion counters: every MSDU of this PPDU, byte count covers
	 * successful, retried and failed bytes */
	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			num_msdu, (ppdu->success_bytes +
				ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	/* per-attribute histograms: guard interval, bandwidth, spatial
	 * streams, WME access category derived from the TID */
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	DP_STATS_INC(peer, tx.retries,
			(ppdu->long_retries + ppdu->short_retries));
	/* per-preamble MCS histogram: for each preamble type exactly one of
	 * the following pairs fires — an in-range MCS bumps its own bucket,
	 * while an out-of-range MCS is folded into the last (MAX_MCS - 1)
	 * bucket so the array index is never exceeded */
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));

	dp_tx_rate_stats_update(peer, ppdu);

	/* PER (%) = failed unicast frames / total unicast frames */
	if (peer->stats.tx.ucast.num)
		peer->stats.tx.last_per = ((peer->stats.tx.ucast.num -
					peer->stats.tx.tx_success.num) * 100) /
					peer->stats.tx.ucast.num;

	/* push the refreshed peer stats to the control plane, if a handler
	 * is registered.
	 * NOTE(review): ol_ops itself is dereferenced unchecked here —
	 * presumably guaranteed non-NULL by soc attach; verify at caller. */
	if (soc->cdp_soc.ol_ops->update_dp_stats) {
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
				&peer->stats, ppdu->peer_id,
				UPDATE_PEER_STATS);

	}
}
205 #endif
206 
207 /*
208  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
209  * @htt_soc:	HTT SOC handle
210  *
211  * Return: Pointer to htc packet buffer
212  */
213 static struct dp_htt_htc_pkt *
214 htt_htc_pkt_alloc(struct htt_soc *soc)
215 {
216 	struct dp_htt_htc_pkt_union *pkt = NULL;
217 
218 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
219 	if (soc->htt_htc_pkt_freelist) {
220 		pkt = soc->htt_htc_pkt_freelist;
221 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
222 	}
223 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
224 
225 	if (pkt == NULL)
226 		pkt = qdf_mem_malloc(sizeof(*pkt));
227 	return &pkt->u.pkt; /* not actually a dereference */
228 }
229 
230 /*
231  * htt_htc_pkt_free() - Free HTC packet buffer
232  * @htt_soc:	HTT SOC handle
233  */
234 static void
235 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
236 {
237 	struct dp_htt_htc_pkt_union *u_pkt =
238 		(struct dp_htt_htc_pkt_union *)pkt;
239 
240 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
241 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
242 	soc->htt_htc_pkt_freelist = u_pkt;
243 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
244 }
245 
246 /*
247  * htt_htc_pkt_pool_free() - Free HTC packet pool
248  * @htt_soc:	HTT SOC handle
249  */
250 static void
251 htt_htc_pkt_pool_free(struct htt_soc *soc)
252 {
253 	struct dp_htt_htc_pkt_union *pkt, *next;
254 	pkt = soc->htt_htc_pkt_freelist;
255 	while (pkt) {
256 		next = pkt->u.next;
257 		qdf_mem_free(pkt);
258 		pkt = next;
259 	}
260 	soc->htt_htc_pkt_freelist = NULL;
261 }
262 
/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no. of pkts to retain in the list
 *
 * Walks the misclist; once more than @level entries have been seen, every
 * remaining entry is released (netbuf unmapped and freed, then the wrapper
 * node freed) and the list is terminated at the last retained node.
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			/* prev is non-NULL only on the first overflow
			 * iteration, so the retained portion of the list is
			 * NUL-terminated exactly once, at the trim boundary */
			if (prev)
				prev->u.next = NULL;
		}
		/* pkt was reset to NULL above when freed, which keeps prev
		 * NULL for all subsequent (also-freed) iterations */
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
295 
296 /*
297  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
298  * @htt_soc:	HTT SOC handle
299  * @dp_htt_htc_pkt: pkt to be added to list
300  */
301 static void
302 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
303 {
304 	struct dp_htt_htc_pkt_union *u_pkt =
305 				(struct dp_htt_htc_pkt_union *)pkt;
306 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
307 							pkt->htc_pkt.Endpoint)
308 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
309 
310 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
311 	if (soc->htt_htc_pkt_misclist) {
312 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
313 		soc->htt_htc_pkt_misclist = u_pkt;
314 	} else {
315 		soc->htt_htc_pkt_misclist = u_pkt;
316 	}
317 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
318 
319 	/* only ce pipe size + tx_queue_depth could possibly be in use
320 	 * free older packets in the misclist
321 	 */
322 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
323 }
324 
325 /*
326  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
327  * @htt_soc:	HTT SOC handle
328  */
329 static void
330 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
331 {
332 	struct dp_htt_htc_pkt_union *pkt, *next;
333 	qdf_nbuf_t netbuf;
334 
335 	pkt = soc->htt_htc_pkt_misclist;
336 
337 	while (pkt) {
338 		next = pkt->u.next;
339 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
340 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
341 
342 		soc->stats.htc_pkt_free++;
343 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
344 			 "%s: Pkt free count %d",
345 			 __func__, soc->stats.htc_pkt_free);
346 
347 		qdf_nbuf_free(netbuf);
348 		qdf_mem_free(pkt);
349 		pkt = next;
350 	}
351 	soc->htt_htc_pkt_misclist = NULL;
352 }
353 
/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differs
 * @tgt_mac_addr:	Target MAC
 * @buffer:		Output buffer
 *
 * Return: pointer to the MAC address in host byte order — @buffer on a
 * big-endian host, @tgt_mac_addr itself otherwise
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The target->host upload byte-swaps each u_int32_t element when
	 * the endiannesses differ; for byte-array fields such as the MAC
	 * address this swizzle must be undone. The six MAC bytes span two
	 * 32-bit words: all four bytes of the first word, reversed, and
	 * the two high bytes of the second.
	 */
	int i;

	for (i = 0; i < 4; i++)
		buffer[i] = tgt_mac_addr[3 - i];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/* host and target endianness match; use the bytes in place */
	return tgt_mac_addr;
#endif
}
387 
/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free a completed H2T msg buffer
 * @soc:	SOC handle (unused)
 * @status:	Completion status (unused; the buffer is freed regardless)
 * @netbuf:	HTT buffer to release
 *
 * Installed as the "send complete part 2" callback for H2T messages whose
 * network buffer should simply be freed once HTC is done with it.
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
400 
401 /*
402  * dp_htt_h2t_send_complete() - H2T completion handler
403  * @context:	Opaque context (HTT SOC handle)
404  * @htc_pkt:	HTC packet
405  */
406 static void
407 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
408 {
409 	void (*send_complete_part2)(
410 		void *soc, A_STATUS status, qdf_nbuf_t msdu);
411 	struct htt_soc *soc =  (struct htt_soc *) context;
412 	struct dp_htt_htc_pkt *htt_pkt;
413 	qdf_nbuf_t netbuf;
414 
415 	send_complete_part2 = htc_pkt->pPktContext;
416 
417 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
418 
419 	/* process (free or keep) the netbuf that held the message */
420 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
421 	/*
422 	 * adf sendcomplete is required for windows only
423 	 */
424 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
425 	if (send_complete_part2 != NULL) {
426 		send_complete_part2(
427 			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
428 	}
429 	/* free the htt_htc_pkt / HTC_PACKET object */
430 	htt_htc_pkt_free(soc, htt_pkt);
431 }
432 
433 /*
434  * htt_h2t_ver_req_msg() - Send HTT version request message to target
435  * @htt_soc:	HTT SOC handle
436  *
437  * Return: 0 on success; error code on failure
438  */
439 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
440 {
441 	struct dp_htt_htc_pkt *pkt;
442 	qdf_nbuf_t msg;
443 	uint32_t *msg_word;
444 
445 	msg = qdf_nbuf_alloc(
446 		soc->osdev,
447 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
448 		/* reserve room for the HTC header */
449 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
450 	if (!msg)
451 		return QDF_STATUS_E_NOMEM;
452 
453 	/*
454 	 * Set the length of the message.
455 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
456 	 * separately during the below call to qdf_nbuf_push_head.
457 	 * The contribution from the HTC header is added separately inside HTC.
458 	 */
459 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
460 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
461 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
462 			__func__);
463 		return QDF_STATUS_E_FAILURE;
464 	}
465 
466 	/* fill in the message contents */
467 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
468 
469 	/* rewind beyond alignment pad to get to the HTC header reserved area */
470 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
471 
472 	*msg_word = 0;
473 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
474 
475 	pkt = htt_htc_pkt_alloc(soc);
476 	if (!pkt) {
477 		qdf_nbuf_free(msg);
478 		return QDF_STATUS_E_FAILURE;
479 	}
480 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
481 
482 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
483 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
484 		qdf_nbuf_len(msg), soc->htc_endpoint,
485 		1); /* tag - not relevant here */
486 
487 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
488 	DP_HTT_SEND_HTC_PKT(soc, pkt);
489 	return 0;
490 }
491 
492 /*
493  * htt_srng_setup() - Send SRNG setup message to target
494  * @htt_soc:	HTT SOC handle
495  * @mac_id:	MAC Id
496  * @hal_srng:	Opaque HAL SRNG pointer
497  * @hal_ring_type:	SRNG ring type
498  *
499  * Return: 0 on success; error code on failure
500  */
501 int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
502 	int hal_ring_type)
503 {
504 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
505 	struct dp_htt_htc_pkt *pkt;
506 	qdf_nbuf_t htt_msg;
507 	uint32_t *msg_word;
508 	struct hal_srng_params srng_params;
509 	qdf_dma_addr_t hp_addr, tp_addr;
510 	uint32_t ring_entry_size =
511 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
512 	int htt_ring_type, htt_ring_id;
513 
514 	/* Sizes should be set in 4-byte words */
515 	ring_entry_size = ring_entry_size >> 2;
516 
517 	htt_msg = qdf_nbuf_alloc(soc->osdev,
518 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
519 		/* reserve room for the HTC header */
520 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
521 	if (!htt_msg)
522 		goto fail0;
523 
524 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
525 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
526 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
527 
528 	switch (hal_ring_type) {
529 	case RXDMA_BUF:
530 #ifdef QCA_HOST2FW_RXBUF_RING
531 		if (srng_params.ring_id ==
532 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
533 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
534 			htt_ring_type = HTT_SW_TO_SW_RING;
535 #ifdef IPA_OFFLOAD
536 		} else if (srng_params.ring_id ==
537 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
538 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
539 			htt_ring_type = HTT_SW_TO_SW_RING;
540 #endif
541 #else
542 		if (srng_params.ring_id ==
543 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
544 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
545 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
546 			htt_ring_type = HTT_SW_TO_HW_RING;
547 #endif
548 		} else if (srng_params.ring_id ==
549 #ifdef IPA_OFFLOAD
550 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
551 #else
552 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
553 #endif
554 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
555 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
556 			htt_ring_type = HTT_SW_TO_HW_RING;
557 		} else {
558 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
559 				   "%s: Ring %d currently not supported",
560 				   __func__, srng_params.ring_id);
561 			goto fail1;
562 		}
563 
564 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
565 			  "%s: ring_type %d ring_id %d",
566 			  __func__, hal_ring_type, srng_params.ring_id);
567 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
568 			  "%s: hp_addr 0x%llx tp_addr 0x%llx",
569 			  __func__, (uint64_t)hp_addr, (uint64_t)tp_addr);
570 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
571 			  "%s: htt_ring_id %d", __func__, htt_ring_id);
572 		break;
573 	case RXDMA_MONITOR_BUF:
574 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
575 		htt_ring_type = HTT_SW_TO_HW_RING;
576 		break;
577 	case RXDMA_MONITOR_STATUS:
578 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
579 		htt_ring_type = HTT_SW_TO_HW_RING;
580 		break;
581 	case RXDMA_MONITOR_DST:
582 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
583 		htt_ring_type = HTT_HW_TO_SW_RING;
584 		break;
585 	case RXDMA_MONITOR_DESC:
586 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
587 		htt_ring_type = HTT_SW_TO_HW_RING;
588 		break;
589 	case RXDMA_DST:
590 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
591 		htt_ring_type = HTT_HW_TO_SW_RING;
592 		break;
593 
594 	default:
595 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
596 			"%s: Ring currently not supported", __func__);
597 			goto fail1;
598 	}
599 
600 	/*
601 	 * Set the length of the message.
602 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
603 	 * separately during the below call to qdf_nbuf_push_head.
604 	 * The contribution from the HTC header is added separately inside HTC.
605 	 */
606 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
607 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
608 			"%s: Failed to expand head for SRING_SETUP msg",
609 			__func__);
610 		return QDF_STATUS_E_FAILURE;
611 	}
612 
613 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
614 
615 	/* rewind beyond alignment pad to get to the HTC header reserved area */
616 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
617 
618 	/* word 0 */
619 	*msg_word = 0;
620 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
621 
622 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
623 			(htt_ring_type == HTT_HW_TO_SW_RING))
624 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
625 			 DP_SW2HW_MACID(mac_id));
626 	else
627 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
628 
629 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
630 		  "%s: mac_id %d", __func__, mac_id);
631 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
632 	/* TODO: Discuss with FW on changing this to unique ID and using
633 	 * htt_ring_type to send the type of ring
634 	 */
635 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
636 
637 	/* word 1 */
638 	msg_word++;
639 	*msg_word = 0;
640 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
641 		srng_params.ring_base_paddr & 0xffffffff);
642 
643 	/* word 2 */
644 	msg_word++;
645 	*msg_word = 0;
646 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
647 		(uint64_t)srng_params.ring_base_paddr >> 32);
648 
649 	/* word 3 */
650 	msg_word++;
651 	*msg_word = 0;
652 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
653 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
654 		(ring_entry_size * srng_params.num_entries));
655 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
656 		  "%s: entry_size %d", __func__,
657 			 ring_entry_size);
658 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
659 		  "%s: num_entries %d", __func__,
660 			 srng_params.num_entries);
661 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
662 		  "%s: ring_size %d", __func__,
663 			 (ring_entry_size * srng_params.num_entries));
664 	if (htt_ring_type == HTT_SW_TO_HW_RING)
665 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
666 						*msg_word, 1);
667 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
668 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
669 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
670 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
671 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
672 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
673 
674 	/* word 4 */
675 	msg_word++;
676 	*msg_word = 0;
677 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
678 		hp_addr & 0xffffffff);
679 
680 	/* word 5 */
681 	msg_word++;
682 	*msg_word = 0;
683 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
684 		(uint64_t)hp_addr >> 32);
685 
686 	/* word 6 */
687 	msg_word++;
688 	*msg_word = 0;
689 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
690 		tp_addr & 0xffffffff);
691 
692 	/* word 7 */
693 	msg_word++;
694 	*msg_word = 0;
695 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
696 		(uint64_t)tp_addr >> 32);
697 
698 	/* word 8 */
699 	msg_word++;
700 	*msg_word = 0;
701 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
702 		srng_params.msi_addr & 0xffffffff);
703 
704 	/* word 9 */
705 	msg_word++;
706 	*msg_word = 0;
707 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
708 		(uint64_t)(srng_params.msi_addr) >> 32);
709 
710 	/* word 10 */
711 	msg_word++;
712 	*msg_word = 0;
713 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
714 		srng_params.msi_data);
715 
716 	/* word 11 */
717 	msg_word++;
718 	*msg_word = 0;
719 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
720 		srng_params.intr_batch_cntr_thres_entries *
721 		ring_entry_size);
722 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
723 		srng_params.intr_timer_thres_us >> 3);
724 
725 	/* word 12 */
726 	msg_word++;
727 	*msg_word = 0;
728 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
729 		/* TODO: Setting low threshold to 1/8th of ring size - see
730 		 * if this needs to be configurable
731 		 */
732 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
733 			srng_params.low_threshold);
734 	}
735 	/* "response_required" field should be set if a HTT response message is
736 	 * required after setting up the ring.
737 	 */
738 	pkt = htt_htc_pkt_alloc(soc);
739 	if (!pkt)
740 		goto fail1;
741 
742 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
743 
744 	SET_HTC_PACKET_INFO_TX(
745 		&pkt->htc_pkt,
746 		dp_htt_h2t_send_complete_free_netbuf,
747 		qdf_nbuf_data(htt_msg),
748 		qdf_nbuf_len(htt_msg),
749 		soc->htc_endpoint,
750 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
751 
752 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
753 	DP_HTT_SEND_HTC_PKT(soc, pkt);
754 
755 	return QDF_STATUS_SUCCESS;
756 
757 fail1:
758 	qdf_nbuf_free(htt_msg);
759 fail0:
760 	return QDF_STATUS_E_FAILURE;
761 }
762 
763 /*
764  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
765  * config message to target
766  * @htt_soc:	HTT SOC handle
767  * @pdev_id:	PDEV Id
768  * @hal_srng:	Opaque HAL SRNG pointer
769  * @hal_ring_type:	SRNG ring type
770  * @ring_buf_size:	SRNG buffer size
771  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
772  * Return: 0 on success; error code on failure
773  */
774 int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
775 	int hal_ring_type, int ring_buf_size,
776 	struct htt_rx_ring_tlv_filter *htt_tlv_filter)
777 {
778 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
779 	struct dp_htt_htc_pkt *pkt;
780 	qdf_nbuf_t htt_msg;
781 	uint32_t *msg_word;
782 	struct hal_srng_params srng_params;
783 	uint32_t htt_ring_type, htt_ring_id;
784 	uint32_t tlv_filter;
785 
786 	htt_msg = qdf_nbuf_alloc(soc->osdev,
787 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
788 	/* reserve room for the HTC header */
789 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
790 	if (!htt_msg)
791 		goto fail0;
792 
793 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
794 
795 	switch (hal_ring_type) {
796 	case RXDMA_BUF:
797 #if QCA_HOST2FW_RXBUF_RING
798 		htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
799 		htt_ring_type = HTT_SW_TO_SW_RING;
800 #else
801 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
802 		htt_ring_type = HTT_SW_TO_HW_RING;
803 #endif
804 		break;
805 	case RXDMA_MONITOR_BUF:
806 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
807 		htt_ring_type = HTT_SW_TO_HW_RING;
808 		break;
809 	case RXDMA_MONITOR_STATUS:
810 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
811 		htt_ring_type = HTT_SW_TO_HW_RING;
812 		break;
813 	case RXDMA_MONITOR_DST:
814 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
815 		htt_ring_type = HTT_HW_TO_SW_RING;
816 		break;
817 	case RXDMA_MONITOR_DESC:
818 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
819 		htt_ring_type = HTT_SW_TO_HW_RING;
820 		break;
821 	case RXDMA_DST:
822 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
823 		htt_ring_type = HTT_HW_TO_SW_RING;
824 		break;
825 
826 	default:
827 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
828 			"%s: Ring currently not supported", __func__);
829 		goto fail1;
830 	}
831 
832 	/*
833 	 * Set the length of the message.
834 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
835 	 * separately during the below call to qdf_nbuf_push_head.
836 	 * The contribution from the HTC header is added separately inside HTC.
837 	 */
838 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
839 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
840 			"%s: Failed to expand head for RX Ring Cfg msg",
841 			__func__);
842 		goto fail1; /* failure */
843 	}
844 
845 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
846 
847 	/* rewind beyond alignment pad to get to the HTC header reserved area */
848 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
849 
850 	/* word 0 */
851 	*msg_word = 0;
852 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
853 
854 	/*
855 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
856 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
857 	 */
858 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
859 			htt_ring_type == HTT_SW_TO_HW_RING)
860 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
861 						DP_SW2HW_MACID(pdev_id));
862 
863 	/* TODO: Discuss with FW on changing this to unique ID and using
864 	 * htt_ring_type to send the type of ring
865 	 */
866 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
867 
868 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
869 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
870 
871 	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
872 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
873 
874 	/* word 1 */
875 	msg_word++;
876 	*msg_word = 0;
877 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
878 		ring_buf_size);
879 
880 	/* word 2 */
881 	msg_word++;
882 	*msg_word = 0;
883 
884 	if (htt_tlv_filter->enable_fp) {
885 		/* TYPE: MGMT */
886 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
887 			FP, MGMT, 0000,
888 			(htt_tlv_filter->fp_mgmt_filter &
889 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
890 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
891 			FP, MGMT, 0001,
892 			(htt_tlv_filter->fp_mgmt_filter &
893 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
894 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
895 			FP, MGMT, 0010,
896 			(htt_tlv_filter->fp_mgmt_filter &
897 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
898 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
899 			FP, MGMT, 0011,
900 			(htt_tlv_filter->fp_mgmt_filter &
901 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
902 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
903 			FP, MGMT, 0100,
904 			(htt_tlv_filter->fp_mgmt_filter &
905 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
906 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
907 			FP, MGMT, 0101,
908 			(htt_tlv_filter->fp_mgmt_filter &
909 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
910 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
911 			FP, MGMT, 0110,
912 			(htt_tlv_filter->fp_mgmt_filter &
913 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
914 		/* reserved */
915 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
916 			MGMT, 0111,
917 			(htt_tlv_filter->fp_mgmt_filter &
918 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
919 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
920 			FP, MGMT, 1000,
921 			(htt_tlv_filter->fp_mgmt_filter &
922 			FILTER_MGMT_BEACON) ? 1 : 0);
923 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
924 			FP, MGMT, 1001,
925 			(htt_tlv_filter->fp_mgmt_filter &
926 			FILTER_MGMT_ATIM) ? 1 : 0);
927 	}
928 
929 	if (htt_tlv_filter->enable_md) {
930 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
931 				MGMT, 0000, 1);
932 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
933 				MGMT, 0001, 1);
934 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
935 				MGMT, 0010, 1);
936 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
937 				MGMT, 0011, 1);
938 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
939 				MGMT, 0100, 1);
940 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
941 				MGMT, 0101, 1);
942 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
943 				MGMT, 0110, 1);
944 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
945 				MGMT, 0111, 1);
946 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
947 				MGMT, 1000, 1);
948 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
949 				MGMT, 1001, 1);
950 	}
951 
952 	if (htt_tlv_filter->enable_mo) {
953 		/* TYPE: MGMT */
954 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
955 			MO, MGMT, 0000,
956 			(htt_tlv_filter->mo_mgmt_filter &
957 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
958 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
959 			MO, MGMT, 0001,
960 			(htt_tlv_filter->mo_mgmt_filter &
961 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
962 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
963 			MO, MGMT, 0010,
964 			(htt_tlv_filter->mo_mgmt_filter &
965 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
966 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
967 			MO, MGMT, 0011,
968 			(htt_tlv_filter->mo_mgmt_filter &
969 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
970 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
971 			MO, MGMT, 0100,
972 			(htt_tlv_filter->mo_mgmt_filter &
973 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
974 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
975 			MO, MGMT, 0101,
976 			(htt_tlv_filter->mo_mgmt_filter &
977 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
978 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
979 			MO, MGMT, 0110,
980 			(htt_tlv_filter->mo_mgmt_filter &
981 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
982 		/* reserved */
983 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
984 			MGMT, 0111,
985 			(htt_tlv_filter->mo_mgmt_filter &
986 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
987 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
988 			MO, MGMT, 1000,
989 			(htt_tlv_filter->mo_mgmt_filter &
990 			FILTER_MGMT_BEACON) ? 1 : 0);
991 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
992 			MO, MGMT, 1001,
993 			(htt_tlv_filter->mo_mgmt_filter &
994 			FILTER_MGMT_ATIM) ? 1 : 0);
995 	}
996 
997 	/* word 3 */
998 	msg_word++;
999 	*msg_word = 0;
1000 
1001 	if (htt_tlv_filter->enable_fp) {
1002 		/* TYPE: MGMT */
1003 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1004 			FP, MGMT, 1010,
1005 			(htt_tlv_filter->fp_mgmt_filter &
1006 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1007 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1008 			FP, MGMT, 1011,
1009 			(htt_tlv_filter->fp_mgmt_filter &
1010 			FILTER_MGMT_AUTH) ? 1 : 0);
1011 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1012 			FP, MGMT, 1100,
1013 			(htt_tlv_filter->fp_mgmt_filter &
1014 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1015 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1016 			FP, MGMT, 1101,
1017 			(htt_tlv_filter->fp_mgmt_filter &
1018 			FILTER_MGMT_ACTION) ? 1 : 0);
1019 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1020 			FP, MGMT, 1110,
1021 			(htt_tlv_filter->fp_mgmt_filter &
1022 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1023 		/* reserved*/
1024 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1025 			MGMT, 1111,
1026 			(htt_tlv_filter->fp_mgmt_filter &
1027 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1028 	}
1029 
1030 	if (htt_tlv_filter->enable_md) {
1031 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1032 				MGMT, 1010, 1);
1033 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1034 				MGMT, 1011, 1);
1035 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1036 				MGMT, 1100, 1);
1037 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1038 				MGMT, 1101, 1);
1039 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1040 				MGMT, 1110, 1);
1041 	}
1042 
1043 	if (htt_tlv_filter->enable_mo) {
1044 		/* TYPE: MGMT */
1045 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1046 			MO, MGMT, 1010,
1047 			(htt_tlv_filter->mo_mgmt_filter &
1048 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1049 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1050 			MO, MGMT, 1011,
1051 			(htt_tlv_filter->mo_mgmt_filter &
1052 			FILTER_MGMT_AUTH) ? 1 : 0);
1053 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1054 			MO, MGMT, 1100,
1055 			(htt_tlv_filter->mo_mgmt_filter &
1056 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1057 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1058 			MO, MGMT, 1101,
1059 			(htt_tlv_filter->mo_mgmt_filter &
1060 			FILTER_MGMT_ACTION) ? 1 : 0);
1061 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1062 			MO, MGMT, 1110,
1063 			(htt_tlv_filter->mo_mgmt_filter &
1064 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1065 		/* reserved*/
1066 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1067 			MGMT, 1111,
1068 			(htt_tlv_filter->mo_mgmt_filter &
1069 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1070 	}
1071 
1072 	/* word 4 */
1073 	msg_word++;
1074 	*msg_word = 0;
1075 
1076 	if (htt_tlv_filter->enable_fp) {
1077 		/* TYPE: CTRL */
1078 		/* reserved */
1079 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1080 			CTRL, 0000,
1081 			(htt_tlv_filter->fp_ctrl_filter &
1082 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1083 		/* reserved */
1084 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1085 			CTRL, 0001,
1086 			(htt_tlv_filter->fp_ctrl_filter &
1087 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1088 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1089 			CTRL, 0010,
1090 			(htt_tlv_filter->fp_ctrl_filter &
1091 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1092 		/* reserved */
1093 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1094 			CTRL, 0011,
1095 			(htt_tlv_filter->fp_ctrl_filter &
1096 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1097 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1098 			CTRL, 0100,
1099 			(htt_tlv_filter->fp_ctrl_filter &
1100 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1101 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1102 			CTRL, 0101,
1103 			(htt_tlv_filter->fp_ctrl_filter &
1104 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1105 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1106 			CTRL, 0110,
1107 			(htt_tlv_filter->fp_ctrl_filter &
1108 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1109 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1110 			CTRL, 0111,
1111 			(htt_tlv_filter->fp_ctrl_filter &
1112 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1113 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1114 			CTRL, 1000,
1115 			(htt_tlv_filter->fp_ctrl_filter &
1116 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1117 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1118 			CTRL, 1001,
1119 			(htt_tlv_filter->fp_ctrl_filter &
1120 			FILTER_CTRL_BA) ? 1 : 0);
1121 	}
1122 
1123 	if (htt_tlv_filter->enable_md) {
1124 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1125 				CTRL, 0000, 1);
1126 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1127 				CTRL, 0001, 1);
1128 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1129 				CTRL, 0010, 1);
1130 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1131 				CTRL, 0011, 1);
1132 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1133 				CTRL, 0100, 1);
1134 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1135 				CTRL, 0101, 1);
1136 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1137 				CTRL, 0110, 1);
1138 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1139 				CTRL, 0111, 1);
1140 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1141 				CTRL, 1000, 1);
1142 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1143 				CTRL, 1001, 1);
1144 	}
1145 
1146 	if (htt_tlv_filter->enable_mo) {
1147 		/* TYPE: CTRL */
1148 		/* reserved */
1149 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1150 			CTRL, 0000,
1151 			(htt_tlv_filter->mo_ctrl_filter &
1152 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1153 		/* reserved */
1154 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1155 			CTRL, 0001,
1156 			(htt_tlv_filter->mo_ctrl_filter &
1157 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1158 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1159 			CTRL, 0010,
1160 			(htt_tlv_filter->mo_ctrl_filter &
1161 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1162 		/* reserved */
1163 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1164 			CTRL, 0011,
1165 			(htt_tlv_filter->mo_ctrl_filter &
1166 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1167 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1168 			CTRL, 0100,
1169 			(htt_tlv_filter->mo_ctrl_filter &
1170 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1171 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1172 			CTRL, 0101,
1173 			(htt_tlv_filter->mo_ctrl_filter &
1174 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1175 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1176 			CTRL, 0110,
1177 			(htt_tlv_filter->mo_ctrl_filter &
1178 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1179 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1180 			CTRL, 0111,
1181 			(htt_tlv_filter->mo_ctrl_filter &
1182 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1183 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1184 			CTRL, 1000,
1185 			(htt_tlv_filter->mo_ctrl_filter &
1186 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1187 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1188 			CTRL, 1001,
1189 			(htt_tlv_filter->mo_ctrl_filter &
1190 			FILTER_CTRL_BA) ? 1 : 0);
1191 	}
1192 
1193 	/* word 5 */
1194 	msg_word++;
1195 	*msg_word = 0;
1196 	if (htt_tlv_filter->enable_fp) {
1197 		/* TYPE: CTRL */
1198 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1199 			CTRL, 1010,
1200 			(htt_tlv_filter->fp_ctrl_filter &
1201 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1202 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1203 			CTRL, 1011,
1204 			(htt_tlv_filter->fp_ctrl_filter &
1205 			FILTER_CTRL_RTS) ? 1 : 0);
1206 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1207 			CTRL, 1100,
1208 			(htt_tlv_filter->fp_ctrl_filter &
1209 			FILTER_CTRL_CTS) ? 1 : 0);
1210 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1211 			CTRL, 1101,
1212 			(htt_tlv_filter->fp_ctrl_filter &
1213 			FILTER_CTRL_ACK) ? 1 : 0);
1214 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1215 			CTRL, 1110,
1216 			(htt_tlv_filter->fp_ctrl_filter &
1217 			FILTER_CTRL_CFEND) ? 1 : 0);
1218 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1219 			CTRL, 1111,
1220 			(htt_tlv_filter->fp_ctrl_filter &
1221 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1222 		/* TYPE: DATA */
1223 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1224 			DATA, MCAST,
1225 			(htt_tlv_filter->fp_data_filter &
1226 			FILTER_DATA_MCAST) ? 1 : 0);
1227 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1228 			DATA, UCAST,
1229 			(htt_tlv_filter->fp_data_filter &
1230 			FILTER_DATA_UCAST) ? 1 : 0);
1231 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1232 			DATA, NULL,
1233 			(htt_tlv_filter->fp_data_filter &
1234 			FILTER_DATA_NULL) ? 1 : 0);
1235 	}
1236 
1237 	if (htt_tlv_filter->enable_md) {
1238 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1239 				CTRL, 1010, 1);
1240 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1241 				CTRL, 1011, 1);
1242 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1243 				CTRL, 1100, 1);
1244 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1245 				CTRL, 1101, 1);
1246 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1247 				CTRL, 1110, 1);
1248 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1249 				CTRL, 1111, 1);
1250 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1251 				DATA, MCAST, 1);
1252 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1253 				DATA, UCAST, 1);
1254 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1255 				DATA, NULL, 1);
1256 	}
1257 
1258 	if (htt_tlv_filter->enable_mo) {
1259 		/* TYPE: CTRL */
1260 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1261 			CTRL, 1010,
1262 			(htt_tlv_filter->mo_ctrl_filter &
1263 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1264 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1265 			CTRL, 1011,
1266 			(htt_tlv_filter->mo_ctrl_filter &
1267 			FILTER_CTRL_RTS) ? 1 : 0);
1268 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1269 			CTRL, 1100,
1270 			(htt_tlv_filter->mo_ctrl_filter &
1271 			FILTER_CTRL_CTS) ? 1 : 0);
1272 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1273 			CTRL, 1101,
1274 			(htt_tlv_filter->mo_ctrl_filter &
1275 			FILTER_CTRL_ACK) ? 1 : 0);
1276 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1277 			CTRL, 1110,
1278 			(htt_tlv_filter->mo_ctrl_filter &
1279 			FILTER_CTRL_CFEND) ? 1 : 0);
1280 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1281 			CTRL, 1111,
1282 			(htt_tlv_filter->mo_ctrl_filter &
1283 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1284 		/* TYPE: DATA */
1285 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1286 			DATA, MCAST,
1287 			(htt_tlv_filter->mo_data_filter &
1288 			FILTER_DATA_MCAST) ? 1 : 0);
1289 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1290 			DATA, UCAST,
1291 			(htt_tlv_filter->mo_data_filter &
1292 			FILTER_DATA_UCAST) ? 1 : 0);
1293 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1294 			DATA, NULL,
1295 			(htt_tlv_filter->mo_data_filter &
1296 			FILTER_DATA_NULL) ? 1 : 0);
1297 	}
1298 
1299 	/* word 6 */
1300 	msg_word++;
1301 	*msg_word = 0;
1302 	tlv_filter = 0;
1303 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1304 		htt_tlv_filter->mpdu_start);
1305 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1306 		htt_tlv_filter->msdu_start);
1307 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1308 		htt_tlv_filter->packet);
1309 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1310 		htt_tlv_filter->msdu_end);
1311 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1312 		htt_tlv_filter->mpdu_end);
1313 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1314 		htt_tlv_filter->packet_header);
1315 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1316 		htt_tlv_filter->attention);
1317 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1318 		htt_tlv_filter->ppdu_start);
1319 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1320 		htt_tlv_filter->ppdu_end);
1321 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1322 		htt_tlv_filter->ppdu_end_user_stats);
1323 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1324 		PPDU_END_USER_STATS_EXT,
1325 		htt_tlv_filter->ppdu_end_user_stats_ext);
1326 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1327 		htt_tlv_filter->ppdu_end_status_done);
1328 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1329 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1330 		 htt_tlv_filter->header_per_msdu);
1331 
1332 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1333 
1334 	/* "response_required" field should be set if a HTT response message is
1335 	 * required after setting up the ring.
1336 	 */
1337 	pkt = htt_htc_pkt_alloc(soc);
1338 	if (!pkt)
1339 		goto fail1;
1340 
1341 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1342 
1343 	SET_HTC_PACKET_INFO_TX(
1344 		&pkt->htc_pkt,
1345 		dp_htt_h2t_send_complete_free_netbuf,
1346 		qdf_nbuf_data(htt_msg),
1347 		qdf_nbuf_len(htt_msg),
1348 		soc->htc_endpoint,
1349 		1); /* tag - not relevant here */
1350 
1351 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1352 	DP_HTT_SEND_HTC_PKT(soc, pkt);
1353 	return QDF_STATUS_SUCCESS;
1354 
1355 fail1:
1356 	qdf_nbuf_free(htt_msg);
1357 fail0:
1358 	return QDF_STATUS_E_FAILURE;
1359 }
1360 
1361 #if defined(CONFIG_WIN) && WDI_EVENT_ENABLE
1362 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1363 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1364 
1365 {
1366 	uint32_t pdev_id;
1367 	uint32_t *msg_word = NULL;
1368 	uint32_t msg_remain_len = 0;
1369 
1370 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1371 
1372 	/*COOKIE MSB*/
1373 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1374 
1375 	/* stats message length + 16 size of HTT header*/
1376 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1377 				(uint32_t)DP_EXT_MSG_LENGTH);
1378 
1379 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1380 			msg_word,  msg_remain_len,
1381 			WDI_NO_VAL, pdev_id);
1382 
1383 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1384 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1385 	}
1386 	/* Need to be freed here as WDI handler will
1387 	 * make a copy of pkt to send data to application
1388 	 */
1389 	qdf_nbuf_free(htt_msg);
1390 	return QDF_STATUS_SUCCESS;
1391 }
1392 #else
1393 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1394 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1395 {
1396 	return QDF_STATUS_E_NOSUPPORT;
1397 }
1398 #endif
1399 
1400 /**
1401  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1402  * @htt_stats: htt stats info
1403  *
1404  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1405  * contains sub messages which are identified by a TLV header.
1406  * In this function we will process the stream of T2H messages and read all the
1407  * TLV contained in the message.
1408  *
1409  * THe following cases have been taken care of
1410  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1411  *		In this case the buffer will contain multiple tlvs.
1412  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1413  *		Only one tlv will be contained in the HTT message and this tag
1414  *		will extend onto the next buffer.
1415  * Case 3: When the buffer is the continuation of the previous message
1416  * Case 4: tlv length is 0. which will indicate the end of message
1417  *
1418  * return: void
1419  */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	/* Scratch buffer used to reassemble a TLV that straddles two or more
	 * T2H buffers (Case 2/3); tail is the current write cursor into it.
	 */
	uint8_t *tlv_buf_head = NULL;
	uint8_t *tlv_buf_tail = NULL;
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val;
	int cookie_msb;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		/* Non-zero cookie LSB: try to deliver the raw buffer via WDI
		 * instead of parsing it here; on success, move to next node.
		 */
		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		/* Cookie MSB: low two bits select the pdev */
		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		/* Any bit above the pdev id bits requests that TLVs be copied
		 * into pdev stats rather than printed.
		 */
		if (cookie_msb >> 2) {
			copy_stats = true;
		}

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero-length TLV marks end of the stream;
			 * drop any partial reassembly buffer and flush queue.
			 */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* TLV length field excludes the TLV header itself */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3 */
				if (tlv_buf_head) {
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev, tlv_type, tlv_start);
				else
					dp_htt_stats_print_tag(tlv_type, tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				/* Stash what this buffer has; the remainder of
				 * the TLV continues in the next queued node.
				 */
				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* On error, free the current node and drain everything still queued */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
1561 
1562 void htt_t2h_stats_handler(void *context)
1563 {
1564 	struct dp_soc *soc = (struct dp_soc *)context;
1565 	struct htt_stats_context htt_stats;
1566 	uint32_t *msg_word;
1567 	qdf_nbuf_t htt_msg = NULL;
1568 	uint8_t done;
1569 	uint8_t rem_stats;
1570 
1571 	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
1572 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1573 			"soc: 0x%pK, init_done: %d", soc,
1574 			qdf_atomic_read(&soc->cmn_init_done));
1575 		return;
1576 	}
1577 
1578 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1579 	qdf_nbuf_queue_init(&htt_stats.msg);
1580 
1581 	/* pull one completed stats from soc->htt_stats_msg and process */
1582 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1583 	if (!soc->htt_stats.num_stats) {
1584 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1585 		return;
1586 	}
1587 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1588 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1589 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1590 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1591 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1592 		/*
1593 		 * Done bit signifies that this is the last T2H buffer in the
1594 		 * stream of HTT EXT STATS message
1595 		 */
1596 		if (done)
1597 			break;
1598 	}
1599 	rem_stats = --soc->htt_stats.num_stats;
1600 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1601 
1602 	dp_process_htt_stat_msg(&htt_stats, soc);
1603 	/* If there are more stats to process, schedule stats work again */
1604 	if (rem_stats)
1605 		qdf_sched_work(0, &soc->htt_stats.work);
1606 }
1607 
1608 /*
1609  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1610  * if a new peer id arrives in a PPDU
1611  * pdev: DP pdev handle
1612  * @peer_id : peer unique identifier
1613  * @ppdu_info: per ppdu tlv structure
1614  *
1615  * return:user index to be populated
1616  */
1617 #ifdef FEATURE_PERPKT_INFO
1618 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
1619 						uint16_t peer_id,
1620 						struct ppdu_info *ppdu_info)
1621 {
1622 	uint8_t user_index = 0;
1623 	struct cdp_tx_completion_ppdu *ppdu_desc;
1624 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1625 
1626 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1627 
1628 	while ((user_index + 1) <= ppdu_info->last_user) {
1629 		ppdu_user_desc = &ppdu_desc->user[user_index];
1630 		if (ppdu_user_desc->peer_id != peer_id) {
1631 			user_index++;
1632 			continue;
1633 		} else {
1634 			/* Max users possible is 8 so user array index should
1635 			 * not exceed 7
1636 			 */
1637 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
1638 			return user_index;
1639 		}
1640 	}
1641 
1642 	ppdu_info->last_user++;
1643 	/* Max users possible is 8 so last user should not exceed 8 */
1644 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
1645 	return ppdu_info->last_user - 1;
1646 }
1647 
1648 /*
1649  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
1650  * pdev: DP pdev handle
1651  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
1652  * @ppdu_info: per ppdu tlv structure
1653  *
1654  * return:void
1655  */
static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
{
	uint16_t frame_type;
	uint16_t freq;
	struct dp_soc *soc = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;

	/* The CDP completion descriptor lives in the per-ppdu nbuf data.
	 * NOTE(review): the word offsets applied to tag_buf below are assumed
	 * to track the htt_ppdu_stats_common_tlv layout — confirm against
	 * htt_ppdu_stats.h when that header changes.
	 */
	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* word carrying num_users */
	tag_buf += 2;
	ppdu_desc->num_users =
		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
	tag_buf++;
	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);

	/* Collapse the HTT frame type into the coarse CDP classification:
	 * TIDQ data (SU/MU) -> DATA, BAR types -> BAR, everything else CTRL.
	 */
	if ((frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) ||
			(frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU))
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
	else if ((frame_type == HTT_STATS_FTYPE_SGEN_MU_BAR) ||
		 (frame_type == HTT_STATS_FTYPE_SGEN_BAR))
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
	else
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;

	tag_buf += 2;
	ppdu_desc->tx_duration = *tag_buf;
	tag_buf += 3;
	ppdu_desc->ppdu_start_timestamp = *tag_buf;

	/* End timestamp is derived, not reported: start + tx duration */
	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
					ppdu_desc->tx_duration;
	/* Ack time stamp is same as end time stamp*/
	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;

	tag_buf++;

	/* Refresh the cached operating channel when the reported
	 * frequency differs from what the descriptor last saw.
	 */
	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
	if (freq != ppdu_desc->channel) {
		soc = pdev->soc;
		ppdu_desc->channel = freq;
		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
			pdev->operating_channel =
		soc->cdp_soc.ol_ops->freq_to_channel(pdev->ctrl_pdev, freq);
	}

	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
}
1704 
1705 /*
1706  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
1707  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
1708  * @ppdu_info: per ppdu tlv structure
1709  *
1710  * return:void
1711  */
1712 static void dp_process_ppdu_stats_user_common_tlv(
1713 		struct dp_pdev *pdev, uint32_t *tag_buf,
1714 		struct ppdu_info *ppdu_info)
1715 {
1716 	uint16_t peer_id;
1717 	struct dp_peer *peer;
1718 	struct cdp_tx_completion_ppdu *ppdu_desc;
1719 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1720 	uint8_t curr_user_index = 0;
1721 
1722 	ppdu_desc =
1723 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1724 
1725 	tag_buf++;
1726 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
1727 
1728 	curr_user_index =
1729 		dp_get_ppdu_info_user_index(pdev,
1730 					    peer_id, ppdu_info);
1731 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1732 
1733 	if (peer_id == DP_SCAN_PEER_ID) {
1734 		ppdu_desc->vdev_id =
1735 			HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
1736 	} else {
1737 		peer = dp_peer_find_by_id(pdev->soc, peer_id);
1738 		if (!peer)
1739 			return;
1740 	}
1741 
1742 	ppdu_user_desc->peer_id = peer_id;
1743 
1744 	tag_buf++;
1745 
1746 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
1747 		ppdu_user_desc->delayed_ba = 1;
1748 	}
1749 
1750 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
1751 		ppdu_user_desc->is_mcast = true;
1752 		ppdu_user_desc->mpdu_tried_mcast =
1753 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1754 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
1755 	} else {
1756 		ppdu_user_desc->mpdu_tried_ucast =
1757 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1758 	}
1759 
1760 	tag_buf++;
1761 
1762 	ppdu_user_desc->qos_ctrl =
1763 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
1764 	ppdu_user_desc->frame_ctrl =
1765 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
1766 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
1767 
1768 	if (ppdu_user_desc->delayed_ba) {
1769 		ppdu_user_desc->mpdu_success = 0;
1770 		ppdu_user_desc->mpdu_tried_mcast = 0;
1771 		ppdu_user_desc->mpdu_tried_ucast = 0;
1772 	}
1773 }
1774 
1775 
1776 /**
1777  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
1778  * @pdev: DP pdev handle
1779  * @tag_buf: T2H message buffer carrying the user rate TLV
1780  * @ppdu_info: per ppdu tlv structure
1781  *
1782  * return:void
1783  */
1784 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
1785 		uint32_t *tag_buf,
1786 		struct ppdu_info *ppdu_info)
1787 {
1788 	uint16_t peer_id;
1789 	struct dp_peer *peer;
1790 	struct cdp_tx_completion_ppdu *ppdu_desc;
1791 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1792 	uint8_t curr_user_index = 0;
1793 	struct dp_vdev *vdev;
1794 
1795 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1796 
1797 	tag_buf++;
1798 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
1799 
1800 	curr_user_index =
1801 		dp_get_ppdu_info_user_index(pdev,
1802 					    peer_id, ppdu_info);
1803 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1804 
1805 	if (peer_id == DP_SCAN_PEER_ID) {
1806 		vdev =
1807 		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
1808 							  ppdu_desc->vdev_id);
1809 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
1810 			     DP_MAC_ADDR_LEN);
1811 	} else {
1812 		peer = dp_peer_find_by_id(pdev->soc, peer_id);
1813 		if (!peer)
1814 			return;
1815 		qdf_mem_copy(ppdu_user_desc->mac_addr,
1816 			     peer->mac_addr.raw, DP_MAC_ADDR_LEN);
1817 	}
1818 
1819 	ppdu_user_desc->peer_id = peer_id;
1820 
1821 	ppdu_user_desc->tid =
1822 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
1823 
1824 	tag_buf += 2;
1825 
1826 	ppdu_user_desc->ru_tones =
1827 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
1828 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
1829 
1830 	tag_buf += 2;
1831 
1832 	ppdu_user_desc->ppdu_type =
1833 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
1834 
1835 	tag_buf++;
1836 	ppdu_user_desc->tx_rate = *tag_buf;
1837 
1838 	ppdu_user_desc->ltf_size =
1839 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
1840 	ppdu_user_desc->stbc =
1841 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
1842 	ppdu_user_desc->he_re =
1843 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
1844 	ppdu_user_desc->txbf =
1845 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
1846 	ppdu_user_desc->bw =
1847 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
1848 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
1849 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
1850 	ppdu_user_desc->preamble =
1851 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
1852 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
1853 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
1854 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
1855 }
1856 
1857 /*
1858  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
1859  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1860  * pdev: DP PDEV handle
1861  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1862  * @ppdu_info: per ppdu tlv structure
1863  *
1864  * return:void
1865  */
1866 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
1867 		struct dp_pdev *pdev, uint32_t *tag_buf,
1868 		struct ppdu_info *ppdu_info)
1869 {
1870 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
1871 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
1872 
1873 	struct cdp_tx_completion_ppdu *ppdu_desc;
1874 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1875 	uint8_t curr_user_index = 0;
1876 	uint16_t peer_id;
1877 	struct dp_peer *peer;
1878 
1879 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1880 
1881 	tag_buf++;
1882 
1883 	peer_id =
1884 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1885 
1886 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1887 
1888 	if (!peer)
1889 		return;
1890 
1891 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1892 
1893 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1894 	ppdu_user_desc->peer_id = peer_id;
1895 
1896 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1897 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1898 					CDP_BA_64_BIT_MAP_SIZE_DWORDS);
1899 }
1900 
1901 /*
1902  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
1903  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1904  * soc: DP SOC handle
1905  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1906  * @ppdu_info: per ppdu tlv structure
1907  *
1908  * return:void
1909  */
1910 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
1911 		struct dp_pdev *pdev, uint32_t *tag_buf,
1912 		struct ppdu_info *ppdu_info)
1913 {
1914 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
1915 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
1916 
1917 	struct cdp_tx_completion_ppdu *ppdu_desc;
1918 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1919 	uint8_t curr_user_index = 0;
1920 	uint16_t peer_id;
1921 	struct dp_peer *peer;
1922 
1923 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1924 
1925 	tag_buf++;
1926 
1927 	peer_id =
1928 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1929 
1930 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1931 
1932 	if (!peer)
1933 		return;
1934 
1935 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1936 
1937 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1938 	ppdu_user_desc->peer_id = peer_id;
1939 
1940 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1941 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1942 					CDP_BA_256_BIT_MAP_SIZE_DWORDS);
1943 }
1944 
1945 /*
1946  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
1947  * htt_ppdu_stats_user_cmpltn_common_tlv
1948  * soc: DP SOC handle
1949  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
1950  * @ppdu_info: per ppdu tlv structure
1951  *
1952  * return:void
1953  */
1954 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
1955 		struct dp_pdev *pdev, uint32_t *tag_buf,
1956 		struct ppdu_info *ppdu_info)
1957 {
1958 	uint16_t peer_id;
1959 	struct dp_peer *peer;
1960 	struct cdp_tx_completion_ppdu *ppdu_desc;
1961 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1962 	uint8_t curr_user_index = 0;
1963 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
1964 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
1965 
1966 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1967 
1968 	tag_buf++;
1969 	peer_id =
1970 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
1971 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1972 
1973 	if (!peer)
1974 		return;
1975 
1976 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1977 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1978 	ppdu_user_desc->peer_id = peer_id;
1979 
1980 	ppdu_user_desc->completion_status =
1981 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
1982 				*tag_buf);
1983 
1984 	ppdu_user_desc->tid =
1985 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
1986 
1987 
1988 	tag_buf++;
1989 	if (qdf_likely(ppdu_user_desc->completion_status)) {
1990 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
1991 		ppdu_user_desc->ack_rssi_valid = 1;
1992 	} else {
1993 		ppdu_user_desc->ack_rssi_valid = 0;
1994 	}
1995 
1996 	tag_buf++;
1997 
1998 	ppdu_user_desc->mpdu_success =
1999 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
2000 
2001 	tag_buf++;
2002 
2003 	ppdu_user_desc->long_retries =
2004 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2005 
2006 	ppdu_user_desc->short_retries =
2007 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
2008 	ppdu_user_desc->retry_msdus =
2009 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
2010 
2011 	ppdu_user_desc->is_ampdu =
2012 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
2013 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
2014 
2015 }
2016 
2017 /*
2018  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2019  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2020  * pdev: DP PDEV handle
2021  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2022  * @ppdu_info: per ppdu tlv structure
2023  *
2024  * return:void
2025  */
2026 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2027 		struct dp_pdev *pdev, uint32_t *tag_buf,
2028 		struct ppdu_info *ppdu_info)
2029 {
2030 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2031 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2032 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2033 	struct cdp_tx_completion_ppdu *ppdu_desc;
2034 	uint8_t curr_user_index = 0;
2035 	uint16_t peer_id;
2036 	struct dp_peer *peer;
2037 
2038 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2039 
2040 	tag_buf++;
2041 
2042 	peer_id =
2043 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2044 
2045 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2046 
2047 	if (!peer)
2048 		return;
2049 
2050 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2051 
2052 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2053 	ppdu_user_desc->peer_id = peer_id;
2054 
2055 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2056 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2057 			CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2058 }
2059 
2060 /*
2061  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2062  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2063  * pdev: DP PDEV handle
2064  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2065  * @ppdu_info: per ppdu tlv structure
2066  *
2067  * return:void
2068  */
2069 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2070 		struct dp_pdev *pdev, uint32_t *tag_buf,
2071 		struct ppdu_info *ppdu_info)
2072 {
2073 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2074 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2075 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2076 	struct cdp_tx_completion_ppdu *ppdu_desc;
2077 	uint8_t curr_user_index = 0;
2078 	uint16_t peer_id;
2079 	struct dp_peer *peer;
2080 
2081 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2082 
2083 	tag_buf++;
2084 
2085 	peer_id =
2086 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2087 
2088 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2089 
2090 	if (!peer)
2091 		return;
2092 
2093 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2094 
2095 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2096 	ppdu_user_desc->peer_id = peer_id;
2097 
2098 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2099 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2100 			CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2101 }
2102 
2103 /*
2104  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2105  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2106  * pdev: DP PDE handle
2107  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2108  * @ppdu_info: per ppdu tlv structure
2109  *
2110  * return:void
2111  */
2112 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2113 		struct dp_pdev *pdev, uint32_t *tag_buf,
2114 		struct ppdu_info *ppdu_info)
2115 {
2116 	uint16_t peer_id;
2117 	struct dp_peer *peer;
2118 	struct cdp_tx_completion_ppdu *ppdu_desc;
2119 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2120 	uint8_t curr_user_index = 0;
2121 
2122 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2123 
2124 	tag_buf += 2;
2125 	peer_id =
2126 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2127 
2128 
2129 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2130 
2131 	if (!peer)
2132 		return;
2133 
2134 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2135 
2136 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2137 	ppdu_user_desc->peer_id = peer_id;
2138 
2139 	tag_buf++;
2140 	ppdu_user_desc->tid =
2141 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(*tag_buf);
2142 	ppdu_user_desc->num_mpdu =
2143 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2144 
2145 	ppdu_user_desc->num_msdu =
2146 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2147 
2148 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2149 
2150 	tag_buf += 2;
2151 	ppdu_user_desc->success_bytes = *tag_buf;
2152 
2153 }
2154 
2155 /*
2156  * dp_process_ppdu_stats_user_common_array_tlv: Process
2157  * htt_ppdu_stats_user_common_array_tlv
2158  * pdev: DP PDEV handle
2159  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2160  * @ppdu_info: per ppdu tlv structure
2161  *
2162  * return:void
2163  */
2164 static void dp_process_ppdu_stats_user_common_array_tlv(
2165 		struct dp_pdev *pdev, uint32_t *tag_buf,
2166 		struct ppdu_info *ppdu_info)
2167 {
2168 	uint32_t peer_id;
2169 	struct dp_peer *peer;
2170 	struct cdp_tx_completion_ppdu *ppdu_desc;
2171 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2172 	uint8_t curr_user_index = 0;
2173 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2174 
2175 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2176 
2177 	tag_buf++;
2178 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2179 	tag_buf += 3;
2180 	peer_id =
2181 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2182 
2183 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2184 
2185 	if (!peer) {
2186 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2187 			"Invalid peer");
2188 		return;
2189 	}
2190 
2191 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2192 
2193 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2194 
2195 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2196 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2197 
2198 	tag_buf++;
2199 
2200 	ppdu_user_desc->success_msdus =
2201 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2202 	ppdu_user_desc->retry_bytes =
2203 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2204 	tag_buf++;
2205 	ppdu_user_desc->failed_msdus =
2206 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2207 }
2208 
2209 /*
2210  * dp_process_ppdu_stats_flush_tlv: Process
2211  * htt_ppdu_stats_flush_tlv
2212  * @pdev: DP PDEV handle
2213  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2214  *
2215  * return:void
2216  */
2217 static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2218 						uint32_t *tag_buf)
2219 {
2220 	uint32_t peer_id;
2221 	uint32_t drop_reason;
2222 	uint8_t tid;
2223 	uint32_t num_msdu;
2224 	struct dp_peer *peer;
2225 
2226 	tag_buf++;
2227 	drop_reason = *tag_buf;
2228 
2229 	tag_buf++;
2230 	num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2231 
2232 	tag_buf++;
2233 	peer_id =
2234 		HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2235 
2236 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2237 	if (!peer)
2238 		return;
2239 
2240 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2241 
2242 	if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2243 		DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2244 					num_msdu);
2245 	}
2246 }
2247 
2248 /*
2249  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2250  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2251  * @pdev: DP PDEV handle
2252  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2253  * @length: tlv_length
2254  *
2255  * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller
2256  */
2257 static QDF_STATUS
2258 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2259 					      qdf_nbuf_t tag_buf,
2260 					      uint32_t ppdu_id)
2261 {
2262 	uint32_t *nbuf_ptr;
2263 	uint8_t trim_size;
2264 
2265 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
2266 	    (!pdev->bpr_enable))
2267 		return QDF_STATUS_SUCCESS;
2268 
2269 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
2270 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2271 		      qdf_nbuf_data(tag_buf));
2272 
2273 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2274 		return QDF_STATUS_SUCCESS;
2275 
2276 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2277 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
2278 
2279 	nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(
2280 				tag_buf, sizeof(ppdu_id));
2281 	*nbuf_ptr = ppdu_id;
2282 
2283 	if (pdev->bpr_enable) {
2284 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2285 				     tag_buf, HTT_INVALID_PEER,
2286 				     WDI_NO_VAL, pdev->pdev_id);
2287 	}
2288 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2289 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2290 				     tag_buf, HTT_INVALID_PEER,
2291 				     WDI_NO_VAL, pdev->pdev_id);
2292 	}
2293 
2294 	return QDF_STATUS_E_ALREADY;
2295 }
2296 
2297 /**
2298  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
2299  * @pdev: DP pdev handle
2300  * @tag_buf: TLV buffer
2301  * @tlv_len: length of tlv
2302  * @ppdu_info: per ppdu tlv structure
2303  *
2304  * return: void
2305  */
2306 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
2307 		uint32_t tlv_len, struct ppdu_info *ppdu_info)
2308 {
2309 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2310 
2311 	switch (tlv_type) {
2312 	case HTT_PPDU_STATS_COMMON_TLV:
2313 		qdf_assert_always(tlv_len ==
2314 				sizeof(htt_ppdu_stats_common_tlv));
2315 		dp_process_ppdu_stats_common_tlv(pdev, tag_buf, ppdu_info);
2316 		break;
2317 	case HTT_PPDU_STATS_USR_COMMON_TLV:
2318 		qdf_assert_always(tlv_len ==
2319 				sizeof(htt_ppdu_stats_user_common_tlv));
2320 		dp_process_ppdu_stats_user_common_tlv(
2321 				pdev, tag_buf, ppdu_info);
2322 		break;
2323 	case HTT_PPDU_STATS_USR_RATE_TLV:
2324 		qdf_assert_always(tlv_len ==
2325 				sizeof(htt_ppdu_stats_user_rate_tlv));
2326 		dp_process_ppdu_stats_user_rate_tlv(pdev, tag_buf, ppdu_info);
2327 		break;
2328 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
2329 		qdf_assert_always(tlv_len ==
2330 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv));
2331 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2332 				pdev, tag_buf, ppdu_info);
2333 		break;
2334 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
2335 		qdf_assert_always(tlv_len ==
2336 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv));
2337 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2338 				pdev, tag_buf, ppdu_info);
2339 		break;
2340 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
2341 		qdf_assert_always(tlv_len ==
2342 				sizeof(htt_ppdu_stats_user_cmpltn_common_tlv));
2343 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
2344 				pdev, tag_buf, ppdu_info);
2345 		break;
2346 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
2347 		qdf_assert_always(tlv_len ==
2348 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv));
2349 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2350 				pdev, tag_buf, ppdu_info);
2351 		break;
2352 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
2353 		qdf_assert_always(tlv_len ==
2354 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv));
2355 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2356 				pdev, tag_buf, ppdu_info);
2357 		break;
2358 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
2359 		qdf_assert_always(tlv_len ==
2360 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv));
2361 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2362 				pdev, tag_buf, ppdu_info);
2363 		break;
2364 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
2365 		qdf_assert_always(tlv_len ==
2366 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v));
2367 		dp_process_ppdu_stats_user_common_array_tlv(
2368 				pdev, tag_buf, ppdu_info);
2369 		break;
2370 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
2371 		qdf_assert_always(tlv_len ==
2372 			sizeof(htt_ppdu_stats_flush_tlv));
2373 		dp_process_ppdu_stats_user_compltn_flush_tlv(
2374 				pdev, tag_buf);
2375 		break;
2376 	default:
2377 		break;
2378 	}
2379 }
2380 
2381 /**
2382  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
2383  * to upper layer
2384  * @pdev: DP pdev handle
2385  * @ppdu_info: per PPDU TLV descriptor
2386  *
2387  * return: void
2388  */
2389 static
2390 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
2391 			      struct ppdu_info *ppdu_info)
2392 {
2393 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2394 	struct dp_peer *peer = NULL;
2395 	qdf_nbuf_t nbuf;
2396 	uint16_t i;
2397 
2398 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2399 		qdf_nbuf_data(ppdu_info->nbuf);
2400 
2401 	ppdu_desc->num_users = ppdu_info->last_user;
2402 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2403 
2404 	for (i = 0; i < ppdu_desc->num_users; i++) {
2405 
2406 
2407 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
2408 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
2409 
2410 		if (ppdu_desc->user[i].tid < CDP_DATA_TID_MAX) {
2411 			peer = dp_peer_find_by_id(pdev->soc,
2412 					ppdu_desc->user[i].peer_id);
2413 			/**
2414 			 * This check is to make sure peer is not deleted
2415 			 * after processing the TLVs.
2416 			 */
2417 			if (!peer)
2418 				continue;
2419 
2420 			dp_tx_stats_update(pdev->soc, peer,
2421 					&ppdu_desc->user[i],
2422 					ppdu_desc->ack_rssi);
2423 		}
2424 	}
2425 
2426 	/*
2427 	 * Remove from the list
2428 	 */
2429 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
2430 	nbuf = ppdu_info->nbuf;
2431 	pdev->list_depth--;
2432 	qdf_mem_free(ppdu_info);
2433 
2434 	qdf_assert_always(nbuf);
2435 
2436 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2437 		qdf_nbuf_data(nbuf);
2438 
2439 	/**
2440 	 * Deliver PPDU stats only for valid (acked) data frames if
2441 	 * sniffer mode is not enabled.
2442 	 * If sniffer mode is enabled, PPDU stats for all frames
2443 	 * including mgmt/control frames should be delivered to upper layer
2444 	 */
2445 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2446 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
2447 				nbuf, HTT_INVALID_PEER,
2448 				WDI_NO_VAL, pdev->pdev_id);
2449 	} else {
2450 		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
2451 				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
2452 
2453 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
2454 					pdev->soc, nbuf, HTT_INVALID_PEER,
2455 					WDI_NO_VAL, pdev->pdev_id);
2456 		} else
2457 			qdf_nbuf_free(nbuf);
2458 	}
2459 	return;
2460 }
2461 
2462 /**
2463  * dp_get_ppdu_desc(): Function to allocate new PPDU status
2464  * desc for new ppdu id
2465  * @pdev: DP pdev handle
2466  * @ppdu_id: PPDU unique identifier
2467  * @tlv_type: TLV type received
2468  *
2469  * return: ppdu_info per ppdu tlv structure
2470  */
2471 static
2472 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
2473 			uint8_t tlv_type)
2474 {
2475 	struct ppdu_info *ppdu_info = NULL;
2476 
2477 	/*
2478 	 * Find ppdu_id node exists or not
2479 	 */
2480 	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {
2481 
2482 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
2483 			break;
2484 		}
2485 	}
2486 
2487 	if (ppdu_info) {
2488 		/**
2489 		 * if we get tlv_type that is already been processed for ppdu,
2490 		 * that means we got a new ppdu with same ppdu id.
2491 		 * Hence Flush the older ppdu
2492 		 */
2493 		if (ppdu_info->tlv_bitmap & (1 << tlv_type))
2494 			dp_ppdu_desc_deliver(pdev, ppdu_info);
2495 		else
2496 			return ppdu_info;
2497 	}
2498 
2499 	/**
2500 	 * Flush the head ppdu descriptor if ppdu desc list reaches max
2501 	 * threshold
2502 	 */
2503 	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
2504 		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
2505 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2506 	}
2507 
2508 	/*
2509 	 * Allocate new ppdu_info node
2510 	 */
2511 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
2512 	if (!ppdu_info)
2513 		return NULL;
2514 
2515 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
2516 			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
2517 			TRUE);
2518 	if (!ppdu_info->nbuf) {
2519 		qdf_mem_free(ppdu_info);
2520 		return NULL;
2521 	}
2522 
2523 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
2524 			sizeof(struct cdp_tx_completion_ppdu));
2525 
2526 	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
2527 			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
2528 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2529 				"No tailroom for HTT PPDU");
2530 		qdf_nbuf_free(ppdu_info->nbuf);
2531 		ppdu_info->nbuf = NULL;
2532 		ppdu_info->last_user = 0;
2533 		qdf_mem_free(ppdu_info);
2534 		return NULL;
2535 	}
2536 
2537 	/**
2538 	 * No lock is needed because all PPDU TLVs are processed in
2539 	 * same context and this list is updated in same context
2540 	 */
2541 	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
2542 			ppdu_info_list_elem);
2543 	pdev->list_depth++;
2544 	return ppdu_info;
2545 }
2546 
2547 /**
2548  * dp_htt_process_tlv(): Function to process each PPDU TLVs
2549  * @pdev: DP pdev handle
2550  * @htt_t2h_msg: HTT target to host message
2551  *
2552  * return: ppdu_info per ppdu tlv structure
2553  */
2554 
2555 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
2556 		qdf_nbuf_t htt_t2h_msg)
2557 {
2558 	uint32_t length;
2559 	uint32_t ppdu_id;
2560 	uint8_t tlv_type;
2561 	uint32_t tlv_length, tlv_bitmap_expected;
2562 	uint8_t *tlv_buf;
2563 	struct ppdu_info *ppdu_info = NULL;
2564 
2565 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2566 
2567 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
2568 
2569 	msg_word = msg_word + 1;
2570 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
2571 
2572 
2573 	msg_word = msg_word + 3;
2574 	while (length > 0) {
2575 		tlv_buf = (uint8_t *)msg_word;
2576 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
2577 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
2578 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
2579 			pdev->stats.ppdu_stats_counter[tlv_type]++;
2580 
2581 		if (tlv_length == 0)
2582 			break;
2583 
2584 		tlv_length += HTT_TLV_HDR_LEN;
2585 
2586 		/**
2587 		 * Not allocating separate ppdu descriptor for MGMT Payload
2588 		 * TLV as this is sent as separate WDI indication and it
2589 		 * doesn't contain any ppdu information
2590 		 */
2591 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
2592 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
2593 			pdev->mgmtctrl_frm_info.mgmt_buf_len = tlv_length;
2594 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
2595 			msg_word =
2596 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2597 			length -= (tlv_length);
2598 			continue;
2599 		}
2600 
2601 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
2602 		if (!ppdu_info)
2603 			return NULL;
2604 		ppdu_info->ppdu_id = ppdu_id;
2605 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
2606 
2607 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
2608 
2609 		/**
2610 		 * Increment pdev level tlv count to monitor
2611 		 * missing TLVs
2612 		 */
2613 		pdev->tlv_count++;
2614 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
2615 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2616 		length -= (tlv_length);
2617 	}
2618 
2619 	if (!ppdu_info)
2620 		return NULL;
2621 
2622 	pdev->last_ppdu_id = ppdu_id;
2623 
2624 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
2625 
2626 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2627 		if (ppdu_info->is_ampdu)
2628 			tlv_bitmap_expected = HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP;
2629 	}
2630 
2631 	/**
2632 	 * Once all the TLVs for a given PPDU has been processed,
2633 	 * return PPDU status to be delivered to higher layer
2634 	 */
2635 	if (ppdu_info->tlv_bitmap == tlv_bitmap_expected)
2636 		return ppdu_info;
2637 
2638 	return NULL;
2639 }
2640 #endif /* FEATURE_PERPKT_INFO */
2641 
2642 /**
2643  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
2644  * @soc: DP SOC handle
2645  * @pdev_id: pdev id
2646  * @htt_t2h_msg: HTT message nbuf
2647  *
2648  * return:void
2649  */
2650 #if defined(WDI_EVENT_ENABLE)
2651 #ifdef FEATURE_PERPKT_INFO
2652 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
2653 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
2654 {
2655 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
2656 	struct ppdu_info *ppdu_info = NULL;
2657 	bool free_buf = true;
2658 
2659 	if (!pdev)
2660 		return true;
2661 
2662 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
2663 	    !pdev->mcopy_mode && !pdev->bpr_enable)
2664 		return free_buf;
2665 
2666 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
2667 
2668 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
2669 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
2670 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
2671 		    QDF_STATUS_SUCCESS)
2672 			free_buf = false;
2673 
2674 		pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
2675 		pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
2676 		pdev->mgmtctrl_frm_info.ppdu_id = 0;
2677 	}
2678 
2679 	if (ppdu_info)
2680 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2681 
2682 	return free_buf;
2683 }
2684 #else
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	/* FEATURE_PERPKT_INFO disabled: no processing, caller frees buffer */
	return true;
}
2690 #endif
2691 #endif
2692 
2693 /**
2694  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2695  * @soc: DP SOC handle
2696  * @htt_t2h_msg: HTT message nbuf
2697  *
2698  * return:void
2699  */
2700 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2701 		qdf_nbuf_t htt_t2h_msg)
2702 {
2703 	uint8_t done;
2704 	qdf_nbuf_t msg_copy;
2705 	uint32_t *msg_word;
2706 
2707 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2708 	msg_word = msg_word + 3;
2709 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2710 
2711 	/*
2712 	 * HTT EXT stats response comes as stream of TLVs which span over
2713 	 * multiple T2H messages.
2714 	 * The first message will carry length of the response.
2715 	 * For rest of the messages length will be zero.
2716 	 *
2717 	 * Clone the T2H message buffer and store it in a list to process
2718 	 * it later.
2719 	 *
2720 	 * The original T2H message buffers gets freed in the T2H HTT event
2721 	 * handler
2722 	 */
2723 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2724 
2725 	if (!msg_copy) {
2726 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2727 				"T2H messge clone failed for HTT EXT STATS");
2728 		goto error;
2729 	}
2730 
2731 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2732 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2733 	/*
2734 	 * Done bit signifies that this is the last T2H buffer in the stream of
2735 	 * HTT EXT STATS message
2736 	 */
2737 	if (done) {
2738 		soc->htt_stats.num_stats++;
2739 		qdf_sched_work(0, &soc->htt_stats.work);
2740 	}
2741 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2742 
2743 	return;
2744 
2745 error:
2746 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2747 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2748 			!= NULL) {
2749 		qdf_nbuf_free(msg_copy);
2750 	}
2751 	soc->htt_stats.num_stats = 0;
2752 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2753 	return;
2754 
2755 }
2756 
2757 /*
2758  * htt_soc_attach_target() - SOC level HTT setup
2759  * @htt_soc:	HTT SOC handle
2760  *
2761  * Return: 0 on success; error code on failure
2762  */
2763 int htt_soc_attach_target(void *htt_soc)
2764 {
2765 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
2766 
2767 	return htt_h2t_ver_req_msg(soc);
2768 }
2769 
2770 
2771 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
2772 /*
2773  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
2774  * @htt_soc:	 HTT SOC handle
2775  * @msg_word:    Pointer to payload
2776  * @htt_t2h_msg: HTT msg nbuf
2777  *
2778  * Return: True if buffer should be freed by caller.
2779  */
2780 static bool
2781 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
2782 				uint32_t *msg_word,
2783 				qdf_nbuf_t htt_t2h_msg)
2784 {
2785 	u_int8_t pdev_id;
2786 	bool free_buf;
2787 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
2788 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2789 		"received HTT_T2H_MSG_TYPE_PPDU_STATS_IND");
2790 	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
2791 	pdev_id = DP_HW2SW_MACID(pdev_id);
2792 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
2793 					      htt_t2h_msg);
2794 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
2795 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
2796 		pdev_id);
2797 	return free_buf;
2798 }
2799 #else
2800 static bool
2801 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
2802 				uint32_t *msg_word,
2803 				qdf_nbuf_t htt_t2h_msg)
2804 {
2805 	return true;
2806 }
2807 #endif
2808 
2809 #if defined(WDI_EVENT_ENABLE) && \
2810 		!defined(REMOVE_PKT_LOG) && defined(CONFIG_WIN)
2811 /*
2812  * dp_pktlog_msg_handler() - Pktlog msg handler
2813  * @htt_soc:	 HTT SOC handle
2814  * @msg_word:    Pointer to payload
2815  *
2816  * Return: None
2817  */
2818 static void
2819 dp_pktlog_msg_handler(struct htt_soc *soc,
2820 				uint32_t *msg_word)
2821 {
2822 	uint8_t pdev_id;
2823 	uint32_t *pl_hdr;
2824 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2825 		"received HTT_T2H_MSG_TYPE_PKTLOG");
2826 	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2827 	pdev_id = DP_HW2SW_MACID(pdev_id);
2828 	pl_hdr = (msg_word + 1);
2829 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2830 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2831 		pdev_id);
2832 }
2833 #else
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
				uint32_t *msg_word)
{
	/* Pktlog disabled in this build: message is ignored */
}
2839 #endif
2840 
/*
 * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
 * @context:	Opaque context (HTT SOC handle)
 * @pkt:	HTC packet; pkt->pPktContext carries the T2H message nbuf
 *
 * Dispatches on the HTT message type held in the first payload word.
 * Unless a sub-handler takes ownership of the nbuf (currently only the
 * PPDU stats indication can), the message buffer is freed here.
 *
 * Return: None
 */
static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct htt_soc *soc = (struct htt_soc *) context;
	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	u_int32_t *msg_word;
	enum htt_t2h_msg_type msg_type;
	/* cleared only when a sub-handler keeps the nbuf */
	bool free_buf = true;

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		/* cancellation (e.g. on teardown) is not an HTC error */
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			soc->stats.htc_err_cnt++;

		qdf_nbuf_free(htt_t2h_msg);
		return;
	}

	/* TODO: Check if we should pop the HTC/HTT header alignment padding */

	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
	switch (msg_type) {
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		{
			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
			u_int8_t *peer_mac_addr;
			u_int16_t peer_id;
			u_int16_t hw_peer_id;
			u_int8_t vdev_id;

			/* peer_id/vdev_id live in word 0; hw peer id in
			 * word 2; the MAC address is packed into words 1-2
			 */
			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
			hw_peer_id =
				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
				(u_int8_t *) (msg_word+1),
				&mac_addr_deswizzle_buf[0]);
			/* NOTE(review): the trailing " n" in this format
			 * string looks like a mistyped "\n" escape
			 */
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_INFO,
				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
				peer_id, vdev_id);

			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
						vdev_id, peer_mac_addr);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
		{
			u_int16_t peer_id;
			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);

			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id);
			break;
		}
	case HTT_T2H_MSG_TYPE_SEC_IND:
		{
			u_int16_t peer_id;
			enum htt_sec_type sec_type;
			int is_unicast;

			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
			/* point to the first part of the Michael key */
			msg_word++;
			/* rx PN passed as the second key fragment pointer
			 * (two words past the Michael key start)
			 */
			dp_rx_sec_ind_handler(
				soc->dp_soc, peer_id, sec_type, is_unicast,
				msg_word, msg_word + 2);
			break;
		}

	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		{
			/* handler returns false when it keeps the nbuf */
			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
							     htt_t2h_msg);
			break;
		}

	case HTT_T2H_MSG_TYPE_PKTLOG:
		{
			dp_pktlog_msg_handler(soc, msg_word);
			break;
		}

	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		{
			/* presumably balances a runtime-pm get taken when the
			 * version request was sent - TODO confirm at caller
			 */
			htc_pm_runtime_put(soc->htc_soc);
			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
				"target uses HTT version %d.%d; host uses %d.%d",
				soc->tgt_ver.major, soc->tgt_ver.minor,
				HTT_CURRENT_VERSION_MAJOR,
				HTT_CURRENT_VERSION_MINOR);
			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"*** Incompatible host/target HTT versions!");
			}
			/* abort if the target is incompatible with the host */
			qdf_assert(soc->tgt_ver.major ==
				HTT_CURRENT_VERSION_MAJOR);
			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_WARN,
					"*** Warning: host/target HTT versions"
					" are different, though compatible!");
			}
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint8_t win_sz;
			uint16_t status;
			struct dp_peer *peer;

			/*
			 * Update REO Queue Desc with new values
			 */
			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
			/* NOTE(review): no matching peer release on this
			 * path - confirm dp_peer_find_by_id does not take a
			 * reference in this revision
			 */
			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);

			/*
			 * Window size needs to be incremented by 1
			 * since fw needs to represent a value of 256
			 * using just 8 bits
			 */
			if (peer) {
				status = dp_addba_requestprocess_wifi3(peer,
						0, tid, 0, win_sz + 1, 0xffff);
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_INFO,
					FL("PeerID %d BAW %d TID %d stat %d"),
					peer_id, win_sz, tid, status);

			} else {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					FL("Peer not found peer id %d"),
					peer_id);
			}
			break;
		}
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		{
			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
			break;
		}
	default:
		break;
	};

	/* Free the indication buffer */
	if (free_buf)
		qdf_nbuf_free(htt_t2h_msg);
}
3006 
/*
 * dp_htt_h2t_full() - Send full handler (called from HTC)
 * @context:	Opaque context (HTT SOC handle)
 * @pkt:	HTC packet that exceeded the send queue depth
 *
 * Invoked by HTC when the send queue passes MaxSendQueueDepth (set during
 * htt_htc_soc_attach()).
 *
 * Return: HTC_SEND_FULL_KEEP - queue the packet anyway rather than drop it
 */
static enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}
3019 
3020 /*
3021  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3022  * @context:	Opaque context (HTT SOC handle)
3023  * @nbuf:	nbuf containing T2H message
3024  * @pipe_id:	HIF pipe ID
3025  *
3026  * Return: QDF_STATUS
3027  *
3028  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3029  * will be used for packet log and other high-priority HTT messages. Proper
3030  * HTC connection to be added later once required FW changes are available
3031  */
3032 static QDF_STATUS
3033 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3034 {
3035 	A_STATUS rc = QDF_STATUS_SUCCESS;
3036 	HTC_PACKET htc_pkt;
3037 
3038 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3039 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3040 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3041 	htc_pkt.pPktContext = (void *)nbuf;
3042 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3043 
3044 	return rc;
3045 }
3046 
/*
 * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
 * @soc:	HTT SOC handle
 *
 * Connects the HTT data message service to HTC, records the resulting
 * endpoint in @soc and with HIF, and registers the high-priority T2H
 * pipe callback.
 *
 * Return: 0 on success; error code on failure
 */
static int
htt_htc_soc_attach(struct htt_soc *soc)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	A_STATUS status;
	struct dp_soc *dpsoc = soc->dp_soc;

	/* qdf_mem_set takes (ptr, size, value) - zero both structs */
	qdf_mem_set(&connect, sizeof(connect), 0);
	qdf_mem_set(&response, sizeof(response), 0);

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = soc;
	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;

	/* N/A, fill is done by HIF */
	connect.EpCallbacks.RecvRefillWaterMark = 1;

	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to control service */
	connect.service_id = HTT_DATA_MSG_SVC;

	status = htc_connect_service(soc->htc_soc, &connect, &response);

	/* NOTE(review): returning a QDF status from an int function mixes
	 * return conventions; callers only test non-zero so it works
	 */
	if (status != A_OK)
		return QDF_STATUS_E_FAILURE;

	soc->htc_endpoint = response.Endpoint;

	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
	/* route high-priority T2H messages (e.g. pktlog) around HTC */
	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);

	return 0; /* success */
}
3103 
3104 /*
3105  * htt_soc_attach() - SOC level HTT initialization
3106  * @dp_soc:	Opaque Data path SOC handle
3107  * @ctrl_psoc:	Opaque ctrl SOC handle
3108  * @htc_soc:	SOC level HTC handle
3109  * @hal_soc:	Opaque HAL SOC handle
3110  * @osdev:	QDF device
3111  *
3112  * Return: HTT handle on success; NULL on failure
3113  */
3114 void *
3115 htt_soc_attach(void *dp_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
3116 	void *hal_soc, qdf_device_t osdev)
3117 {
3118 	struct htt_soc *soc;
3119 	int i;
3120 
3121 	soc = qdf_mem_malloc(sizeof(*soc));
3122 
3123 	if (!soc)
3124 		goto fail1;
3125 
3126 	soc->osdev = osdev;
3127 	soc->ctrl_psoc = ctrl_psoc;
3128 	soc->dp_soc = dp_soc;
3129 	soc->htc_soc = htc_soc;
3130 	soc->hal_soc = hal_soc;
3131 
3132 	/* TODO: See if any NSS related context is required in htt_soc */
3133 
3134 	soc->htt_htc_pkt_freelist = NULL;
3135 
3136 	if (htt_htc_soc_attach(soc))
3137 		goto fail2;
3138 
3139 	/* TODO: See if any Rx data specific intialization is required. For
3140 	 * MCL use cases, the data will be received as single packet and
3141 	 * should not required any descriptor or reorder handling
3142 	 */
3143 
3144 	HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);
3145 
3146 	/* pre-allocate some HTC_PACKET objects */
3147 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3148 		struct dp_htt_htc_pkt_union *pkt;
3149 		pkt = qdf_mem_malloc(sizeof(*pkt));
3150 		if (!pkt)
3151 			break;
3152 
3153 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3154 	}
3155 
3156 	return soc;
3157 
3158 fail2:
3159 	qdf_mem_free(soc);
3160 
3161 fail1:
3162 	return NULL;
3163 }
3164 
3165 
3166 /*
3167  * htt_soc_detach() - Detach SOC level HTT
3168  * @htt_soc:	HTT SOC handle
3169  */
3170 void
3171 htt_soc_detach(void *htt_soc)
3172 {
3173 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3174 
3175 	htt_htc_misc_pkt_pool_free(soc);
3176 	htt_htc_pkt_pool_free(soc);
3177 	HTT_TX_MUTEX_DESTROY(&soc->htt_tx_mutex);
3178 	qdf_mem_free(soc);
3179 }
3180 
3181 /**
3182  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
3183  * @pdev: DP PDEV handle
3184  * @stats_type_upload_mask: stats type requested by user
3185  * @config_param_0: extra configuration parameters
3186  * @config_param_1: extra configuration parameters
3187  * @config_param_2: extra configuration parameters
3188  * @config_param_3: extra configuration parameters
3189  * @mac_id: mac number
3190  *
3191  * return: QDF STATUS
3192  */
3193 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3194 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3195 		uint32_t config_param_1, uint32_t config_param_2,
3196 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3197 		uint8_t mac_id)
3198 {
3199 	struct htt_soc *soc = pdev->soc->htt_handle;
3200 	struct dp_htt_htc_pkt *pkt;
3201 	qdf_nbuf_t msg;
3202 	uint32_t *msg_word;
3203 	uint8_t pdev_mask = 0;
3204 
3205 	msg = qdf_nbuf_alloc(
3206 			soc->osdev,
3207 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3208 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3209 
3210 	if (!msg)
3211 		return QDF_STATUS_E_NOMEM;
3212 
3213 	/*TODO:Add support for SOC stats
3214 	 * Bit 0: SOC Stats
3215 	 * Bit 1: Pdev stats for pdev id 0
3216 	 * Bit 2: Pdev stats for pdev id 1
3217 	 * Bit 3: Pdev stats for pdev id 2
3218 	 */
3219 	mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3220 
3221 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
3222 	/*
3223 	 * Set the length of the message.
3224 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3225 	 * separately during the below call to qdf_nbuf_push_head.
3226 	 * The contribution from the HTC header is added separately inside HTC.
3227 	 */
3228 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3229 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3230 				"Failed to expand head for HTT_EXT_STATS");
3231 		qdf_nbuf_free(msg);
3232 		return QDF_STATUS_E_FAILURE;
3233 	}
3234 
3235 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3236 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
3237 		"config_param_1 %u\n config_param_2 %u\n"
3238 		"config_param_4 %u\n -------------",
3239 		__func__, __LINE__, cookie_val, config_param_0,
3240 		config_param_1, config_param_2,	config_param_3);
3241 
3242 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3243 
3244 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3245 	*msg_word = 0;
3246 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
3247 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
3248 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
3249 
3250 	/* word 1 */
3251 	msg_word++;
3252 	*msg_word = 0;
3253 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
3254 
3255 	/* word 2 */
3256 	msg_word++;
3257 	*msg_word = 0;
3258 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
3259 
3260 	/* word 3 */
3261 	msg_word++;
3262 	*msg_word = 0;
3263 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
3264 
3265 	/* word 4 */
3266 	msg_word++;
3267 	*msg_word = 0;
3268 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
3269 
3270 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
3271 
3272 	/* word 5 */
3273 	msg_word++;
3274 
3275 	/* word 6 */
3276 	msg_word++;
3277 	*msg_word = 0;
3278 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
3279 
3280 	/* word 7 */
3281 	msg_word++;
3282 	*msg_word = 0;
3283 	/*Using last 2 bits for pdev_id */
3284 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
3285 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
3286 
3287 	pkt = htt_htc_pkt_alloc(soc);
3288 	if (!pkt) {
3289 		qdf_nbuf_free(msg);
3290 		return QDF_STATUS_E_NOMEM;
3291 	}
3292 
3293 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3294 
3295 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3296 			dp_htt_h2t_send_complete_free_netbuf,
3297 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3298 			soc->htc_endpoint,
3299 			1); /* tag - not relevant here */
3300 
3301 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3302 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3303 	return 0;
3304 }
3305 
/* This workaround will be reverted once a proper HTT header definition is
 * available for HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in htt.h.
 */
3309 #if defined(WDI_EVENT_ENABLE)
/**
 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @stats_type_upload_mask: stats type requested by user
 * @mac_id: Mac id number
 *
 * Builds an HTT_H2T_MSG_TYPE_PPDU_STATS_CFG message carrying the PPDU stats
 * TLV bitmask and queues it to the target over the HTT HTC endpoint.
 *
 * return: QDF STATUS
 */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand head for HTT_CFG_STATS");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* word 0: msg type, pdev mask and requested TLV bitmask */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
			stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	/* completion callback frees the nbuf once HTC is done with it */
	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	DP_HTT_SEND_HTC_PKT(soc, pkt);
	return 0;
}
3390 #endif
3391