xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 68e058b24dcf3cb442b65abf94d56e3e27251f90)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_htt.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx_mon.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
30 #include "cdp_txrx_cmn_struct.h"
31 
32 #ifdef FEATURE_PERPKT_INFO
33 #include "dp_ratetable.h"
34 #endif
35 
/* Length of the T2H TLV header used when walking ext-stats TLV streams */
#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE

/* Initial number of HTC packet wrappers pre-allocated for the pool */
#define HTT_HTC_PKT_POOL_INIT_SIZE 64
/* Upper bound (bytes) on any single target-to-host HTT message */
#define HTT_T2H_MAX_MSG_SIZE 2048

/* Total nbuf size needed for an H2T message of msg_bytes payload:
 * payload plus HTC header and its alignment padding.
 */
#define HTT_MSG_BUF_SIZE(msg_bytes) \
	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)

/* Mask to extract the 2-bit pipe/PID field */
#define HTT_PID_BIT_MASK 0x3

/* Maximum length of an extended (multi-fragment) HTT message segment */
#define DP_EXT_MSG_LENGTH 2048
/* Hand an HTT packet to HTC; on successful queueing, track it on the
 * misc list so the wrapper and its nbuf can be reclaimed later.
 * NOTE(review): on htc_send_pkt() failure the pkt/nbuf are neither
 * listed nor freed here — ownership on failure rests with the caller
 * of this macro; confirm against HTC failure semantics.
 */
#define DP_HTT_SEND_HTC_PKT(soc, pkt)                            \
do {                                                             \
	if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==         \
					QDF_STATUS_SUCCESS)      \
		htt_htc_misc_pkt_list_add(soc, pkt);             \
} while (0)

/* Reserved bytes preceding the mgmt-ctrl TLV payload */
#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
55 
/*
 * Bitmap of HTT PPDU TLV types for Default mode.
 * The expansion is wrapped in one outer pair of parentheses so the macro
 * combines safely with other operators at its use site (without them,
 * e.g. "x & HTT_PPDU_DEFAULT_TLV_BITMAP" would bind '&' to only the
 * first (1 << ...) term, since '&' has higher precedence than '|').
 */
#define HTT_PPDU_DEFAULT_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))
66 
/*
 * Bitmap of HTT PPDU TLV types for Sniffer mode.
 * Wrapped in one outer pair of parentheses so the multi-term '|'
 * expansion cannot misbind with higher-precedence operators (such as
 * '&') at the macro's use site.
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV))
80 
/* 802.11 frame-control value identifying a data-type frame */
#define HTT_FRAMECTRL_DATATYPE 0x08
/* Max outstanding PPDU descriptors tracked per pdev */
#define HTT_PPDU_DESC_MAX_DEPTH 16
/* Sentinel peer id used while a scan (no real peer) is in progress */
#define DP_SCAN_PEER_ID 0xFFFF
84 
85 /*
86  * dp_tx_stats_update() - Update per-peer statistics
87  * @soc: Datapath soc handle
88  * @peer: Datapath peer handle
89  * @ppdu: PPDU Descriptor
90  * @ack_rssi: RSSI of last ack received
91  *
92  * Return: None
93  */
94 #ifdef FEATURE_PERPKT_INFO
95 static inline void
96 dp_tx_rate_stats_update(struct dp_peer *peer,
97 			struct cdp_tx_completion_ppdu_user *ppdu)
98 {
99 	uint32_t ratekbps = 0;
100 	uint32_t ppdu_tx_rate = 0;
101 
102 	if (!peer || !ppdu)
103 		return;
104 
105 	dp_peer_stats_notify(peer);
106 
107 	ratekbps = dp_getrateindex(ppdu->gi,
108 				   ppdu->mcs,
109 				   ppdu->nss,
110 				   ppdu->preamble,
111 				   ppdu->bw);
112 
113 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
114 
115 	if (!ratekbps)
116 		return;
117 
118 	dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
119 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
120 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
121 
122 	if (peer->vdev) {
123 		peer->vdev->stats.tx.last_tx_rate = ratekbps;
124 		peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
125 	}
126 }
127 
static void dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer,
		struct cdp_tx_completion_ppdu_user *ppdu, uint32_t ack_rssi)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	uint8_t preamble, mcs;
	uint16_t num_msdu;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;

	/* If the peer statistics are already processed as part of
	 * per-MSDU completion handler, do not process these again in per-PPDU
	 * indications */
	if (soc->process_tx_status)
		return;

	/* Completed MSDU count plus total bytes (success + retry + failed) */
	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			num_msdu, (ppdu->success_bytes +
				ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	/* PHY attribute histograms for this PPDU: GI, bandwidth, NSS, AC */
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	/* Ack RSSI is recorded only for unicast PPDUs that reported it */
	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	DP_STATS_INC(peer, tx.retries,
			(ppdu->long_retries + ppdu->short_retries));
	/*
	 * Per-preamble MCS histogram.  For each preamble type, an MCS that
	 * is out of range for that preamble is accounted in the overflow
	 * bucket (MAX_MCS - 1); in-range values go to their own bucket.
	 */
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));

	dp_tx_rate_stats_update(peer, ppdu);

	/* last_per: percentage of unicast MSDUs that were not successful */
	if (peer->stats.tx.ucast.num)
		peer->stats.tx.last_per = ((peer->stats.tx.ucast.num -
					peer->stats.tx.tx_success.num) * 100) /
					peer->stats.tx.ucast.num;

	/* Push the accumulated peer stats up to the control path, if hooked */
	if (soc->cdp_soc.ol_ops->update_dp_stats) {
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
				&peer->stats, ppdu->peer_id,
				UPDATE_PEER_STATS);

	}
}
206 #endif
207 
208 /*
209  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
210  * @htt_soc:	HTT SOC handle
211  *
212  * Return: Pointer to htc packet buffer
213  */
214 static struct dp_htt_htc_pkt *
215 htt_htc_pkt_alloc(struct htt_soc *soc)
216 {
217 	struct dp_htt_htc_pkt_union *pkt = NULL;
218 
219 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
220 	if (soc->htt_htc_pkt_freelist) {
221 		pkt = soc->htt_htc_pkt_freelist;
222 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
223 	}
224 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
225 
226 	if (pkt == NULL)
227 		pkt = qdf_mem_malloc(sizeof(*pkt));
228 	return &pkt->u.pkt; /* not actually a dereference */
229 }
230 
231 /*
232  * htt_htc_pkt_free() - Free HTC packet buffer
233  * @htt_soc:	HTT SOC handle
234  */
235 static void
236 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
237 {
238 	struct dp_htt_htc_pkt_union *u_pkt =
239 		(struct dp_htt_htc_pkt_union *)pkt;
240 
241 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
242 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
243 	soc->htt_htc_pkt_freelist = u_pkt;
244 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
245 }
246 
247 /*
248  * htt_htc_pkt_pool_free() - Free HTC packet pool
249  * @htt_soc:	HTT SOC handle
250  */
251 static void
252 htt_htc_pkt_pool_free(struct htt_soc *soc)
253 {
254 	struct dp_htt_htc_pkt_union *pkt, *next;
255 	pkt = soc->htt_htc_pkt_freelist;
256 	while (pkt) {
257 		next = pkt->u.next;
258 		qdf_mem_free(pkt);
259 		pkt = next;
260 	}
261 	soc->htt_htc_pkt_freelist = NULL;
262 }
263 
264 /*
265  * htt_htc_misc_pkt_list_trim() - trim misc list
266  * @htt_soc: HTT SOC handle
267  * @level: max no. of pkts in list
268  */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	/* Keep the first 'level' entries; free everything beyond them
	 * (node, its mapped nbuf, and the DMA mapping). */
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			/* terminate the retained portion of the list at the
			 * first trimmed node; once the threshold is crossed
			 * every subsequent node is freed, so prev being NULL
			 * on later iterations is harmless */
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
296 
297 /*
298  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
299  * @htt_soc:	HTT SOC handle
300  * @dp_htt_htc_pkt: pkt to be added to list
301  */
302 static void
303 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
304 {
305 	struct dp_htt_htc_pkt_union *u_pkt =
306 				(struct dp_htt_htc_pkt_union *)pkt;
307 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
308 							pkt->htc_pkt.Endpoint)
309 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
310 
311 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
312 	if (soc->htt_htc_pkt_misclist) {
313 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
314 		soc->htt_htc_pkt_misclist = u_pkt;
315 	} else {
316 		soc->htt_htc_pkt_misclist = u_pkt;
317 	}
318 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
319 
320 	/* only ce pipe size + tx_queue_depth could possibly be in use
321 	 * free older packets in the misclist
322 	 */
323 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
324 }
325 
326 /*
327  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
328  * @htt_soc:	HTT SOC handle
329  */
330 static void
331 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
332 {
333 	struct dp_htt_htc_pkt_union *pkt, *next;
334 	qdf_nbuf_t netbuf;
335 
336 	pkt = soc->htt_htc_pkt_misclist;
337 
338 	while (pkt) {
339 		next = pkt->u.next;
340 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
341 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
342 
343 		soc->stats.htc_pkt_free++;
344 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
345 			 "%s: Pkt free count %d",
346 			 __func__, soc->stats.htc_pkt_free);
347 
348 		qdf_nbuf_free(netbuf);
349 		qdf_mem_free(pkt);
350 		pkt = next;
351 	}
352 	soc->htt_htc_pkt_misclist = NULL;
353 }
354 
355 /*
356  * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
357  * @tgt_mac_addr:	Target MAC
358  * @buffer:		Output buffer
359  */
/*
 * htt_t2h_mac_addr_deswizzle() - Undo per-word byte swizzling of a MAC
 * address when host and target endianness differ.
 * @tgt_mac_addr: MAC address bytes as uploaded by the target
 * @buffer: scratch output buffer (used only on big-endian hosts)
 *
 * Return: pointer to the de-swizzled MAC address bytes
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The target->host upload byte-swaps every u_int32_t word to
	 * compensate for the endianness mismatch.  For a byte-array field
	 * such as the MAC address that swap scrambles the byte order, so
	 * it must be undone here.  The six MAC bytes straddle two 32-bit
	 * words: all four bytes of the first word, reversed, then the top
	 * two bytes of the second word.
	 */
	int idx;

	for (idx = 0; idx < 4; idx++)
		buffer[idx] = tgt_mac_addr[3 - idx];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/* Host and target agree on endianness: bytes are usable in place. */
	return tgt_mac_addr;
#endif
}
388 
389 /*
390  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
391  * @soc:	SOC handle
392  * @status:	Completion status
393  * @netbuf:	HTT buffer
394  */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	/* Default H2T send-done action: unconditionally release the
	 * message buffer; soc and status are unused here. */
	qdf_nbuf_free(netbuf);
}
401 
402 /*
403  * dp_htt_h2t_send_complete() - H2T completion handler
404  * @context:	Opaque context (HTT SOC handle)
405  * @htc_pkt:	HTC packet
406  */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	/* optional stage-2 completion callback stashed by the sender in
	 * the HTC packet's pPktContext */
	void (*send_complete_part2)(
		void *soc, A_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	send_complete_part2 = htc_pkt->pPktContext;

	/* recover the enclosing dp_htt_htc_pkt from its embedded HTC_PACKET */
	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2 != NULL) {
		/* the callback decides the netbuf's fate */
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}
433 
434 /*
435  * htt_h2t_ver_req_msg() - Send HTT version request message to target
436  * @htt_soc:	HTT SOC handle
437  *
438  * Return: 0 on success; error code on failure
439  */
440 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
441 {
442 	struct dp_htt_htc_pkt *pkt;
443 	qdf_nbuf_t msg;
444 	uint32_t *msg_word;
445 
446 	msg = qdf_nbuf_alloc(
447 		soc->osdev,
448 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
449 		/* reserve room for the HTC header */
450 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
451 	if (!msg)
452 		return QDF_STATUS_E_NOMEM;
453 
454 	/*
455 	 * Set the length of the message.
456 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
457 	 * separately during the below call to qdf_nbuf_push_head.
458 	 * The contribution from the HTC header is added separately inside HTC.
459 	 */
460 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
461 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
462 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
463 			__func__);
464 		return QDF_STATUS_E_FAILURE;
465 	}
466 
467 	/* fill in the message contents */
468 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
469 
470 	/* rewind beyond alignment pad to get to the HTC header reserved area */
471 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
472 
473 	*msg_word = 0;
474 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
475 
476 	pkt = htt_htc_pkt_alloc(soc);
477 	if (!pkt) {
478 		qdf_nbuf_free(msg);
479 		return QDF_STATUS_E_FAILURE;
480 	}
481 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
482 
483 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
484 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
485 		qdf_nbuf_len(msg), soc->htc_endpoint,
486 		1); /* tag - not relevant here */
487 
488 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
489 	DP_HTT_SEND_HTC_PKT(soc, pkt);
490 	return 0;
491 }
492 
493 /*
494  * htt_srng_setup() - Send SRNG setup message to target
495  * @htt_soc:	HTT SOC handle
496  * @mac_id:	MAC Id
497  * @hal_srng:	Opaque HAL SRNG pointer
498  * @hal_ring_type:	SRNG ring type
499  *
500  * Return: 0 on success; error code on failure
501  */
502 int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
503 	int hal_ring_type)
504 {
505 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
506 	struct dp_htt_htc_pkt *pkt;
507 	qdf_nbuf_t htt_msg;
508 	uint32_t *msg_word;
509 	struct hal_srng_params srng_params;
510 	qdf_dma_addr_t hp_addr, tp_addr;
511 	uint32_t ring_entry_size =
512 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
513 	int htt_ring_type, htt_ring_id;
514 
515 	/* Sizes should be set in 4-byte words */
516 	ring_entry_size = ring_entry_size >> 2;
517 
518 	htt_msg = qdf_nbuf_alloc(soc->osdev,
519 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
520 		/* reserve room for the HTC header */
521 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
522 	if (!htt_msg)
523 		goto fail0;
524 
525 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
526 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
527 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
528 
529 	switch (hal_ring_type) {
530 	case RXDMA_BUF:
531 #ifdef QCA_HOST2FW_RXBUF_RING
532 		if (srng_params.ring_id ==
533 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
534 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
535 			htt_ring_type = HTT_SW_TO_SW_RING;
536 #ifdef IPA_OFFLOAD
537 		} else if (srng_params.ring_id ==
538 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
539 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
540 			htt_ring_type = HTT_SW_TO_SW_RING;
541 #endif
542 #else
543 		if (srng_params.ring_id ==
544 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
545 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
546 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
547 			htt_ring_type = HTT_SW_TO_HW_RING;
548 #endif
549 		} else if (srng_params.ring_id ==
550 #ifdef IPA_OFFLOAD
551 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
552 #else
553 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
554 #endif
555 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
556 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
557 			htt_ring_type = HTT_SW_TO_HW_RING;
558 		} else {
559 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
560 				   "%s: Ring %d currently not supported",
561 				   __func__, srng_params.ring_id);
562 			goto fail1;
563 		}
564 
565 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
566 			  "%s: ring_type %d ring_id %d",
567 			  __func__, hal_ring_type, srng_params.ring_id);
568 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
569 			  "%s: hp_addr 0x%llx tp_addr 0x%llx",
570 			  __func__, (uint64_t)hp_addr, (uint64_t)tp_addr);
571 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
572 			  "%s: htt_ring_id %d", __func__, htt_ring_id);
573 		break;
574 	case RXDMA_MONITOR_BUF:
575 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
576 		htt_ring_type = HTT_SW_TO_HW_RING;
577 		break;
578 	case RXDMA_MONITOR_STATUS:
579 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
580 		htt_ring_type = HTT_SW_TO_HW_RING;
581 		break;
582 	case RXDMA_MONITOR_DST:
583 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
584 		htt_ring_type = HTT_HW_TO_SW_RING;
585 		break;
586 	case RXDMA_MONITOR_DESC:
587 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
588 		htt_ring_type = HTT_SW_TO_HW_RING;
589 		break;
590 	case RXDMA_DST:
591 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
592 		htt_ring_type = HTT_HW_TO_SW_RING;
593 		break;
594 
595 	default:
596 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
597 			"%s: Ring currently not supported", __func__);
598 			goto fail1;
599 	}
600 
601 	/*
602 	 * Set the length of the message.
603 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
604 	 * separately during the below call to qdf_nbuf_push_head.
605 	 * The contribution from the HTC header is added separately inside HTC.
606 	 */
607 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
608 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
609 			"%s: Failed to expand head for SRING_SETUP msg",
610 			__func__);
611 		return QDF_STATUS_E_FAILURE;
612 	}
613 
614 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
615 
616 	/* rewind beyond alignment pad to get to the HTC header reserved area */
617 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
618 
619 	/* word 0 */
620 	*msg_word = 0;
621 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
622 
623 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
624 			(htt_ring_type == HTT_HW_TO_SW_RING))
625 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
626 			 DP_SW2HW_MACID(mac_id));
627 	else
628 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
629 
630 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
631 		  "%s: mac_id %d", __func__, mac_id);
632 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
633 	/* TODO: Discuss with FW on changing this to unique ID and using
634 	 * htt_ring_type to send the type of ring
635 	 */
636 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
637 
638 	/* word 1 */
639 	msg_word++;
640 	*msg_word = 0;
641 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
642 		srng_params.ring_base_paddr & 0xffffffff);
643 
644 	/* word 2 */
645 	msg_word++;
646 	*msg_word = 0;
647 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
648 		(uint64_t)srng_params.ring_base_paddr >> 32);
649 
650 	/* word 3 */
651 	msg_word++;
652 	*msg_word = 0;
653 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
654 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
655 		(ring_entry_size * srng_params.num_entries));
656 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
657 		  "%s: entry_size %d", __func__,
658 			 ring_entry_size);
659 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
660 		  "%s: num_entries %d", __func__,
661 			 srng_params.num_entries);
662 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
663 		  "%s: ring_size %d", __func__,
664 			 (ring_entry_size * srng_params.num_entries));
665 	if (htt_ring_type == HTT_SW_TO_HW_RING)
666 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
667 						*msg_word, 1);
668 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
669 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
670 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
671 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
672 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
673 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
674 
675 	/* word 4 */
676 	msg_word++;
677 	*msg_word = 0;
678 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
679 		hp_addr & 0xffffffff);
680 
681 	/* word 5 */
682 	msg_word++;
683 	*msg_word = 0;
684 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
685 		(uint64_t)hp_addr >> 32);
686 
687 	/* word 6 */
688 	msg_word++;
689 	*msg_word = 0;
690 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
691 		tp_addr & 0xffffffff);
692 
693 	/* word 7 */
694 	msg_word++;
695 	*msg_word = 0;
696 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
697 		(uint64_t)tp_addr >> 32);
698 
699 	/* word 8 */
700 	msg_word++;
701 	*msg_word = 0;
702 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
703 		srng_params.msi_addr & 0xffffffff);
704 
705 	/* word 9 */
706 	msg_word++;
707 	*msg_word = 0;
708 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
709 		(uint64_t)(srng_params.msi_addr) >> 32);
710 
711 	/* word 10 */
712 	msg_word++;
713 	*msg_word = 0;
714 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
715 		srng_params.msi_data);
716 
717 	/* word 11 */
718 	msg_word++;
719 	*msg_word = 0;
720 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
721 		srng_params.intr_batch_cntr_thres_entries *
722 		ring_entry_size);
723 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
724 		srng_params.intr_timer_thres_us >> 3);
725 
726 	/* word 12 */
727 	msg_word++;
728 	*msg_word = 0;
729 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
730 		/* TODO: Setting low threshold to 1/8th of ring size - see
731 		 * if this needs to be configurable
732 		 */
733 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
734 			srng_params.low_threshold);
735 	}
736 	/* "response_required" field should be set if a HTT response message is
737 	 * required after setting up the ring.
738 	 */
739 	pkt = htt_htc_pkt_alloc(soc);
740 	if (!pkt)
741 		goto fail1;
742 
743 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
744 
745 	SET_HTC_PACKET_INFO_TX(
746 		&pkt->htc_pkt,
747 		dp_htt_h2t_send_complete_free_netbuf,
748 		qdf_nbuf_data(htt_msg),
749 		qdf_nbuf_len(htt_msg),
750 		soc->htc_endpoint,
751 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
752 
753 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
754 	DP_HTT_SEND_HTC_PKT(soc, pkt);
755 
756 	return QDF_STATUS_SUCCESS;
757 
758 fail1:
759 	qdf_nbuf_free(htt_msg);
760 fail0:
761 	return QDF_STATUS_E_FAILURE;
762 }
763 
764 /*
765  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
766  * config message to target
767  * @htt_soc:	HTT SOC handle
768  * @pdev_id:	PDEV Id
769  * @hal_srng:	Opaque HAL SRNG pointer
770  * @hal_ring_type:	SRNG ring type
771  * @ring_buf_size:	SRNG buffer size
772  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
773  * Return: 0 on success; error code on failure
774  */
775 int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
776 	int hal_ring_type, int ring_buf_size,
777 	struct htt_rx_ring_tlv_filter *htt_tlv_filter)
778 {
779 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
780 	struct dp_htt_htc_pkt *pkt;
781 	qdf_nbuf_t htt_msg;
782 	uint32_t *msg_word;
783 	struct hal_srng_params srng_params;
784 	uint32_t htt_ring_type, htt_ring_id;
785 	uint32_t tlv_filter;
786 
787 	htt_msg = qdf_nbuf_alloc(soc->osdev,
788 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
789 	/* reserve room for the HTC header */
790 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
791 	if (!htt_msg)
792 		goto fail0;
793 
794 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
795 
796 	switch (hal_ring_type) {
797 	case RXDMA_BUF:
798 #if QCA_HOST2FW_RXBUF_RING
799 		htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
800 		htt_ring_type = HTT_SW_TO_SW_RING;
801 #else
802 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
803 		htt_ring_type = HTT_SW_TO_HW_RING;
804 #endif
805 		break;
806 	case RXDMA_MONITOR_BUF:
807 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
808 		htt_ring_type = HTT_SW_TO_HW_RING;
809 		break;
810 	case RXDMA_MONITOR_STATUS:
811 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
812 		htt_ring_type = HTT_SW_TO_HW_RING;
813 		break;
814 	case RXDMA_MONITOR_DST:
815 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
816 		htt_ring_type = HTT_HW_TO_SW_RING;
817 		break;
818 	case RXDMA_MONITOR_DESC:
819 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
820 		htt_ring_type = HTT_SW_TO_HW_RING;
821 		break;
822 	case RXDMA_DST:
823 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
824 		htt_ring_type = HTT_HW_TO_SW_RING;
825 		break;
826 
827 	default:
828 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
829 			"%s: Ring currently not supported", __func__);
830 		goto fail1;
831 	}
832 
833 	/*
834 	 * Set the length of the message.
835 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
836 	 * separately during the below call to qdf_nbuf_push_head.
837 	 * The contribution from the HTC header is added separately inside HTC.
838 	 */
839 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
840 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
841 			"%s: Failed to expand head for RX Ring Cfg msg",
842 			__func__);
843 		goto fail1; /* failure */
844 	}
845 
846 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
847 
848 	/* rewind beyond alignment pad to get to the HTC header reserved area */
849 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
850 
851 	/* word 0 */
852 	*msg_word = 0;
853 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
854 
855 	/*
856 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
857 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
858 	 */
859 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
860 			htt_ring_type == HTT_SW_TO_HW_RING)
861 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
862 						DP_SW2HW_MACID(pdev_id));
863 
864 	/* TODO: Discuss with FW on changing this to unique ID and using
865 	 * htt_ring_type to send the type of ring
866 	 */
867 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
868 
869 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
870 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
871 
872 	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
873 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
874 
875 	/* word 1 */
876 	msg_word++;
877 	*msg_word = 0;
878 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
879 		ring_buf_size);
880 
881 	/* word 2 */
882 	msg_word++;
883 	*msg_word = 0;
884 
885 	if (htt_tlv_filter->enable_fp) {
886 		/* TYPE: MGMT */
887 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
888 			FP, MGMT, 0000,
889 			(htt_tlv_filter->fp_mgmt_filter &
890 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
891 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
892 			FP, MGMT, 0001,
893 			(htt_tlv_filter->fp_mgmt_filter &
894 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
895 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
896 			FP, MGMT, 0010,
897 			(htt_tlv_filter->fp_mgmt_filter &
898 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
899 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
900 			FP, MGMT, 0011,
901 			(htt_tlv_filter->fp_mgmt_filter &
902 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
903 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
904 			FP, MGMT, 0100,
905 			(htt_tlv_filter->fp_mgmt_filter &
906 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
907 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
908 			FP, MGMT, 0101,
909 			(htt_tlv_filter->fp_mgmt_filter &
910 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
911 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
912 			FP, MGMT, 0110,
913 			(htt_tlv_filter->fp_mgmt_filter &
914 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
915 		/* reserved */
916 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
917 			MGMT, 0111,
918 			(htt_tlv_filter->fp_mgmt_filter &
919 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
920 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
921 			FP, MGMT, 1000,
922 			(htt_tlv_filter->fp_mgmt_filter &
923 			FILTER_MGMT_BEACON) ? 1 : 0);
924 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
925 			FP, MGMT, 1001,
926 			(htt_tlv_filter->fp_mgmt_filter &
927 			FILTER_MGMT_ATIM) ? 1 : 0);
928 	}
929 
930 	if (htt_tlv_filter->enable_md) {
931 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
932 				MGMT, 0000, 1);
933 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
934 				MGMT, 0001, 1);
935 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
936 				MGMT, 0010, 1);
937 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
938 				MGMT, 0011, 1);
939 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
940 				MGMT, 0100, 1);
941 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
942 				MGMT, 0101, 1);
943 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
944 				MGMT, 0110, 1);
945 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
946 				MGMT, 0111, 1);
947 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
948 				MGMT, 1000, 1);
949 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
950 				MGMT, 1001, 1);
951 	}
952 
953 	if (htt_tlv_filter->enable_mo) {
954 		/* TYPE: MGMT */
955 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
956 			MO, MGMT, 0000,
957 			(htt_tlv_filter->mo_mgmt_filter &
958 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
959 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
960 			MO, MGMT, 0001,
961 			(htt_tlv_filter->mo_mgmt_filter &
962 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
963 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
964 			MO, MGMT, 0010,
965 			(htt_tlv_filter->mo_mgmt_filter &
966 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
967 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
968 			MO, MGMT, 0011,
969 			(htt_tlv_filter->mo_mgmt_filter &
970 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
971 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
972 			MO, MGMT, 0100,
973 			(htt_tlv_filter->mo_mgmt_filter &
974 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
975 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
976 			MO, MGMT, 0101,
977 			(htt_tlv_filter->mo_mgmt_filter &
978 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
979 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
980 			MO, MGMT, 0110,
981 			(htt_tlv_filter->mo_mgmt_filter &
982 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
983 		/* reserved */
984 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
985 			MGMT, 0111,
986 			(htt_tlv_filter->mo_mgmt_filter &
987 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
988 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
989 			MO, MGMT, 1000,
990 			(htt_tlv_filter->mo_mgmt_filter &
991 			FILTER_MGMT_BEACON) ? 1 : 0);
992 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
993 			MO, MGMT, 1001,
994 			(htt_tlv_filter->mo_mgmt_filter &
995 			FILTER_MGMT_ATIM) ? 1 : 0);
996 	}
997 
998 	/* word 3 */
999 	msg_word++;
1000 	*msg_word = 0;
1001 
1002 	if (htt_tlv_filter->enable_fp) {
1003 		/* TYPE: MGMT */
1004 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1005 			FP, MGMT, 1010,
1006 			(htt_tlv_filter->fp_mgmt_filter &
1007 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1008 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1009 			FP, MGMT, 1011,
1010 			(htt_tlv_filter->fp_mgmt_filter &
1011 			FILTER_MGMT_AUTH) ? 1 : 0);
1012 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1013 			FP, MGMT, 1100,
1014 			(htt_tlv_filter->fp_mgmt_filter &
1015 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1016 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1017 			FP, MGMT, 1101,
1018 			(htt_tlv_filter->fp_mgmt_filter &
1019 			FILTER_MGMT_ACTION) ? 1 : 0);
1020 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1021 			FP, MGMT, 1110,
1022 			(htt_tlv_filter->fp_mgmt_filter &
1023 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1024 		/* reserved*/
1025 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1026 			MGMT, 1111,
1027 			(htt_tlv_filter->fp_mgmt_filter &
1028 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1029 	}
1030 
1031 	if (htt_tlv_filter->enable_md) {
1032 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1033 				MGMT, 1010, 1);
1034 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1035 				MGMT, 1011, 1);
1036 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1037 				MGMT, 1100, 1);
1038 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1039 				MGMT, 1101, 1);
1040 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1041 				MGMT, 1110, 1);
1042 	}
1043 
1044 	if (htt_tlv_filter->enable_mo) {
1045 		/* TYPE: MGMT */
1046 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1047 			MO, MGMT, 1010,
1048 			(htt_tlv_filter->mo_mgmt_filter &
1049 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1050 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1051 			MO, MGMT, 1011,
1052 			(htt_tlv_filter->mo_mgmt_filter &
1053 			FILTER_MGMT_AUTH) ? 1 : 0);
1054 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1055 			MO, MGMT, 1100,
1056 			(htt_tlv_filter->mo_mgmt_filter &
1057 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1058 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1059 			MO, MGMT, 1101,
1060 			(htt_tlv_filter->mo_mgmt_filter &
1061 			FILTER_MGMT_ACTION) ? 1 : 0);
1062 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1063 			MO, MGMT, 1110,
1064 			(htt_tlv_filter->mo_mgmt_filter &
1065 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1066 		/* reserved*/
1067 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1068 			MGMT, 1111,
1069 			(htt_tlv_filter->mo_mgmt_filter &
1070 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1071 	}
1072 
1073 	/* word 4 */
1074 	msg_word++;
1075 	*msg_word = 0;
1076 
1077 	if (htt_tlv_filter->enable_fp) {
1078 		/* TYPE: CTRL */
1079 		/* reserved */
1080 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1081 			CTRL, 0000,
1082 			(htt_tlv_filter->fp_ctrl_filter &
1083 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1084 		/* reserved */
1085 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1086 			CTRL, 0001,
1087 			(htt_tlv_filter->fp_ctrl_filter &
1088 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1089 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1090 			CTRL, 0010,
1091 			(htt_tlv_filter->fp_ctrl_filter &
1092 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1093 		/* reserved */
1094 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1095 			CTRL, 0011,
1096 			(htt_tlv_filter->fp_ctrl_filter &
1097 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1098 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1099 			CTRL, 0100,
1100 			(htt_tlv_filter->fp_ctrl_filter &
1101 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1102 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1103 			CTRL, 0101,
1104 			(htt_tlv_filter->fp_ctrl_filter &
1105 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1106 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1107 			CTRL, 0110,
1108 			(htt_tlv_filter->fp_ctrl_filter &
1109 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1110 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1111 			CTRL, 0111,
1112 			(htt_tlv_filter->fp_ctrl_filter &
1113 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1114 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1115 			CTRL, 1000,
1116 			(htt_tlv_filter->fp_ctrl_filter &
1117 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1118 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1119 			CTRL, 1001,
1120 			(htt_tlv_filter->fp_ctrl_filter &
1121 			FILTER_CTRL_BA) ? 1 : 0);
1122 	}
1123 
1124 	if (htt_tlv_filter->enable_md) {
1125 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1126 				CTRL, 0000, 1);
1127 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1128 				CTRL, 0001, 1);
1129 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1130 				CTRL, 0010, 1);
1131 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1132 				CTRL, 0011, 1);
1133 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1134 				CTRL, 0100, 1);
1135 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1136 				CTRL, 0101, 1);
1137 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1138 				CTRL, 0110, 1);
1139 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1140 				CTRL, 0111, 1);
1141 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1142 				CTRL, 1000, 1);
1143 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1144 				CTRL, 1001, 1);
1145 	}
1146 
1147 	if (htt_tlv_filter->enable_mo) {
1148 		/* TYPE: CTRL */
1149 		/* reserved */
1150 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1151 			CTRL, 0000,
1152 			(htt_tlv_filter->mo_ctrl_filter &
1153 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1154 		/* reserved */
1155 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1156 			CTRL, 0001,
1157 			(htt_tlv_filter->mo_ctrl_filter &
1158 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1159 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1160 			CTRL, 0010,
1161 			(htt_tlv_filter->mo_ctrl_filter &
1162 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1163 		/* reserved */
1164 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1165 			CTRL, 0011,
1166 			(htt_tlv_filter->mo_ctrl_filter &
1167 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1168 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1169 			CTRL, 0100,
1170 			(htt_tlv_filter->mo_ctrl_filter &
1171 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1172 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1173 			CTRL, 0101,
1174 			(htt_tlv_filter->mo_ctrl_filter &
1175 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1176 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1177 			CTRL, 0110,
1178 			(htt_tlv_filter->mo_ctrl_filter &
1179 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1180 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1181 			CTRL, 0111,
1182 			(htt_tlv_filter->mo_ctrl_filter &
1183 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1184 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1185 			CTRL, 1000,
1186 			(htt_tlv_filter->mo_ctrl_filter &
1187 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1188 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1189 			CTRL, 1001,
1190 			(htt_tlv_filter->mo_ctrl_filter &
1191 			FILTER_CTRL_BA) ? 1 : 0);
1192 	}
1193 
1194 	/* word 5 */
1195 	msg_word++;
1196 	*msg_word = 0;
1197 	if (htt_tlv_filter->enable_fp) {
1198 		/* TYPE: CTRL */
1199 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1200 			CTRL, 1010,
1201 			(htt_tlv_filter->fp_ctrl_filter &
1202 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1203 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1204 			CTRL, 1011,
1205 			(htt_tlv_filter->fp_ctrl_filter &
1206 			FILTER_CTRL_RTS) ? 1 : 0);
1207 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1208 			CTRL, 1100,
1209 			(htt_tlv_filter->fp_ctrl_filter &
1210 			FILTER_CTRL_CTS) ? 1 : 0);
1211 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1212 			CTRL, 1101,
1213 			(htt_tlv_filter->fp_ctrl_filter &
1214 			FILTER_CTRL_ACK) ? 1 : 0);
1215 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1216 			CTRL, 1110,
1217 			(htt_tlv_filter->fp_ctrl_filter &
1218 			FILTER_CTRL_CFEND) ? 1 : 0);
1219 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1220 			CTRL, 1111,
1221 			(htt_tlv_filter->fp_ctrl_filter &
1222 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1223 		/* TYPE: DATA */
1224 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1225 			DATA, MCAST,
1226 			(htt_tlv_filter->fp_data_filter &
1227 			FILTER_DATA_MCAST) ? 1 : 0);
1228 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1229 			DATA, UCAST,
1230 			(htt_tlv_filter->fp_data_filter &
1231 			FILTER_DATA_UCAST) ? 1 : 0);
1232 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1233 			DATA, NULL,
1234 			(htt_tlv_filter->fp_data_filter &
1235 			FILTER_DATA_NULL) ? 1 : 0);
1236 	}
1237 
1238 	if (htt_tlv_filter->enable_md) {
1239 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1240 				CTRL, 1010, 1);
1241 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1242 				CTRL, 1011, 1);
1243 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1244 				CTRL, 1100, 1);
1245 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1246 				CTRL, 1101, 1);
1247 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1248 				CTRL, 1110, 1);
1249 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1250 				CTRL, 1111, 1);
1251 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1252 				DATA, MCAST, 1);
1253 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1254 				DATA, UCAST, 1);
1255 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1256 				DATA, NULL, 1);
1257 	}
1258 
1259 	if (htt_tlv_filter->enable_mo) {
1260 		/* TYPE: CTRL */
1261 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1262 			CTRL, 1010,
1263 			(htt_tlv_filter->mo_ctrl_filter &
1264 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1265 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1266 			CTRL, 1011,
1267 			(htt_tlv_filter->mo_ctrl_filter &
1268 			FILTER_CTRL_RTS) ? 1 : 0);
1269 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1270 			CTRL, 1100,
1271 			(htt_tlv_filter->mo_ctrl_filter &
1272 			FILTER_CTRL_CTS) ? 1 : 0);
1273 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1274 			CTRL, 1101,
1275 			(htt_tlv_filter->mo_ctrl_filter &
1276 			FILTER_CTRL_ACK) ? 1 : 0);
1277 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1278 			CTRL, 1110,
1279 			(htt_tlv_filter->mo_ctrl_filter &
1280 			FILTER_CTRL_CFEND) ? 1 : 0);
1281 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1282 			CTRL, 1111,
1283 			(htt_tlv_filter->mo_ctrl_filter &
1284 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1285 		/* TYPE: DATA */
1286 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1287 			DATA, MCAST,
1288 			(htt_tlv_filter->mo_data_filter &
1289 			FILTER_DATA_MCAST) ? 1 : 0);
1290 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1291 			DATA, UCAST,
1292 			(htt_tlv_filter->mo_data_filter &
1293 			FILTER_DATA_UCAST) ? 1 : 0);
1294 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1295 			DATA, NULL,
1296 			(htt_tlv_filter->mo_data_filter &
1297 			FILTER_DATA_NULL) ? 1 : 0);
1298 	}
1299 
1300 	/* word 6 */
1301 	msg_word++;
1302 	*msg_word = 0;
1303 	tlv_filter = 0;
1304 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1305 		htt_tlv_filter->mpdu_start);
1306 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1307 		htt_tlv_filter->msdu_start);
1308 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1309 		htt_tlv_filter->packet);
1310 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1311 		htt_tlv_filter->msdu_end);
1312 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1313 		htt_tlv_filter->mpdu_end);
1314 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1315 		htt_tlv_filter->packet_header);
1316 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1317 		htt_tlv_filter->attention);
1318 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1319 		htt_tlv_filter->ppdu_start);
1320 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1321 		htt_tlv_filter->ppdu_end);
1322 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1323 		htt_tlv_filter->ppdu_end_user_stats);
1324 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1325 		PPDU_END_USER_STATS_EXT,
1326 		htt_tlv_filter->ppdu_end_user_stats_ext);
1327 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1328 		htt_tlv_filter->ppdu_end_status_done);
1329 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1330 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1331 		 htt_tlv_filter->header_per_msdu);
1332 
1333 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1334 
1335 	/* "response_required" field should be set if a HTT response message is
1336 	 * required after setting up the ring.
1337 	 */
1338 	pkt = htt_htc_pkt_alloc(soc);
1339 	if (!pkt)
1340 		goto fail1;
1341 
1342 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1343 
1344 	SET_HTC_PACKET_INFO_TX(
1345 		&pkt->htc_pkt,
1346 		dp_htt_h2t_send_complete_free_netbuf,
1347 		qdf_nbuf_data(htt_msg),
1348 		qdf_nbuf_len(htt_msg),
1349 		soc->htc_endpoint,
1350 		1); /* tag - not relevant here */
1351 
1352 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1353 	DP_HTT_SEND_HTC_PKT(soc, pkt);
1354 	return QDF_STATUS_SUCCESS;
1355 
1356 fail1:
1357 	qdf_nbuf_free(htt_msg);
1358 fail0:
1359 	return QDF_STATUS_E_FAILURE;
1360 }
1361 
1362 #if defined(CONFIG_WIN) && WDI_EVENT_ENABLE
1363 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1364 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1365 
1366 {
1367 	uint32_t pdev_id;
1368 	uint32_t *msg_word = NULL;
1369 	uint32_t msg_remain_len = 0;
1370 
1371 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1372 
1373 	/*COOKIE MSB*/
1374 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1375 
1376 	/* stats message length + 16 size of HTT header*/
1377 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1378 				(uint32_t)DP_EXT_MSG_LENGTH);
1379 
1380 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1381 			msg_word,  msg_remain_len,
1382 			WDI_NO_VAL, pdev_id);
1383 
1384 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1385 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1386 	}
1387 	/* Need to be freed here as WDI handler will
1388 	 * make a copy of pkt to send data to application
1389 	 */
1390 	qdf_nbuf_free(htt_msg);
1391 	return QDF_STATUS_SUCCESS;
1392 }
1393 #else
1394 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1395 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1396 {
1397 	return QDF_STATUS_E_NOSUPPORT;
1398 }
1399 #endif
1400 
1401 /**
1402  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1403  * @htt_stats: htt stats info
1404  *
1405  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1406  * contains sub messages which are identified by a TLV header.
1407  * In this function we will process the stream of T2H messages and read all the
1408  * TLV contained in the message.
1409  *
 * The following cases have been taken care of
1411  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1412  *		In this case the buffer will contain multiple tlvs.
1413  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1414  *		Only one tlv will be contained in the HTT message and this tag
1415  *		will extend onto the next buffer.
1416  * Case 3: When the buffer is the continuation of the previous message
1417  * Case 4: tlv length is 0. which will indicate the end of message
1418  *
1419  * return: void
1420  */
/*
 * Drains htt_stats->msg and parses the TLV stream it carries.  A TLV that
 * straddles a buffer boundary is reassembled into a temporary heap buffer
 * (tlv_buf_head/tlv_buf_tail) before being dispatched to print/copy.
 */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	uint8_t *tlv_buf_head = NULL;	/* reassembly buffer for a split TLV */
	uint8_t *tlv_buf_tail = NULL;	/* write cursor into tlv_buf_head */
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val;
	int cookie_msb;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		/* word 1: cookie LSB.  Non-zero requests the raw buffer be
		 * forwarded via WDI instead of being parsed here.
		 * NOTE(review): cookie encoding inferred from the
		 * dp_send_htt_stat_resp() call below - confirm.
		 */
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		/* word 2: cookie MSB.  Low bits select the pdev; any higher
		 * bit switches from console print to copy-into-pdev-stats.
		 */
		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		if (cookie_msb >> 2) {
			copy_stats = true;
		}

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero TLV length marks end of the message */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* a freshly-read TLV length excludes its header */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3: final fragment of a split TLV */
				if (tlv_buf_head) {
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1: TLV fully inside buffer */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev, tlv_type, tlv_start);
				else
					dp_htt_stats_print_tag(tlv_type, tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2: TLV spills into the next buffer;
				 * stash what this buffer holds
				 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* free the buffer being parsed plus everything still queued */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
1562 
1563 void htt_t2h_stats_handler(void *context)
1564 {
1565 	struct dp_soc *soc = (struct dp_soc *)context;
1566 	struct htt_stats_context htt_stats;
1567 	uint32_t *msg_word;
1568 	qdf_nbuf_t htt_msg = NULL;
1569 	uint8_t done;
1570 	uint8_t rem_stats;
1571 
1572 	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
1573 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1574 			"soc: 0x%pK, init_done: %d", soc,
1575 			qdf_atomic_read(&soc->cmn_init_done));
1576 		return;
1577 	}
1578 
1579 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1580 	qdf_nbuf_queue_init(&htt_stats.msg);
1581 
1582 	/* pull one completed stats from soc->htt_stats_msg and process */
1583 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1584 	if (!soc->htt_stats.num_stats) {
1585 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1586 		return;
1587 	}
1588 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1589 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1590 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1591 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1592 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1593 		/*
1594 		 * Done bit signifies that this is the last T2H buffer in the
1595 		 * stream of HTT EXT STATS message
1596 		 */
1597 		if (done)
1598 			break;
1599 	}
1600 	rem_stats = --soc->htt_stats.num_stats;
1601 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1602 
1603 	dp_process_htt_stat_msg(&htt_stats, soc);
1604 	/* If there are more stats to process, schedule stats work again */
1605 	if (rem_stats)
1606 		qdf_sched_work(0, &soc->htt_stats.work);
1607 }
1608 
1609 /*
1610  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1611  * if a new peer id arrives in a PPDU
1612  * pdev: DP pdev handle
1613  * @peer_id : peer unique identifier
1614  * @ppdu_info: per ppdu tlv structure
1615  *
1616  * return:user index to be populated
1617  */
1618 #ifdef FEATURE_PERPKT_INFO
1619 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
1620 						uint16_t peer_id,
1621 						struct ppdu_info *ppdu_info)
1622 {
1623 	uint8_t user_index = 0;
1624 	struct cdp_tx_completion_ppdu *ppdu_desc;
1625 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1626 
1627 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1628 
1629 	while ((user_index + 1) <= ppdu_info->last_user) {
1630 		ppdu_user_desc = &ppdu_desc->user[user_index];
1631 		if (ppdu_user_desc->peer_id != peer_id) {
1632 			user_index++;
1633 			continue;
1634 		} else {
1635 			/* Max users possible is 8 so user array index should
1636 			 * not exceed 7
1637 			 */
1638 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
1639 			return user_index;
1640 		}
1641 	}
1642 
1643 	ppdu_info->last_user++;
1644 	/* Max users possible is 8 so last user should not exceed 8 */
1645 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
1646 	return ppdu_info->last_user - 1;
1647 }
1648 
1649 /*
1650  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
1651  * pdev: DP pdev handle
1652  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
1653  * @ppdu_info: per ppdu tlv structure
1654  *
1655  * return:void
1656  */
1657 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
1658 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
1659 {
1660 	uint16_t frame_type;
1661 	uint16_t freq;
1662 	struct dp_soc *soc = NULL;
1663 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
1664 
1665 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1666 
1667 	tag_buf += 2;
1668 	ppdu_desc->num_users =
1669 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
1670 	tag_buf++;
1671 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
1672 
1673 	if ((frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) ||
1674 			(frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU))
1675 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
1676 	else if ((frame_type == HTT_STATS_FTYPE_SGEN_MU_BAR) ||
1677 		 (frame_type == HTT_STATS_FTYPE_SGEN_BAR))
1678 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
1679 	else
1680 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
1681 
1682 	tag_buf += 2;
1683 	ppdu_desc->tx_duration = *tag_buf;
1684 	tag_buf += 3;
1685 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
1686 
1687 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
1688 					ppdu_desc->tx_duration;
1689 	/* Ack time stamp is same as end time stamp*/
1690 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
1691 
1692 	tag_buf++;
1693 
1694 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
1695 	if (freq != ppdu_desc->channel) {
1696 		soc = pdev->soc;
1697 		ppdu_desc->channel = freq;
1698 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
1699 			pdev->operating_channel =
1700 		soc->cdp_soc.ol_ops->freq_to_channel(pdev->ctrl_pdev, freq);
1701 	}
1702 
1703 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
1704 }
1705 
1706 /*
1707  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
1708  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
1709  * @ppdu_info: per ppdu tlv structure
1710  *
1711  * return:void
1712  */
1713 static void dp_process_ppdu_stats_user_common_tlv(
1714 		struct dp_pdev *pdev, uint32_t *tag_buf,
1715 		struct ppdu_info *ppdu_info)
1716 {
1717 	uint16_t peer_id;
1718 	struct dp_peer *peer;
1719 	struct cdp_tx_completion_ppdu *ppdu_desc;
1720 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1721 	uint8_t curr_user_index = 0;
1722 
1723 	ppdu_desc =
1724 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1725 
1726 	tag_buf++;
1727 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
1728 
1729 	curr_user_index =
1730 		dp_get_ppdu_info_user_index(pdev,
1731 					    peer_id, ppdu_info);
1732 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1733 
1734 	if (peer_id == DP_SCAN_PEER_ID) {
1735 		ppdu_desc->vdev_id =
1736 			HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
1737 	} else {
1738 		peer = dp_peer_find_by_id(pdev->soc, peer_id);
1739 		if (!peer)
1740 			return;
1741 	}
1742 
1743 	ppdu_user_desc->peer_id = peer_id;
1744 
1745 	tag_buf++;
1746 
1747 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
1748 		ppdu_user_desc->delayed_ba = 1;
1749 	}
1750 
1751 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
1752 		ppdu_user_desc->is_mcast = true;
1753 		ppdu_user_desc->mpdu_tried_mcast =
1754 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1755 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
1756 	} else {
1757 		ppdu_user_desc->mpdu_tried_ucast =
1758 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1759 	}
1760 
1761 	tag_buf++;
1762 
1763 	ppdu_user_desc->qos_ctrl =
1764 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
1765 	ppdu_user_desc->frame_ctrl =
1766 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
1767 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
1768 
1769 	if (ppdu_user_desc->delayed_ba) {
1770 		ppdu_user_desc->mpdu_success = 0;
1771 		ppdu_user_desc->mpdu_tried_mcast = 0;
1772 		ppdu_user_desc->mpdu_tried_ucast = 0;
1773 	}
1774 }
1775 
1776 
1777 /**
1778  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
1779  * @pdev: DP pdev handle
1780  * @tag_buf: T2H message buffer carrying the user rate TLV
1781  * @ppdu_info: per ppdu tlv structure
1782  *
1783  * return:void
1784  */
1785 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
1786 		uint32_t *tag_buf,
1787 		struct ppdu_info *ppdu_info)
1788 {
1789 	uint16_t peer_id;
1790 	struct dp_peer *peer;
1791 	struct cdp_tx_completion_ppdu *ppdu_desc;
1792 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1793 	uint8_t curr_user_index = 0;
1794 	struct dp_vdev *vdev;
1795 
1796 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1797 
1798 	tag_buf++;
1799 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
1800 
1801 	curr_user_index =
1802 		dp_get_ppdu_info_user_index(pdev,
1803 					    peer_id, ppdu_info);
1804 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1805 
1806 	if (peer_id == DP_SCAN_PEER_ID) {
1807 		vdev =
1808 		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
1809 							  ppdu_desc->vdev_id);
1810 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
1811 			     DP_MAC_ADDR_LEN);
1812 	} else {
1813 		peer = dp_peer_find_by_id(pdev->soc, peer_id);
1814 		if (!peer)
1815 			return;
1816 		qdf_mem_copy(ppdu_user_desc->mac_addr,
1817 			     peer->mac_addr.raw, DP_MAC_ADDR_LEN);
1818 	}
1819 
1820 	ppdu_user_desc->peer_id = peer_id;
1821 
1822 	ppdu_user_desc->tid =
1823 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
1824 
1825 	tag_buf += 2;
1826 
1827 	ppdu_user_desc->ru_tones =
1828 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
1829 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
1830 
1831 	tag_buf += 2;
1832 
1833 	ppdu_user_desc->ppdu_type =
1834 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
1835 
1836 	tag_buf++;
1837 	ppdu_user_desc->tx_rate = *tag_buf;
1838 
1839 	ppdu_user_desc->ltf_size =
1840 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
1841 	ppdu_user_desc->stbc =
1842 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
1843 	ppdu_user_desc->he_re =
1844 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
1845 	ppdu_user_desc->txbf =
1846 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
1847 	ppdu_user_desc->bw =
1848 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
1849 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
1850 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
1851 	ppdu_user_desc->preamble =
1852 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
1853 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
1854 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
1855 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
1856 }
1857 
1858 /*
1859  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
1860  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1861  * pdev: DP PDEV handle
1862  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1863  * @ppdu_info: per ppdu tlv structure
1864  *
1865  * return:void
1866  */
1867 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
1868 		struct dp_pdev *pdev, uint32_t *tag_buf,
1869 		struct ppdu_info *ppdu_info)
1870 {
1871 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
1872 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
1873 
1874 	struct cdp_tx_completion_ppdu *ppdu_desc;
1875 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1876 	uint8_t curr_user_index = 0;
1877 	uint16_t peer_id;
1878 	struct dp_peer *peer;
1879 
1880 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1881 
1882 	tag_buf++;
1883 
1884 	peer_id =
1885 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1886 
1887 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1888 
1889 	if (!peer)
1890 		return;
1891 
1892 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1893 
1894 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1895 	ppdu_user_desc->peer_id = peer_id;
1896 
1897 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1898 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1899 					CDP_BA_64_BIT_MAP_SIZE_DWORDS);
1900 }
1901 
1902 /*
1903  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
1904  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1905  * soc: DP SOC handle
1906  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1907  * @ppdu_info: per ppdu tlv structure
1908  *
1909  * return:void
1910  */
1911 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
1912 		struct dp_pdev *pdev, uint32_t *tag_buf,
1913 		struct ppdu_info *ppdu_info)
1914 {
1915 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
1916 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
1917 
1918 	struct cdp_tx_completion_ppdu *ppdu_desc;
1919 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1920 	uint8_t curr_user_index = 0;
1921 	uint16_t peer_id;
1922 	struct dp_peer *peer;
1923 
1924 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1925 
1926 	tag_buf++;
1927 
1928 	peer_id =
1929 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1930 
1931 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1932 
1933 	if (!peer)
1934 		return;
1935 
1936 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1937 
1938 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1939 	ppdu_user_desc->peer_id = peer_id;
1940 
1941 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1942 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1943 					CDP_BA_256_BIT_MAP_SIZE_DWORDS);
1944 }
1945 
1946 /*
1947  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
1948  * htt_ppdu_stats_user_cmpltn_common_tlv
1949  * soc: DP SOC handle
1950  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
1951  * @ppdu_info: per ppdu tlv structure
1952  *
1953  * return:void
1954  */
1955 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
1956 		struct dp_pdev *pdev, uint32_t *tag_buf,
1957 		struct ppdu_info *ppdu_info)
1958 {
1959 	uint16_t peer_id;
1960 	struct dp_peer *peer;
1961 	struct cdp_tx_completion_ppdu *ppdu_desc;
1962 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1963 	uint8_t curr_user_index = 0;
1964 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
1965 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
1966 
1967 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1968 
1969 	tag_buf++;
1970 	peer_id =
1971 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
1972 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1973 
1974 	if (!peer)
1975 		return;
1976 
1977 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1978 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1979 	ppdu_user_desc->peer_id = peer_id;
1980 
1981 	ppdu_user_desc->completion_status =
1982 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
1983 				*tag_buf);
1984 
1985 	ppdu_user_desc->tid =
1986 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
1987 
1988 
1989 	tag_buf++;
1990 	if (qdf_likely(ppdu_user_desc->completion_status)) {
1991 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
1992 		ppdu_user_desc->ack_rssi_valid = 1;
1993 	} else {
1994 		ppdu_user_desc->ack_rssi_valid = 0;
1995 	}
1996 
1997 	tag_buf++;
1998 
1999 	ppdu_user_desc->mpdu_success =
2000 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
2001 
2002 	tag_buf++;
2003 
2004 	ppdu_user_desc->long_retries =
2005 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2006 
2007 	ppdu_user_desc->short_retries =
2008 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
2009 	ppdu_user_desc->retry_msdus =
2010 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
2011 
2012 	ppdu_user_desc->is_ampdu =
2013 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
2014 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
2015 
2016 }
2017 
2018 /*
2019  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2020  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2021  * pdev: DP PDEV handle
2022  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2023  * @ppdu_info: per ppdu tlv structure
2024  *
2025  * return:void
2026  */
2027 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2028 		struct dp_pdev *pdev, uint32_t *tag_buf,
2029 		struct ppdu_info *ppdu_info)
2030 {
2031 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2032 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2033 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2034 	struct cdp_tx_completion_ppdu *ppdu_desc;
2035 	uint8_t curr_user_index = 0;
2036 	uint16_t peer_id;
2037 	struct dp_peer *peer;
2038 
2039 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2040 
2041 	tag_buf++;
2042 
2043 	peer_id =
2044 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2045 
2046 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2047 
2048 	if (!peer)
2049 		return;
2050 
2051 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2052 
2053 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2054 	ppdu_user_desc->peer_id = peer_id;
2055 
2056 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2057 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2058 			CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2059 }
2060 
2061 /*
2062  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2063  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2064  * pdev: DP PDEV handle
2065  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2066  * @ppdu_info: per ppdu tlv structure
2067  *
2068  * return:void
2069  */
2070 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2071 		struct dp_pdev *pdev, uint32_t *tag_buf,
2072 		struct ppdu_info *ppdu_info)
2073 {
2074 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2075 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2076 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2077 	struct cdp_tx_completion_ppdu *ppdu_desc;
2078 	uint8_t curr_user_index = 0;
2079 	uint16_t peer_id;
2080 	struct dp_peer *peer;
2081 
2082 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2083 
2084 	tag_buf++;
2085 
2086 	peer_id =
2087 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2088 
2089 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2090 
2091 	if (!peer)
2092 		return;
2093 
2094 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2095 
2096 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2097 	ppdu_user_desc->peer_id = peer_id;
2098 
2099 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2100 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2101 			CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2102 }
2103 
2104 /*
2105  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2106  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2107  * pdev: DP PDE handle
2108  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2109  * @ppdu_info: per ppdu tlv structure
2110  *
2111  * return:void
2112  */
2113 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2114 		struct dp_pdev *pdev, uint32_t *tag_buf,
2115 		struct ppdu_info *ppdu_info)
2116 {
2117 	uint16_t peer_id;
2118 	struct dp_peer *peer;
2119 	struct cdp_tx_completion_ppdu *ppdu_desc;
2120 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2121 	uint8_t curr_user_index = 0;
2122 
2123 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2124 
2125 	tag_buf += 2;
2126 	peer_id =
2127 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2128 
2129 
2130 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2131 
2132 	if (!peer)
2133 		return;
2134 
2135 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2136 
2137 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2138 	ppdu_user_desc->peer_id = peer_id;
2139 
2140 	tag_buf++;
2141 	ppdu_user_desc->tid =
2142 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(*tag_buf);
2143 	ppdu_user_desc->num_mpdu =
2144 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2145 
2146 	ppdu_user_desc->num_msdu =
2147 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2148 
2149 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2150 
2151 	tag_buf += 2;
2152 	ppdu_user_desc->success_bytes = *tag_buf;
2153 
2154 }
2155 
2156 /*
2157  * dp_process_ppdu_stats_user_common_array_tlv: Process
2158  * htt_ppdu_stats_user_common_array_tlv
2159  * pdev: DP PDEV handle
2160  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2161  * @ppdu_info: per ppdu tlv structure
2162  *
2163  * return:void
2164  */
2165 static void dp_process_ppdu_stats_user_common_array_tlv(
2166 		struct dp_pdev *pdev, uint32_t *tag_buf,
2167 		struct ppdu_info *ppdu_info)
2168 {
2169 	uint32_t peer_id;
2170 	struct dp_peer *peer;
2171 	struct cdp_tx_completion_ppdu *ppdu_desc;
2172 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2173 	uint8_t curr_user_index = 0;
2174 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2175 
2176 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2177 
2178 	tag_buf++;
2179 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2180 	tag_buf += 3;
2181 	peer_id =
2182 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2183 
2184 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2185 
2186 	if (!peer) {
2187 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2188 			"Invalid peer");
2189 		return;
2190 	}
2191 
2192 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2193 
2194 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2195 
2196 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2197 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2198 
2199 	tag_buf++;
2200 
2201 	ppdu_user_desc->success_msdus =
2202 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2203 	ppdu_user_desc->retry_bytes =
2204 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2205 	tag_buf++;
2206 	ppdu_user_desc->failed_msdus =
2207 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2208 }
2209 
2210 /*
2211  * dp_process_ppdu_stats_flush_tlv: Process
2212  * htt_ppdu_stats_flush_tlv
2213  * @pdev: DP PDEV handle
2214  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2215  *
2216  * return:void
2217  */
2218 static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2219 						uint32_t *tag_buf)
2220 {
2221 	uint32_t peer_id;
2222 	uint32_t drop_reason;
2223 	uint8_t tid;
2224 	uint32_t num_msdu;
2225 	struct dp_peer *peer;
2226 
2227 	tag_buf++;
2228 	drop_reason = *tag_buf;
2229 
2230 	tag_buf++;
2231 	num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2232 
2233 	tag_buf++;
2234 	peer_id =
2235 		HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2236 
2237 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2238 	if (!peer)
2239 		return;
2240 
2241 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2242 
2243 	if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2244 		DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2245 					num_msdu);
2246 	}
2247 }
2248 
2249 /*
2250  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2251  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2252  * @pdev: DP PDEV handle
2253  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2254  * @length: tlv_length
2255  *
2256  * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller
2257  */
2258 static QDF_STATUS
2259 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2260 					      qdf_nbuf_t tag_buf,
2261 					      uint32_t ppdu_id)
2262 {
2263 	uint32_t *nbuf_ptr;
2264 	uint8_t trim_size;
2265 
2266 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
2267 	    (!pdev->bpr_enable))
2268 		return QDF_STATUS_SUCCESS;
2269 
2270 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
2271 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2272 		      qdf_nbuf_data(tag_buf));
2273 
2274 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2275 		return QDF_STATUS_SUCCESS;
2276 
2277 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2278 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
2279 
2280 	nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(
2281 				tag_buf, sizeof(ppdu_id));
2282 	*nbuf_ptr = ppdu_id;
2283 
2284 	if (pdev->bpr_enable) {
2285 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2286 				     tag_buf, HTT_INVALID_PEER,
2287 				     WDI_NO_VAL, pdev->pdev_id);
2288 	}
2289 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2290 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2291 				     tag_buf, HTT_INVALID_PEER,
2292 				     WDI_NO_VAL, pdev->pdev_id);
2293 	}
2294 
2295 	return QDF_STATUS_E_ALREADY;
2296 }
2297 
2298 /**
2299  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
2300  * @pdev: DP pdev handle
2301  * @tag_buf: TLV buffer
2302  * @tlv_len: length of tlv
2303  * @ppdu_info: per ppdu tlv structure
2304  *
2305  * return: void
2306  */
2307 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
2308 		uint32_t tlv_len, struct ppdu_info *ppdu_info)
2309 {
2310 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2311 
2312 	switch (tlv_type) {
2313 	case HTT_PPDU_STATS_COMMON_TLV:
2314 		qdf_assert_always(tlv_len ==
2315 				sizeof(htt_ppdu_stats_common_tlv));
2316 		dp_process_ppdu_stats_common_tlv(pdev, tag_buf, ppdu_info);
2317 		break;
2318 	case HTT_PPDU_STATS_USR_COMMON_TLV:
2319 		qdf_assert_always(tlv_len ==
2320 				sizeof(htt_ppdu_stats_user_common_tlv));
2321 		dp_process_ppdu_stats_user_common_tlv(
2322 				pdev, tag_buf, ppdu_info);
2323 		break;
2324 	case HTT_PPDU_STATS_USR_RATE_TLV:
2325 		qdf_assert_always(tlv_len ==
2326 				sizeof(htt_ppdu_stats_user_rate_tlv));
2327 		dp_process_ppdu_stats_user_rate_tlv(pdev, tag_buf, ppdu_info);
2328 		break;
2329 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
2330 		qdf_assert_always(tlv_len ==
2331 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv));
2332 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2333 				pdev, tag_buf, ppdu_info);
2334 		break;
2335 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
2336 		qdf_assert_always(tlv_len ==
2337 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv));
2338 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2339 				pdev, tag_buf, ppdu_info);
2340 		break;
2341 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
2342 		qdf_assert_always(tlv_len ==
2343 				sizeof(htt_ppdu_stats_user_cmpltn_common_tlv));
2344 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
2345 				pdev, tag_buf, ppdu_info);
2346 		break;
2347 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
2348 		qdf_assert_always(tlv_len ==
2349 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv));
2350 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2351 				pdev, tag_buf, ppdu_info);
2352 		break;
2353 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
2354 		qdf_assert_always(tlv_len ==
2355 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv));
2356 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2357 				pdev, tag_buf, ppdu_info);
2358 		break;
2359 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
2360 		qdf_assert_always(tlv_len ==
2361 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv));
2362 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2363 				pdev, tag_buf, ppdu_info);
2364 		break;
2365 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
2366 		qdf_assert_always(tlv_len ==
2367 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v));
2368 		dp_process_ppdu_stats_user_common_array_tlv(
2369 				pdev, tag_buf, ppdu_info);
2370 		break;
2371 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
2372 		qdf_assert_always(tlv_len ==
2373 			sizeof(htt_ppdu_stats_flush_tlv));
2374 		dp_process_ppdu_stats_user_compltn_flush_tlv(
2375 				pdev, tag_buf);
2376 		break;
2377 	default:
2378 		break;
2379 	}
2380 }
2381 
2382 /**
2383  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
2384  * to upper layer
2385  * @pdev: DP pdev handle
2386  * @ppdu_info: per PPDU TLV descriptor
2387  *
2388  * return: void
2389  */
2390 static
2391 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
2392 			      struct ppdu_info *ppdu_info)
2393 {
2394 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2395 	struct dp_peer *peer = NULL;
2396 	qdf_nbuf_t nbuf;
2397 	uint16_t i;
2398 
2399 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2400 		qdf_nbuf_data(ppdu_info->nbuf);
2401 
2402 	ppdu_desc->num_users = ppdu_info->last_user;
2403 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2404 
2405 	for (i = 0; i < ppdu_desc->num_users; i++) {
2406 
2407 
2408 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
2409 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
2410 
2411 		if (ppdu_desc->user[i].tid < CDP_DATA_TID_MAX) {
2412 			peer = dp_peer_find_by_id(pdev->soc,
2413 					ppdu_desc->user[i].peer_id);
2414 			/**
2415 			 * This check is to make sure peer is not deleted
2416 			 * after processing the TLVs.
2417 			 */
2418 			if (!peer)
2419 				continue;
2420 
2421 			dp_tx_stats_update(pdev->soc, peer,
2422 					&ppdu_desc->user[i],
2423 					ppdu_desc->ack_rssi);
2424 		}
2425 	}
2426 
2427 	/*
2428 	 * Remove from the list
2429 	 */
2430 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
2431 	nbuf = ppdu_info->nbuf;
2432 	pdev->list_depth--;
2433 	qdf_mem_free(ppdu_info);
2434 
2435 	qdf_assert_always(nbuf);
2436 
2437 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2438 		qdf_nbuf_data(nbuf);
2439 
2440 	/**
2441 	 * Deliver PPDU stats only for valid (acked) data frames if
2442 	 * sniffer mode is not enabled.
2443 	 * If sniffer mode is enabled, PPDU stats for all frames
2444 	 * including mgmt/control frames should be delivered to upper layer
2445 	 */
2446 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2447 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
2448 				nbuf, HTT_INVALID_PEER,
2449 				WDI_NO_VAL, pdev->pdev_id);
2450 	} else {
2451 		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
2452 				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
2453 
2454 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
2455 					pdev->soc, nbuf, HTT_INVALID_PEER,
2456 					WDI_NO_VAL, pdev->pdev_id);
2457 		} else
2458 			qdf_nbuf_free(nbuf);
2459 	}
2460 	return;
2461 }
2462 
2463 /**
2464  * dp_get_ppdu_desc(): Function to allocate new PPDU status
2465  * desc for new ppdu id
2466  * @pdev: DP pdev handle
2467  * @ppdu_id: PPDU unique identifier
2468  * @tlv_type: TLV type received
2469  *
2470  * return: ppdu_info per ppdu tlv structure
2471  */
2472 static
2473 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
2474 			uint8_t tlv_type)
2475 {
2476 	struct ppdu_info *ppdu_info = NULL;
2477 
2478 	/*
2479 	 * Find ppdu_id node exists or not
2480 	 */
2481 	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {
2482 
2483 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
2484 			break;
2485 		}
2486 	}
2487 
2488 	if (ppdu_info) {
2489 		/**
2490 		 * if we get tlv_type that is already been processed for ppdu,
2491 		 * that means we got a new ppdu with same ppdu id.
2492 		 * Hence Flush the older ppdu
2493 		 */
2494 		if (ppdu_info->tlv_bitmap & (1 << tlv_type))
2495 			dp_ppdu_desc_deliver(pdev, ppdu_info);
2496 		else
2497 			return ppdu_info;
2498 	}
2499 
2500 	/**
2501 	 * Flush the head ppdu descriptor if ppdu desc list reaches max
2502 	 * threshold
2503 	 */
2504 	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
2505 		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
2506 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2507 	}
2508 
2509 	/*
2510 	 * Allocate new ppdu_info node
2511 	 */
2512 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
2513 	if (!ppdu_info)
2514 		return NULL;
2515 
2516 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
2517 			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
2518 			TRUE);
2519 	if (!ppdu_info->nbuf) {
2520 		qdf_mem_free(ppdu_info);
2521 		return NULL;
2522 	}
2523 
2524 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
2525 			sizeof(struct cdp_tx_completion_ppdu));
2526 
2527 	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
2528 			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
2529 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2530 				"No tailroom for HTT PPDU");
2531 		qdf_nbuf_free(ppdu_info->nbuf);
2532 		ppdu_info->nbuf = NULL;
2533 		ppdu_info->last_user = 0;
2534 		qdf_mem_free(ppdu_info);
2535 		return NULL;
2536 	}
2537 
2538 	/**
2539 	 * No lock is needed because all PPDU TLVs are processed in
2540 	 * same context and this list is updated in same context
2541 	 */
2542 	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
2543 			ppdu_info_list_elem);
2544 	pdev->list_depth++;
2545 	return ppdu_info;
2546 }
2547 
2548 /**
2549  * dp_htt_process_tlv(): Function to process each PPDU TLVs
2550  * @pdev: DP pdev handle
2551  * @htt_t2h_msg: HTT target to host message
2552  *
2553  * return: ppdu_info per ppdu tlv structure
2554  */
2555 
2556 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
2557 		qdf_nbuf_t htt_t2h_msg)
2558 {
2559 	uint32_t length;
2560 	uint32_t ppdu_id;
2561 	uint8_t tlv_type;
2562 	uint32_t tlv_length, tlv_bitmap_expected;
2563 	uint8_t *tlv_buf;
2564 	struct ppdu_info *ppdu_info = NULL;
2565 
2566 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2567 
2568 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
2569 
2570 	msg_word = msg_word + 1;
2571 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
2572 
2573 
2574 	msg_word = msg_word + 3;
2575 	while (length > 0) {
2576 		tlv_buf = (uint8_t *)msg_word;
2577 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
2578 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
2579 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
2580 			pdev->stats.ppdu_stats_counter[tlv_type]++;
2581 
2582 		if (tlv_length == 0)
2583 			break;
2584 
2585 		tlv_length += HTT_TLV_HDR_LEN;
2586 
2587 		/**
2588 		 * Not allocating separate ppdu descriptor for MGMT Payload
2589 		 * TLV as this is sent as separate WDI indication and it
2590 		 * doesn't contain any ppdu information
2591 		 */
2592 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
2593 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
2594 			pdev->mgmtctrl_frm_info.mgmt_buf_len = tlv_length;
2595 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
2596 			msg_word =
2597 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2598 			length -= (tlv_length);
2599 			continue;
2600 		}
2601 
2602 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
2603 		if (!ppdu_info)
2604 			return NULL;
2605 		ppdu_info->ppdu_id = ppdu_id;
2606 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
2607 
2608 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
2609 
2610 		/**
2611 		 * Increment pdev level tlv count to monitor
2612 		 * missing TLVs
2613 		 */
2614 		pdev->tlv_count++;
2615 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
2616 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2617 		length -= (tlv_length);
2618 	}
2619 
2620 	if (!ppdu_info)
2621 		return NULL;
2622 
2623 	pdev->last_ppdu_id = ppdu_id;
2624 
2625 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
2626 
2627 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2628 		if (ppdu_info->is_ampdu)
2629 			tlv_bitmap_expected = HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP;
2630 	}
2631 
2632 	/**
2633 	 * Once all the TLVs for a given PPDU has been processed,
2634 	 * return PPDU status to be delivered to higher layer
2635 	 */
2636 	if (ppdu_info->tlv_bitmap == tlv_bitmap_expected)
2637 		return ppdu_info;
2638 
2639 	return NULL;
2640 }
2641 #endif /* FEATURE_PERPKT_INFO */
2642 
2643 /**
2644  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
2645  * @soc: DP SOC handle
2646  * @pdev_id: pdev id
2647  * @htt_t2h_msg: HTT message nbuf
2648  *
2649  * return:void
2650  */
2651 #if defined(WDI_EVENT_ENABLE)
2652 #ifdef FEATURE_PERPKT_INFO
2653 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
2654 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
2655 {
2656 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
2657 	struct ppdu_info *ppdu_info = NULL;
2658 	bool free_buf = true;
2659 
2660 	if (!pdev)
2661 		return true;
2662 
2663 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
2664 	    !pdev->mcopy_mode && !pdev->bpr_enable)
2665 		return free_buf;
2666 
2667 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
2668 
2669 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
2670 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
2671 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
2672 		    QDF_STATUS_SUCCESS)
2673 			free_buf = false;
2674 
2675 		pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
2676 		pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
2677 		pdev->mgmtctrl_frm_info.ppdu_id = 0;
2678 	}
2679 
2680 	if (ppdu_info)
2681 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2682 
2683 	return free_buf;
2684 }
2685 #else
/* Stub: with FEATURE_PERPKT_INFO disabled, PPDU stats are not parsed and
 * the caller always frees the message buffer.
 */
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
2691 #endif
2692 #endif
2693 
2694 /**
2695  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2696  * @soc: DP SOC handle
2697  * @htt_t2h_msg: HTT message nbuf
2698  *
2699  * return:void
2700  */
2701 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2702 		qdf_nbuf_t htt_t2h_msg)
2703 {
2704 	uint8_t done;
2705 	qdf_nbuf_t msg_copy;
2706 	uint32_t *msg_word;
2707 
2708 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2709 	msg_word = msg_word + 3;
2710 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2711 
2712 	/*
2713 	 * HTT EXT stats response comes as stream of TLVs which span over
2714 	 * multiple T2H messages.
2715 	 * The first message will carry length of the response.
2716 	 * For rest of the messages length will be zero.
2717 	 *
2718 	 * Clone the T2H message buffer and store it in a list to process
2719 	 * it later.
2720 	 *
2721 	 * The original T2H message buffers gets freed in the T2H HTT event
2722 	 * handler
2723 	 */
2724 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2725 
2726 	if (!msg_copy) {
2727 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2728 				"T2H messge clone failed for HTT EXT STATS");
2729 		goto error;
2730 	}
2731 
2732 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2733 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2734 	/*
2735 	 * Done bit signifies that this is the last T2H buffer in the stream of
2736 	 * HTT EXT STATS message
2737 	 */
2738 	if (done) {
2739 		soc->htt_stats.num_stats++;
2740 		qdf_sched_work(0, &soc->htt_stats.work);
2741 	}
2742 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2743 
2744 	return;
2745 
2746 error:
2747 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2748 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2749 			!= NULL) {
2750 		qdf_nbuf_free(msg_copy);
2751 	}
2752 	soc->htt_stats.num_stats = 0;
2753 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2754 	return;
2755 
2756 }
2757 
2758 /*
2759  * htt_soc_attach_target() - SOC level HTT setup
2760  * @htt_soc:	HTT SOC handle
2761  *
2762  * Return: 0 on success; error code on failure
2763  */
2764 int htt_soc_attach_target(void *htt_soc)
2765 {
2766 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
2767 
2768 	return htt_h2t_ver_req_msg(soc);
2769 }
2770 
2771 
2772 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
2773 /*
2774  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
2775  * @htt_soc:	 HTT SOC handle
2776  * @msg_word:    Pointer to payload
2777  * @htt_t2h_msg: HTT msg nbuf
2778  *
2779  * Return: True if buffer should be freed by caller.
2780  */
2781 static bool
2782 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
2783 				uint32_t *msg_word,
2784 				qdf_nbuf_t htt_t2h_msg)
2785 {
2786 	u_int8_t pdev_id;
2787 	bool free_buf;
2788 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
2789 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2790 		"received HTT_T2H_MSG_TYPE_PPDU_STATS_IND");
2791 	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
2792 	pdev_id = DP_HW2SW_MACID(pdev_id);
2793 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
2794 					      htt_t2h_msg);
2795 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
2796 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
2797 		pdev_id);
2798 	return free_buf;
2799 }
2800 #else
/* Stub: PPDU stats indications are ignored when WDI events or pktlog are
 * compiled out; the buffer is always freed by the caller.
 */
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
				uint32_t *msg_word,
				qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
2808 #endif
2809 
2810 #if defined(WDI_EVENT_ENABLE) && \
2811 		!defined(REMOVE_PKT_LOG) && defined(CONFIG_WIN)
2812 /*
2813  * dp_pktlog_msg_handler() - Pktlog msg handler
2814  * @htt_soc:	 HTT SOC handle
2815  * @msg_word:    Pointer to payload
2816  *
2817  * Return: None
2818  */
2819 static void
2820 dp_pktlog_msg_handler(struct htt_soc *soc,
2821 				uint32_t *msg_word)
2822 {
2823 	uint8_t pdev_id;
2824 	uint32_t *pl_hdr;
2825 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2826 		"received HTT_T2H_MSG_TYPE_PKTLOG");
2827 	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2828 	pdev_id = DP_HW2SW_MACID(pdev_id);
2829 	pl_hdr = (msg_word + 1);
2830 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2831 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2832 		pdev_id);
2833 }
2834 #else
/* Stub: pktlog messages are dropped when pktlog support is compiled out */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
				uint32_t *msg_word)
{
}
2840 #endif
2841 
2842 /*
2843  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
2844  * @context:	Opaque context (HTT SOC handle)
2845  * @pkt:	HTC packet
2846  */
2847 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
2848 {
2849 	struct htt_soc *soc = (struct htt_soc *) context;
2850 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
2851 	u_int32_t *msg_word;
2852 	enum htt_t2h_msg_type msg_type;
2853 	bool free_buf = true;
2854 
2855 	/* check for successful message reception */
2856 	if (pkt->Status != QDF_STATUS_SUCCESS) {
2857 		if (pkt->Status != QDF_STATUS_E_CANCELED)
2858 			soc->stats.htc_err_cnt++;
2859 
2860 		qdf_nbuf_free(htt_t2h_msg);
2861 		return;
2862 	}
2863 
2864 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
2865 
2866 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
2867 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2868 	switch (msg_type) {
2869 	case HTT_T2H_MSG_TYPE_PEER_MAP:
2870 		{
2871 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
2872 			u_int8_t *peer_mac_addr;
2873 			u_int16_t peer_id;
2874 			u_int16_t hw_peer_id;
2875 			u_int8_t vdev_id;
2876 
2877 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
2878 			hw_peer_id =
2879 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
2880 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
2881 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
2882 				(u_int8_t *) (msg_word+1),
2883 				&mac_addr_deswizzle_buf[0]);
2884 			QDF_TRACE(QDF_MODULE_ID_TXRX,
2885 				QDF_TRACE_LEVEL_INFO,
2886 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
2887 				peer_id, vdev_id);
2888 
2889 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
2890 						vdev_id, peer_mac_addr);
2891 			break;
2892 		}
2893 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
2894 		{
2895 			u_int16_t peer_id;
2896 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
2897 
2898 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id);
2899 			break;
2900 		}
2901 	case HTT_T2H_MSG_TYPE_SEC_IND:
2902 		{
2903 			u_int16_t peer_id;
2904 			enum htt_sec_type sec_type;
2905 			int is_unicast;
2906 
2907 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
2908 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
2909 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
2910 			/* point to the first part of the Michael key */
2911 			msg_word++;
2912 			dp_rx_sec_ind_handler(
2913 				soc->dp_soc, peer_id, sec_type, is_unicast,
2914 				msg_word, msg_word + 2);
2915 			break;
2916 		}
2917 
2918 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
2919 		{
2920 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
2921 							     htt_t2h_msg);
2922 			break;
2923 		}
2924 
2925 	case HTT_T2H_MSG_TYPE_PKTLOG:
2926 		{
2927 			dp_pktlog_msg_handler(soc, msg_word);
2928 			break;
2929 		}
2930 
2931 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
2932 		{
2933 			htc_pm_runtime_put(soc->htc_soc);
2934 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
2935 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
2936 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2937 				"target uses HTT version %d.%d; host uses %d.%d",
2938 				soc->tgt_ver.major, soc->tgt_ver.minor,
2939 				HTT_CURRENT_VERSION_MAJOR,
2940 				HTT_CURRENT_VERSION_MINOR);
2941 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
2942 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2943 					QDF_TRACE_LEVEL_ERROR,
2944 					"*** Incompatible host/target HTT versions!");
2945 			}
2946 			/* abort if the target is incompatible with the host */
2947 			qdf_assert(soc->tgt_ver.major ==
2948 				HTT_CURRENT_VERSION_MAJOR);
2949 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
2950 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2951 					QDF_TRACE_LEVEL_WARN,
2952 					"*** Warning: host/target HTT versions"
2953 					" are different, though compatible!");
2954 			}
2955 			break;
2956 		}
2957 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
2958 		{
2959 			uint16_t peer_id;
2960 			uint8_t tid;
2961 			uint8_t win_sz;
2962 			uint16_t status;
2963 			struct dp_peer *peer;
2964 
2965 			/*
2966 			 * Update REO Queue Desc with new values
2967 			 */
2968 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
2969 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
2970 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
2971 			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);
2972 
2973 			/*
2974 			 * Window size needs to be incremented by 1
2975 			 * since fw needs to represent a value of 256
2976 			 * using just 8 bits
2977 			 */
2978 			if (peer) {
2979 				status = dp_addba_requestprocess_wifi3(peer,
2980 						0, tid, 0, win_sz + 1, 0xffff);
2981 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2982 					QDF_TRACE_LEVEL_INFO,
2983 					FL("PeerID %d BAW %d TID %d stat %d"),
2984 					peer_id, win_sz, tid, status);
2985 
2986 			} else {
2987 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2988 					QDF_TRACE_LEVEL_ERROR,
2989 					FL("Peer not found peer id %d"),
2990 					peer_id);
2991 			}
2992 			break;
2993 		}
2994 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
2995 		{
2996 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
2997 			break;
2998 		}
2999 	default:
3000 		break;
3001 	};
3002 
3003 	/* Free the indication buffer */
3004 	if (free_buf)
3005 		qdf_nbuf_free(htt_t2h_msg);
3006 }
3007 
3008 /*
3009  * dp_htt_h2t_full() - Send full handler (called from HTC)
3010  * @context:	Opaque context (HTT SOC handle)
3011  * @pkt:	HTC packet
3012  *
3013  * Return: enum htc_send_full_action
3014  */
3015 static enum htc_send_full_action
3016 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
3017 {
3018 	return HTC_SEND_FULL_KEEP;
3019 }
3020 
3021 /*
3022  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3023  * @context:	Opaque context (HTT SOC handle)
3024  * @nbuf:	nbuf containing T2H message
3025  * @pipe_id:	HIF pipe ID
3026  *
3027  * Return: QDF_STATUS
3028  *
3029  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3030  * will be used for packet log and other high-priority HTT messages. Proper
3031  * HTC connection to be added later once required FW changes are available
3032  */
3033 static QDF_STATUS
3034 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3035 {
3036 	A_STATUS rc = QDF_STATUS_SUCCESS;
3037 	HTC_PACKET htc_pkt;
3038 
3039 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3040 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3041 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3042 	htc_pkt.pPktContext = (void *)nbuf;
3043 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3044 
3045 	return rc;
3046 }
3047 
3048 /*
3049  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3050  * @htt_soc:	HTT SOC handle
3051  *
3052  * Return: 0 on success; error code on failure
3053  */
3054 static int
3055 htt_htc_soc_attach(struct htt_soc *soc)
3056 {
3057 	struct htc_service_connect_req connect;
3058 	struct htc_service_connect_resp response;
3059 	A_STATUS status;
3060 	struct dp_soc *dpsoc = soc->dp_soc;
3061 
3062 	qdf_mem_set(&connect, sizeof(connect), 0);
3063 	qdf_mem_set(&response, sizeof(response), 0);
3064 
3065 	connect.pMetaData = NULL;
3066 	connect.MetaDataLength = 0;
3067 	connect.EpCallbacks.pContext = soc;
3068 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3069 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3070 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3071 
3072 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3073 	connect.EpCallbacks.EpRecvRefill = NULL;
3074 
3075 	/* N/A, fill is done by HIF */
3076 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3077 
3078 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3079 	/*
3080 	 * Specify how deep to let a queue get before htc_send_pkt will
3081 	 * call the EpSendFull function due to excessive send queue depth.
3082 	 */
3083 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3084 
3085 	/* disable flow control for HTT data message service */
3086 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3087 
3088 	/* connect to control service */
3089 	connect.service_id = HTT_DATA_MSG_SVC;
3090 
3091 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3092 
3093 	if (status != A_OK)
3094 		return QDF_STATUS_E_FAILURE;
3095 
3096 	soc->htc_endpoint = response.Endpoint;
3097 
3098 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3099 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3100 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3101 
3102 	return 0; /* success */
3103 }
3104 
3105 /*
3106  * htt_soc_attach() - SOC level HTT initialization
3107  * @dp_soc:	Opaque Data path SOC handle
3108  * @ctrl_psoc:	Opaque ctrl SOC handle
3109  * @htc_soc:	SOC level HTC handle
3110  * @hal_soc:	Opaque HAL SOC handle
3111  * @osdev:	QDF device
3112  *
3113  * Return: HTT handle on success; NULL on failure
3114  */
3115 void *
3116 htt_soc_attach(void *dp_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
3117 	void *hal_soc, qdf_device_t osdev)
3118 {
3119 	struct htt_soc *soc;
3120 	int i;
3121 
3122 	soc = qdf_mem_malloc(sizeof(*soc));
3123 
3124 	if (!soc)
3125 		goto fail1;
3126 
3127 	soc->osdev = osdev;
3128 	soc->ctrl_psoc = ctrl_psoc;
3129 	soc->dp_soc = dp_soc;
3130 	soc->htc_soc = htc_soc;
3131 	soc->hal_soc = hal_soc;
3132 
3133 	/* TODO: See if any NSS related context is required in htt_soc */
3134 
3135 	soc->htt_htc_pkt_freelist = NULL;
3136 
3137 	if (htt_htc_soc_attach(soc))
3138 		goto fail2;
3139 
3140 	/* TODO: See if any Rx data specific intialization is required. For
3141 	 * MCL use cases, the data will be received as single packet and
3142 	 * should not required any descriptor or reorder handling
3143 	 */
3144 
3145 	HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);
3146 
3147 	/* pre-allocate some HTC_PACKET objects */
3148 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3149 		struct dp_htt_htc_pkt_union *pkt;
3150 		pkt = qdf_mem_malloc(sizeof(*pkt));
3151 		if (!pkt)
3152 			break;
3153 
3154 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3155 	}
3156 
3157 	return soc;
3158 
3159 fail2:
3160 	qdf_mem_free(soc);
3161 
3162 fail1:
3163 	return NULL;
3164 }
3165 
3166 
3167 /*
3168  * htt_soc_detach() - Detach SOC level HTT
3169  * @htt_soc:	HTT SOC handle
3170  */
3171 void
3172 htt_soc_detach(void *htt_soc)
3173 {
3174 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3175 
3176 	htt_htc_misc_pkt_pool_free(soc);
3177 	htt_htc_pkt_pool_free(soc);
3178 	HTT_TX_MUTEX_DESTROY(&soc->htt_tx_mutex);
3179 	qdf_mem_free(soc);
3180 }
3181 
3182 /**
3183  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
3184  * @pdev: DP PDEV handle
3185  * @stats_type_upload_mask: stats type requested by user
3186  * @config_param_0: extra configuration parameters
3187  * @config_param_1: extra configuration parameters
3188  * @config_param_2: extra configuration parameters
3189  * @config_param_3: extra configuration parameters
3190  * @mac_id: mac number
3191  *
3192  * return: QDF STATUS
3193  */
3194 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3195 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3196 		uint32_t config_param_1, uint32_t config_param_2,
3197 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3198 		uint8_t mac_id)
3199 {
3200 	struct htt_soc *soc = pdev->soc->htt_handle;
3201 	struct dp_htt_htc_pkt *pkt;
3202 	qdf_nbuf_t msg;
3203 	uint32_t *msg_word;
3204 	uint8_t pdev_mask = 0;
3205 
3206 	msg = qdf_nbuf_alloc(
3207 			soc->osdev,
3208 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3209 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3210 
3211 	if (!msg)
3212 		return QDF_STATUS_E_NOMEM;
3213 
3214 	/*TODO:Add support for SOC stats
3215 	 * Bit 0: SOC Stats
3216 	 * Bit 1: Pdev stats for pdev id 0
3217 	 * Bit 2: Pdev stats for pdev id 1
3218 	 * Bit 3: Pdev stats for pdev id 2
3219 	 */
3220 	mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3221 
3222 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
3223 	/*
3224 	 * Set the length of the message.
3225 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3226 	 * separately during the below call to qdf_nbuf_push_head.
3227 	 * The contribution from the HTC header is added separately inside HTC.
3228 	 */
3229 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3230 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3231 				"Failed to expand head for HTT_EXT_STATS");
3232 		qdf_nbuf_free(msg);
3233 		return QDF_STATUS_E_FAILURE;
3234 	}
3235 
3236 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3237 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
3238 		"config_param_1 %u\n config_param_2 %u\n"
3239 		"config_param_4 %u\n -------------",
3240 		__func__, __LINE__, cookie_val, config_param_0,
3241 		config_param_1, config_param_2,	config_param_3);
3242 
3243 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3244 
3245 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3246 	*msg_word = 0;
3247 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
3248 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
3249 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
3250 
3251 	/* word 1 */
3252 	msg_word++;
3253 	*msg_word = 0;
3254 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
3255 
3256 	/* word 2 */
3257 	msg_word++;
3258 	*msg_word = 0;
3259 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
3260 
3261 	/* word 3 */
3262 	msg_word++;
3263 	*msg_word = 0;
3264 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
3265 
3266 	/* word 4 */
3267 	msg_word++;
3268 	*msg_word = 0;
3269 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
3270 
3271 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
3272 
3273 	/* word 5 */
3274 	msg_word++;
3275 
3276 	/* word 6 */
3277 	msg_word++;
3278 	*msg_word = 0;
3279 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
3280 
3281 	/* word 7 */
3282 	msg_word++;
3283 	*msg_word = 0;
3284 	/*Using last 2 bits for pdev_id */
3285 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
3286 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
3287 
3288 	pkt = htt_htc_pkt_alloc(soc);
3289 	if (!pkt) {
3290 		qdf_nbuf_free(msg);
3291 		return QDF_STATUS_E_NOMEM;
3292 	}
3293 
3294 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3295 
3296 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3297 			dp_htt_h2t_send_complete_free_netbuf,
3298 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3299 			soc->htc_endpoint,
3300 			1); /* tag - not relevant here */
3301 
3302 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3303 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3304 	return 0;
3305 }
3306 
/* This workaround will be reverted once the proper HTT header defines
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in htt.h
 */
#if defined(WDI_EVENT_ENABLE)
/**
 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @stats_type_upload_mask: stats type requested by user
 * @mac_id: Mac id number
 *
 * Builds a single-word HTT_H2T_MSG_TYPE_PPDU_STATS_CFG message carrying
 * the pdev mask and TLV bitmask, and queues it to the HTT endpoint.
 *
 * return: QDF STATUS
 */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *htt_handle = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t nbuf;
	uint32_t *word;
	uint8_t pdev_mask;

	nbuf = qdf_nbuf_alloc(htt_handle->osdev,
			      HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			      HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			      true);
	if (!nbuf) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);

	/*
	 * Set the length of the message.
	 * The HTC_HDR_ALIGNMENT_PADDING contribution is accounted for by
	 * qdf_nbuf_push_head below; the HTC header is added inside HTC.
	 */
	if (qdf_nbuf_put_tail(nbuf, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand head for HTT_CFG_STATS");
		qdf_nbuf_free(nbuf);
		return QDF_STATUS_E_FAILURE;
	}

	word = (uint32_t *)qdf_nbuf_data(nbuf);

	qdf_nbuf_push_head(nbuf, HTC_HDR_ALIGNMENT_PADDING);
	*word = 0;
	HTT_H2T_MSG_TYPE_SET(*word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*word,
			stats_type_upload_mask);

	htt_pkt = htt_htc_pkt_alloc(htt_handle);
	if (!htt_pkt) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(nbuf);
		return QDF_STATUS_E_NOMEM;
	}

	htt_pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&htt_pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(nbuf), qdf_nbuf_len(nbuf),
			       htt_handle->htc_endpoint,
			       1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&htt_pkt->htc_pkt, nbuf);
	DP_HTT_SEND_HTC_PKT(htt_handle, htt_pkt);
	return 0;
}
#endif
3392