xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision edf9fd0441a5a3b63c14b7bb754f301dd8d5e57c)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <htt.h>
21 #include <hal_hw_headers.h>
22 #include <hal_api.h>
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "dp_htt.h"
30 #ifdef WIFI_MONITOR_SUPPORT
31 #include <dp_mon.h>
32 #endif
33 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
34 #include "cdp_txrx_cmn_struct.h"
35 
36 #ifdef FEATURE_PERPKT_INFO
37 #include "dp_ratetable.h"
38 #endif
39 #include <qdf_module.h>
40 
41 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
42 
43 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
44 
45 #define HTT_MSG_BUF_SIZE(msg_bytes) \
46 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
47 
48 #define HTT_PID_BIT_MASK 0x3
49 
50 #define DP_EXT_MSG_LENGTH 2048
51 #define HTT_HEADER_LEN 16
52 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
53 
54 #define HTT_SHIFT_UPPER_TIMESTAMP 32
55 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
56 #define HTT_BKP_STATS_MAX_QUEUE_DEPTH 16
57 
58 struct dp_htt_htc_pkt *
59 htt_htc_pkt_alloc(struct htt_soc *soc)
60 {
61 	struct dp_htt_htc_pkt_union *pkt = NULL;
62 
63 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
64 	if (soc->htt_htc_pkt_freelist) {
65 		pkt = soc->htt_htc_pkt_freelist;
66 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
67 	}
68 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
69 
70 	if (!pkt)
71 		pkt = qdf_mem_malloc(sizeof(*pkt));
72 
73 	if (!pkt)
74 		return NULL;
75 
76 	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
77 
78 	return &pkt->u.pkt; /* not actually a dereference */
79 }
80 
81 qdf_export_symbol(htt_htc_pkt_alloc);
82 
83 void
84 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
85 {
86 	struct dp_htt_htc_pkt_union *u_pkt =
87 		(struct dp_htt_htc_pkt_union *)pkt;
88 
89 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
90 	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
91 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
92 	soc->htt_htc_pkt_freelist = u_pkt;
93 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
94 }
95 
96 qdf_export_symbol(htt_htc_pkt_free);
97 
98 /*
99  * htt_htc_pkt_pool_free() - Free HTC packet pool
100  * @htt_soc:	HTT SOC handle
101  */
102 void
103 htt_htc_pkt_pool_free(struct htt_soc *soc)
104 {
105 	struct dp_htt_htc_pkt_union *pkt, *next;
106 	pkt = soc->htt_htc_pkt_freelist;
107 	while (pkt) {
108 		next = pkt->u.next;
109 		qdf_mem_free(pkt);
110 		pkt = next;
111 	}
112 	soc->htt_htc_pkt_freelist = NULL;
113 }
114 
115 
116 #ifndef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
117 
118 /*
119  * htt_htc_misc_pkt_list_trim() - trim misc list
120  * @htt_soc: HTT SOC handle
121  * @level: max no. of pkts in list
122  */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	/* Walk the misclist under the tx mutex and free everything past the
	 * first 'level' entries; new entries are pushed at the head (see
	 * htt_htc_misc_pkt_list_add), so the tail holds the oldest packets.
	 */
	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the outgrown tail of the list */
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			/* undo DMA mapping before freeing the nbuf */
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			/* terminate the list at the last retained entry;
			 * prev is non-NULL only for the first trimmed node
			 */
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
150 
151 /*
152  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
153  * @htt_soc:	HTT SOC handle
154  * @dp_htt_htc_pkt: pkt to be added to list
155  */
156 void
157 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
158 {
159 	struct dp_htt_htc_pkt_union *u_pkt =
160 				(struct dp_htt_htc_pkt_union *)pkt;
161 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
162 							pkt->htc_pkt.Endpoint)
163 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
164 
165 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
166 	if (soc->htt_htc_pkt_misclist) {
167 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
168 		soc->htt_htc_pkt_misclist = u_pkt;
169 	} else {
170 		soc->htt_htc_pkt_misclist = u_pkt;
171 	}
172 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
173 
174 	/* only ce pipe size + tx_queue_depth could possibly be in use
175 	 * free older packets in the misclist
176 	 */
177 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
178 }
179 
180 qdf_export_symbol(htt_htc_misc_pkt_list_add);
181 #endif  /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
182 
183 /*
184  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
185  * @htt_soc:	HTT SOC handle
186  */
static void
htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt, *next;
	qdf_nbuf_t netbuf;

	/* Drain the whole misclist under the tx mutex. */
	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;

	while (pkt) {
		next = pkt->u.next;
		/* Skip (and count) entries whose magic cookie does not
		 * match — presumably already reclaimed elsewhere, so
		 * freeing them here would double free; TODO confirm.
		 */
		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
		    HTC_PACKET_MAGIC_COOKIE) {
			pkt = next;
			soc->stats.skip_count++;
			continue;
		}
		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
		/* undo the DMA mapping before releasing the nbuf */
		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);

		soc->stats.htc_pkt_free++;
		dp_htt_info("%pK: Pkt free count %d",
			    soc->dp_soc, soc->stats.htc_pkt_free);

		qdf_nbuf_free(netbuf);
		qdf_mem_free(pkt);
		pkt = next;
	}
	soc->htt_htc_pkt_misclist = NULL;
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
	dp_info("HTC Packets, fail count = %d, skip count = %d",
		soc->stats.fail_count, soc->stats.skip_count);
}
220 
221 /*
222  * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
223  * @tgt_mac_addr:	Target MAC
224  * @buffer:		Output buffer
225  */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * Host and target endianness differ, so the target->host upload
	 * byte-swaps every u_int32_t element of the message.  That scrambles
	 * byte-array fields such as the MAC address; undo the per-word swap:
	 * MAC bytes 0-3 sit in the first 32-bit word, bytes 4-5 land at
	 * offsets 7 and 6 of the second word.
	 */
	int i;

	for (i = 0; i < 4; i++)
		buffer[i] = tgt_mac_addr[3 - i];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * Matching endianness on both sides: the bytes arrive in order, so
	 * hand back the in-message address directly.
	 */
	return tgt_mac_addr;
#endif
}
254 
255 /*
256  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
257  * @soc:	SOC handle
258  * @status:	Completion status
259  * @netbuf:	HTT buffer
260  */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	/* The message buffer is no longer needed once HTC reports the send
	 * complete, regardless of status; soc and status are unused here.
	 */
	qdf_nbuf_free(netbuf);
}
267 
268 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
269 /*
270  * dp_htt_h2t_send_complete() - H2T completion handler
271  * @context:	Opaque context (HTT SOC handle)
272  * @htc_pkt:	HTC packet
273  */
274 static void
275 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
276 {
277 	struct htt_soc *soc =  (struct htt_soc *) context;
278 	struct dp_htt_htc_pkt *htt_pkt;
279 	qdf_nbuf_t netbuf;
280 
281 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
282 
283 	/* process (free or keep) the netbuf that held the message */
284 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
285 	/*
286 	 * adf sendcomplete is required for windows only
287 	 */
288 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
289 	/* free the htt_htc_pkt / HTC_PACKET object */
290 	qdf_nbuf_free(netbuf);
291 	htt_htc_pkt_free(soc, htt_pkt);
292 }
293 
294 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
295 
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context:    Opaque context (HTT SOC handle)
 * @htc_pkt:    HTC packet
 */
301 static void
302 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
303 {
304 	void (*send_complete_part2)(
305 	     void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
306 	struct htt_soc *soc =  (struct htt_soc *) context;
307 	struct dp_htt_htc_pkt *htt_pkt;
308 	qdf_nbuf_t netbuf;
309 
310 	send_complete_part2 = htc_pkt->pPktContext;
311 
312 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
313 
314 	/* process (free or keep) the netbuf that held the message */
315 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
316 	/*
317 	 * adf sendcomplete is required for windows only
318 	*/
319 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
320 	if (send_complete_part2){
321 		send_complete_part2(
322 		    htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
323 	}
324 	/* free the htt_htc_pkt / HTC_PACKET object */
325 	htt_htc_pkt_free(soc, htt_pkt);
326 }
327 
328 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
329 
330 #ifdef WLAN_MCAST_MLO
/*
 * dp_htt_h2t_add_tcl_metadata_ver() - Add tcl_metadata version
 * @soc:	HTT SOC handle
 * @msg:	Pointer to nbuf
 *
 * Return: 0 on success; error code on failure
 */
338 static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
339 {
340 	uint32_t *msg_word;
341 
342 	*msg = qdf_nbuf_alloc(
343 		soc->osdev,
344 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ),
345 		/* reserve room for the HTC header */
346 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
347 	if (!*msg)
348 		return QDF_STATUS_E_NOMEM;
349 
350 	/*
351 	 * Set the length of the message.
352 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
353 	 * separately during the below call to qdf_nbuf_push_head.
354 	 * The contribution from the HTC header is added separately inside HTC.
355 	 */
356 	if (!qdf_nbuf_put_tail(*msg,
357 			       HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ)) {
358 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
359 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
360 			  __func__);
361 		return QDF_STATUS_E_FAILURE;
362 	}
363 
364 	/* fill in the message contents */
365 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
366 
367 	/* rewind beyond alignment pad to get to the HTC header reserved area */
368 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
369 
370 	*msg_word = 0;
371 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
372 
373 	/* word 1 */
374 	msg_word++;
375 	*msg_word = 0;
376 	HTT_OPTION_TLV_TAG_SET(*msg_word, HTT_OPTION_TLV_TAG_TCL_METADATA_VER);
377 	HTT_OPTION_TLV_LENGTH_SET(*msg_word, HTT_TCL_METADATA_VER_SZ);
378 	HTT_OPTION_TLV_TCL_METADATA_VER_SET(*msg_word,
379 					    HTT_OPTION_TLV_TCL_METADATA_V2);
380 
381 	return QDF_STATUS_SUCCESS;
382 }
383 #else
/*
 * dp_htt_h2t_add_tcl_metadata_ver() - Add tcl_metadata version
 * @soc:	HTT SOC handle
 * @msg:	Pointer to nbuf
 *
 * Return: 0 on success; error code on failure
 */
391 static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
392 {
393 	uint32_t *msg_word;
394 
395 	*msg = qdf_nbuf_alloc(
396 		soc->osdev,
397 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
398 		/* reserve room for the HTC header */
399 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
400 	if (!*msg)
401 		return QDF_STATUS_E_NOMEM;
402 
403 	/*
404 	 * Set the length of the message.
405 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
406 	 * separately during the below call to qdf_nbuf_push_head.
407 	 * The contribution from the HTC header is added separately inside HTC.
408 	 */
409 	if (!qdf_nbuf_put_tail(*msg, HTT_VER_REQ_BYTES)) {
410 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
411 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
412 			  __func__);
413 		return QDF_STATUS_E_FAILURE;
414 	}
415 
416 	/* fill in the message contents */
417 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
418 
419 	/* rewind beyond alignment pad to get to the HTC header reserved area */
420 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
421 
422 	*msg_word = 0;
423 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
424 
425 	return QDF_STATUS_SUCCESS;
426 }
427 #endif
428 
429 /*
430  * htt_h2t_ver_req_msg() - Send HTT version request message to target
431  * @htt_soc:	HTT SOC handle
432  *
433  * Return: 0 on success; error code on failure
434  */
static int htt_h2t_ver_req_msg(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg = NULL;
	QDF_STATUS status;

	/* Build the VERSION_REQ message (with the TCL metadata version TLV
	 * when WLAN_MCAST_MLO is enabled) into a freshly allocated nbuf.
	 */
	status = dp_htt_h2t_add_tcl_metadata_ver(soc, &msg);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}
	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
		qdf_nbuf_len(msg), soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RTPM_PUT_RC);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
				     NULL);

	/* On send failure ownership was not handed off, so both the message
	 * buffer and the packet wrapper must be released here.
	 */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
468 
469 /*
470  * htt_srng_setup() - Send SRNG setup message to target
471  * @htt_soc:	HTT SOC handle
472  * @mac_id:	MAC Id
473  * @hal_srng:	Opaque HAL SRNG pointer
474  * @hal_ring_type:	SRNG ring type
475  *
476  * Return: 0 on success; error code on failure
477  */
478 int htt_srng_setup(struct htt_soc *soc, int mac_id,
479 		   hal_ring_handle_t hal_ring_hdl,
480 		   int hal_ring_type)
481 {
482 	struct dp_htt_htc_pkt *pkt;
483 	qdf_nbuf_t htt_msg;
484 	uint32_t *msg_word;
485 	struct hal_srng_params srng_params;
486 	qdf_dma_addr_t hp_addr, tp_addr;
487 	uint32_t ring_entry_size =
488 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
489 	int htt_ring_type, htt_ring_id;
490 	uint8_t *htt_logger_bufp;
491 	int target_pdev_id;
492 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
493 	QDF_STATUS status;
494 
495 	/* Sizes should be set in 4-byte words */
496 	ring_entry_size = ring_entry_size >> 2;
497 
498 	htt_msg = qdf_nbuf_alloc(soc->osdev,
499 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
500 		/* reserve room for the HTC header */
501 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
502 	if (!htt_msg)
503 		goto fail0;
504 
505 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
506 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
507 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
508 
509 	switch (hal_ring_type) {
510 	case RXDMA_BUF:
511 #ifdef QCA_HOST2FW_RXBUF_RING
512 		if (srng_params.ring_id ==
513 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
514 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
515 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
516 			htt_ring_type = HTT_SW_TO_SW_RING;
517 #ifdef IPA_OFFLOAD
518 		} else if (srng_params.ring_id ==
519 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 +
520 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
521 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
522 			htt_ring_type = HTT_SW_TO_SW_RING;
523 #endif
524 #else
525 		if (srng_params.ring_id ==
526 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
527 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
528 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
529 			htt_ring_type = HTT_SW_TO_HW_RING;
530 #endif
531 		} else if (srng_params.ring_id ==
532 #ifdef IPA_OFFLOAD
533 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
534 #else
535 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
536 #endif
537 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
538 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
539 			htt_ring_type = HTT_SW_TO_HW_RING;
540 		} else {
541 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
542 				   "%s: Ring %d currently not supported",
543 				   __func__, srng_params.ring_id);
544 			goto fail1;
545 		}
546 
547 		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
548 			hal_ring_type, srng_params.ring_id, htt_ring_id,
549 			(uint64_t)hp_addr,
550 			(uint64_t)tp_addr);
551 		break;
552 	case RXDMA_MONITOR_BUF:
553 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
554 							 RXDMA_MONITOR_BUF);
555 		htt_ring_type = HTT_SW_TO_HW_RING;
556 		break;
557 	case RXDMA_MONITOR_STATUS:
558 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
559 		htt_ring_type = HTT_SW_TO_HW_RING;
560 		break;
561 	case RXDMA_MONITOR_DST:
562 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
563 							 RXDMA_MONITOR_DST);
564 		htt_ring_type = HTT_HW_TO_SW_RING;
565 		break;
566 	case RXDMA_MONITOR_DESC:
567 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
568 		htt_ring_type = HTT_SW_TO_HW_RING;
569 		break;
570 	case RXDMA_DST:
571 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
572 		htt_ring_type = HTT_HW_TO_SW_RING;
573 		break;
574 	case TX_MONITOR_BUF:
575 		htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
576 		htt_ring_type = HTT_SW_TO_HW_RING;
577 		break;
578 	case TX_MONITOR_DST:
579 		htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
580 		htt_ring_type = HTT_HW_TO_SW_RING;
581 		break;
582 
583 	default:
584 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
585 			"%s: Ring currently not supported", __func__);
586 			goto fail1;
587 	}
588 
589 	/*
590 	 * Set the length of the message.
591 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
592 	 * separately during the below call to qdf_nbuf_push_head.
593 	 * The contribution from the HTC header is added separately inside HTC.
594 	 */
595 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
596 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
597 			"%s: Failed to expand head for SRING_SETUP msg",
598 			__func__);
599 		return QDF_STATUS_E_FAILURE;
600 	}
601 
602 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
603 
604 	/* rewind beyond alignment pad to get to the HTC header reserved area */
605 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
606 
607 	/* word 0 */
608 	*msg_word = 0;
609 	htt_logger_bufp = (uint8_t *)msg_word;
610 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
611 	target_pdev_id =
612 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
613 
614 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
615 			(htt_ring_type == HTT_HW_TO_SW_RING))
616 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
617 	else
618 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
619 
620 	dp_info("mac_id %d", mac_id);
621 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
622 	/* TODO: Discuss with FW on changing this to unique ID and using
623 	 * htt_ring_type to send the type of ring
624 	 */
625 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
626 
627 	/* word 1 */
628 	msg_word++;
629 	*msg_word = 0;
630 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
631 		srng_params.ring_base_paddr & 0xffffffff);
632 
633 	/* word 2 */
634 	msg_word++;
635 	*msg_word = 0;
636 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
637 		(uint64_t)srng_params.ring_base_paddr >> 32);
638 
639 	/* word 3 */
640 	msg_word++;
641 	*msg_word = 0;
642 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
643 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
644 		(ring_entry_size * srng_params.num_entries));
645 	dp_info("entry_size %d", ring_entry_size);
646 	dp_info("num_entries %d", srng_params.num_entries);
647 	dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
648 	if (htt_ring_type == HTT_SW_TO_HW_RING)
649 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
650 						*msg_word, 1);
651 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
652 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
653 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
654 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
655 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
656 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
657 
658 	/* word 4 */
659 	msg_word++;
660 	*msg_word = 0;
661 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
662 		hp_addr & 0xffffffff);
663 
664 	/* word 5 */
665 	msg_word++;
666 	*msg_word = 0;
667 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
668 		(uint64_t)hp_addr >> 32);
669 
670 	/* word 6 */
671 	msg_word++;
672 	*msg_word = 0;
673 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
674 		tp_addr & 0xffffffff);
675 
676 	/* word 7 */
677 	msg_word++;
678 	*msg_word = 0;
679 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
680 		(uint64_t)tp_addr >> 32);
681 
682 	/* word 8 */
683 	msg_word++;
684 	*msg_word = 0;
685 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
686 		srng_params.msi_addr & 0xffffffff);
687 
688 	/* word 9 */
689 	msg_word++;
690 	*msg_word = 0;
691 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
692 		(uint64_t)(srng_params.msi_addr) >> 32);
693 
694 	/* word 10 */
695 	msg_word++;
696 	*msg_word = 0;
697 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
698 		qdf_cpu_to_le32(srng_params.msi_data));
699 
700 	/* word 11 */
701 	msg_word++;
702 	*msg_word = 0;
703 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
704 		srng_params.intr_batch_cntr_thres_entries *
705 		ring_entry_size);
706 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
707 		srng_params.intr_timer_thres_us >> 3);
708 
709 	/* word 12 */
710 	msg_word++;
711 	*msg_word = 0;
712 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
713 		/* TODO: Setting low threshold to 1/8th of ring size - see
714 		 * if this needs to be configurable
715 		 */
716 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
717 			srng_params.low_threshold);
718 	}
719 	/* "response_required" field should be set if a HTT response message is
720 	 * required after setting up the ring.
721 	 */
722 	pkt = htt_htc_pkt_alloc(soc);
723 	if (!pkt)
724 		goto fail1;
725 
726 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
727 
728 	SET_HTC_PACKET_INFO_TX(
729 		&pkt->htc_pkt,
730 		dp_htt_h2t_send_complete_free_netbuf,
731 		qdf_nbuf_data(htt_msg),
732 		qdf_nbuf_len(htt_msg),
733 		soc->htc_endpoint,
734 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
735 
736 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
737 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
738 				     htt_logger_bufp);
739 
740 	if (status != QDF_STATUS_SUCCESS) {
741 		qdf_nbuf_free(htt_msg);
742 		htt_htc_pkt_free(soc, pkt);
743 	}
744 
745 	return status;
746 
747 fail1:
748 	qdf_nbuf_free(htt_msg);
749 fail0:
750 	return QDF_STATUS_E_FAILURE;
751 }
752 
753 qdf_export_symbol(htt_srng_setup);
754 
755 #ifdef QCA_SUPPORT_FULL_MON
/**
 * htt_h2t_full_mon_cfg() - Send full monitor configuration msg to FW
 *
 * @htt_soc: HTT Soc handle
 * @pdev_id: Radio id
 * @config: enable/disable configuration
 *
 * Return: Success when HTT message is sent, error on failure
 */
765 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
766 			 uint8_t pdev_id,
767 			 enum dp_full_mon_config config)
768 {
769 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
770 	struct dp_htt_htc_pkt *pkt;
771 	qdf_nbuf_t htt_msg;
772 	uint32_t *msg_word;
773 	uint8_t *htt_logger_bufp;
774 
775 	htt_msg = qdf_nbuf_alloc(soc->osdev,
776 				 HTT_MSG_BUF_SIZE(
777 				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
778 				 /* reserve room for the HTC header */
779 				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
780 				 4,
781 				 TRUE);
782 	if (!htt_msg)
783 		return QDF_STATUS_E_FAILURE;
784 
785 	/*
786 	 * Set the length of the message.
787 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
788 	 * separately during the below call to qdf_nbuf_push_head.
789 	 * The contribution from the HTC header is added separately inside HTC.
790 	 */
791 	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
792 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
793 			  "%s: Failed to expand head for RX Ring Cfg msg",
794 			  __func__);
795 		goto fail1;
796 	}
797 
798 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
799 
800 	/* rewind beyond alignment pad to get to the HTC header reserved area */
801 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
802 
803 	/* word 0 */
804 	*msg_word = 0;
805 	htt_logger_bufp = (uint8_t *)msg_word;
806 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
807 	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
808 			*msg_word, DP_SW2HW_MACID(pdev_id));
809 
810 	msg_word++;
811 	*msg_word = 0;
812 	/* word 1 */
813 	if (config == DP_FULL_MON_ENABLE) {
814 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
815 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
816 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
817 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
818 	} else if (config == DP_FULL_MON_DISABLE) {
819 		/* As per MAC team's suggestion, While disbaling full monitor
820 		 * mode, Set 'en' bit to true in full monitor mode register.
821 		 */
822 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
823 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
824 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
825 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
826 	}
827 
828 	pkt = htt_htc_pkt_alloc(soc);
829 	if (!pkt) {
830 		qdf_err("HTC packet allocation failed");
831 		goto fail1;
832 	}
833 
834 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
835 
836 	SET_HTC_PACKET_INFO_TX(
837 		&pkt->htc_pkt,
838 		dp_htt_h2t_send_complete_free_netbuf,
839 		qdf_nbuf_data(htt_msg),
840 		qdf_nbuf_len(htt_msg),
841 		soc->htc_endpoint,
842 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
843 
844 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
845 	qdf_debug("config: %d", config);
846 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
847 			    htt_logger_bufp);
848 	return QDF_STATUS_SUCCESS;
849 fail1:
850 	qdf_nbuf_free(htt_msg);
851 	return QDF_STATUS_E_FAILURE;
852 }
853 
854 qdf_export_symbol(htt_h2t_full_mon_cfg);
855 #else
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	/* Stub when QCA_SUPPORT_FULL_MON is not compiled in: full monitor
	 * configuration is a no-op that reports success (0).
	 */
	return 0;
}

qdf_export_symbol(htt_h2t_full_mon_cfg);
864 #endif
865 
866 /*
867  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
868  * config message to target
869  * @htt_soc:	HTT SOC handle
870  * @pdev_id:	WIN- PDEV Id, MCL- mac id
871  * @hal_srng:	Opaque HAL SRNG pointer
872  * @hal_ring_type:	SRNG ring type
873  * @ring_buf_size:	SRNG buffer size
874  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
875  * Return: 0 on success; error code on failure
876  */
877 int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
878 			hal_ring_handle_t hal_ring_hdl,
879 			int hal_ring_type, int ring_buf_size,
880 			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
881 {
882 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
883 	struct dp_htt_htc_pkt *pkt;
884 	qdf_nbuf_t htt_msg;
885 	uint32_t *msg_word;
886 	struct hal_srng_params srng_params;
887 	uint32_t htt_ring_type, htt_ring_id;
888 	uint32_t tlv_filter;
889 	uint8_t *htt_logger_bufp;
890 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
891 	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
892 	int target_pdev_id;
893 	QDF_STATUS status;
894 
895 	htt_msg = qdf_nbuf_alloc(soc->osdev,
896 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
897 	/* reserve room for the HTC header */
898 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
899 	if (!htt_msg)
900 		goto fail0;
901 
902 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
903 
904 	switch (hal_ring_type) {
905 	case RXDMA_BUF:
906 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
907 		htt_ring_type = HTT_SW_TO_HW_RING;
908 		break;
909 	case RXDMA_MONITOR_BUF:
910 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
911 							 RXDMA_MONITOR_BUF);
912 		htt_ring_type = HTT_SW_TO_HW_RING;
913 		break;
914 	case RXDMA_MONITOR_STATUS:
915 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
916 		htt_ring_type = HTT_SW_TO_HW_RING;
917 		break;
918 	case RXDMA_MONITOR_DST:
919 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
920 							 RXDMA_MONITOR_DST);
921 		htt_ring_type = HTT_HW_TO_SW_RING;
922 		break;
923 	case RXDMA_MONITOR_DESC:
924 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
925 		htt_ring_type = HTT_SW_TO_HW_RING;
926 		break;
927 	case RXDMA_DST:
928 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
929 		htt_ring_type = HTT_HW_TO_SW_RING;
930 		break;
931 
932 	default:
933 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
934 			"%s: Ring currently not supported", __func__);
935 		goto fail1;
936 	}
937 
938 	/*
939 	 * Set the length of the message.
940 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
941 	 * separately during the below call to qdf_nbuf_push_head.
942 	 * The contribution from the HTC header is added separately inside HTC.
943 	 */
944 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
945 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
946 			"%s: Failed to expand head for RX Ring Cfg msg",
947 			__func__);
948 		goto fail1; /* failure */
949 	}
950 
951 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
952 
953 	/* rewind beyond alignment pad to get to the HTC header reserved area */
954 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
955 
956 	/* word 0 */
957 	htt_logger_bufp = (uint8_t *)msg_word;
958 	*msg_word = 0;
959 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
960 
961 	if (htt_tlv_filter->rx_mon_global_en)
962 		*msg_word  |= (1 << RXMON_GLOBAL_EN_SHIFT);
963 
964 	/*
965 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
966 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
967 	 */
968 	target_pdev_id =
969 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);
970 
971 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
972 			htt_ring_type == HTT_SW_TO_HW_RING)
973 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
974 						      target_pdev_id);
975 
976 	/* TODO: Discuss with FW on changing this to unique ID and using
977 	 * htt_ring_type to send the type of ring
978 	 */
979 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
980 
981 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
982 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
983 
984 	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
985 						htt_tlv_filter->offset_valid);
986 
987 	if (mon_drop_th > 0)
988 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
989 								   1);
990 	else
991 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
992 								   0);
993 
994 	/* word 1 */
995 	msg_word++;
996 	*msg_word = 0;
997 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
998 		ring_buf_size);
999 
1000 	dp_mon_rx_packet_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
1001 	dp_rx_mon_enable(soc->dp_soc, msg_word, htt_tlv_filter);
1002 
1003 	/* word 2 */
1004 	msg_word++;
1005 	*msg_word = 0;
1006 
1007 	if (htt_tlv_filter->enable_fp) {
1008 		/* TYPE: MGMT */
1009 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1010 			FP, MGMT, 0000,
1011 			(htt_tlv_filter->fp_mgmt_filter &
1012 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1013 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1014 			FP, MGMT, 0001,
1015 			(htt_tlv_filter->fp_mgmt_filter &
1016 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1017 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1018 			FP, MGMT, 0010,
1019 			(htt_tlv_filter->fp_mgmt_filter &
1020 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1021 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1022 			FP, MGMT, 0011,
1023 			(htt_tlv_filter->fp_mgmt_filter &
1024 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1025 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1026 			FP, MGMT, 0100,
1027 			(htt_tlv_filter->fp_mgmt_filter &
1028 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1029 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1030 			FP, MGMT, 0101,
1031 			(htt_tlv_filter->fp_mgmt_filter &
1032 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1033 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1034 			FP, MGMT, 0110,
1035 			(htt_tlv_filter->fp_mgmt_filter &
1036 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1037 		/* reserved */
1038 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
1039 			MGMT, 0111,
1040 			(htt_tlv_filter->fp_mgmt_filter &
1041 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1042 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1043 			FP, MGMT, 1000,
1044 			(htt_tlv_filter->fp_mgmt_filter &
1045 			FILTER_MGMT_BEACON) ? 1 : 0);
1046 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1047 			FP, MGMT, 1001,
1048 			(htt_tlv_filter->fp_mgmt_filter &
1049 			FILTER_MGMT_ATIM) ? 1 : 0);
1050 	}
1051 
1052 	if (htt_tlv_filter->enable_md) {
1053 			/* TYPE: MGMT */
1054 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1055 			MD, MGMT, 0000,
1056 			(htt_tlv_filter->md_mgmt_filter &
1057 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1058 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1059 			MD, MGMT, 0001,
1060 			(htt_tlv_filter->md_mgmt_filter &
1061 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1062 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1063 			MD, MGMT, 0010,
1064 			(htt_tlv_filter->md_mgmt_filter &
1065 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1066 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1067 			MD, MGMT, 0011,
1068 			(htt_tlv_filter->md_mgmt_filter &
1069 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1070 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1071 			MD, MGMT, 0100,
1072 			(htt_tlv_filter->md_mgmt_filter &
1073 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1074 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1075 			MD, MGMT, 0101,
1076 			(htt_tlv_filter->md_mgmt_filter &
1077 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1078 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1079 			MD, MGMT, 0110,
1080 			(htt_tlv_filter->md_mgmt_filter &
1081 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1082 		/* reserved */
1083 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1084 			MGMT, 0111,
1085 			(htt_tlv_filter->md_mgmt_filter &
1086 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1087 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1088 			MD, MGMT, 1000,
1089 			(htt_tlv_filter->md_mgmt_filter &
1090 			FILTER_MGMT_BEACON) ? 1 : 0);
1091 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1092 			MD, MGMT, 1001,
1093 			(htt_tlv_filter->md_mgmt_filter &
1094 			FILTER_MGMT_ATIM) ? 1 : 0);
1095 	}
1096 
1097 	if (htt_tlv_filter->enable_mo) {
1098 		/* TYPE: MGMT */
1099 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1100 			MO, MGMT, 0000,
1101 			(htt_tlv_filter->mo_mgmt_filter &
1102 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1103 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1104 			MO, MGMT, 0001,
1105 			(htt_tlv_filter->mo_mgmt_filter &
1106 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1107 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1108 			MO, MGMT, 0010,
1109 			(htt_tlv_filter->mo_mgmt_filter &
1110 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1111 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1112 			MO, MGMT, 0011,
1113 			(htt_tlv_filter->mo_mgmt_filter &
1114 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1115 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1116 			MO, MGMT, 0100,
1117 			(htt_tlv_filter->mo_mgmt_filter &
1118 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1119 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1120 			MO, MGMT, 0101,
1121 			(htt_tlv_filter->mo_mgmt_filter &
1122 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1123 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1124 			MO, MGMT, 0110,
1125 			(htt_tlv_filter->mo_mgmt_filter &
1126 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1127 		/* reserved */
1128 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1129 			MGMT, 0111,
1130 			(htt_tlv_filter->mo_mgmt_filter &
1131 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1132 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1133 			MO, MGMT, 1000,
1134 			(htt_tlv_filter->mo_mgmt_filter &
1135 			FILTER_MGMT_BEACON) ? 1 : 0);
1136 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1137 			MO, MGMT, 1001,
1138 			(htt_tlv_filter->mo_mgmt_filter &
1139 			FILTER_MGMT_ATIM) ? 1 : 0);
1140 	}
1141 
1142 	/* word 3 */
1143 	msg_word++;
1144 	*msg_word = 0;
1145 
1146 	if (htt_tlv_filter->enable_fp) {
1147 		/* TYPE: MGMT */
1148 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1149 			FP, MGMT, 1010,
1150 			(htt_tlv_filter->fp_mgmt_filter &
1151 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1152 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1153 			FP, MGMT, 1011,
1154 			(htt_tlv_filter->fp_mgmt_filter &
1155 			FILTER_MGMT_AUTH) ? 1 : 0);
1156 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1157 			FP, MGMT, 1100,
1158 			(htt_tlv_filter->fp_mgmt_filter &
1159 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1160 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1161 			FP, MGMT, 1101,
1162 			(htt_tlv_filter->fp_mgmt_filter &
1163 			FILTER_MGMT_ACTION) ? 1 : 0);
1164 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1165 			FP, MGMT, 1110,
1166 			(htt_tlv_filter->fp_mgmt_filter &
1167 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1168 		/* reserved*/
1169 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1170 			MGMT, 1111,
1171 			(htt_tlv_filter->fp_mgmt_filter &
1172 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1173 	}
1174 
1175 	if (htt_tlv_filter->enable_md) {
1176 			/* TYPE: MGMT */
1177 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1178 			MD, MGMT, 1010,
1179 			(htt_tlv_filter->md_mgmt_filter &
1180 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1181 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1182 			MD, MGMT, 1011,
1183 			(htt_tlv_filter->md_mgmt_filter &
1184 			FILTER_MGMT_AUTH) ? 1 : 0);
1185 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1186 			MD, MGMT, 1100,
1187 			(htt_tlv_filter->md_mgmt_filter &
1188 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1189 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1190 			MD, MGMT, 1101,
1191 			(htt_tlv_filter->md_mgmt_filter &
1192 			FILTER_MGMT_ACTION) ? 1 : 0);
1193 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1194 			MD, MGMT, 1110,
1195 			(htt_tlv_filter->md_mgmt_filter &
1196 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1197 	}
1198 
1199 	if (htt_tlv_filter->enable_mo) {
1200 		/* TYPE: MGMT */
1201 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1202 			MO, MGMT, 1010,
1203 			(htt_tlv_filter->mo_mgmt_filter &
1204 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1205 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1206 			MO, MGMT, 1011,
1207 			(htt_tlv_filter->mo_mgmt_filter &
1208 			FILTER_MGMT_AUTH) ? 1 : 0);
1209 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1210 			MO, MGMT, 1100,
1211 			(htt_tlv_filter->mo_mgmt_filter &
1212 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1213 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1214 			MO, MGMT, 1101,
1215 			(htt_tlv_filter->mo_mgmt_filter &
1216 			FILTER_MGMT_ACTION) ? 1 : 0);
1217 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1218 			MO, MGMT, 1110,
1219 			(htt_tlv_filter->mo_mgmt_filter &
1220 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1221 		/* reserved*/
1222 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1223 			MGMT, 1111,
1224 			(htt_tlv_filter->mo_mgmt_filter &
1225 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1226 	}
1227 
1228 	/* word 4 */
1229 	msg_word++;
1230 	*msg_word = 0;
1231 
1232 	if (htt_tlv_filter->enable_fp) {
1233 		/* TYPE: CTRL */
1234 		/* reserved */
1235 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1236 			CTRL, 0000,
1237 			(htt_tlv_filter->fp_ctrl_filter &
1238 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1239 		/* reserved */
1240 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1241 			CTRL, 0001,
1242 			(htt_tlv_filter->fp_ctrl_filter &
1243 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1244 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1245 			CTRL, 0010,
1246 			(htt_tlv_filter->fp_ctrl_filter &
1247 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1248 		/* reserved */
1249 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1250 			CTRL, 0011,
1251 			(htt_tlv_filter->fp_ctrl_filter &
1252 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1253 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1254 			CTRL, 0100,
1255 			(htt_tlv_filter->fp_ctrl_filter &
1256 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1257 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1258 			CTRL, 0101,
1259 			(htt_tlv_filter->fp_ctrl_filter &
1260 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1261 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1262 			CTRL, 0110,
1263 			(htt_tlv_filter->fp_ctrl_filter &
1264 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1265 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1266 			CTRL, 0111,
1267 			(htt_tlv_filter->fp_ctrl_filter &
1268 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1269 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1270 			CTRL, 1000,
1271 			(htt_tlv_filter->fp_ctrl_filter &
1272 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1273 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1274 			CTRL, 1001,
1275 			(htt_tlv_filter->fp_ctrl_filter &
1276 			FILTER_CTRL_BA) ? 1 : 0);
1277 	}
1278 
1279 	if (htt_tlv_filter->enable_md) {
1280 		/* TYPE: CTRL */
1281 		/* reserved */
1282 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1283 			CTRL, 0000,
1284 			(htt_tlv_filter->md_ctrl_filter &
1285 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1286 		/* reserved */
1287 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1288 			CTRL, 0001,
1289 			(htt_tlv_filter->md_ctrl_filter &
1290 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1291 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1292 			CTRL, 0010,
1293 			(htt_tlv_filter->md_ctrl_filter &
1294 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1295 		/* reserved */
1296 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1297 			CTRL, 0011,
1298 			(htt_tlv_filter->md_ctrl_filter &
1299 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1300 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1301 			CTRL, 0100,
1302 			(htt_tlv_filter->md_ctrl_filter &
1303 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1304 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1305 			CTRL, 0101,
1306 			(htt_tlv_filter->md_ctrl_filter &
1307 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1308 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1309 			CTRL, 0110,
1310 			(htt_tlv_filter->md_ctrl_filter &
1311 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1312 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1313 			CTRL, 0111,
1314 			(htt_tlv_filter->md_ctrl_filter &
1315 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1316 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1317 			CTRL, 1000,
1318 			(htt_tlv_filter->md_ctrl_filter &
1319 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1320 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1321 			CTRL, 1001,
1322 			(htt_tlv_filter->md_ctrl_filter &
1323 			FILTER_CTRL_BA) ? 1 : 0);
1324 	}
1325 
1326 	if (htt_tlv_filter->enable_mo) {
1327 		/* TYPE: CTRL */
1328 		/* reserved */
1329 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1330 			CTRL, 0000,
1331 			(htt_tlv_filter->mo_ctrl_filter &
1332 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1333 		/* reserved */
1334 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1335 			CTRL, 0001,
1336 			(htt_tlv_filter->mo_ctrl_filter &
1337 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1338 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1339 			CTRL, 0010,
1340 			(htt_tlv_filter->mo_ctrl_filter &
1341 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1342 		/* reserved */
1343 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1344 			CTRL, 0011,
1345 			(htt_tlv_filter->mo_ctrl_filter &
1346 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1347 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1348 			CTRL, 0100,
1349 			(htt_tlv_filter->mo_ctrl_filter &
1350 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1351 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1352 			CTRL, 0101,
1353 			(htt_tlv_filter->mo_ctrl_filter &
1354 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1355 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1356 			CTRL, 0110,
1357 			(htt_tlv_filter->mo_ctrl_filter &
1358 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1359 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1360 			CTRL, 0111,
1361 			(htt_tlv_filter->mo_ctrl_filter &
1362 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1363 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1364 			CTRL, 1000,
1365 			(htt_tlv_filter->mo_ctrl_filter &
1366 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1367 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1368 			CTRL, 1001,
1369 			(htt_tlv_filter->mo_ctrl_filter &
1370 			FILTER_CTRL_BA) ? 1 : 0);
1371 	}
1372 
1373 	/* word 5 */
1374 	msg_word++;
1375 	*msg_word = 0;
1376 	if (htt_tlv_filter->enable_fp) {
1377 		/* TYPE: CTRL */
1378 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1379 			CTRL, 1010,
1380 			(htt_tlv_filter->fp_ctrl_filter &
1381 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1382 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1383 			CTRL, 1011,
1384 			(htt_tlv_filter->fp_ctrl_filter &
1385 			FILTER_CTRL_RTS) ? 1 : 0);
1386 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1387 			CTRL, 1100,
1388 			(htt_tlv_filter->fp_ctrl_filter &
1389 			FILTER_CTRL_CTS) ? 1 : 0);
1390 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1391 			CTRL, 1101,
1392 			(htt_tlv_filter->fp_ctrl_filter &
1393 			FILTER_CTRL_ACK) ? 1 : 0);
1394 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1395 			CTRL, 1110,
1396 			(htt_tlv_filter->fp_ctrl_filter &
1397 			FILTER_CTRL_CFEND) ? 1 : 0);
1398 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1399 			CTRL, 1111,
1400 			(htt_tlv_filter->fp_ctrl_filter &
1401 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1402 		/* TYPE: DATA */
1403 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1404 			DATA, MCAST,
1405 			(htt_tlv_filter->fp_data_filter &
1406 			FILTER_DATA_MCAST) ? 1 : 0);
1407 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1408 			DATA, UCAST,
1409 			(htt_tlv_filter->fp_data_filter &
1410 			FILTER_DATA_UCAST) ? 1 : 0);
1411 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1412 			DATA, NULL,
1413 			(htt_tlv_filter->fp_data_filter &
1414 			FILTER_DATA_NULL) ? 1 : 0);
1415 	}
1416 
1417 	if (htt_tlv_filter->enable_md) {
1418 		/* TYPE: CTRL */
1419 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1420 			CTRL, 1010,
1421 			(htt_tlv_filter->md_ctrl_filter &
1422 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1423 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1424 			CTRL, 1011,
1425 			(htt_tlv_filter->md_ctrl_filter &
1426 			FILTER_CTRL_RTS) ? 1 : 0);
1427 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1428 			CTRL, 1100,
1429 			(htt_tlv_filter->md_ctrl_filter &
1430 			FILTER_CTRL_CTS) ? 1 : 0);
1431 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1432 			CTRL, 1101,
1433 			(htt_tlv_filter->md_ctrl_filter &
1434 			FILTER_CTRL_ACK) ? 1 : 0);
1435 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1436 			CTRL, 1110,
1437 			(htt_tlv_filter->md_ctrl_filter &
1438 			FILTER_CTRL_CFEND) ? 1 : 0);
1439 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1440 			CTRL, 1111,
1441 			(htt_tlv_filter->md_ctrl_filter &
1442 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1443 		/* TYPE: DATA */
1444 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1445 			DATA, MCAST,
1446 			(htt_tlv_filter->md_data_filter &
1447 			FILTER_DATA_MCAST) ? 1 : 0);
1448 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1449 			DATA, UCAST,
1450 			(htt_tlv_filter->md_data_filter &
1451 			FILTER_DATA_UCAST) ? 1 : 0);
1452 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1453 			DATA, NULL,
1454 			(htt_tlv_filter->md_data_filter &
1455 			FILTER_DATA_NULL) ? 1 : 0);
1456 	}
1457 
1458 	if (htt_tlv_filter->enable_mo) {
1459 		/* TYPE: CTRL */
1460 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1461 			CTRL, 1010,
1462 			(htt_tlv_filter->mo_ctrl_filter &
1463 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1464 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1465 			CTRL, 1011,
1466 			(htt_tlv_filter->mo_ctrl_filter &
1467 			FILTER_CTRL_RTS) ? 1 : 0);
1468 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1469 			CTRL, 1100,
1470 			(htt_tlv_filter->mo_ctrl_filter &
1471 			FILTER_CTRL_CTS) ? 1 : 0);
1472 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1473 			CTRL, 1101,
1474 			(htt_tlv_filter->mo_ctrl_filter &
1475 			FILTER_CTRL_ACK) ? 1 : 0);
1476 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1477 			CTRL, 1110,
1478 			(htt_tlv_filter->mo_ctrl_filter &
1479 			FILTER_CTRL_CFEND) ? 1 : 0);
1480 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1481 			CTRL, 1111,
1482 			(htt_tlv_filter->mo_ctrl_filter &
1483 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1484 		/* TYPE: DATA */
1485 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1486 			DATA, MCAST,
1487 			(htt_tlv_filter->mo_data_filter &
1488 			FILTER_DATA_MCAST) ? 1 : 0);
1489 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1490 			DATA, UCAST,
1491 			(htt_tlv_filter->mo_data_filter &
1492 			FILTER_DATA_UCAST) ? 1 : 0);
1493 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1494 			DATA, NULL,
1495 			(htt_tlv_filter->mo_data_filter &
1496 			FILTER_DATA_NULL) ? 1 : 0);
1497 	}
1498 
1499 	/* word 6 */
1500 	msg_word++;
1501 	*msg_word = 0;
1502 	tlv_filter = 0;
1503 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1504 		htt_tlv_filter->mpdu_start);
1505 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1506 		htt_tlv_filter->msdu_start);
1507 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1508 		htt_tlv_filter->packet);
1509 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1510 		htt_tlv_filter->msdu_end);
1511 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1512 		htt_tlv_filter->mpdu_end);
1513 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1514 		htt_tlv_filter->packet_header);
1515 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1516 		htt_tlv_filter->attention);
1517 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1518 		htt_tlv_filter->ppdu_start);
1519 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1520 		htt_tlv_filter->ppdu_end);
1521 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1522 		htt_tlv_filter->ppdu_end_user_stats);
1523 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1524 		PPDU_END_USER_STATS_EXT,
1525 		htt_tlv_filter->ppdu_end_user_stats_ext);
1526 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1527 		htt_tlv_filter->ppdu_end_status_done);
1528 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1529 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1530 		 htt_tlv_filter->header_per_msdu);
1531 
1532 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1533 
1534 	msg_word++;
1535 	*msg_word = 0;
1536 	if (htt_tlv_filter->offset_valid) {
1537 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1538 					htt_tlv_filter->rx_packet_offset);
1539 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1540 					htt_tlv_filter->rx_header_offset);
1541 
1542 		msg_word++;
1543 		*msg_word = 0;
1544 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1545 					htt_tlv_filter->rx_mpdu_end_offset);
1546 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1547 					htt_tlv_filter->rx_mpdu_start_offset);
1548 
1549 		msg_word++;
1550 		*msg_word = 0;
1551 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1552 					htt_tlv_filter->rx_msdu_end_offset);
1553 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1554 					htt_tlv_filter->rx_msdu_start_offset);
1555 
1556 		msg_word++;
1557 		*msg_word = 0;
1558 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1559 					htt_tlv_filter->rx_attn_offset);
1560 		msg_word++;
1561 		*msg_word = 0;
1562 	} else {
1563 		msg_word += 4;
1564 		*msg_word = 0;
1565 	}
1566 
1567 	if (mon_drop_th > 0)
1568 		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1569 								mon_drop_th);
1570 	dp_mon_rx_enable_mpdu_logging(soc->dp_soc, msg_word, htt_tlv_filter);
1571 
1572 	msg_word++;
1573 	*msg_word = 0;
1574 	dp_mon_rx_wmask_subscribe(soc->dp_soc, msg_word, htt_tlv_filter);
1575 
1576 	/* "response_required" field should be set if a HTT response message is
1577 	 * required after setting up the ring.
1578 	 */
1579 	pkt = htt_htc_pkt_alloc(soc);
1580 	if (!pkt)
1581 		goto fail1;
1582 
1583 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1584 
1585 	SET_HTC_PACKET_INFO_TX(
1586 		&pkt->htc_pkt,
1587 		dp_htt_h2t_send_complete_free_netbuf,
1588 		qdf_nbuf_data(htt_msg),
1589 		qdf_nbuf_len(htt_msg),
1590 		soc->htc_endpoint,
1591 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1592 
1593 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1594 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
1595 				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1596 				     htt_logger_bufp);
1597 
1598 	if (status != QDF_STATUS_SUCCESS) {
1599 		qdf_nbuf_free(htt_msg);
1600 		htt_htc_pkt_free(soc, pkt);
1601 	}
1602 
1603 	return status;
1604 
1605 fail1:
1606 	qdf_nbuf_free(htt_msg);
1607 fail0:
1608 	return QDF_STATUS_E_FAILURE;
1609 }
1610 
1611 qdf_export_symbol(htt_h2t_rx_ring_cfg);
1612 
1613 #if defined(HTT_STATS_ENABLE)
1614 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1615 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1616 
1617 {
1618 	uint32_t pdev_id;
1619 	uint32_t *msg_word = NULL;
1620 	uint32_t msg_remain_len = 0;
1621 
1622 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1623 
1624 	/*COOKIE MSB*/
1625 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1626 
1627 	/* stats message length + 16 size of HTT header*/
1628 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1629 				(uint32_t)DP_EXT_MSG_LENGTH);
1630 
1631 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1632 			msg_word,  msg_remain_len,
1633 			WDI_NO_VAL, pdev_id);
1634 
1635 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1636 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1637 	}
1638 	/* Need to be freed here as WDI handler will
1639 	 * make a copy of pkt to send data to application
1640 	 */
1641 	qdf_nbuf_free(htt_msg);
1642 	return QDF_STATUS_SUCCESS;
1643 }
1644 #else
1645 static inline QDF_STATUS
1646 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1647 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
1648 {
1649 	return QDF_STATUS_E_NOSUPPORT;
1650 }
1651 #endif
1652 
1653 #ifdef HTT_STATS_DEBUGFS_SUPPORT
1654 /* dp_send_htt_stats_dbgfs_msg() - Function to send htt data to upper layer.
1655  * @pdev: dp pdev handle
1656  * @msg_word: HTT msg
1657  * @msg_len: Length of HTT msg sent
1658  *
1659  * Return: none
1660  */
1661 static inline void
1662 dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
1663 			    uint32_t msg_len)
1664 {
1665 	struct htt_dbgfs_cfg dbgfs_cfg;
1666 	int done = 0;
1667 
1668 	/* send 5th word of HTT msg to upper layer */
1669 	dbgfs_cfg.msg_word = (msg_word + 4);
1670 	dbgfs_cfg.m = pdev->dbgfs_cfg->m;
1671 
1672 	/* stats message length + 16 size of HTT header*/
1673 	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);
1674 
1675 	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
1676 		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
1677 							     (msg_len - HTT_HEADER_LEN));
1678 
1679 	/* Get TLV Done bit from 4th msg word */
1680 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1681 	if (done) {
1682 		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
1683 			dp_htt_err("%pK: Failed to set event for debugfs htt stats"
1684 				   , pdev->soc);
1685 	}
1686 }
1687 #else
1688 static inline void
1689 dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
1690 			    uint32_t msg_len)
1691 {
1692 }
1693 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
1694 
1695 #ifdef WLAN_SYSFS_DP_STATS
1696 /* dp_htt_stats_sysfs_update_config() - Function to send htt data to upper layer.
1697  * @pdev: dp pdev handle
1698  *
1699  * This function sets the process id and printing mode within the sysfs config
1700  * struct. which enables DP_PRINT statements within this process to write to the
1701  * console buffer provided by the user space.
1702  *
1703  * Return: None
1704  */
1705 static inline void
1706 dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
1707 {
1708 	struct dp_soc *soc = pdev->soc;
1709 
1710 	if (!soc) {
1711 		dp_htt_err("soc is null");
1712 		return;
1713 	}
1714 
1715 	if (!soc->sysfs_config) {
1716 		dp_htt_err("soc->sysfs_config is NULL");
1717 		return;
1718 	}
1719 
1720 	/* set sysfs config parameters */
1721 	soc->sysfs_config->process_id = qdf_get_current_pid();
1722 	soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
1723 }
1724 
1725 /*
1726  * dp_htt_stats_sysfs_set_event() - Set sysfs stats event.
1727  * @soc: soc handle.
1728  * @msg_word: Pointer to htt msg word.
1729  *
1730  * @return: void
1731  */
1732 static inline void
1733 dp_htt_stats_sysfs_set_event(struct dp_soc *soc, uint32_t *msg_word)
1734 {
1735 	int done = 0;
1736 
1737 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1738 	if (done) {
1739 		if (qdf_event_set(&soc->sysfs_config->sysfs_txrx_fw_request_done))
1740 			dp_htt_err("%pK:event compl Fail to set event ",
1741 				   soc);
1742 	}
1743 }
1744 #else /* WLAN_SYSFS_DP_STATS */
1745 static inline void
1746 dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
1747 {
1748 }
1749 
1750 static inline void
1751 dp_htt_stats_sysfs_set_event(struct dp_soc *dp_soc, uint32_t *msg_word)
1752 {
1753 }
1754 #endif /* WLAN_SYSFS_DP_STATS */
1755 
1756 /**
1757  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1758  * @htt_stats: htt stats info
1759  *
1760  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1761  * contains sub messages which are identified by a TLV header.
1762  * In this function we will process the stream of T2H messages and read all the
1763  * TLV contained in the message.
1764  *
 * The following cases have been taken care of:
1766  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1767  *		In this case the buffer will contain multiple tlvs.
1768  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1769  *		Only one tlv will be contained in the HTT message and this tag
1770  *		will extend onto the next buffer.
1771  * Case 3: When the buffer is the continuation of the previous message
1772  * Case 4: tlv length is 0. which will indicate the end of message
1773  *
 * Return: void
1775  */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	/* scratch buffer used to reassemble a TLV that spans message buffers */
	uint8_t *tlv_buf_head = NULL;
	/* write cursor into tlv_buf_head while reassembling */
	uint8_t *tlv_buf_tail = NULL;
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val = 0;
	int cookie_msb = 0;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		/* word 1 is the cookie LSB; non-zero selects the WDI path */
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		/*
		 * Host-requested stats: hand off to WDI consumers.
		 * dp_send_htt_stat_resp() frees htt_msg on success.
		 */
		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		/* cookie MSB carries delivery-path flags and the pdev id */
		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		/* debugfs-originated request: forward and free, no TLV walk */
		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
						    htt_stats->msg_len);
			qdf_nbuf_free(htt_msg);
			continue;
		}

		/* sysfs-originated request: route DP_PRINT output to caller */
		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
			dp_htt_stats_sysfs_update_config(pdev);

		/* copy TLVs into DP stats structs instead of printing them */
		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
			copy_stats = true;

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero-length TLV marks end of the stream */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* TLV length field excludes its header; add it once */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3 */
				if (tlv_buf_head) {
					/* copy the final fragment, then parse
					 * from the reassembly buffer
					 */
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev,
							      tlv_type,
							      tlv_start);
				else
					dp_htt_stats_print_tag(pdev,
							       tlv_type,
							       tlv_start);

				/* peer stats TLVs also refresh inactivity
				 * tracking
				 */
				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
					dp_peer_update_inactive_time(pdev,
								     tlv_type,
								     tlv_start);

				msg_remain_len -= tlv_remain_len;

				/* advance past this TLV (byte arithmetic) */
				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				/* reassembly complete; release scratch buf */
				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				/* buffer the partial TLV; the remainder comes
				 * in the next queued message
				 */
				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		/* indicate event completion in case the event is done */
		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
			dp_htt_stats_sysfs_set_event(soc, msg_word);

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* malformed stream: drop the current buffer and drain the queue */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
1939 }
1940 
1941 void htt_t2h_stats_handler(void *context)
1942 {
1943 	struct dp_soc *soc = (struct dp_soc *)context;
1944 	struct htt_stats_context htt_stats;
1945 	uint32_t *msg_word;
1946 	qdf_nbuf_t htt_msg = NULL;
1947 	uint8_t done;
1948 	uint32_t rem_stats;
1949 
1950 	if (!soc) {
1951 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1952 			  "soc is NULL");
1953 		return;
1954 	}
1955 
1956 	if (!qdf_atomic_read(&soc->cmn_init_done)) {
1957 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1958 			  "soc: 0x%pK, init_done: %d", soc,
1959 			  qdf_atomic_read(&soc->cmn_init_done));
1960 		return;
1961 	}
1962 
1963 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1964 	qdf_nbuf_queue_init(&htt_stats.msg);
1965 
1966 	/* pull one completed stats from soc->htt_stats_msg and process */
1967 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1968 	if (!soc->htt_stats.num_stats) {
1969 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1970 		return;
1971 	}
1972 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1973 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1974 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1975 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1976 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1977 		/*
1978 		 * Done bit signifies that this is the last T2H buffer in the
1979 		 * stream of HTT EXT STATS message
1980 		 */
1981 		if (done)
1982 			break;
1983 	}
1984 	rem_stats = --soc->htt_stats.num_stats;
1985 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1986 
1987 	/* If there are more stats to process, schedule stats work again.
1988 	 * Scheduling prior to processing ht_stats to queue with early
1989 	 * index
1990 	 */
1991 	if (rem_stats)
1992 		qdf_sched_work(0, &soc->htt_stats.work);
1993 
1994 	dp_process_htt_stat_msg(&htt_stats, soc);
1995 }
1996 
1997 /**
1998  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
1999  * @soc: DP SOC handle
2000  * @htt_t2h_msg: HTT message nbuf
2001  *
2002  * return:void
2003  */
2004 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2005 					    qdf_nbuf_t htt_t2h_msg)
2006 {
2007 	uint8_t done;
2008 	qdf_nbuf_t msg_copy;
2009 	uint32_t *msg_word;
2010 
2011 	msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
2012 	msg_word = msg_word + 3;
2013 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2014 
2015 	/*
2016 	 * HTT EXT stats response comes as stream of TLVs which span over
2017 	 * multiple T2H messages.
2018 	 * The first message will carry length of the response.
2019 	 * For rest of the messages length will be zero.
2020 	 *
2021 	 * Clone the T2H message buffer and store it in a list to process
2022 	 * it later.
2023 	 *
2024 	 * The original T2H message buffers gets freed in the T2H HTT event
2025 	 * handler
2026 	 */
2027 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2028 
2029 	if (!msg_copy) {
2030 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2031 			  "T2H messge clone failed for HTT EXT STATS");
2032 		goto error;
2033 	}
2034 
2035 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2036 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2037 	/*
2038 	 * Done bit signifies that this is the last T2H buffer in the stream of
2039 	 * HTT EXT STATS message
2040 	 */
2041 	if (done) {
2042 		soc->htt_stats.num_stats++;
2043 		qdf_sched_work(0, &soc->htt_stats.work);
2044 	}
2045 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2046 
2047 	return;
2048 
2049 error:
2050 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2051 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2052 			!= NULL) {
2053 		qdf_nbuf_free(msg_copy);
2054 	}
2055 	soc->htt_stats.num_stats = 0;
2056 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2057 	return;
2058 }
2059 
/*
 * htt_soc_attach_target() - SOC level HTT setup
 * @htt_soc:	HTT SOC handle
 *
 * Sends the H2T version-request message as the only target-side setup step.
 *
 * Return: 0 on success; error code on failure
 */
int htt_soc_attach_target(struct htt_soc *htt_soc)
{
	/* The parameter is already the right type; no cast/local needed */
	return htt_h2t_ver_req_msg(htt_soc);
}
2072 
/*
 * htt_set_htc_handle() - Store the HTC handle in the HTT SOC
 * @htt_soc:	HTT SOC handle
 * @htc_soc:	HTC handle to associate with this HTT SOC
 *
 * Return: None
 */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}
2077 
/*
 * htt_get_htc_handle() - Retrieve the HTC handle stored in the HTT SOC
 * @htt_soc:	HTT SOC handle
 *
 * Return: the HTC handle previously set via htt_set_htc_handle()
 */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
2082 
2083 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
2084 {
2085 	int i;
2086 	int j;
2087 	int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
2088 	struct htt_soc *htt_soc = NULL;
2089 
2090 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
2091 	if (!htt_soc) {
2092 		dp_err("HTT attach failed");
2093 		return NULL;
2094 	}
2095 
2096 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2097 		htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
2098 		if (!htt_soc->pdevid_tt[i].umac_ttt)
2099 			break;
2100 		qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
2101 		htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
2102 		if (!htt_soc->pdevid_tt[i].lmac_ttt) {
2103 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
2104 			break;
2105 		}
2106 		qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
2107 	}
2108 	if (i != MAX_PDEV_CNT) {
2109 		for (j = 0; j < i; j++) {
2110 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt);
2111 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt);
2112 		}
2113 		qdf_mem_free(htt_soc);
2114 		return NULL;
2115 	}
2116 
2117 	htt_soc->dp_soc = soc;
2118 	htt_soc->htc_soc = htc_handle;
2119 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
2120 
2121 	return htt_soc;
2122 }
2123 
#if defined(WDI_EVENT_ENABLE) && \
	!defined(REMOVE_PKT_LOG)
/*
 * dp_pktlog_msg_handler() - Pktlog msg handler
 * @soc:	 HTT SOC handle
 * @msg_word:    Pointer to payload
 *
 * Forwards the pktlog payload from FW to WDI subscribers.
 *
 * Return: None
 */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint8_t target_pdev_id;
	uint32_t *pl_hdr;

	/* FW reports its own pdev id; translate to host pdev numbering */
	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	/* Pktlog header starts right after the first HTT word */
	pl_hdr = (msg_word + 1);
	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
		pdev_id);
}
#else
/* Stub used when pktlog support is compiled out */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
#endif
2156 
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/*
 * dp_vdev_txrx_hw_stats_handler - Handle vdev stats received from FW
 * @soc - htt soc handle
 * @ msg_word - buffer containing stats
 *
 * Walks the TLV stream in the periodic vdev txrx stats indication and
 * updates SOC-level drop counters and per-vdev rx/tx counters.
 *
 * Return: void
 */
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{
	struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
	uint8_t pdev_id;
	uint8_t vdev_id;
	uint8_t target_pdev_id;
	uint16_t payload_size;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	uint8_t *tlv_buf;
	uint32_t *tlv_buf_temp;
	uint32_t *tag_buf;
	htt_tlv_tag_t tlv_type;
	uint16_t tlv_length;
	uint64_t pkt_count = 0;
	uint64_t byte_count = 0;
	uint64_t soc_drop_cnt = 0;
	struct cdp_pkt_info tx_comp = { 0 };
	struct cdp_pkt_info tx_failed =  { 0 };

	target_pdev_id =
		HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(dpsoc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT)
		return;

	pdev = dpsoc->pdev_list[pdev_id];
	if (!pdev) {
		dp_err("PDEV is NULL for pdev_id:%d", pdev_id);
		return;
	}

	payload_size =
	HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PAYLOAD_SIZE_GET(*msg_word);

	/* Dump header (16 bytes) plus the TLV payload for debugging */
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   (void *)msg_word, payload_size + 16);

	/* Adjust msg_word to point to the first TLV in buffer */
	msg_word = msg_word + 4;

	/* Parse the received buffer till payload size reaches 0 */
	while (payload_size > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_buf_temp = msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);

		/* Add header size to tlv length*/
		tlv_length += 4;

		switch (tlv_type) {
		case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
		{
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(SOC_DROP_CNT);
			soc_drop_cnt = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(dpsoc, tx.tqm_drop_no_peer, soc_drop_cnt);
			break;
		}
		case HTT_STATS_VDEV_TXRX_STATS_HW_STATS_TAG:
		{
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(VDEV_ID);
			vdev_id = (uint8_t)(*tag_buf);
			vdev = dp_vdev_get_ref_by_id(dpsoc, vdev_id,
						     DP_MOD_ID_HTT);

			/* Unknown vdev: skip this TLV, keep parsing */
			if (!vdev)
				goto invalid_vdev;

			/* Extract received packet count from buffer */
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(RX_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.num, pkt_count);

			/* Extract received packet byte count from buffer */
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(RX_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.bytes, byte_count);

			/* Extract tx success packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num = pkt_count;

			/* Extract tx success packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes = byte_count;

			/* Extract tx retry packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num = pkt_count;

			/* Extract tx retry packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes = byte_count;

			/* Extract tx drop packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_DROP_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num += pkt_count;

			/* Extract tx drop packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_DROP_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes += byte_count;

			/* Extract tx age-out packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num += pkt_count;

			/* Extract tx age-out packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes += byte_count;

			/* tx_comp aggregates success+retry+drop+age-out;
			 * tx_failed aggregates retry+drop+age-out
			 */
			DP_STATS_UPD(vdev, tx.comp_pkt.num, tx_comp.num);
			DP_STATS_UPD(vdev, tx.comp_pkt.bytes, tx_comp.bytes);

			DP_STATS_UPD(vdev, tx.tx_failed, tx_failed.num);

			dp_vdev_unref_delete(dpsoc, vdev, DP_MOD_ID_HTT);
			break;
		}
		default:
			qdf_assert(0);
		}
		/* Advance past this TLV (header + payload) */
invalid_vdev:
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		payload_size -= tlv_length;
	}
}
#else
/* Stub used when HW-offloaded vdev stats are not supported */
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{}
#endif
2326 
2327 /*
2328  * time_allow_print() - time allow print
2329  * @htt_ring_tt:	ringi_id array of timestamps
2330  * @ring_id:		ring_id (index)
2331  *
2332  * Return: 1 for successfully saving timestamp in array
2333  *	and 0 for timestamp falling within 2 seconds after last one
2334  */
2335 static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
2336 {
2337 	unsigned long tstamp;
2338 	unsigned long delta;
2339 
2340 	tstamp = qdf_get_system_timestamp();
2341 
2342 	if (!htt_ring_tt)
2343 		return 0; //unable to print backpressure messages
2344 
2345 	if (htt_ring_tt[ring_id] == -1) {
2346 		htt_ring_tt[ring_id] = tstamp;
2347 		return 1;
2348 	}
2349 	delta = tstamp - htt_ring_tt[ring_id];
2350 	if (delta >= 2000) {
2351 		htt_ring_tt[ring_id] = tstamp;
2352 		return 1;
2353 	}
2354 
2355 	return 0;
2356 }
2357 
/*
 * dp_htt_alert_print() - Log one backpressure alert for a ring
 * @msg_type:	HTT T2H message type that reported the backpressure
 * @pdev:	DP pdev the ring belongs to
 * @ring_id:	backpressured ring id
 * @hp_idx:	ring head pointer index
 * @tp_idx:	ring tail pointer index
 * @bkp_time:	backpressure duration reported by FW, in ms
 * @ring_stype:	printable ring type string (UMAC/LMAC/UNKNOWN)
 *
 * Return: None
 */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       struct dp_pdev *pdev, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time, char *ring_stype)
{
	dp_alert("seq_num %u msg_type: %d pdev_id: %d ring_type: %s ",
		 pdev->bkp_stats.seq_num, msg_type, pdev->pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
}
2368 
2369 /**
2370  * dp_get_srng_ring_state_from_hal(): Get hal level ring stats
2371  * @soc: DP_SOC handle
2372  * @srng: DP_SRNG handle
2373  * @ring_type: srng src/dst ring
2374  *
2375  * Return: void
2376  */
2377 static QDF_STATUS
2378 dp_get_srng_ring_state_from_hal(struct dp_soc *soc,
2379 				struct dp_pdev *pdev,
2380 				struct dp_srng *srng,
2381 				enum hal_ring_type ring_type,
2382 				struct dp_srng_ring_state *state)
2383 {
2384 	struct hal_soc *hal_soc;
2385 
2386 	if (!soc || !srng || !srng->hal_srng || !state)
2387 		return QDF_STATUS_E_INVAL;
2388 
2389 	hal_soc = (struct hal_soc *)soc->hal_soc;
2390 
2391 	hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &state->sw_tail,
2392 			&state->sw_head);
2393 
2394 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &state->hw_head,
2395 			&state->hw_tail, ring_type);
2396 
2397 	state->ring_type = ring_type;
2398 
2399 	return QDF_STATUS_SUCCESS;
2400 }
2401 
#ifdef QCA_MONITOR_PKT_SUPPORT
/*
 * dp_queue_mon_ring_stats() - Snapshot monitor-ring states for one lmac
 * @pdev:		DP pdev handle
 * @lmac_id:		lmac index whose monitor rings are queried
 * @num_srng:		in/out index into soc_srngs_state->ring_state[]
 * @soc_srngs_state:	destination ring-state collection
 *
 * Only runs when rxdma1 is enabled; each successful query advances
 * *num_srng, which must stay below DP_MAX_SRNGS.
 */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
	QDF_STATUS status;

	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_buf_ring[lmac_id],
			 RXDMA_MONITOR_BUF,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);

		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_dst_ring[lmac_id],
			 RXDMA_MONITOR_DST,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);

		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_desc_ring[lmac_id],
			 RXDMA_MONITOR_DESC,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
	}
}
#else
/* Stub used when monitor packet support is compiled out */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
}
#endif
2447 
2448 /**
2449  * dp_queue_srng_ring_stats(): Print pdev hal level ring stats
2450  * @pdev: DP_pdev handle
2451  *
2452  * Return: void
2453  */
2454 static void dp_queue_ring_stats(struct dp_pdev *pdev)
2455 {
2456 	uint32_t i;
2457 	int mac_id;
2458 	int lmac_id;
2459 	uint32_t j = 0;
2460 	struct dp_soc *soc = pdev->soc;
2461 	struct dp_soc_srngs_state * soc_srngs_state = NULL;
2462 	struct dp_soc_srngs_state *drop_srngs_state = NULL;
2463 	QDF_STATUS status;
2464 
2465 	soc_srngs_state = qdf_mem_malloc(sizeof(struct dp_soc_srngs_state));
2466 	if (!soc_srngs_state) {
2467 		dp_htt_alert("Memory alloc failed for back pressure event");
2468 		return;
2469 	}
2470 
2471 	status = dp_get_srng_ring_state_from_hal
2472 				(pdev->soc, pdev,
2473 				 &pdev->soc->reo_exception_ring,
2474 				 REO_EXCEPTION,
2475 				 &soc_srngs_state->ring_state[j]);
2476 
2477 	if (status == QDF_STATUS_SUCCESS)
2478 		qdf_assert_always(++j < DP_MAX_SRNGS);
2479 
2480 	status = dp_get_srng_ring_state_from_hal
2481 				(pdev->soc, pdev,
2482 				 &pdev->soc->reo_reinject_ring,
2483 				 REO_REINJECT,
2484 				 &soc_srngs_state->ring_state[j]);
2485 
2486 	if (status == QDF_STATUS_SUCCESS)
2487 		qdf_assert_always(++j < DP_MAX_SRNGS);
2488 
2489 	status = dp_get_srng_ring_state_from_hal
2490 				(pdev->soc, pdev,
2491 				 &pdev->soc->reo_cmd_ring,
2492 				 REO_CMD,
2493 				 &soc_srngs_state->ring_state[j]);
2494 
2495 	if (status == QDF_STATUS_SUCCESS)
2496 		qdf_assert_always(++j < DP_MAX_SRNGS);
2497 
2498 	status = dp_get_srng_ring_state_from_hal
2499 				(pdev->soc, pdev,
2500 				 &pdev->soc->reo_status_ring,
2501 				 REO_STATUS,
2502 				 &soc_srngs_state->ring_state[j]);
2503 
2504 	if (status == QDF_STATUS_SUCCESS)
2505 		qdf_assert_always(++j < DP_MAX_SRNGS);
2506 
2507 	status = dp_get_srng_ring_state_from_hal
2508 				(pdev->soc, pdev,
2509 				 &pdev->soc->rx_rel_ring,
2510 				 WBM2SW_RELEASE,
2511 				 &soc_srngs_state->ring_state[j]);
2512 
2513 	if (status == QDF_STATUS_SUCCESS)
2514 		qdf_assert_always(++j < DP_MAX_SRNGS);
2515 
2516 	status = dp_get_srng_ring_state_from_hal
2517 				(pdev->soc, pdev,
2518 				 &pdev->soc->tcl_cmd_credit_ring,
2519 				 TCL_CMD_CREDIT,
2520 				 &soc_srngs_state->ring_state[j]);
2521 
2522 	if (status == QDF_STATUS_SUCCESS)
2523 		qdf_assert_always(++j < DP_MAX_SRNGS);
2524 
2525 	status = dp_get_srng_ring_state_from_hal
2526 				(pdev->soc, pdev,
2527 				 &pdev->soc->tcl_status_ring,
2528 				 TCL_STATUS,
2529 				 &soc_srngs_state->ring_state[j]);
2530 
2531 	if (status == QDF_STATUS_SUCCESS)
2532 		qdf_assert_always(++j < DP_MAX_SRNGS);
2533 
2534 	status = dp_get_srng_ring_state_from_hal
2535 				(pdev->soc, pdev,
2536 				 &pdev->soc->wbm_desc_rel_ring,
2537 				 SW2WBM_RELEASE,
2538 				 &soc_srngs_state->ring_state[j]);
2539 
2540 	if (status == QDF_STATUS_SUCCESS)
2541 		qdf_assert_always(++j < DP_MAX_SRNGS);
2542 
2543 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
2544 		status = dp_get_srng_ring_state_from_hal
2545 				(pdev->soc, pdev,
2546 				 &pdev->soc->reo_dest_ring[i],
2547 				 REO_DST,
2548 				 &soc_srngs_state->ring_state[j]);
2549 
2550 		if (status == QDF_STATUS_SUCCESS)
2551 			qdf_assert_always(++j < DP_MAX_SRNGS);
2552 	}
2553 
2554 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
2555 		status = dp_get_srng_ring_state_from_hal
2556 				(pdev->soc, pdev,
2557 				 &pdev->soc->tcl_data_ring[i],
2558 				 TCL_DATA,
2559 				 &soc_srngs_state->ring_state[j]);
2560 
2561 		if (status == QDF_STATUS_SUCCESS)
2562 			qdf_assert_always(++j < DP_MAX_SRNGS);
2563 	}
2564 
2565 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
2566 		status = dp_get_srng_ring_state_from_hal
2567 				(pdev->soc, pdev,
2568 				 &pdev->soc->tx_comp_ring[i],
2569 				 WBM2SW_RELEASE,
2570 				 &soc_srngs_state->ring_state[j]);
2571 
2572 		if (status == QDF_STATUS_SUCCESS)
2573 			qdf_assert_always(++j < DP_MAX_SRNGS);
2574 	}
2575 
2576 	lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id);
2577 	status = dp_get_srng_ring_state_from_hal
2578 				(pdev->soc, pdev,
2579 				 &pdev->soc->rx_refill_buf_ring
2580 				 [lmac_id],
2581 				 RXDMA_BUF,
2582 				 &soc_srngs_state->ring_state[j]);
2583 
2584 	if (status == QDF_STATUS_SUCCESS)
2585 		qdf_assert_always(++j < DP_MAX_SRNGS);
2586 
2587 	status = dp_get_srng_ring_state_from_hal
2588 				(pdev->soc, pdev,
2589 				 &pdev->rx_refill_buf_ring2,
2590 				 RXDMA_BUF,
2591 				 &soc_srngs_state->ring_state[j]);
2592 
2593 	if (status == QDF_STATUS_SUCCESS)
2594 		qdf_assert_always(++j < DP_MAX_SRNGS);
2595 
2596 
2597 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2598 		dp_get_srng_ring_state_from_hal
2599 				(pdev->soc, pdev,
2600 				 &pdev->rx_mac_buf_ring[i],
2601 				 RXDMA_BUF,
2602 				 &soc_srngs_state->ring_state[j]);
2603 
2604 		if (status == QDF_STATUS_SUCCESS)
2605 			qdf_assert_always(++j < DP_MAX_SRNGS);
2606 	}
2607 
2608 	for (mac_id = 0;
2609 	     mac_id  < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
2610 	     mac_id++) {
2611 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2612 						     mac_id, pdev->pdev_id);
2613 
2614 		dp_queue_mon_ring_stats(pdev, lmac_id, &j,
2615 					soc_srngs_state);
2616 
2617 		status = dp_get_srng_ring_state_from_hal
2618 			(pdev->soc, pdev,
2619 			 &pdev->soc->rxdma_mon_status_ring[lmac_id],
2620 			 RXDMA_MONITOR_STATUS,
2621 			 &soc_srngs_state->ring_state[j]);
2622 
2623 		if (status == QDF_STATUS_SUCCESS)
2624 			qdf_assert_always(++j < DP_MAX_SRNGS);
2625 	}
2626 
2627 	for (i = 0; i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
2628 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2629 						     i, pdev->pdev_id);
2630 
2631 		status = dp_get_srng_ring_state_from_hal
2632 				(pdev->soc, pdev,
2633 				 &pdev->soc->rxdma_err_dst_ring
2634 				 [lmac_id],
2635 				 RXDMA_DST,
2636 				 &soc_srngs_state->ring_state[j]);
2637 
2638 		if (status == QDF_STATUS_SUCCESS)
2639 			qdf_assert_always(++j < DP_MAX_SRNGS);
2640 	}
2641 	soc_srngs_state->max_ring_id = j;
2642 
2643 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
2644 
2645 	soc_srngs_state->seq_num = pdev->bkp_stats.seq_num;
2646 
2647 	if (pdev->bkp_stats.queue_depth >= HTT_BKP_STATS_MAX_QUEUE_DEPTH) {
2648 		drop_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
2649 		qdf_assert_always(drop_srngs_state);
2650 		TAILQ_REMOVE(&pdev->bkp_stats.list, drop_srngs_state,
2651 			     list_elem);
2652 		qdf_mem_free(drop_srngs_state);
2653 		pdev->bkp_stats.queue_depth--;
2654 	}
2655 
2656 	pdev->bkp_stats.queue_depth++;
2657 	TAILQ_INSERT_TAIL(&pdev->bkp_stats.list, soc_srngs_state,
2658 			  list_elem);
2659 	pdev->bkp_stats.seq_num++;
2660 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
2661 
2662 	qdf_queue_work(0, pdev->bkp_stats.work_queue,
2663 		       &pdev->bkp_stats.work);
2664 }
2665 
2666 /*
2667  * dp_htt_bkp_event_alert() - htt backpressure event alert
2668  * @msg_word:	htt packet context
2669  * @htt_soc:	HTT SOC handle
2670  *
2671  * Return: after attempting to print stats
2672  */
2673 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
2674 {
2675 	u_int8_t ring_type;
2676 	u_int8_t pdev_id;
2677 	uint8_t target_pdev_id;
2678 	u_int8_t ring_id;
2679 	u_int16_t hp_idx;
2680 	u_int16_t tp_idx;
2681 	u_int32_t bkp_time;
2682 	enum htt_t2h_msg_type msg_type;
2683 	struct dp_soc *dpsoc;
2684 	struct dp_pdev *pdev;
2685 	struct dp_htt_timestamp *radio_tt;
2686 
2687 	if (!soc)
2688 		return;
2689 
2690 	dpsoc = (struct dp_soc *)soc->dp_soc;
2691 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2692 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
2693 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
2694 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2695 							 target_pdev_id);
2696 	if (pdev_id >= MAX_PDEV_CNT) {
2697 		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
2698 		return;
2699 	}
2700 
2701 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
2702 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
2703 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
2704 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
2705 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
2706 	radio_tt = &soc->pdevid_tt[pdev_id];
2707 
2708 	switch (ring_type) {
2709 	case HTT_SW_RING_TYPE_UMAC:
2710 		if (!time_allow_print(radio_tt->umac_ttt, ring_id))
2711 			return;
2712 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2713 				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
2714 	break;
2715 	case HTT_SW_RING_TYPE_LMAC:
2716 		if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
2717 			return;
2718 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2719 				   bkp_time, "HTT_SW_RING_TYPE_LMAC");
2720 	break;
2721 	default:
2722 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2723 				   bkp_time, "UNKNOWN");
2724 	break;
2725 	}
2726 
2727 	dp_queue_ring_stats(pdev);
2728 }
2729 
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/*
 * dp_offload_ind_handler() - offload msg handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Translates the target pdev id to the host pdev id and forwards the
 * offloaded tx-data indication to WDI subscribers.
 *
 * Return: None
 */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
	u_int8_t pdev_id;
	u_int8_t target_pdev_id;

	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA, soc->dp_soc,
			     msg_word, HTT_INVALID_VDEV, WDI_NO_VAL,
			     pdev_id);
}
#else
/* Stub used when packet capture v2 is compiled out */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
}
#endif
2757 
2758 #ifdef WLAN_FEATURE_11BE_MLO
2759 static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
2760 					uint32_t *msg_word)
2761 {
2762 	uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
2763 	uint8_t *mlo_peer_mac_addr;
2764 	uint16_t mlo_peer_id;
2765 	uint8_t num_links;
2766 	struct dp_mlo_flow_override_info mlo_flow_info[DP_MLO_FLOW_INFO_MAX];
2767 
2768 	mlo_peer_id = HTT_RX_MLO_PEER_MAP_MLO_PEER_ID_GET(*msg_word);
2769 	num_links =
2770 		HTT_RX_MLO_PEER_MAP_NUM_LOGICAL_LINKS_GET(*msg_word);
2771 	mlo_peer_mac_addr =
2772 	htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
2773 				   &mac_addr_deswizzle_buf[0]);
2774 
2775 	mlo_flow_info[0].ast_idx =
2776 		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
2777 	mlo_flow_info[0].ast_idx_valid =
2778 		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
2779 	mlo_flow_info[0].chip_id =
2780 		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
2781 	mlo_flow_info[0].tidmask =
2782 		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
2783 	mlo_flow_info[0].cache_set_num =
2784 	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));
2785 
2786 	mlo_flow_info[1].ast_idx =
2787 		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
2788 	mlo_flow_info[1].ast_idx_valid =
2789 		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
2790 	mlo_flow_info[1].chip_id =
2791 		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
2792 	mlo_flow_info[1].tidmask =
2793 		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
2794 	mlo_flow_info[1].cache_set_num =
2795 	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));
2796 
2797 	mlo_flow_info[2].ast_idx =
2798 		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
2799 	mlo_flow_info[2].ast_idx_valid =
2800 		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
2801 	mlo_flow_info[2].chip_id =
2802 		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
2803 	mlo_flow_info[2].tidmask =
2804 		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
2805 	mlo_flow_info[2].cache_set_num =
2806 	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));
2807 
2808 	dp_rx_mlo_peer_map_handler(soc->dp_soc, mlo_peer_id,
2809 				   mlo_peer_mac_addr,
2810 				   mlo_flow_info);
2811 }
2812 
2813 static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
2814 					  uint32_t *msg_word)
2815 {
2816 	uint16_t mlo_peer_id;
2817 
2818 	mlo_peer_id = HTT_RX_MLO_PEER_UNMAP_MLO_PEER_ID_GET(*msg_word);
2819 	dp_rx_mlo_peer_unmap_handler(soc->dp_soc, mlo_peer_id);
2820 }
2821 
/*
 * dp_rx_mlo_timestamp_ind_handler() - Handle MLO timestamp-offset indication
 * @soc: DP SOC handle
 * @msg_word: pointer to the first word of the T2H message
 *
 * Resolves the target pdev id in word 0 to a host pdev, forwards the raw
 * message to WDI subscribers, then caches the sync timestamps and MLO
 * offset/compensation values in pdev->timestamp.
 */
static void
dp_rx_mlo_timestamp_ind_handler(struct dp_soc *soc,
				uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint8_t target_pdev_id;
	struct dp_pdev *pdev;

	if (!soc)
		return;

	/* FW reports a target-side pdev id; map it to the host numbering */
	target_pdev_id = HTT_T2H_MLO_TIMESTAMP_OFFSET_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT) {
		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
		return;
	}

	pdev = (struct dp_pdev *)soc->pdev_list[pdev_id];

	if (!pdev) {
		dp_err("Invalid pdev");
		return;
	}
	/* Let WDI subscribers see the raw indication before caching it */
	dp_wdi_event_handler(WDI_EVENT_MLO_TSTMP, soc,
			     msg_word, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);

	/*
	 * NOTE(review): the htt_stats lock is reused here to serialize
	 * writers/readers of pdev->timestamp — confirm this is intended
	 * rather than a dedicated timestamp lock.
	 */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	pdev->timestamp.msg_type =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MSG_TYPE_GET(*msg_word);
	pdev->timestamp.pdev_id = pdev_id;
	pdev->timestamp.chip_id =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_CHIP_ID_GET(*msg_word);
	pdev->timestamp.mac_clk_freq =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MAC_CLK_FREQ_MHZ_GET(*msg_word);
	/* Words 1-5 carry raw 32-bit timestamp/offset values */
	pdev->timestamp.sync_tstmp_lo_us = *(msg_word + 1);
	pdev->timestamp.sync_tstmp_hi_us = *(msg_word + 2);
	pdev->timestamp.mlo_offset_lo_us = *(msg_word + 3);
	pdev->timestamp.mlo_offset_hi_us = *(msg_word + 4);
	pdev->timestamp.mlo_offset_clks  = *(msg_word + 5);
	/* Words 6-7 pack the compensation fields */
	pdev->timestamp.mlo_comp_us =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_US_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_clks =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_CLKS_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_timer =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_PERIOD_US_GET(
							*(msg_word + 7));
	qdf_spin_unlock_bh(&soc->htt_stats.lock);
}
2876 #else
/*
 * Stub for builds without MLO support: the FW must never send an MLO
 * peer map indication in this configuration, so treat it as fatal.
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	qdf_assert_always(0);
}
2882 
/*
 * Stub for builds without MLO support: an MLO peer unmap indication
 * is unexpected here, so treat it as fatal.
 */
static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
					 uint32_t *msg_word)
{
	qdf_assert_always(0);
}
2888 
/*
 * Stub for builds without MLO support: an MLO timestamp-offset
 * indication is unexpected here, so treat it as fatal.
 */
static void
dp_rx_mlo_timestamp_ind_handler(void *soc_handle,
				uint32_t *msg_word)
{
	qdf_assert_always(0);
}
2895 #endif
2896 
2897 /*
2898  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
2899  * @context:	Opaque context (HTT SOC handle)
2900  * @pkt:	HTC packet
2901  */
2902 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
2903 {
2904 	struct htt_soc *soc = (struct htt_soc *) context;
2905 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
2906 	u_int32_t *msg_word;
2907 	enum htt_t2h_msg_type msg_type;
2908 	bool free_buf = true;
2909 
2910 	/* check for successful message reception */
2911 	if (pkt->Status != QDF_STATUS_SUCCESS) {
2912 		if (pkt->Status != QDF_STATUS_E_CANCELED)
2913 			soc->stats.htc_err_cnt++;
2914 
2915 		qdf_nbuf_free(htt_t2h_msg);
2916 		return;
2917 	}
2918 
2919 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
2920 
2921 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
2922 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2923 	htt_event_record(soc->htt_logger_handle,
2924 			 msg_type, (uint8_t *)msg_word);
2925 	switch (msg_type) {
2926 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
2927 	{
2928 		dp_htt_bkp_event_alert(msg_word, soc);
2929 		break;
2930 	}
2931 	case HTT_T2H_MSG_TYPE_PEER_MAP:
2932 		{
2933 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
2934 			u_int8_t *peer_mac_addr;
2935 			u_int16_t peer_id;
2936 			u_int16_t hw_peer_id;
2937 			u_int8_t vdev_id;
2938 			u_int8_t is_wds;
2939 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
2940 
2941 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
2942 			hw_peer_id =
2943 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
2944 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
2945 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
2946 				(u_int8_t *) (msg_word+1),
2947 				&mac_addr_deswizzle_buf[0]);
2948 			QDF_TRACE(QDF_MODULE_ID_TXRX,
2949 				QDF_TRACE_LEVEL_INFO,
2950 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
2951 				peer_id, vdev_id);
2952 
2953 			/*
2954 			 * check if peer already exists for this peer_id, if so
2955 			 * this peer map event is in response for a wds peer add
2956 			 * wmi command sent during wds source port learning.
2957 			 * in this case just add the ast entry to the existing
2958 			 * peer ast_list.
2959 			 */
2960 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
2961 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
2962 					       vdev_id, peer_mac_addr, 0,
2963 					       is_wds);
2964 			break;
2965 		}
2966 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
2967 		{
2968 			u_int16_t peer_id;
2969 			u_int8_t vdev_id;
2970 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
2971 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
2972 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
2973 
2974 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
2975 						 vdev_id, mac_addr, 0,
2976 						 DP_PEER_WDS_COUNT_INVALID);
2977 			break;
2978 		}
2979 	case HTT_T2H_MSG_TYPE_SEC_IND:
2980 		{
2981 			u_int16_t peer_id;
2982 			enum cdp_sec_type sec_type;
2983 			int is_unicast;
2984 
2985 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
2986 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
2987 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
2988 			/* point to the first part of the Michael key */
2989 			msg_word++;
2990 			dp_rx_sec_ind_handler(
2991 				soc->dp_soc, peer_id, sec_type, is_unicast,
2992 				msg_word, msg_word + 2);
2993 			break;
2994 		}
2995 
2996 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
2997 		{
2998 			free_buf =
2999 				dp_monitor_ppdu_stats_ind_handler(soc,
3000 								  msg_word,
3001 								  htt_t2h_msg);
3002 			break;
3003 		}
3004 
3005 	case HTT_T2H_MSG_TYPE_PKTLOG:
3006 		{
3007 			dp_pktlog_msg_handler(soc, msg_word);
3008 			break;
3009 		}
3010 
3011 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3012 		{
3013 			/*
3014 			 * HTC maintains runtime pm count for H2T messages that
3015 			 * have a response msg from FW. This count ensures that
3016 			 * in the case FW does not sent out the response or host
3017 			 * did not process this indication runtime_put happens
3018 			 * properly in the cleanup path.
3019 			 */
3020 			if (htc_dec_return_runtime_cnt(soc->htc_soc) >= 0)
3021 				htc_pm_runtime_put(soc->htc_soc);
3022 			else
3023 				soc->stats.htt_ver_req_put_skip++;
3024 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3025 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3026 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
3027 				"target uses HTT version %d.%d; host uses %d.%d",
3028 				soc->tgt_ver.major, soc->tgt_ver.minor,
3029 				HTT_CURRENT_VERSION_MAJOR,
3030 				HTT_CURRENT_VERSION_MINOR);
3031 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3032 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3033 					QDF_TRACE_LEVEL_WARN,
3034 					"*** Incompatible host/target HTT versions!");
3035 			}
3036 			/* abort if the target is incompatible with the host */
3037 			qdf_assert(soc->tgt_ver.major ==
3038 				HTT_CURRENT_VERSION_MAJOR);
3039 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3040 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3041 					QDF_TRACE_LEVEL_INFO_LOW,
3042 					"*** Warning: host/target HTT versions"
3043 					" are different, though compatible!");
3044 			}
3045 			break;
3046 		}
3047 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3048 		{
3049 			uint16_t peer_id;
3050 			uint8_t tid;
3051 			uint8_t win_sz;
3052 			uint16_t status;
3053 			struct dp_peer *peer;
3054 
3055 			/*
3056 			 * Update REO Queue Desc with new values
3057 			 */
3058 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3059 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3060 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3061 			peer = dp_peer_get_ref_by_id(soc->dp_soc, peer_id,
3062 						     DP_MOD_ID_HTT);
3063 
3064 			/*
3065 			 * Window size needs to be incremented by 1
3066 			 * since fw needs to represent a value of 256
3067 			 * using just 8 bits
3068 			 */
3069 			if (peer) {
3070 				status = dp_addba_requestprocess_wifi3(
3071 					(struct cdp_soc_t *)soc->dp_soc,
3072 					peer->mac_addr.raw, peer->vdev->vdev_id,
3073 					0, tid, 0, win_sz + 1, 0xffff);
3074 
3075 				/*
3076 				 * If PEER_LOCK_REF_PROTECT enbled dec ref
3077 				 * which is inc by dp_peer_get_ref_by_id
3078 				 */
3079 				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3080 
3081 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3082 					QDF_TRACE_LEVEL_INFO,
3083 					FL("PeerID %d BAW %d TID %d stat %d"),
3084 					peer_id, win_sz, tid, status);
3085 
3086 			} else {
3087 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3088 					QDF_TRACE_LEVEL_ERROR,
3089 					FL("Peer not found peer id %d"),
3090 					peer_id);
3091 			}
3092 			break;
3093 		}
3094 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3095 		{
3096 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3097 			break;
3098 		}
3099 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
3100 		{
3101 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3102 			u_int8_t *peer_mac_addr;
3103 			u_int16_t peer_id;
3104 			u_int16_t hw_peer_id;
3105 			u_int8_t vdev_id;
3106 			bool is_wds;
3107 			u_int16_t ast_hash;
3108 			struct dp_ast_flow_override_info ast_flow_info;
3109 
3110 			qdf_mem_set(&ast_flow_info, 0,
3111 					    sizeof(struct dp_ast_flow_override_info));
3112 
3113 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
3114 			hw_peer_id =
3115 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
3116 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
3117 			peer_mac_addr =
3118 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3119 						   &mac_addr_deswizzle_buf[0]);
3120 			is_wds =
3121 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
3122 			ast_hash =
3123 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
3124 			/*
3125 			 * Update 4 ast_index per peer, ast valid mask
3126 			 * and TID flow valid mask.
3127 			 * AST valid mask is 3 bit field corresponds to
3128 			 * ast_index[3:1]. ast_index 0 is always valid.
3129 			 */
3130 			ast_flow_info.ast_valid_mask =
3131 			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
3132 			ast_flow_info.ast_idx[0] = hw_peer_id;
3133 			ast_flow_info.ast_flow_mask[0] =
3134 			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
3135 			ast_flow_info.ast_idx[1] =
3136 			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
3137 			ast_flow_info.ast_flow_mask[1] =
3138 			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
3139 			ast_flow_info.ast_idx[2] =
3140 			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
3141 			ast_flow_info.ast_flow_mask[2] =
3142 			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
3143 			ast_flow_info.ast_idx[3] =
3144 			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
3145 			ast_flow_info.ast_flow_mask[3] =
3146 			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
3147 			/*
3148 			 * TID valid mask is applicable only
3149 			 * for HI and LOW priority flows.
3150 			 * tid_valid_mas is 8 bit field corresponds
3151 			 * to TID[7:0]
3152 			 */
3153 			ast_flow_info.tid_valid_low_pri_mask =
3154 			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
3155 			ast_flow_info.tid_valid_hi_pri_mask =
3156 			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));
3157 
3158 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3159 				  QDF_TRACE_LEVEL_INFO,
3160 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3161 				  peer_id, vdev_id);
3162 
3163 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3164 				  QDF_TRACE_LEVEL_INFO,
3165 				  "ast_idx[0] %d ast_idx[1] %d ast_idx[2] %d ast_idx[3] %d n",
3166 				  ast_flow_info.ast_idx[0],
3167 				  ast_flow_info.ast_idx[1],
3168 				  ast_flow_info.ast_idx[2],
3169 				  ast_flow_info.ast_idx[3]);
3170 
3171 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3172 					       hw_peer_id, vdev_id,
3173 					       peer_mac_addr, ast_hash,
3174 					       is_wds);
3175 
3176 			/*
3177 			 * Update ast indexes for flow override support
3178 			 * Applicable only for non wds peers
3179 			 */
3180 			if (!soc->dp_soc->ast_offload_support)
3181 				dp_peer_ast_index_flow_queue_map_create(
3182 						soc->dp_soc, is_wds,
3183 						peer_id, peer_mac_addr,
3184 						&ast_flow_info);
3185 
3186 			break;
3187 		}
3188 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
3189 		{
3190 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3191 			u_int8_t *mac_addr;
3192 			u_int16_t peer_id;
3193 			u_int8_t vdev_id;
3194 			u_int8_t is_wds;
3195 			u_int32_t free_wds_count;
3196 
3197 			peer_id =
3198 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
3199 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
3200 			mac_addr =
3201 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3202 						   &mac_addr_deswizzle_buf[0]);
3203 			is_wds =
3204 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
3205 			free_wds_count =
3206 			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));
3207 
3208 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3209 				  QDF_TRACE_LEVEL_INFO,
3210 				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
3211 				  peer_id, vdev_id);
3212 
3213 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3214 						 vdev_id, mac_addr,
3215 						 is_wds, free_wds_count);
3216 			break;
3217 		}
3218 	case HTT_T2H_MSG_TYPE_RX_DELBA:
3219 		{
3220 			uint16_t peer_id;
3221 			uint8_t tid;
3222 			uint8_t win_sz;
3223 			QDF_STATUS status;
3224 
3225 			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
3226 			tid = HTT_RX_DELBA_TID_GET(*msg_word);
3227 			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);
3228 
3229 			status = dp_rx_delba_ind_handler(
3230 				soc->dp_soc,
3231 				peer_id, tid, win_sz);
3232 
3233 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3234 				  QDF_TRACE_LEVEL_INFO,
3235 				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
3236 				  peer_id, win_sz, tid, status);
3237 			break;
3238 		}
3239 	case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
3240 		{
3241 			uint16_t num_entries;
3242 			uint32_t cmem_ba_lo;
3243 			uint32_t cmem_ba_hi;
3244 
3245 			num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
3246 			cmem_ba_lo = *(msg_word + 1);
3247 			cmem_ba_hi = *(msg_word + 2);
3248 
3249 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3250 				  FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
3251 				  num_entries, cmem_ba_lo, cmem_ba_hi);
3252 
3253 			dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
3254 						     cmem_ba_lo, cmem_ba_hi);
3255 			break;
3256 		}
3257 	case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
3258 		{
3259 			dp_offload_ind_handler(soc, msg_word);
3260 			break;
3261 		}
3262 	case HTT_T2H_MSG_TYPE_PEER_MAP_V3:
3263 	{
3264 		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3265 		u_int8_t *peer_mac_addr;
3266 		u_int16_t peer_id;
3267 		u_int16_t hw_peer_id;
3268 		u_int8_t vdev_id;
3269 		uint8_t is_wds;
3270 		u_int16_t ast_hash = 0;
3271 
3272 		peer_id = HTT_RX_PEER_MAP_V3_SW_PEER_ID_GET(*msg_word);
3273 		vdev_id = HTT_RX_PEER_MAP_V3_VDEV_ID_GET(*msg_word);
3274 		peer_mac_addr =
3275 		htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3276 					   &mac_addr_deswizzle_buf[0]);
3277 		hw_peer_id = HTT_RX_PEER_MAP_V3_HW_PEER_ID_GET(*(msg_word + 3));
3278 		ast_hash = HTT_RX_PEER_MAP_V3_CACHE_SET_NUM_GET(*(msg_word + 3));
3279 		is_wds = HTT_RX_PEER_MAP_V3_NEXT_HOP_GET(*(msg_word + 4));
3280 
3281 		dp_htt_info("HTT_T2H_MSG_TYPE_PEER_MAP_V3 msg for peer id %d vdev id %d n",
3282 			    peer_id, vdev_id);
3283 
3284 		dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3285 				       hw_peer_id, vdev_id,
3286 				       peer_mac_addr, ast_hash,
3287 				       is_wds);
3288 
3289 		break;
3290 	}
3291 	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP:
3292 	{
3293 		dp_htt_mlo_peer_map_handler(soc, msg_word);
3294 		break;
3295 	}
3296 	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_UNMAP:
3297 	{
3298 		dp_htt_mlo_peer_unmap_handler(soc, msg_word);
3299 		break;
3300 	}
3301 	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
3302 	{
3303 		dp_rx_mlo_timestamp_ind_handler(soc->dp_soc, msg_word);
3304 		break;
3305 	}
3306 	case HTT_T2H_MSG_TYPE_VDEVS_TXRX_STATS_PERIODIC_IND:
3307 	{
3308 		dp_vdev_txrx_hw_stats_handler(soc, msg_word);
3309 		break;
3310 	}
3311 	default:
3312 		break;
3313 	};
3314 
3315 	/* Free the indication buffer */
3316 	if (free_buf)
3317 		qdf_nbuf_free(htt_t2h_msg);
3318 }
3319 
3320 /*
3321  * dp_htt_h2t_full() - Send full handler (called from HTC)
3322  * @context:	Opaque context (HTT SOC handle)
3323  * @pkt:	HTC packet
3324  *
3325  * Return: enum htc_send_full_action
3326  */
3327 static enum htc_send_full_action
3328 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
3329 {
3330 	return HTC_SEND_FULL_KEEP;
3331 }
3332 
3333 /*
3334  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3335  * @context:	Opaque context (HTT SOC handle)
3336  * @nbuf:	nbuf containing T2H message
3337  * @pipe_id:	HIF pipe ID
3338  *
3339  * Return: QDF_STATUS
3340  *
3341  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3342  * will be used for packet log and other high-priority HTT messages. Proper
3343  * HTC connection to be added later once required FW changes are available
3344  */
3345 static QDF_STATUS
3346 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3347 {
3348 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3349 	HTC_PACKET htc_pkt;
3350 
3351 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3352 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3353 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3354 	htc_pkt.pPktContext = (void *)nbuf;
3355 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3356 
3357 	return rc;
3358 }
3359 
3360 /*
3361  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3362  * @htt_soc:	HTT SOC handle
3363  *
3364  * Return: QDF_STATUS
3365  */
3366 static QDF_STATUS
3367 htt_htc_soc_attach(struct htt_soc *soc)
3368 {
3369 	struct htc_service_connect_req connect;
3370 	struct htc_service_connect_resp response;
3371 	QDF_STATUS status;
3372 	struct dp_soc *dpsoc = soc->dp_soc;
3373 
3374 	qdf_mem_zero(&connect, sizeof(connect));
3375 	qdf_mem_zero(&response, sizeof(response));
3376 
3377 	connect.pMetaData = NULL;
3378 	connect.MetaDataLength = 0;
3379 	connect.EpCallbacks.pContext = soc;
3380 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3381 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3382 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3383 
3384 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3385 	connect.EpCallbacks.EpRecvRefill = NULL;
3386 
3387 	/* N/A, fill is done by HIF */
3388 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3389 
3390 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3391 	/*
3392 	 * Specify how deep to let a queue get before htc_send_pkt will
3393 	 * call the EpSendFull function due to excessive send queue depth.
3394 	 */
3395 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3396 
3397 	/* disable flow control for HTT data message service */
3398 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3399 
3400 	/* connect to control service */
3401 	connect.service_id = HTT_DATA_MSG_SVC;
3402 
3403 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3404 
3405 	if (status != QDF_STATUS_SUCCESS)
3406 		return status;
3407 
3408 	soc->htc_endpoint = response.Endpoint;
3409 
3410 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3411 
3412 	htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc);
3413 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3414 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3415 
3416 	return QDF_STATUS_SUCCESS; /* success */
3417 }
3418 
3419 /*
3420  * htt_soc_initialize() - SOC level HTT initialization
3421  * @htt_soc: Opaque htt SOC handle
3422  * @ctrl_psoc: Opaque ctrl SOC handle
3423  * @htc_soc: SOC level HTC handle
3424  * @hal_soc: Opaque HAL SOC handle
3425  * @osdev: QDF device
3426  *
3427  * Return: HTT handle on success; NULL on failure
3428  */
3429 void *
3430 htt_soc_initialize(struct htt_soc *htt_soc,
3431 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
3432 		   HTC_HANDLE htc_soc,
3433 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
3434 {
3435 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3436 
3437 	soc->osdev = osdev;
3438 	soc->ctrl_psoc = ctrl_psoc;
3439 	soc->htc_soc = htc_soc;
3440 	soc->hal_soc = hal_soc_hdl;
3441 
3442 	if (htt_htc_soc_attach(soc))
3443 		goto fail2;
3444 
3445 	return soc;
3446 
3447 fail2:
3448 	return NULL;
3449 }
3450 
/*
 * htt_soc_htc_dealloc() - release HTC-related HTT SOC resources
 * @htt_handle: HTT SOC handle
 *
 * Tears down the HTT event logger and drains both the misc and the
 * regular HTC packet pools.
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
3457 
3458 /*
3459  * htt_soc_htc_prealloc() - HTC memory prealloc
3460  * @htt_soc: SOC level HTT handle
3461  *
3462  * Return: QDF_STATUS_SUCCESS on Success or
3463  * QDF_STATUS_E_NOMEM on allocation failure
3464  */
3465 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
3466 {
3467 	int i;
3468 
3469 	soc->htt_htc_pkt_freelist = NULL;
3470 	/* pre-allocate some HTC_PACKET objects */
3471 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3472 		struct dp_htt_htc_pkt_union *pkt;
3473 		pkt = qdf_mem_malloc(sizeof(*pkt));
3474 		if (!pkt)
3475 			return QDF_STATUS_E_NOMEM;
3476 
3477 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3478 	}
3479 	return QDF_STATUS_SUCCESS;
3480 }
3481 
3482 /*
3483  * htt_soc_detach() - Free SOC level HTT handle
3484  * @htt_hdl: HTT SOC handle
3485  */
3486 void htt_soc_detach(struct htt_soc *htt_hdl)
3487 {
3488 	int i;
3489 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
3490 
3491 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3492 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
3493 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
3494 	}
3495 
3496 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
3497 	qdf_mem_free(htt_handle);
3498 
3499 }
3500 
3501 /**
3502  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
3503  * @pdev: DP PDEV handle
3504  * @stats_type_upload_mask: stats type requested by user
3505  * @config_param_0: extra configuration parameters
3506  * @config_param_1: extra configuration parameters
3507  * @config_param_2: extra configuration parameters
3508  * @config_param_3: extra configuration parameters
3509  * @mac_id: mac number
3510  *
3511  * return: QDF STATUS
3512  */
3513 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3514 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3515 		uint32_t config_param_1, uint32_t config_param_2,
3516 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3517 		uint8_t mac_id)
3518 {
3519 	struct htt_soc *soc = pdev->soc->htt_handle;
3520 	struct dp_htt_htc_pkt *pkt;
3521 	qdf_nbuf_t msg;
3522 	uint32_t *msg_word;
3523 	uint8_t pdev_mask = 0;
3524 	uint8_t *htt_logger_bufp;
3525 	int mac_for_pdev;
3526 	int target_pdev_id;
3527 	QDF_STATUS status;
3528 
3529 	msg = qdf_nbuf_alloc(
3530 			soc->osdev,
3531 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3532 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3533 
3534 	if (!msg)
3535 		return QDF_STATUS_E_NOMEM;
3536 
3537 	/*TODO:Add support for SOC stats
3538 	 * Bit 0: SOC Stats
3539 	 * Bit 1: Pdev stats for pdev id 0
3540 	 * Bit 2: Pdev stats for pdev id 1
3541 	 * Bit 3: Pdev stats for pdev id 2
3542 	 */
3543 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3544 	target_pdev_id =
3545 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
3546 
3547 	pdev_mask = 1 << target_pdev_id;
3548 
3549 	/*
3550 	 * Set the length of the message.
3551 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3552 	 * separately during the below call to qdf_nbuf_push_head.
3553 	 * The contribution from the HTC header is added separately inside HTC.
3554 	 */
3555 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3556 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3557 				"Failed to expand head for HTT_EXT_STATS");
3558 		qdf_nbuf_free(msg);
3559 		return QDF_STATUS_E_FAILURE;
3560 	}
3561 
3562 	dp_htt_tx_stats_info("%pK: cookie <-> %d\n config_param_0 %u\n"
3563 			     "config_param_1 %u\n config_param_2 %u\n"
3564 			     "config_param_4 %u\n -------------",
3565 			     pdev->soc, cookie_val,
3566 			     config_param_0,
3567 			     config_param_1, config_param_2, config_param_3);
3568 
3569 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3570 
3571 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3572 	htt_logger_bufp = (uint8_t *)msg_word;
3573 	*msg_word = 0;
3574 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
3575 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
3576 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
3577 
3578 	/* word 1 */
3579 	msg_word++;
3580 	*msg_word = 0;
3581 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
3582 
3583 	/* word 2 */
3584 	msg_word++;
3585 	*msg_word = 0;
3586 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
3587 
3588 	/* word 3 */
3589 	msg_word++;
3590 	*msg_word = 0;
3591 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
3592 
3593 	/* word 4 */
3594 	msg_word++;
3595 	*msg_word = 0;
3596 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
3597 
3598 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
3599 
3600 	/* word 5 */
3601 	msg_word++;
3602 
3603 	/* word 6 */
3604 	msg_word++;
3605 	*msg_word = 0;
3606 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
3607 
3608 	/* word 7 */
3609 	msg_word++;
3610 	*msg_word = 0;
3611 	/* Currently Using last 2 bits for pdev_id
3612 	 * For future reference, reserving 3 bits in cookie_msb for pdev_id
3613 	 */
3614 	cookie_msb = (cookie_msb | pdev->pdev_id);
3615 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
3616 
3617 	pkt = htt_htc_pkt_alloc(soc);
3618 	if (!pkt) {
3619 		qdf_nbuf_free(msg);
3620 		return QDF_STATUS_E_NOMEM;
3621 	}
3622 
3623 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3624 
3625 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3626 			dp_htt_h2t_send_complete_free_netbuf,
3627 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3628 			soc->htc_endpoint,
3629 			/* tag for FW response msg not guaranteed */
3630 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
3631 
3632 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3633 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
3634 				     htt_logger_bufp);
3635 
3636 	if (status != QDF_STATUS_SUCCESS) {
3637 		qdf_nbuf_free(msg);
3638 		htt_htc_pkt_free(soc, pkt);
3639 	}
3640 
3641 	return status;
3642 }
3643 
3644 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
3645 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK 0xFFFFFFFF
3646 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK 0xFFFFFFFF00000000
3647 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT 32
3648 
3649 QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
3650 					    uint8_t pdev_id, bool enable,
3651 					    bool reset, uint64_t reset_bitmask)
3652 {
3653 	struct htt_soc *soc = dpsoc->htt_handle;
3654 	struct dp_htt_htc_pkt *pkt;
3655 	qdf_nbuf_t msg;
3656 	uint32_t *msg_word;
3657 	uint8_t *htt_logger_bufp;
3658 	QDF_STATUS status;
3659 	int duration;
3660 	uint32_t bitmask;
3661 	int target_pdev_id;
3662 
3663 	msg = qdf_nbuf_alloc(
3664 			soc->osdev,
3665 			HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_vdevs_txrx_stats_cfg)),
3666 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
3667 
3668 	if (!msg) {
3669 		dp_htt_err("%pK: Fail to allocate "
3670 		"HTT_H2T_HW_VDEV_TXRX_STATS_CFG_MSG_SZ msg buffer", dpsoc);
3671 		return QDF_STATUS_E_NOMEM;
3672 	}
3673 
3674 	if (pdev_id != INVALID_PDEV_ID)
3675 		target_pdev_id = DP_SW2HW_MACID(pdev_id);
3676 	else
3677 		target_pdev_id = 0;
3678 
3679 	duration =
3680 	wlan_cfg_get_vdev_stats_hw_offload_timer(dpsoc->wlan_cfg_ctx);
3681 
3682 	/*
3683 	 * Set the length of the message.
3684 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3685 	 * separately during the below call to qdf_nbuf_push_head.
3686 	 * The contribution from the HTC header is added separately inside HTC.
3687 	 */
3688 	if (!qdf_nbuf_put_tail(msg,
3689 			       sizeof(struct htt_h2t_vdevs_txrx_stats_cfg))) {
3690 		dp_htt_err("%pK: Failed to expand head for HTT_HW_VDEV_STATS"
3691 			   , dpsoc);
3692 		qdf_nbuf_free(msg);
3693 		return QDF_STATUS_E_FAILURE;
3694 	}
3695 
3696 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
3697 
3698 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3699 	htt_logger_bufp = (uint8_t *)msg_word;
3700 	*msg_word = 0;
3701 
3702 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG);
3703 	HTT_RX_VDEVS_TXRX_STATS_PDEV_ID_SET(*msg_word, target_pdev_id);
3704 
3705 	HTT_RX_VDEVS_TXRX_STATS_ENABLE_SET(*msg_word, enable);
3706 
3707 	HTT_RX_VDEVS_TXRX_STATS_PERIODIC_INTERVAL_SET(*msg_word,
3708 						      (duration >> 3));
3709 
3710 	HTT_RX_VDEVS_TXRX_STATS_RESET_STATS_BITS_SET(*msg_word, reset);
3711 
3712 	msg_word++;
3713 	*msg_word = 0;
3714 	bitmask = (reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK);
3715 	*msg_word = bitmask;
3716 
3717 	msg_word++;
3718 	*msg_word = 0;
3719 	bitmask =
3720 		((reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK) >>
3721 		 HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT);
3722 	*msg_word = bitmask;
3723 
3724 	pkt = htt_htc_pkt_alloc(soc);
3725 	if (!pkt) {
3726 		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer",
3727 			   dpsoc);
3728 		qdf_assert(0);
3729 		qdf_nbuf_free(msg);
3730 		return QDF_STATUS_E_NOMEM;
3731 	}
3732 
3733 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3734 
3735 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3736 			       dp_htt_h2t_send_complete_free_netbuf,
3737 			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3738 			       soc->htc_endpoint,
3739 			       /* tag for no FW response msg */
3740 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
3741 
3742 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3743 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
3744 				     HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG,
3745 				     htt_logger_bufp);
3746 
3747 	if (status != QDF_STATUS_SUCCESS) {
3748 		qdf_nbuf_free(msg);
3749 		htt_htc_pkt_free(soc, pkt);
3750 	}
3751 
3752 	return status;
3753 }
3754 #else
/* Stub: HW vdev stats offload is not compiled in, so there is nothing
 * to configure; report success so callers proceed normally.
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	return QDF_STATUS_SUCCESS;
}
3761 #endif
3762 
3763 /**
3764  * dp_h2t_3tuple_config_send(): function to contruct 3 tuple configuration
3765  * HTT message to pass to FW
3766  * @pdev: DP PDEV handle
3767  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
3768  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
3769  *
3770  * tuple_mask[1:0]:
3771  *   00 - Do not report 3 tuple hash value
3772  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
3773  *   01 - Report 3 tuple hash value in flow_id_toeplitz
3774  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
3775  *
3776  * return: QDF STATUS
3777  */
3778 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
3779 				     uint32_t tuple_mask, uint8_t mac_id)
3780 {
3781 	struct htt_soc *soc = pdev->soc->htt_handle;
3782 	struct dp_htt_htc_pkt *pkt;
3783 	qdf_nbuf_t msg;
3784 	uint32_t *msg_word;
3785 	uint8_t *htt_logger_bufp;
3786 	int mac_for_pdev;
3787 	int target_pdev_id;
3788 
3789 	msg = qdf_nbuf_alloc(
3790 			soc->osdev,
3791 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
3792 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3793 
3794 	if (!msg)
3795 		return QDF_STATUS_E_NOMEM;
3796 
3797 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3798 	target_pdev_id =
3799 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
3800 
3801 	/*
3802 	 * Set the length of the message.
3803 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3804 	 * separately during the below call to qdf_nbuf_push_head.
3805 	 * The contribution from the HTC header is added separately inside HTC.
3806 	 */
3807 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
3808 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3809 			  "Failed to expand head for HTT_3TUPLE_CONFIG");
3810 		qdf_nbuf_free(msg);
3811 		return QDF_STATUS_E_FAILURE;
3812 	}
3813 
3814 	dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------",
3815 		    pdev->soc, tuple_mask, target_pdev_id);
3816 
3817 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
3818 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3819 	htt_logger_bufp = (uint8_t *)msg_word;
3820 
3821 	*msg_word = 0;
3822 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
3823 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
3824 
3825 	msg_word++;
3826 	*msg_word = 0;
3827 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
3828 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
3829 
3830 	pkt = htt_htc_pkt_alloc(soc);
3831 	if (!pkt) {
3832 		qdf_nbuf_free(msg);
3833 		return QDF_STATUS_E_NOMEM;
3834 	}
3835 
3836 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3837 
3838 	SET_HTC_PACKET_INFO_TX(
3839 			&pkt->htc_pkt,
3840 			dp_htt_h2t_send_complete_free_netbuf,
3841 			qdf_nbuf_data(msg),
3842 			qdf_nbuf_len(msg),
3843 			soc->htc_endpoint,
3844 			/* tag for no FW response msg */
3845 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
3846 
3847 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3848 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
3849 			    htt_logger_bufp);
3850 
3851 	return QDF_STATUS_SUCCESS;
3852 }
3853 
/* This conditional compilation will be removed once a proper definition of
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is available in htt.h
 * */
3857 #if defined(WDI_EVENT_ENABLE)
3858 /**
3859  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
3860  * @pdev: DP PDEV handle
3861  * @stats_type_upload_mask: stats type requested by user
3862  * @mac_id: Mac id number
3863  *
3864  * return: QDF STATUS
3865  */
3866 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
3867 		uint32_t stats_type_upload_mask, uint8_t mac_id)
3868 {
3869 	struct htt_soc *soc = pdev->soc->htt_handle;
3870 	struct dp_htt_htc_pkt *pkt;
3871 	qdf_nbuf_t msg;
3872 	uint32_t *msg_word;
3873 	uint8_t pdev_mask;
3874 	QDF_STATUS status;
3875 
3876 	msg = qdf_nbuf_alloc(
3877 			soc->osdev,
3878 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
3879 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
3880 
3881 	if (!msg) {
3882 		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer"
3883 			   , pdev->soc);
3884 		qdf_assert(0);
3885 		return QDF_STATUS_E_NOMEM;
3886 	}
3887 
3888 	/*TODO:Add support for SOC stats
3889 	 * Bit 0: SOC Stats
3890 	 * Bit 1: Pdev stats for pdev id 0
3891 	 * Bit 2: Pdev stats for pdev id 1
3892 	 * Bit 3: Pdev stats for pdev id 2
3893 	 */
3894 	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
3895 								mac_id);
3896 
3897 	/*
3898 	 * Set the length of the message.
3899 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3900 	 * separately during the below call to qdf_nbuf_push_head.
3901 	 * The contribution from the HTC header is added separately inside HTC.
3902 	 */
3903 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
3904 		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS"
3905 			   , pdev->soc);
3906 		qdf_nbuf_free(msg);
3907 		return QDF_STATUS_E_FAILURE;
3908 	}
3909 
3910 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3911 
3912 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3913 	*msg_word = 0;
3914 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
3915 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
3916 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
3917 			stats_type_upload_mask);
3918 
3919 	pkt = htt_htc_pkt_alloc(soc);
3920 	if (!pkt) {
3921 		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer", pdev->soc);
3922 		qdf_assert(0);
3923 		qdf_nbuf_free(msg);
3924 		return QDF_STATUS_E_NOMEM;
3925 	}
3926 
3927 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3928 
3929 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3930 			dp_htt_h2t_send_complete_free_netbuf,
3931 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3932 			soc->htc_endpoint,
3933 			/* tag for no FW response msg */
3934 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
3935 
3936 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3937 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
3938 				     (uint8_t *)msg_word);
3939 
3940 	if (status != QDF_STATUS_SUCCESS) {
3941 		qdf_nbuf_free(msg);
3942 		htt_htc_pkt_free(soc, pkt);
3943 	}
3944 
3945 	return status;
3946 }
3947 
3948 qdf_export_symbol(dp_h2t_cfg_stats_msg_send);
3949 #endif
3950 
3951 void
3952 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
3953 			     uint32_t *tag_buf)
3954 {
3955 	struct dp_peer *peer = NULL;
3956 	switch (tag_type) {
3957 	case HTT_STATS_PEER_DETAILS_TAG:
3958 	{
3959 		htt_peer_details_tlv *dp_stats_buf =
3960 			(htt_peer_details_tlv *)tag_buf;
3961 
3962 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
3963 	}
3964 	break;
3965 	case HTT_STATS_PEER_STATS_CMN_TAG:
3966 	{
3967 		htt_peer_stats_cmn_tlv *dp_stats_buf =
3968 			(htt_peer_stats_cmn_tlv *)tag_buf;
3969 
3970 		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
3971 					     DP_MOD_ID_HTT);
3972 
3973 		if (peer && !peer->bss_peer) {
3974 			peer->stats.tx.inactive_time =
3975 				dp_stats_buf->inactive_time;
3976 			qdf_event_set(&pdev->fw_peer_stats_event);
3977 		}
3978 		if (peer)
3979 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3980 	}
3981 	break;
3982 	default:
3983 		qdf_err("Invalid tag_type");
3984 	}
3985 }
3986 
3987 /**
3988  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
3989  * @pdev: DP pdev handle
3990  * @fse_setup_info: FST setup parameters
3991  *
3992  * Return: Success when HTT message is sent, error on failure
3993  */
3994 QDF_STATUS
3995 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
3996 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
3997 {
3998 	struct htt_soc *soc = pdev->soc->htt_handle;
3999 	struct dp_htt_htc_pkt *pkt;
4000 	qdf_nbuf_t msg;
4001 	u_int32_t *msg_word;
4002 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4003 	uint8_t *htt_logger_bufp;
4004 	u_int32_t *key;
4005 	QDF_STATUS status;
4006 
4007 	msg = qdf_nbuf_alloc(
4008 		soc->osdev,
4009 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4010 		/* reserve room for the HTC header */
4011 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4012 
4013 	if (!msg)
4014 		return QDF_STATUS_E_NOMEM;
4015 
4016 	/*
4017 	 * Set the length of the message.
4018 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4019 	 * separately during the below call to qdf_nbuf_push_head.
4020 	 * The contribution from the HTC header is added separately inside HTC.
4021 	 */
4022 	if (!qdf_nbuf_put_tail(msg,
4023 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4024 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
4025 		return QDF_STATUS_E_FAILURE;
4026 	}
4027 
4028 	/* fill in the message contents */
4029 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4030 
4031 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4032 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4033 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4034 	htt_logger_bufp = (uint8_t *)msg_word;
4035 
4036 	*msg_word = 0;
4037 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4038 
4039 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4040 
4041 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4042 
4043 	msg_word++;
4044 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4045 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4046 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4047 					     fse_setup_info->ip_da_sa_prefix);
4048 
4049 	msg_word++;
4050 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4051 					  fse_setup_info->base_addr_lo);
4052 	msg_word++;
4053 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4054 					  fse_setup_info->base_addr_hi);
4055 
4056 	key = (u_int32_t *)fse_setup_info->hash_key;
4057 	fse_setup->toeplitz31_0 = *key++;
4058 	fse_setup->toeplitz63_32 = *key++;
4059 	fse_setup->toeplitz95_64 = *key++;
4060 	fse_setup->toeplitz127_96 = *key++;
4061 	fse_setup->toeplitz159_128 = *key++;
4062 	fse_setup->toeplitz191_160 = *key++;
4063 	fse_setup->toeplitz223_192 = *key++;
4064 	fse_setup->toeplitz255_224 = *key++;
4065 	fse_setup->toeplitz287_256 = *key++;
4066 	fse_setup->toeplitz314_288 = *key;
4067 
4068 	msg_word++;
4069 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4070 	msg_word++;
4071 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4072 	msg_word++;
4073 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4074 	msg_word++;
4075 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4076 	msg_word++;
4077 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4078 	msg_word++;
4079 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4080 	msg_word++;
4081 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4082 	msg_word++;
4083 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4084 	msg_word++;
4085 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4086 	msg_word++;
4087 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4088 					  fse_setup->toeplitz314_288);
4089 
4090 	pkt = htt_htc_pkt_alloc(soc);
4091 	if (!pkt) {
4092 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4093 		qdf_assert(0);
4094 		qdf_nbuf_free(msg);
4095 		return QDF_STATUS_E_RESOURCES; /* failure */
4096 	}
4097 
4098 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4099 
4100 	SET_HTC_PACKET_INFO_TX(
4101 		&pkt->htc_pkt,
4102 		dp_htt_h2t_send_complete_free_netbuf,
4103 		qdf_nbuf_data(msg),
4104 		qdf_nbuf_len(msg),
4105 		soc->htc_endpoint,
4106 		/* tag for no FW response msg */
4107 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4108 
4109 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4110 
4111 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4112 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4113 				     htt_logger_bufp);
4114 
4115 	if (status == QDF_STATUS_SUCCESS) {
4116 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4117 			fse_setup_info->pdev_id);
4118 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4119 				   (void *)fse_setup_info->hash_key,
4120 				   fse_setup_info->hash_key_len);
4121 	} else {
4122 		qdf_nbuf_free(msg);
4123 		htt_htc_pkt_free(soc, pkt);
4124 	}
4125 
4126 	return status;
4127 }
4128 
4129 /**
4130  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
4131  * add/del a flow in HW
4132  * @pdev: DP pdev handle
4133  * @fse_op_info: Flow entry parameters
4134  *
4135  * Return: Success when HTT message is sent, error on failure
4136  */
4137 QDF_STATUS
4138 dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
4139 			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
4140 {
4141 	struct htt_soc *soc = pdev->soc->htt_handle;
4142 	struct dp_htt_htc_pkt *pkt;
4143 	qdf_nbuf_t msg;
4144 	u_int32_t *msg_word;
4145 	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
4146 	uint8_t *htt_logger_bufp;
4147 	QDF_STATUS status;
4148 
4149 	msg = qdf_nbuf_alloc(
4150 		soc->osdev,
4151 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
4152 		/* reserve room for the HTC header */
4153 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4154 	if (!msg)
4155 		return QDF_STATUS_E_NOMEM;
4156 
4157 	/*
4158 	 * Set the length of the message.
4159 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4160 	 * separately during the below call to qdf_nbuf_push_head.
4161 	 * The contribution from the HTC header is added separately inside HTC.
4162 	 */
4163 	if (!qdf_nbuf_put_tail(msg,
4164 			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
4165 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4166 		qdf_nbuf_free(msg);
4167 		return QDF_STATUS_E_FAILURE;
4168 	}
4169 
4170 	/* fill in the message contents */
4171 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4172 
4173 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
4174 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4175 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4176 	htt_logger_bufp = (uint8_t *)msg_word;
4177 
4178 	*msg_word = 0;
4179 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
4180 
4181 	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
4182 
4183 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
4184 	msg_word++;
4185 	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
4186 	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
4187 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4188 					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
4189 		msg_word++;
4190 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4191 		*msg_word,
4192 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
4193 		msg_word++;
4194 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4195 		*msg_word,
4196 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
4197 		msg_word++;
4198 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4199 		*msg_word,
4200 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
4201 		msg_word++;
4202 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4203 		*msg_word,
4204 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
4205 		msg_word++;
4206 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4207 		*msg_word,
4208 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
4209 		msg_word++;
4210 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4211 		*msg_word,
4212 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
4213 		msg_word++;
4214 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4215 		*msg_word,
4216 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
4217 		msg_word++;
4218 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4219 		*msg_word,
4220 		qdf_htonl(
4221 		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
4222 		msg_word++;
4223 		HTT_RX_FSE_SOURCEPORT_SET(
4224 			*msg_word,
4225 			fse_op_info->rx_flow->flow_tuple_info.src_port);
4226 		HTT_RX_FSE_DESTPORT_SET(
4227 			*msg_word,
4228 			fse_op_info->rx_flow->flow_tuple_info.dest_port);
4229 		msg_word++;
4230 		HTT_RX_FSE_L4_PROTO_SET(
4231 			*msg_word,
4232 			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
4233 	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
4234 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4235 					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
4236 	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
4237 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
4238 	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
4239 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
4240 	}
4241 
4242 	pkt = htt_htc_pkt_alloc(soc);
4243 	if (!pkt) {
4244 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4245 		qdf_assert(0);
4246 		qdf_nbuf_free(msg);
4247 		return QDF_STATUS_E_RESOURCES; /* failure */
4248 	}
4249 
4250 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4251 
4252 	SET_HTC_PACKET_INFO_TX(
4253 		&pkt->htc_pkt,
4254 		dp_htt_h2t_send_complete_free_netbuf,
4255 		qdf_nbuf_data(msg),
4256 		qdf_nbuf_len(msg),
4257 		soc->htc_endpoint,
4258 		/* tag for no FW response msg */
4259 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4260 
4261 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4262 
4263 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4264 				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
4265 				     htt_logger_bufp);
4266 
4267 	if (status == QDF_STATUS_SUCCESS) {
4268 		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
4269 			fse_op_info->pdev_id);
4270 	} else {
4271 		qdf_nbuf_free(msg);
4272 		htt_htc_pkt_free(soc, pkt);
4273 	}
4274 
4275 	return status;
4276 }
4277 
4278 /**
4279  * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
4280  * @pdev: DP pdev handle
4281  * @fse_op_info: Flow entry parameters
4282  *
4283  * Return: Success when HTT message is sent, error on failure
4284  */
4285 QDF_STATUS
4286 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
4287 		      struct dp_htt_rx_fisa_cfg *fisa_config)
4288 {
4289 	struct htt_soc *soc = pdev->soc->htt_handle;
4290 	struct dp_htt_htc_pkt *pkt;
4291 	qdf_nbuf_t msg;
4292 	u_int32_t *msg_word;
4293 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
4294 	uint8_t *htt_logger_bufp;
4295 	uint32_t len;
4296 	QDF_STATUS status;
4297 
4298 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
4299 
4300 	msg = qdf_nbuf_alloc(soc->osdev,
4301 			     len,
4302 			     /* reserve room for the HTC header */
4303 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
4304 			     4,
4305 			     TRUE);
4306 	if (!msg)
4307 		return QDF_STATUS_E_NOMEM;
4308 
4309 	/*
4310 	 * Set the length of the message.
4311 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4312 	 * separately during the below call to qdf_nbuf_push_head.
4313 	 * The contribution from the HTC header is added separately inside HTC.
4314 	 */
4315 	if (!qdf_nbuf_put_tail(msg,
4316 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
4317 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4318 		qdf_nbuf_free(msg);
4319 		return QDF_STATUS_E_FAILURE;
4320 	}
4321 
4322 	/* fill in the message contents */
4323 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4324 
4325 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
4326 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4327 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4328 	htt_logger_bufp = (uint8_t *)msg_word;
4329 
4330 	*msg_word = 0;
4331 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
4332 
4333 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
4334 
4335 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);
4336 
4337 	msg_word++;
4338 	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
4339 	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);
4340 
4341 	msg_word++;
4342 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
4343 
4344 	pkt = htt_htc_pkt_alloc(soc);
4345 	if (!pkt) {
4346 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4347 		qdf_assert(0);
4348 		qdf_nbuf_free(msg);
4349 		return QDF_STATUS_E_RESOURCES; /* failure */
4350 	}
4351 
4352 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4353 
4354 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4355 			       dp_htt_h2t_send_complete_free_netbuf,
4356 			       qdf_nbuf_data(msg),
4357 			       qdf_nbuf_len(msg),
4358 			       soc->htc_endpoint,
4359 			       /* tag for no FW response msg */
4360 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
4361 
4362 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4363 
4364 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
4365 				     htt_logger_bufp);
4366 
4367 	if (status == QDF_STATUS_SUCCESS) {
4368 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
4369 			fisa_config->pdev_id);
4370 	} else {
4371 		qdf_nbuf_free(msg);
4372 		htt_htc_pkt_free(soc, pkt);
4373 	}
4374 
4375 	return status;
4376 }
4377 
4378 /**
4379  * dp_bk_pressure_stats_handler(): worker function to print back pressure
4380  *				   stats
4381  *
4382  * @context : argument to work function
4383  */
4384 static void dp_bk_pressure_stats_handler(void *context)
4385 {
4386 	struct dp_pdev *pdev = (struct dp_pdev *)context;
4387 	struct dp_soc_srngs_state *soc_srngs_state = NULL;
4388 	const char *ring_name;
4389 	int i;
4390 	struct dp_srng_ring_state *ring_state;
4391 	bool empty_flag;
4392 
4393 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4394 
4395 	/* Extract only first entry for printing in one work event */
4396 	if (pdev->bkp_stats.queue_depth &&
4397 	    !TAILQ_EMPTY(&pdev->bkp_stats.list)) {
4398 		soc_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
4399 		TAILQ_REMOVE(&pdev->bkp_stats.list, soc_srngs_state,
4400 			     list_elem);
4401 		pdev->bkp_stats.queue_depth--;
4402 	}
4403 
4404 	empty_flag = TAILQ_EMPTY(&pdev->bkp_stats.list);
4405 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4406 
4407 	if (soc_srngs_state) {
4408 		DP_PRINT_STATS("### BKP stats for seq_num %u START ###",
4409 			       soc_srngs_state->seq_num);
4410 		for (i = 0; i < soc_srngs_state->max_ring_id; i++) {
4411 			ring_state = &soc_srngs_state->ring_state[i];
4412 			ring_name = dp_srng_get_str_from_hal_ring_type
4413 						(ring_state->ring_type);
4414 			DP_PRINT_STATS("%s: SW:Head pointer = %d Tail Pointer = %d\n",
4415 				       ring_name,
4416 				       ring_state->sw_head,
4417 				       ring_state->sw_tail);
4418 
4419 			DP_PRINT_STATS("%s: HW:Head pointer = %d Tail Pointer = %d\n",
4420 				       ring_name,
4421 				       ring_state->hw_head,
4422 				       ring_state->hw_tail);
4423 		}
4424 
4425 		DP_PRINT_STATS("### BKP stats for seq_num %u COMPLETE ###",
4426 			       soc_srngs_state->seq_num);
4427 		qdf_mem_free(soc_srngs_state);
4428 	}
4429 	dp_print_napi_stats(pdev->soc);
4430 
4431 	/* Schedule work again if queue is not empty */
4432 	if (!empty_flag)
4433 		qdf_queue_work(0, pdev->bkp_stats.work_queue,
4434 			       &pdev->bkp_stats.work);
4435 }
4436 
4437 /*
4438  * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
4439  *				processing
4440  * @pdev: Datapath PDEV handle
4441  *
4442  */
4443 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev)
4444 {
4445 	struct dp_soc_srngs_state *ring_state, *ring_state_next;
4446 
4447 	if (!pdev->bkp_stats.work_queue)
4448 		return;
4449 
4450 	qdf_flush_workqueue(0, pdev->bkp_stats.work_queue);
4451 	qdf_destroy_workqueue(0, pdev->bkp_stats.work_queue);
4452 	qdf_flush_work(&pdev->bkp_stats.work);
4453 	qdf_disable_work(&pdev->bkp_stats.work);
4454 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4455 	TAILQ_FOREACH_SAFE(ring_state, &pdev->bkp_stats.list,
4456 			   list_elem, ring_state_next) {
4457 		TAILQ_REMOVE(&pdev->bkp_stats.list, ring_state,
4458 			     list_elem);
4459 		qdf_mem_free(ring_state);
4460 	}
4461 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4462 	qdf_spinlock_destroy(&pdev->bkp_stats.list_lock);
4463 }
4464 
4465 /*
4466  * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
4467  *				processing
4468  * @pdev: Datapath PDEV handle
4469  *
4470  * Return: QDF_STATUS_SUCCESS: Success
4471  *         QDF_STATUS_E_NOMEM: Error
4472  */
4473 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev)
4474 {
4475 	TAILQ_INIT(&pdev->bkp_stats.list);
4476 	pdev->bkp_stats.seq_num = 0;
4477 	pdev->bkp_stats.queue_depth = 0;
4478 
4479 	qdf_create_work(0, &pdev->bkp_stats.work,
4480 			dp_bk_pressure_stats_handler, pdev);
4481 
4482 	pdev->bkp_stats.work_queue =
4483 		qdf_alloc_unbound_workqueue("dp_bkp_work_queue");
4484 	if (!pdev->bkp_stats.work_queue)
4485 		goto fail;
4486 
4487 	qdf_spinlock_create(&pdev->bkp_stats.list_lock);
4488 	return QDF_STATUS_SUCCESS;
4489 
4490 fail:
4491 	dp_htt_alert("BKP stats attach failed");
4492 	qdf_flush_work(&pdev->bkp_stats.work);
4493 	qdf_disable_work(&pdev->bkp_stats.work);
4494 	return QDF_STATUS_E_FAILURE;
4495 }
4496