xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 8b3dca18206e1a0461492f082fa6e270b092c035)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <htt.h>
21 #include <hal_hw_headers.h>
22 #include <hal_api.h>
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "dp_htt.h"
30 #ifdef WIFI_MONITOR_SUPPORT
31 #include <dp_mon.h>
32 #endif
33 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
34 #include "cdp_txrx_cmn_struct.h"
35 
36 #ifdef FEATURE_PERPKT_INFO
37 #include "dp_ratetable.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef CONFIG_SAWF_DEF_QUEUES
41 #include <dp_sawf_htt.h>
42 #endif
43 
44 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
45 
46 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
47 
48 #define HTT_MSG_BUF_SIZE(msg_bytes) \
49 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
50 
51 #define HTT_PID_BIT_MASK 0x3
52 
53 #define DP_EXT_MSG_LENGTH 2048
54 #define HTT_HEADER_LEN 16
55 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
56 
57 #define HTT_SHIFT_UPPER_TIMESTAMP 32
58 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
59 #define HTT_BKP_STATS_MAX_QUEUE_DEPTH 16
60 
61 struct dp_htt_htc_pkt *
62 htt_htc_pkt_alloc(struct htt_soc *soc)
63 {
64 	struct dp_htt_htc_pkt_union *pkt = NULL;
65 
66 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
67 	if (soc->htt_htc_pkt_freelist) {
68 		pkt = soc->htt_htc_pkt_freelist;
69 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
70 	}
71 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
72 
73 	if (!pkt)
74 		pkt = qdf_mem_malloc(sizeof(*pkt));
75 
76 	if (!pkt)
77 		return NULL;
78 
79 	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
80 
81 	return &pkt->u.pkt; /* not actually a dereference */
82 }
83 
84 qdf_export_symbol(htt_htc_pkt_alloc);
85 
86 void
87 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
88 {
89 	struct dp_htt_htc_pkt_union *u_pkt =
90 		(struct dp_htt_htc_pkt_union *)pkt;
91 
92 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
93 	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
94 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
95 	soc->htt_htc_pkt_freelist = u_pkt;
96 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
97 }
98 
99 qdf_export_symbol(htt_htc_pkt_free);
100 
101 /*
102  * htt_htc_pkt_pool_free() - Free HTC packet pool
103  * @htt_soc:	HTT SOC handle
104  */
105 void
106 htt_htc_pkt_pool_free(struct htt_soc *soc)
107 {
108 	struct dp_htt_htc_pkt_union *pkt, *next;
109 	pkt = soc->htt_htc_pkt_freelist;
110 	while (pkt) {
111 		next = pkt->u.next;
112 		qdf_mem_free(pkt);
113 		pkt = next;
114 	}
115 	soc->htt_htc_pkt_freelist = NULL;
116 }
117 
118 
119 #ifndef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
120 
/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no. of pkts allowed to remain in the list
 *
 * Walks the misc list under the HTT tx mutex. Once more than @level
 * entries have been seen, every remaining entry is reclaimed: its network
 * buffer is unmapped and freed, and the packet wrapper itself is freed.
 * The retained portion of the list is NUL-terminated at the last kept
 * element.
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			/* pkt is set NULL so that, from the second freed
			 * entry onward, prev stays NULL and the terminator
			 * below is written exactly once (at the boundary
			 * between kept and trimmed entries)
			 */
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
153 
154 /*
155  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
156  * @htt_soc:	HTT SOC handle
157  * @dp_htt_htc_pkt: pkt to be added to list
158  */
159 void
160 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
161 {
162 	struct dp_htt_htc_pkt_union *u_pkt =
163 				(struct dp_htt_htc_pkt_union *)pkt;
164 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
165 							pkt->htc_pkt.Endpoint)
166 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
167 
168 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
169 	if (soc->htt_htc_pkt_misclist) {
170 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
171 		soc->htt_htc_pkt_misclist = u_pkt;
172 	} else {
173 		soc->htt_htc_pkt_misclist = u_pkt;
174 	}
175 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
176 
177 	/* only ce pipe size + tx_queue_depth could possibly be in use
178 	 * free older packets in the misclist
179 	 */
180 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
181 }
182 
183 qdf_export_symbol(htt_htc_misc_pkt_list_add);
184 #endif  /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
185 
/*
 * htt_htc_misc_pkt_pool_free() - free pkts in misc list
 * @soc:	HTT SOC handle
 *
 * Walks the misc list under the HTT tx mutex. Entries that still carry
 * the valid HTC magic cookie have their network buffer unmapped and
 * freed, and the packet wrapper freed. Entries with a mismatched cookie
 * are only counted (stats.skip_count) and are NOT freed here. The list
 * head is reset to NULL when the walk completes.
 */
static void
htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt, *next;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;

	while (pkt) {
		next = pkt->u.next;
		/* cookie mismatch: skip without freeing — presumably the
		 * entry was reclaimed or corrupted elsewhere; NOTE(review):
		 * skipped entries are never freed on this path, verify they
		 * are accounted for elsewhere
		 */
		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
		    HTC_PACKET_MAGIC_COOKIE) {
			pkt = next;
			soc->stats.skip_count++;
			continue;
		}
		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);

		soc->stats.htc_pkt_free++;
		dp_htt_info("%pK: Pkt free count %d",
			    soc->dp_soc, soc->stats.htc_pkt_free);

		qdf_nbuf_free(netbuf);
		qdf_mem_free(pkt);
		pkt = next;
	}
	soc->htt_htc_pkt_misclist = NULL;
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
	dp_info("HTC Packets, fail count = %d, skip count = %d",
		soc->stats.fail_count, soc->stats.skip_count);
}
223 
/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
 * @tgt_mac_addr:	MAC address bytes as received from the target
 * @buffer:		caller-provided output buffer (used on BE hosts only)
 *
 * Return: pointer to the de-swizzled 6-byte MAC address — @buffer on
 * big-endian hosts, @tgt_mac_addr itself otherwise.
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness.
	 * To make u_int32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each u_int32_t element of the
	 * message.
	 * For byte-array message fields like the MAC address, this
	 * upload swizzling puts the bytes in the wrong order, and needs
	 * to be undone.
	 */
	/* Undo the per-u32 swizzle for the 6 MAC bytes: bytes 0-3 sit
	 * reversed in the first u32; bytes 4-5 sit at swizzled offsets
	 * 7 and 6 of the second u32.
	 */
	buffer[0] = tgt_mac_addr[3];
	buffer[1] = tgt_mac_addr[2];
	buffer[2] = tgt_mac_addr[1];
	buffer[3] = tgt_mac_addr[0];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
257 
/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc:	SOC handle (unused)
 * @status:	Completion status (unused; the buffer is freed regardless)
 * @netbuf:	HTT buffer to release
 *
 * Send-done callback installed via SET_HTC_PACKET_INFO_TX for H2T
 * messages that need no further completion handling.
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
270 
271 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context:	Opaque context (HTT SOC handle)
 * @htc_pkt:	HTC packet
 *
 * CE4-completion variant (misc list disabled): unconditionally frees the
 * message netbuf and recycles the HTT/HTC packet wrapper.
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	/* recover the wrapper that embeds this HTC packet */
	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	/* free the htt_htc_pkt / HTC_PACKET object */
	qdf_nbuf_free(netbuf);
	htt_htc_pkt_free(soc, htt_pkt);
}
296 
297 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
298 
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context:	Opaque context (HTT SOC handle)
 * @htc_pkt:	HTC packet
 *
 * Misc-list variant: invokes the optional part-2 completion callback
 * stashed in the HTC packet's pPktContext (handing it the netbuf) before
 * recycling the HTT/HTC packet wrapper.
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*send_complete_part2)(
	     void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	send_complete_part2 = htc_pkt->pPktContext;

	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	*/
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	/* NOTE(review): when no part-2 callback is set, the netbuf is not
	 * freed here — presumably reclaimed via the misc list; verify.
	 */
	if (send_complete_part2){
		send_complete_part2(
		    htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}
330 
331 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
332 
/*
 * dp_htt_h2t_add_tcl_metadata_ver_v1() - Build VERSION_REQ msg (metadata V1)
 * @soc:	HTT SOC handle
 * @msg:	out-param; on success receives the allocated nbuf, ready to
 *		be handed to HTC
 *
 * Allocates and fills a one-word HTT_H2T_MSG_TYPE_VERSION_REQ message
 * with no TCL-metadata-version option TLV.
 *
 * Return: 0 on success; error code on failure (on put_tail failure the
 * nbuf is left in *msg for the caller to free)
 */
static int dp_htt_h2t_add_tcl_metadata_ver_v1(struct htt_soc *soc,
					      qdf_nbuf_t *msg)
{
	uint32_t *msg_word;

	*msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!*msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(*msg, HTT_VER_REQ_BYTES)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
			  __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	/* capture the payload pointer BEFORE push_head below moves the
	 * nbuf data pointer back over the HTC reserved area
	 */
	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);

	return QDF_STATUS_SUCCESS;
}
377 
378 #ifdef QCA_DP_TX_FW_METADATA_V2
/*
 * dp_htt_h2t_add_tcl_metadata_ver_v2() - Build VERSION_REQ msg (metadata V2)
 * @soc:	HTT SOC handle
 * @msg:	out-param; on success receives the allocated nbuf, ready to
 *		be handed to HTC
 *
 * Allocates and fills an HTT_H2T_MSG_TYPE_VERSION_REQ message carrying an
 * option TLV (word 1) that requests TCL metadata version V2 from the FW.
 *
 * Return: 0 on success; error code on failure (on put_tail failure the
 * nbuf is left in *msg for the caller to free)
 */
static int dp_htt_h2t_add_tcl_metadata_ver_v2(struct htt_soc *soc,
					      qdf_nbuf_t *msg)
{
	uint32_t *msg_word;

	*msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!*msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(*msg,
			       HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
			  __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	/* capture the payload pointer BEFORE push_head below moves the
	 * nbuf data pointer back over the HTC reserved area
	 */
	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);

	/* word 1: option TLV requesting TCL metadata V2 */
	msg_word++;
	*msg_word = 0;
	HTT_OPTION_TLV_TAG_SET(*msg_word, HTT_OPTION_TLV_TAG_TCL_METADATA_VER);
	HTT_OPTION_TLV_LENGTH_SET(*msg_word, HTT_TCL_METADATA_VER_SZ);
	HTT_OPTION_TLV_TCL_METADATA_VER_SET(*msg_word,
					    HTT_OPTION_TLV_TCL_METADATA_V2);

	return QDF_STATUS_SUCCESS;
}
432 
433 /*
434  * dp_htt_h2t_add_tcl_metadata_ver() - Add tcl_metadata verion
435  * @htt_soc:	HTT SOC handle
436  * @msg:	Pointer to nbuf
437  *
438  * Return: 0 on success; error code on failure
439  */
440 static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
441 {
442 	/* Use tcl_metadata_v1 when NSS offload is enabled */
443 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->dp_soc->wlan_cfg_ctx) ||
444 	    soc->dp_soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
445 		return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
446 	else
447 		return dp_htt_h2t_add_tcl_metadata_ver_v2(soc, msg);
448 }
449 #else
/* TCL metadata V2 support not compiled in: always use the V1 format */
static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
{
	return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
}
454 #endif
455 
/*
 * htt_h2t_ver_req_msg() - Send HTT version request message to target
 * @soc:	HTT SOC handle
 *
 * Builds a VERSION_REQ message (with or without the TCL-metadata-version
 * option TLV, decided by dp_htt_h2t_add_tcl_metadata_ver()), wraps it in
 * an HTC packet and sends it. On send failure both the nbuf and the
 * packet wrapper are freed here.
 *
 * Return: 0 on success; error code on failure
 */
static int htt_h2t_ver_req_msg(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg = NULL;
	QDF_STATUS status;

	status = dp_htt_h2t_add_tcl_metadata_ver(soc, &msg);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}
	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
		qdf_nbuf_len(msg), soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RTPM_PUT_RC);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
				     NULL);

	/* on failure, reclaim both the message buffer and the pkt wrapper */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
495 
496 /*
497  * htt_srng_setup() - Send SRNG setup message to target
498  * @htt_soc:	HTT SOC handle
499  * @mac_id:	MAC Id
500  * @hal_srng:	Opaque HAL SRNG pointer
501  * @hal_ring_type:	SRNG ring type
502  *
503  * Return: 0 on success; error code on failure
504  */
505 int htt_srng_setup(struct htt_soc *soc, int mac_id,
506 		   hal_ring_handle_t hal_ring_hdl,
507 		   int hal_ring_type)
508 {
509 	struct dp_htt_htc_pkt *pkt;
510 	qdf_nbuf_t htt_msg;
511 	uint32_t *msg_word;
512 	struct hal_srng_params srng_params;
513 	qdf_dma_addr_t hp_addr, tp_addr;
514 	uint32_t ring_entry_size =
515 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
516 	int htt_ring_type, htt_ring_id;
517 	uint8_t *htt_logger_bufp;
518 	int target_pdev_id;
519 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
520 	QDF_STATUS status;
521 
522 	/* Sizes should be set in 4-byte words */
523 	ring_entry_size = ring_entry_size >> 2;
524 
525 	htt_msg = qdf_nbuf_alloc(soc->osdev,
526 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
527 		/* reserve room for the HTC header */
528 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
529 	if (!htt_msg) {
530 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
531 		goto fail0;
532 	}
533 
534 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
535 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
536 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
537 
538 	switch (hal_ring_type) {
539 	case RXDMA_BUF:
540 #ifdef QCA_HOST2FW_RXBUF_RING
541 		if (srng_params.ring_id ==
542 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
543 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
544 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
545 			htt_ring_type = HTT_SW_TO_SW_RING;
546 #ifdef IPA_OFFLOAD
547 		} else if (srng_params.ring_id ==
548 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 +
549 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
550 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
551 			htt_ring_type = HTT_SW_TO_SW_RING;
552 #ifdef IPA_WDI3_VLAN_SUPPORT
553 		} else if (srng_params.ring_id ==
554 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF3 +
555 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
556 			htt_ring_id = HTT_HOST3_TO_FW_RXBUF_RING;
557 			htt_ring_type = HTT_SW_TO_SW_RING;
558 #endif
559 #endif
560 #else
561 		if (srng_params.ring_id ==
562 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
563 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
564 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
565 			htt_ring_type = HTT_SW_TO_HW_RING;
566 #endif
567 		} else if (srng_params.ring_id ==
568 #ifdef IPA_OFFLOAD
569 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
570 #else
571 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
572 #endif
573 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
574 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
575 			htt_ring_type = HTT_SW_TO_HW_RING;
576 		} else {
577 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
578 				   "%s: Ring %d currently not supported",
579 				   __func__, srng_params.ring_id);
580 			goto fail1;
581 		}
582 
583 		break;
584 	case RXDMA_MONITOR_BUF:
585 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
586 							 RXDMA_MONITOR_BUF);
587 		htt_ring_type = HTT_SW_TO_HW_RING;
588 		break;
589 	case RXDMA_MONITOR_STATUS:
590 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
591 		htt_ring_type = HTT_SW_TO_HW_RING;
592 		break;
593 	case RXDMA_MONITOR_DST:
594 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
595 							 RXDMA_MONITOR_DST);
596 		htt_ring_type = HTT_HW_TO_SW_RING;
597 		break;
598 	case RXDMA_MONITOR_DESC:
599 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
600 		htt_ring_type = HTT_SW_TO_HW_RING;
601 		break;
602 	case RXDMA_DST:
603 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
604 		htt_ring_type = HTT_HW_TO_SW_RING;
605 		break;
606 	case TX_MONITOR_BUF:
607 		htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
608 		htt_ring_type = HTT_SW_TO_HW_RING;
609 		break;
610 	case TX_MONITOR_DST:
611 		htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
612 		htt_ring_type = HTT_HW_TO_SW_RING;
613 		break;
614 
615 	default:
616 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
617 			"%s: Ring currently not supported", __func__);
618 			goto fail1;
619 	}
620 
621 	dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
622 		hal_ring_type, srng_params.ring_id, htt_ring_id,
623 		(uint64_t)hp_addr,
624 		(uint64_t)tp_addr);
625 	/*
626 	 * Set the length of the message.
627 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
628 	 * separately during the below call to qdf_nbuf_push_head.
629 	 * The contribution from the HTC header is added separately inside HTC.
630 	 */
631 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
632 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
633 			"%s: Failed to expand head for SRING_SETUP msg",
634 			__func__);
635 		return QDF_STATUS_E_FAILURE;
636 	}
637 
638 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
639 
640 	/* rewind beyond alignment pad to get to the HTC header reserved area */
641 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
642 
643 	/* word 0 */
644 	*msg_word = 0;
645 	htt_logger_bufp = (uint8_t *)msg_word;
646 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
647 	target_pdev_id =
648 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
649 
650 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
651 			(htt_ring_type == HTT_HW_TO_SW_RING))
652 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
653 	else
654 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
655 
656 	dp_info("mac_id %d", mac_id);
657 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
658 	/* TODO: Discuss with FW on changing this to unique ID and using
659 	 * htt_ring_type to send the type of ring
660 	 */
661 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
662 
663 	/* word 1 */
664 	msg_word++;
665 	*msg_word = 0;
666 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
667 		srng_params.ring_base_paddr & 0xffffffff);
668 
669 	/* word 2 */
670 	msg_word++;
671 	*msg_word = 0;
672 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
673 		(uint64_t)srng_params.ring_base_paddr >> 32);
674 
675 	/* word 3 */
676 	msg_word++;
677 	*msg_word = 0;
678 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
679 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
680 		(ring_entry_size * srng_params.num_entries));
681 	dp_info("entry_size %d", ring_entry_size);
682 	dp_info("num_entries %d", srng_params.num_entries);
683 	dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
684 	if (htt_ring_type == HTT_SW_TO_HW_RING)
685 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
686 						*msg_word, 1);
687 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
688 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
689 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
690 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
691 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
692 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
693 
694 	/* word 4 */
695 	msg_word++;
696 	*msg_word = 0;
697 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
698 		hp_addr & 0xffffffff);
699 
700 	/* word 5 */
701 	msg_word++;
702 	*msg_word = 0;
703 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
704 		(uint64_t)hp_addr >> 32);
705 
706 	/* word 6 */
707 	msg_word++;
708 	*msg_word = 0;
709 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
710 		tp_addr & 0xffffffff);
711 
712 	/* word 7 */
713 	msg_word++;
714 	*msg_word = 0;
715 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
716 		(uint64_t)tp_addr >> 32);
717 
718 	/* word 8 */
719 	msg_word++;
720 	*msg_word = 0;
721 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
722 		srng_params.msi_addr & 0xffffffff);
723 
724 	/* word 9 */
725 	msg_word++;
726 	*msg_word = 0;
727 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
728 		(uint64_t)(srng_params.msi_addr) >> 32);
729 
730 	/* word 10 */
731 	msg_word++;
732 	*msg_word = 0;
733 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
734 		qdf_cpu_to_le32(srng_params.msi_data));
735 
736 	/* word 11 */
737 	msg_word++;
738 	*msg_word = 0;
739 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
740 		srng_params.intr_batch_cntr_thres_entries *
741 		ring_entry_size);
742 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
743 		srng_params.intr_timer_thres_us >> 3);
744 
745 	/* word 12 */
746 	msg_word++;
747 	*msg_word = 0;
748 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
749 		/* TODO: Setting low threshold to 1/8th of ring size - see
750 		 * if this needs to be configurable
751 		 */
752 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
753 			srng_params.low_threshold);
754 	}
755 	/* "response_required" field should be set if a HTT response message is
756 	 * required after setting up the ring.
757 	 */
758 	pkt = htt_htc_pkt_alloc(soc);
759 	if (!pkt) {
760 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
761 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
762 		goto fail1;
763 	}
764 
765 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
766 
767 	SET_HTC_PACKET_INFO_TX(
768 		&pkt->htc_pkt,
769 		dp_htt_h2t_send_complete_free_netbuf,
770 		qdf_nbuf_data(htt_msg),
771 		qdf_nbuf_len(htt_msg),
772 		soc->htc_endpoint,
773 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
774 
775 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
776 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
777 				     htt_logger_bufp);
778 
779 	if (status != QDF_STATUS_SUCCESS) {
780 		qdf_nbuf_free(htt_msg);
781 		htt_htc_pkt_free(soc, pkt);
782 	}
783 
784 	return status;
785 
786 fail1:
787 	qdf_nbuf_free(htt_msg);
788 fail0:
789 	return QDF_STATUS_E_FAILURE;
790 }
791 
792 qdf_export_symbol(htt_srng_setup);
793 
794 #ifdef QCA_SUPPORT_FULL_MON
795 /**
796  * htt_h2t_full_mon_cfg() - Send full monitor configuarion msg to FW
797  *
798  * @htt_soc: HTT Soc handle
799  * @pdev_id: Radio id
800  * @dp_full_mon_config: enabled/disable configuration
801  *
802  * Return: Success when HTT message is sent, error on failure
803  */
804 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
805 			 uint8_t pdev_id,
806 			 enum dp_full_mon_config config)
807 {
808 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
809 	struct dp_htt_htc_pkt *pkt;
810 	qdf_nbuf_t htt_msg;
811 	uint32_t *msg_word;
812 	uint8_t *htt_logger_bufp;
813 
814 	htt_msg = qdf_nbuf_alloc(soc->osdev,
815 				 HTT_MSG_BUF_SIZE(
816 				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
817 				 /* reserve room for the HTC header */
818 				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
819 				 4,
820 				 TRUE);
821 	if (!htt_msg)
822 		return QDF_STATUS_E_FAILURE;
823 
824 	/*
825 	 * Set the length of the message.
826 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
827 	 * separately during the below call to qdf_nbuf_push_head.
828 	 * The contribution from the HTC header is added separately inside HTC.
829 	 */
830 	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
831 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
832 			  "%s: Failed to expand head for RX Ring Cfg msg",
833 			  __func__);
834 		goto fail1;
835 	}
836 
837 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
838 
839 	/* rewind beyond alignment pad to get to the HTC header reserved area */
840 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
841 
842 	/* word 0 */
843 	*msg_word = 0;
844 	htt_logger_bufp = (uint8_t *)msg_word;
845 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
846 	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
847 			*msg_word, DP_SW2HW_MACID(pdev_id));
848 
849 	msg_word++;
850 	*msg_word = 0;
851 	/* word 1 */
852 	if (config == DP_FULL_MON_ENABLE) {
853 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
854 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
855 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
856 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
857 	} else if (config == DP_FULL_MON_DISABLE) {
858 		/* As per MAC team's suggestion, While disbaling full monitor
859 		 * mode, Set 'en' bit to true in full monitor mode register.
860 		 */
861 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
862 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
863 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
864 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
865 	}
866 
867 	pkt = htt_htc_pkt_alloc(soc);
868 	if (!pkt) {
869 		qdf_err("HTC packet allocation failed");
870 		goto fail1;
871 	}
872 
873 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
874 
875 	SET_HTC_PACKET_INFO_TX(
876 		&pkt->htc_pkt,
877 		dp_htt_h2t_send_complete_free_netbuf,
878 		qdf_nbuf_data(htt_msg),
879 		qdf_nbuf_len(htt_msg),
880 		soc->htc_endpoint,
881 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
882 
883 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
884 	qdf_debug("config: %d", config);
885 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
886 			    htt_logger_bufp);
887 	return QDF_STATUS_SUCCESS;
888 fail1:
889 	qdf_nbuf_free(htt_msg);
890 	return QDF_STATUS_E_FAILURE;
891 }
892 
893 qdf_export_symbol(htt_h2t_full_mon_cfg);
894 #else
/* Stub when QCA_SUPPORT_FULL_MON is disabled: nothing to configure */
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	return 0;
}

qdf_export_symbol(htt_h2t_full_mon_cfg);
903 #endif
904 
905 #ifdef QCA_UNDECODED_METADATA_SUPPORT
/*
 * dp_mon_rx_enable_phy_errors() - fill PHY error filter fields into the
 * RX ring selection cfg message
 * @msg_word: pointer to the current message word; the two following words
 *	      (words 12 and 13 of the message) are also written. Only the
 *	      local copy of the pointer is advanced — the caller's pointer
 *	      is unchanged.
 * @htt_tlv_filter: TLV filter config; a no-op unless
 *		    phy_err_filter_valid is set
 */
static inline void
dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
	if (htt_tlv_filter->phy_err_filter_valid) {
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_SET
			(*msg_word, htt_tlv_filter->fp_phy_err);
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_SRC_SET
			(*msg_word, htt_tlv_filter->fp_phy_err_buf_src);
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_DEST_SET
			(*msg_word, htt_tlv_filter->fp_phy_err_buf_dest);

		/* word 12*/
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_SET
			(*msg_word, htt_tlv_filter->phy_err_mask);

		/* word 13*/
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_CONT_SET
			(*msg_word, htt_tlv_filter->phy_err_mask_cont);
	}
}
931 #else
/* No-op stub when QCA_UNDECODED_METADATA_SUPPORT is disabled */
static inline void
dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
}
937 #endif
938 
939 /*
940  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
941  * config message to target
942  * @htt_soc:	HTT SOC handle
943  * @pdev_id:	WIN- PDEV Id, MCL- mac id
944  * @hal_srng:	Opaque HAL SRNG pointer
945  * @hal_ring_type:	SRNG ring type
946  * @ring_buf_size:	SRNG buffer size
947  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
948  * Return: 0 on success; error code on failure
949  */
950 int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
951 			hal_ring_handle_t hal_ring_hdl,
952 			int hal_ring_type, int ring_buf_size,
953 			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
954 {
955 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
956 	struct dp_htt_htc_pkt *pkt;
957 	qdf_nbuf_t htt_msg;
958 	uint32_t *msg_word;
959 	uint32_t *msg_word_data;
960 	struct hal_srng_params srng_params;
961 	uint32_t htt_ring_type, htt_ring_id;
962 	uint32_t tlv_filter;
963 	uint8_t *htt_logger_bufp;
964 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
965 	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
966 	int target_pdev_id;
967 	QDF_STATUS status;
968 
969 	htt_msg = qdf_nbuf_alloc(soc->osdev,
970 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
971 	/* reserve room for the HTC header */
972 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
973 	if (!htt_msg) {
974 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
975 		goto fail0;
976 	}
977 
978 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
979 
980 	switch (hal_ring_type) {
981 	case RXDMA_BUF:
982 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
983 		htt_ring_type = HTT_SW_TO_HW_RING;
984 		break;
985 	case RXDMA_MONITOR_BUF:
986 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
987 							 RXDMA_MONITOR_BUF);
988 		htt_ring_type = HTT_SW_TO_HW_RING;
989 		break;
990 	case RXDMA_MONITOR_STATUS:
991 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
992 		htt_ring_type = HTT_SW_TO_HW_RING;
993 		break;
994 	case RXDMA_MONITOR_DST:
995 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
996 							 RXDMA_MONITOR_DST);
997 		htt_ring_type = HTT_HW_TO_SW_RING;
998 		break;
999 	case RXDMA_MONITOR_DESC:
1000 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
1001 		htt_ring_type = HTT_SW_TO_HW_RING;
1002 		break;
1003 	case RXDMA_DST:
1004 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
1005 		htt_ring_type = HTT_HW_TO_SW_RING;
1006 		break;
1007 
1008 	default:
1009 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1010 			"%s: Ring currently not supported", __func__);
1011 		goto fail1;
1012 	}
1013 
1014 	dp_info("ring_type %d ring_id %d htt_ring_id %d",
1015 		hal_ring_type, srng_params.ring_id, htt_ring_id);
1016 
1017 	/*
1018 	 * Set the length of the message.
1019 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1020 	 * separately during the below call to qdf_nbuf_push_head.
1021 	 * The contribution from the HTC header is added separately inside HTC.
1022 	 */
1023 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
1024 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1025 			"%s: Failed to expand head for RX Ring Cfg msg",
1026 			__func__);
1027 		goto fail1; /* failure */
1028 	}
1029 
1030 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1031 
1032 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1033 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1034 
1035 	/* word 0 */
1036 	htt_logger_bufp = (uint8_t *)msg_word;
1037 	*msg_word = 0;
1038 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
1039 
1040 	/* applicable only for post Li */
1041 	dp_rx_mon_enable(soc->dp_soc, msg_word, htt_tlv_filter);
1042 
1043 	/*
1044 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
1045 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
1046 	 */
1047 	target_pdev_id =
1048 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);
1049 
1050 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
1051 			htt_ring_type == HTT_SW_TO_HW_RING ||
1052 			htt_ring_type == HTT_HW_TO_SW_RING)
1053 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
1054 						      target_pdev_id);
1055 
1056 	/* TODO: Discuss with FW on changing this to unique ID and using
1057 	 * htt_ring_type to send the type of ring
1058 	 */
1059 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
1060 
1061 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
1062 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
1063 
1064 	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
1065 						htt_tlv_filter->offset_valid);
1066 
1067 	if (mon_drop_th > 0)
1068 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1069 								   1);
1070 	else
1071 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1072 								   0);
1073 
1074 	/* word 1 */
1075 	msg_word++;
1076 	*msg_word = 0;
1077 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
1078 		ring_buf_size);
1079 
1080 	dp_mon_rx_packet_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
1081 	dp_mon_rx_hdr_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
1082 
1083 	/* word 2 */
1084 	msg_word++;
1085 	*msg_word = 0;
1086 
1087 	if (htt_tlv_filter->enable_fp) {
1088 		/* TYPE: MGMT */
1089 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1090 			FP, MGMT, 0000,
1091 			(htt_tlv_filter->fp_mgmt_filter &
1092 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1093 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1094 			FP, MGMT, 0001,
1095 			(htt_tlv_filter->fp_mgmt_filter &
1096 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1097 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1098 			FP, MGMT, 0010,
1099 			(htt_tlv_filter->fp_mgmt_filter &
1100 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1101 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1102 			FP, MGMT, 0011,
1103 			(htt_tlv_filter->fp_mgmt_filter &
1104 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1105 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1106 			FP, MGMT, 0100,
1107 			(htt_tlv_filter->fp_mgmt_filter &
1108 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1109 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1110 			FP, MGMT, 0101,
1111 			(htt_tlv_filter->fp_mgmt_filter &
1112 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1113 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1114 			FP, MGMT, 0110,
1115 			(htt_tlv_filter->fp_mgmt_filter &
1116 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1117 		/* reserved */
1118 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
1119 			MGMT, 0111,
1120 			(htt_tlv_filter->fp_mgmt_filter &
1121 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1122 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1123 			FP, MGMT, 1000,
1124 			(htt_tlv_filter->fp_mgmt_filter &
1125 			FILTER_MGMT_BEACON) ? 1 : 0);
1126 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1127 			FP, MGMT, 1001,
1128 			(htt_tlv_filter->fp_mgmt_filter &
1129 			FILTER_MGMT_ATIM) ? 1 : 0);
1130 	}
1131 
1132 	if (htt_tlv_filter->enable_md) {
1133 			/* TYPE: MGMT */
1134 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1135 			MD, MGMT, 0000,
1136 			(htt_tlv_filter->md_mgmt_filter &
1137 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1138 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1139 			MD, MGMT, 0001,
1140 			(htt_tlv_filter->md_mgmt_filter &
1141 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1142 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1143 			MD, MGMT, 0010,
1144 			(htt_tlv_filter->md_mgmt_filter &
1145 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1146 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1147 			MD, MGMT, 0011,
1148 			(htt_tlv_filter->md_mgmt_filter &
1149 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1150 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1151 			MD, MGMT, 0100,
1152 			(htt_tlv_filter->md_mgmt_filter &
1153 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1154 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1155 			MD, MGMT, 0101,
1156 			(htt_tlv_filter->md_mgmt_filter &
1157 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1158 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1159 			MD, MGMT, 0110,
1160 			(htt_tlv_filter->md_mgmt_filter &
1161 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1162 		/* reserved */
1163 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1164 			MGMT, 0111,
1165 			(htt_tlv_filter->md_mgmt_filter &
1166 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1167 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1168 			MD, MGMT, 1000,
1169 			(htt_tlv_filter->md_mgmt_filter &
1170 			FILTER_MGMT_BEACON) ? 1 : 0);
1171 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1172 			MD, MGMT, 1001,
1173 			(htt_tlv_filter->md_mgmt_filter &
1174 			FILTER_MGMT_ATIM) ? 1 : 0);
1175 	}
1176 
1177 	if (htt_tlv_filter->enable_mo) {
1178 		/* TYPE: MGMT */
1179 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1180 			MO, MGMT, 0000,
1181 			(htt_tlv_filter->mo_mgmt_filter &
1182 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1183 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1184 			MO, MGMT, 0001,
1185 			(htt_tlv_filter->mo_mgmt_filter &
1186 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1187 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1188 			MO, MGMT, 0010,
1189 			(htt_tlv_filter->mo_mgmt_filter &
1190 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1191 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1192 			MO, MGMT, 0011,
1193 			(htt_tlv_filter->mo_mgmt_filter &
1194 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1195 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1196 			MO, MGMT, 0100,
1197 			(htt_tlv_filter->mo_mgmt_filter &
1198 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1199 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1200 			MO, MGMT, 0101,
1201 			(htt_tlv_filter->mo_mgmt_filter &
1202 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1203 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1204 			MO, MGMT, 0110,
1205 			(htt_tlv_filter->mo_mgmt_filter &
1206 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1207 		/* reserved */
1208 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1209 			MGMT, 0111,
1210 			(htt_tlv_filter->mo_mgmt_filter &
1211 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1212 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1213 			MO, MGMT, 1000,
1214 			(htt_tlv_filter->mo_mgmt_filter &
1215 			FILTER_MGMT_BEACON) ? 1 : 0);
1216 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1217 			MO, MGMT, 1001,
1218 			(htt_tlv_filter->mo_mgmt_filter &
1219 			FILTER_MGMT_ATIM) ? 1 : 0);
1220 	}
1221 
1222 	/* word 3 */
1223 	msg_word++;
1224 	*msg_word = 0;
1225 
1226 	if (htt_tlv_filter->enable_fp) {
1227 		/* TYPE: MGMT */
1228 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1229 			FP, MGMT, 1010,
1230 			(htt_tlv_filter->fp_mgmt_filter &
1231 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1232 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1233 			FP, MGMT, 1011,
1234 			(htt_tlv_filter->fp_mgmt_filter &
1235 			FILTER_MGMT_AUTH) ? 1 : 0);
1236 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1237 			FP, MGMT, 1100,
1238 			(htt_tlv_filter->fp_mgmt_filter &
1239 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1240 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1241 			FP, MGMT, 1101,
1242 			(htt_tlv_filter->fp_mgmt_filter &
1243 			FILTER_MGMT_ACTION) ? 1 : 0);
1244 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1245 			FP, MGMT, 1110,
1246 			(htt_tlv_filter->fp_mgmt_filter &
1247 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1248 		/* reserved*/
1249 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1250 			MGMT, 1111,
1251 			(htt_tlv_filter->fp_mgmt_filter &
1252 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1253 	}
1254 
1255 	if (htt_tlv_filter->enable_md) {
1256 			/* TYPE: MGMT */
1257 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1258 			MD, MGMT, 1010,
1259 			(htt_tlv_filter->md_mgmt_filter &
1260 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1261 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1262 			MD, MGMT, 1011,
1263 			(htt_tlv_filter->md_mgmt_filter &
1264 			FILTER_MGMT_AUTH) ? 1 : 0);
1265 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1266 			MD, MGMT, 1100,
1267 			(htt_tlv_filter->md_mgmt_filter &
1268 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1269 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1270 			MD, MGMT, 1101,
1271 			(htt_tlv_filter->md_mgmt_filter &
1272 			FILTER_MGMT_ACTION) ? 1 : 0);
1273 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1274 			MD, MGMT, 1110,
1275 			(htt_tlv_filter->md_mgmt_filter &
1276 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1277 	}
1278 
1279 	if (htt_tlv_filter->enable_mo) {
1280 		/* TYPE: MGMT */
1281 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1282 			MO, MGMT, 1010,
1283 			(htt_tlv_filter->mo_mgmt_filter &
1284 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1285 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1286 			MO, MGMT, 1011,
1287 			(htt_tlv_filter->mo_mgmt_filter &
1288 			FILTER_MGMT_AUTH) ? 1 : 0);
1289 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1290 			MO, MGMT, 1100,
1291 			(htt_tlv_filter->mo_mgmt_filter &
1292 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1293 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1294 			MO, MGMT, 1101,
1295 			(htt_tlv_filter->mo_mgmt_filter &
1296 			FILTER_MGMT_ACTION) ? 1 : 0);
1297 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1298 			MO, MGMT, 1110,
1299 			(htt_tlv_filter->mo_mgmt_filter &
1300 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1301 		/* reserved*/
1302 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1303 			MGMT, 1111,
1304 			(htt_tlv_filter->mo_mgmt_filter &
1305 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1306 	}
1307 
1308 	/* word 4 */
1309 	msg_word++;
1310 	*msg_word = 0;
1311 
1312 	if (htt_tlv_filter->enable_fp) {
1313 		/* TYPE: CTRL */
1314 		/* reserved */
1315 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1316 			CTRL, 0000,
1317 			(htt_tlv_filter->fp_ctrl_filter &
1318 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1319 		/* reserved */
1320 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1321 			CTRL, 0001,
1322 			(htt_tlv_filter->fp_ctrl_filter &
1323 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1324 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1325 			CTRL, 0010,
1326 			(htt_tlv_filter->fp_ctrl_filter &
1327 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1328 		/* reserved */
1329 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1330 			CTRL, 0011,
1331 			(htt_tlv_filter->fp_ctrl_filter &
1332 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1333 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1334 			CTRL, 0100,
1335 			(htt_tlv_filter->fp_ctrl_filter &
1336 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1337 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1338 			CTRL, 0101,
1339 			(htt_tlv_filter->fp_ctrl_filter &
1340 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1341 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1342 			CTRL, 0110,
1343 			(htt_tlv_filter->fp_ctrl_filter &
1344 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1345 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1346 			CTRL, 0111,
1347 			(htt_tlv_filter->fp_ctrl_filter &
1348 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1349 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1350 			CTRL, 1000,
1351 			(htt_tlv_filter->fp_ctrl_filter &
1352 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1353 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1354 			CTRL, 1001,
1355 			(htt_tlv_filter->fp_ctrl_filter &
1356 			FILTER_CTRL_BA) ? 1 : 0);
1357 	}
1358 
1359 	if (htt_tlv_filter->enable_md) {
1360 		/* TYPE: CTRL */
1361 		/* reserved */
1362 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1363 			CTRL, 0000,
1364 			(htt_tlv_filter->md_ctrl_filter &
1365 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1366 		/* reserved */
1367 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1368 			CTRL, 0001,
1369 			(htt_tlv_filter->md_ctrl_filter &
1370 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1371 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1372 			CTRL, 0010,
1373 			(htt_tlv_filter->md_ctrl_filter &
1374 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1375 		/* reserved */
1376 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1377 			CTRL, 0011,
1378 			(htt_tlv_filter->md_ctrl_filter &
1379 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1380 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1381 			CTRL, 0100,
1382 			(htt_tlv_filter->md_ctrl_filter &
1383 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1384 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1385 			CTRL, 0101,
1386 			(htt_tlv_filter->md_ctrl_filter &
1387 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1388 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1389 			CTRL, 0110,
1390 			(htt_tlv_filter->md_ctrl_filter &
1391 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1392 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1393 			CTRL, 0111,
1394 			(htt_tlv_filter->md_ctrl_filter &
1395 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1396 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1397 			CTRL, 1000,
1398 			(htt_tlv_filter->md_ctrl_filter &
1399 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1400 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1401 			CTRL, 1001,
1402 			(htt_tlv_filter->md_ctrl_filter &
1403 			FILTER_CTRL_BA) ? 1 : 0);
1404 	}
1405 
1406 	if (htt_tlv_filter->enable_mo) {
1407 		/* TYPE: CTRL */
1408 		/* reserved */
1409 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1410 			CTRL, 0000,
1411 			(htt_tlv_filter->mo_ctrl_filter &
1412 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1413 		/* reserved */
1414 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1415 			CTRL, 0001,
1416 			(htt_tlv_filter->mo_ctrl_filter &
1417 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1418 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1419 			CTRL, 0010,
1420 			(htt_tlv_filter->mo_ctrl_filter &
1421 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1422 		/* reserved */
1423 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1424 			CTRL, 0011,
1425 			(htt_tlv_filter->mo_ctrl_filter &
1426 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1427 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1428 			CTRL, 0100,
1429 			(htt_tlv_filter->mo_ctrl_filter &
1430 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1431 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1432 			CTRL, 0101,
1433 			(htt_tlv_filter->mo_ctrl_filter &
1434 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1435 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1436 			CTRL, 0110,
1437 			(htt_tlv_filter->mo_ctrl_filter &
1438 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1439 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1440 			CTRL, 0111,
1441 			(htt_tlv_filter->mo_ctrl_filter &
1442 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1443 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1444 			CTRL, 1000,
1445 			(htt_tlv_filter->mo_ctrl_filter &
1446 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1447 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1448 			CTRL, 1001,
1449 			(htt_tlv_filter->mo_ctrl_filter &
1450 			FILTER_CTRL_BA) ? 1 : 0);
1451 	}
1452 
1453 	/* word 5 */
1454 	msg_word++;
1455 	*msg_word = 0;
1456 	if (htt_tlv_filter->enable_fp) {
1457 		/* TYPE: CTRL */
1458 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1459 			CTRL, 1010,
1460 			(htt_tlv_filter->fp_ctrl_filter &
1461 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1462 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1463 			CTRL, 1011,
1464 			(htt_tlv_filter->fp_ctrl_filter &
1465 			FILTER_CTRL_RTS) ? 1 : 0);
1466 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1467 			CTRL, 1100,
1468 			(htt_tlv_filter->fp_ctrl_filter &
1469 			FILTER_CTRL_CTS) ? 1 : 0);
1470 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1471 			CTRL, 1101,
1472 			(htt_tlv_filter->fp_ctrl_filter &
1473 			FILTER_CTRL_ACK) ? 1 : 0);
1474 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1475 			CTRL, 1110,
1476 			(htt_tlv_filter->fp_ctrl_filter &
1477 			FILTER_CTRL_CFEND) ? 1 : 0);
1478 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1479 			CTRL, 1111,
1480 			(htt_tlv_filter->fp_ctrl_filter &
1481 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1482 		/* TYPE: DATA */
1483 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1484 			DATA, MCAST,
1485 			(htt_tlv_filter->fp_data_filter &
1486 			FILTER_DATA_MCAST) ? 1 : 0);
1487 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1488 			DATA, UCAST,
1489 			(htt_tlv_filter->fp_data_filter &
1490 			FILTER_DATA_UCAST) ? 1 : 0);
1491 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1492 			DATA, NULL,
1493 			(htt_tlv_filter->fp_data_filter &
1494 			FILTER_DATA_NULL) ? 1 : 0);
1495 	}
1496 
1497 	if (htt_tlv_filter->enable_md) {
1498 		/* TYPE: CTRL */
1499 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1500 			CTRL, 1010,
1501 			(htt_tlv_filter->md_ctrl_filter &
1502 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1503 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1504 			CTRL, 1011,
1505 			(htt_tlv_filter->md_ctrl_filter &
1506 			FILTER_CTRL_RTS) ? 1 : 0);
1507 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1508 			CTRL, 1100,
1509 			(htt_tlv_filter->md_ctrl_filter &
1510 			FILTER_CTRL_CTS) ? 1 : 0);
1511 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1512 			CTRL, 1101,
1513 			(htt_tlv_filter->md_ctrl_filter &
1514 			FILTER_CTRL_ACK) ? 1 : 0);
1515 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1516 			CTRL, 1110,
1517 			(htt_tlv_filter->md_ctrl_filter &
1518 			FILTER_CTRL_CFEND) ? 1 : 0);
1519 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1520 			CTRL, 1111,
1521 			(htt_tlv_filter->md_ctrl_filter &
1522 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1523 		/* TYPE: DATA */
1524 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1525 			DATA, MCAST,
1526 			(htt_tlv_filter->md_data_filter &
1527 			FILTER_DATA_MCAST) ? 1 : 0);
1528 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1529 			DATA, UCAST,
1530 			(htt_tlv_filter->md_data_filter &
1531 			FILTER_DATA_UCAST) ? 1 : 0);
1532 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1533 			DATA, NULL,
1534 			(htt_tlv_filter->md_data_filter &
1535 			FILTER_DATA_NULL) ? 1 : 0);
1536 	}
1537 
1538 	if (htt_tlv_filter->enable_mo) {
1539 		/* TYPE: CTRL */
1540 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1541 			CTRL, 1010,
1542 			(htt_tlv_filter->mo_ctrl_filter &
1543 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1544 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1545 			CTRL, 1011,
1546 			(htt_tlv_filter->mo_ctrl_filter &
1547 			FILTER_CTRL_RTS) ? 1 : 0);
1548 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1549 			CTRL, 1100,
1550 			(htt_tlv_filter->mo_ctrl_filter &
1551 			FILTER_CTRL_CTS) ? 1 : 0);
1552 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1553 			CTRL, 1101,
1554 			(htt_tlv_filter->mo_ctrl_filter &
1555 			FILTER_CTRL_ACK) ? 1 : 0);
1556 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1557 			CTRL, 1110,
1558 			(htt_tlv_filter->mo_ctrl_filter &
1559 			FILTER_CTRL_CFEND) ? 1 : 0);
1560 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1561 			CTRL, 1111,
1562 			(htt_tlv_filter->mo_ctrl_filter &
1563 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1564 		/* TYPE: DATA */
1565 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1566 			DATA, MCAST,
1567 			(htt_tlv_filter->mo_data_filter &
1568 			FILTER_DATA_MCAST) ? 1 : 0);
1569 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1570 			DATA, UCAST,
1571 			(htt_tlv_filter->mo_data_filter &
1572 			FILTER_DATA_UCAST) ? 1 : 0);
1573 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1574 			DATA, NULL,
1575 			(htt_tlv_filter->mo_data_filter &
1576 			FILTER_DATA_NULL) ? 1 : 0);
1577 	}
1578 
1579 	/* word 6 */
1580 	msg_word++;
1581 	*msg_word = 0;
1582 	tlv_filter = 0;
1583 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1584 		htt_tlv_filter->mpdu_start);
1585 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1586 		htt_tlv_filter->msdu_start);
1587 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1588 		htt_tlv_filter->packet);
1589 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1590 		htt_tlv_filter->msdu_end);
1591 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1592 		htt_tlv_filter->mpdu_end);
1593 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1594 		htt_tlv_filter->packet_header);
1595 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1596 		htt_tlv_filter->attention);
1597 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1598 		htt_tlv_filter->ppdu_start);
1599 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1600 		htt_tlv_filter->ppdu_end);
1601 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1602 		htt_tlv_filter->ppdu_end_user_stats);
1603 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1604 		PPDU_END_USER_STATS_EXT,
1605 		htt_tlv_filter->ppdu_end_user_stats_ext);
1606 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1607 		htt_tlv_filter->ppdu_end_status_done);
1608 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START_USER_INFO,
1609 		htt_tlv_filter->ppdu_start_user_info);
1610 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1611 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1612 		 htt_tlv_filter->header_per_msdu);
1613 
1614 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1615 
1616 	msg_word_data = (uint32_t *)qdf_nbuf_data(htt_msg);
1617 	dp_info("config_data: [0x%x][0x%x][0x%x][0x%x][0x%x][0x%x][0x%x]",
1618 		msg_word_data[0], msg_word_data[1], msg_word_data[2],
1619 		msg_word_data[3], msg_word_data[4], msg_word_data[5],
1620 		msg_word_data[6]);
1621 
1622 	/* word 7 */
1623 	msg_word++;
1624 	*msg_word = 0;
1625 	if (htt_tlv_filter->offset_valid) {
1626 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1627 					htt_tlv_filter->rx_packet_offset);
1628 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1629 					htt_tlv_filter->rx_header_offset);
1630 
1631 		/* word 8 */
1632 		msg_word++;
1633 		*msg_word = 0;
1634 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1635 					htt_tlv_filter->rx_mpdu_end_offset);
1636 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1637 					htt_tlv_filter->rx_mpdu_start_offset);
1638 
1639 		/* word 9 */
1640 		msg_word++;
1641 		*msg_word = 0;
1642 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1643 					htt_tlv_filter->rx_msdu_end_offset);
1644 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1645 					htt_tlv_filter->rx_msdu_start_offset);
1646 
1647 		/* word 10 */
1648 		msg_word++;
1649 		*msg_word = 0;
1650 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1651 					htt_tlv_filter->rx_attn_offset);
1652 
1653 		/* word 11 */
1654 		msg_word++;
1655 		*msg_word = 0;
1656 	} else {
1657 		/* word 11 */
1658 		msg_word += 4;
1659 		*msg_word = 0;
1660 	}
1661 
1662 	if (mon_drop_th > 0)
1663 		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1664 								mon_drop_th);
1665 	dp_mon_rx_enable_mpdu_logging(soc->dp_soc, msg_word, htt_tlv_filter);
1666 
1667 	dp_mon_rx_enable_phy_errors(msg_word, htt_tlv_filter);
1668 
1669 	/* word 14*/
1670 	msg_word += 3;
1671 	*msg_word = 0;
1672 
1673 	dp_mon_rx_wmask_subscribe(soc->dp_soc, msg_word, htt_tlv_filter);
1674 
1675 #ifdef FW_SUPPORT_NOT_YET
1676 	/* word 17*/
1677 	msg_word += 3;
1678 	*msg_word = 0;
1679 
1680 	dp_mon_rx_enable_fpmo(soc->dp_soc, msg_word, htt_tlv_filter);
1681 #endif/* FW_SUPPORT_NOT_YET */
1682 
1683 	/* "response_required" field should be set if a HTT response message is
1684 	 * required after setting up the ring.
1685 	 */
1686 	pkt = htt_htc_pkt_alloc(soc);
1687 	if (!pkt) {
1688 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
1689 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
1690 		goto fail1;
1691 	}
1692 
1693 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1694 
1695 	SET_HTC_PACKET_INFO_TX(
1696 		&pkt->htc_pkt,
1697 		dp_htt_h2t_send_complete_free_netbuf,
1698 		qdf_nbuf_data(htt_msg),
1699 		qdf_nbuf_len(htt_msg),
1700 		soc->htc_endpoint,
1701 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1702 
1703 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1704 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
1705 				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1706 				     htt_logger_bufp);
1707 
1708 	if (status != QDF_STATUS_SUCCESS) {
1709 		qdf_nbuf_free(htt_msg);
1710 		htt_htc_pkt_free(soc, pkt);
1711 	}
1712 
1713 	return status;
1714 
1715 fail1:
1716 	qdf_nbuf_free(htt_msg);
1717 fail0:
1718 	return QDF_STATUS_E_FAILURE;
1719 }
1720 
1721 qdf_export_symbol(htt_h2t_rx_ring_cfg);
1722 
1723 #if defined(HTT_STATS_ENABLE)
1724 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1725 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1726 
1727 {
1728 	uint32_t pdev_id;
1729 	uint32_t *msg_word = NULL;
1730 	uint32_t msg_remain_len = 0;
1731 
1732 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1733 
1734 	/*COOKIE MSB*/
1735 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1736 
1737 	/* stats message length + 16 size of HTT header*/
1738 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1739 				(uint32_t)DP_EXT_MSG_LENGTH);
1740 
1741 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1742 			msg_word,  msg_remain_len,
1743 			WDI_NO_VAL, pdev_id);
1744 
1745 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1746 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1747 	}
1748 	/* Need to be freed here as WDI handler will
1749 	 * make a copy of pkt to send data to application
1750 	 */
1751 	qdf_nbuf_free(htt_msg);
1752 	return QDF_STATUS_SUCCESS;
1753 }
1754 #else
1755 static inline QDF_STATUS
1756 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1757 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
1758 {
1759 	return QDF_STATUS_E_NOSUPPORT;
1760 }
1761 #endif
1762 
1763 #ifdef HTT_STATS_DEBUGFS_SUPPORT
1764 /* dp_send_htt_stats_dbgfs_msg() - Function to send htt data to upper layer.
1765  * @pdev: dp pdev handle
1766  * @msg_word: HTT msg
1767  * @msg_len: Length of HTT msg sent
1768  *
1769  * Return: none
1770  */
1771 static inline void
1772 dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
1773 			    uint32_t msg_len)
1774 {
1775 	struct htt_dbgfs_cfg dbgfs_cfg;
1776 	int done = 0;
1777 
1778 	/* send 5th word of HTT msg to upper layer */
1779 	dbgfs_cfg.msg_word = (msg_word + 4);
1780 	dbgfs_cfg.m = pdev->dbgfs_cfg->m;
1781 
1782 	/* stats message length + 16 size of HTT header*/
1783 	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);
1784 
1785 	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
1786 		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
1787 							     (msg_len - HTT_HEADER_LEN));
1788 
1789 	/* Get TLV Done bit from 4th msg word */
1790 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1791 	if (done) {
1792 		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
1793 			dp_htt_err("%pK: Failed to set event for debugfs htt stats"
1794 				   , pdev->soc);
1795 	}
1796 }
1797 #else
1798 static inline void
1799 dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
1800 			    uint32_t msg_len)
1801 {
1802 }
1803 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
1804 
1805 #ifdef WLAN_SYSFS_DP_STATS
1806 /* dp_htt_stats_sysfs_update_config() - Function to send htt data to upper layer.
1807  * @pdev: dp pdev handle
1808  *
1809  * This function sets the process id and printing mode within the sysfs config
1810  * struct. which enables DP_PRINT statements within this process to write to the
1811  * console buffer provided by the user space.
1812  *
1813  * Return: None
1814  */
1815 static inline void
1816 dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
1817 {
1818 	struct dp_soc *soc = pdev->soc;
1819 
1820 	if (!soc) {
1821 		dp_htt_err("soc is null");
1822 		return;
1823 	}
1824 
1825 	if (!soc->sysfs_config) {
1826 		dp_htt_err("soc->sysfs_config is NULL");
1827 		return;
1828 	}
1829 
1830 	/* set sysfs config parameters */
1831 	soc->sysfs_config->process_id = qdf_get_current_pid();
1832 	soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
1833 }
1834 
1835 /*
1836  * dp_htt_stats_sysfs_set_event() - Set sysfs stats event.
1837  * @soc: soc handle.
1838  * @msg_word: Pointer to htt msg word.
1839  *
1840  * @return: void
1841  */
1842 static inline void
1843 dp_htt_stats_sysfs_set_event(struct dp_soc *soc, uint32_t *msg_word)
1844 {
1845 	int done = 0;
1846 
1847 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1848 	if (done) {
1849 		if (qdf_event_set(&soc->sysfs_config->sysfs_txrx_fw_request_done))
1850 			dp_htt_err("%pK:event compl Fail to set event ",
1851 				   soc);
1852 	}
1853 }
1854 #else /* WLAN_SYSFS_DP_STATS */
1855 static inline void
1856 dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
1857 {
1858 }
1859 
1860 static inline void
1861 dp_htt_stats_sysfs_set_event(struct dp_soc *dp_soc, uint32_t *msg_word)
1862 {
1863 }
1864 #endif /* WLAN_SYSFS_DP_STATS */
1865 
1866 /* dp_htt_set_pdev_obss_stats() - Function to set pdev obss stats.
1867  * @pdev: dp pdev handle
1868  * @tag_type: HTT TLV tag type
1869  * @tag_buf: TLV buffer pointer
1870  *
1871  * Return: None
1872  */
1873 static inline void
1874 dp_htt_set_pdev_obss_stats(struct dp_pdev *pdev, uint32_t tag_type,
1875 			   uint32_t *tag_buf)
1876 {
1877 	if (tag_type != HTT_STATS_PDEV_OBSS_PD_TAG) {
1878 		dp_err("Tag mismatch");
1879 		return;
1880 	}
1881 	qdf_mem_copy(&pdev->stats.htt_tx_pdev_stats.obss_pd_stats_tlv,
1882 		     tag_buf, sizeof(struct cdp_pdev_obss_pd_stats_tlv));
1883 	qdf_event_set(&pdev->fw_obss_stats_event);
1884 }
1885 
/**
 * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
 * @htt_stats: htt stats info
 * @soc: DP SOC handle
 *
 * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
 * contains sub messages which are identified by a TLV header.
 * In this function we will process the stream of T2H messages and read all the
 * TLV contained in the message.
 *
 * The following cases have been taken care of
 * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
 *		In this case the buffer will contain multiple tlvs.
 * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
 *		Only one tlv will be contained in the HTT message and this tag
 *		will extend onto the next buffer.
 * Case 3: When the buffer is the continuation of the previous message
 * Case 4: tlv length is 0. which will indicate the end of message
 *
 * return: void
 */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	/* Staging buffer (head) and write cursor (tail) used to reassemble a
	 * TLV that spans two consecutive HTT message buffers (Case 2/3).
	 */
	uint8_t *tlv_buf_head = NULL;
	uint8_t *tlv_buf_tail = NULL;
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val = 0;
	int cookie_msb = 0;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		/* Non-zero cookie: a host requester is waiting for this
		 * response; if it accepts the buffer, move to the next node.
		 */
		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		/* Debugfs-originated request: forward the raw message and
		 * skip local TLV processing for this buffer.
		 */
		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
						    htt_stats->msg_len);
			qdf_nbuf_free(htt_msg);
			continue;
		}

		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
			dp_htt_stats_sysfs_update_config(pdev);

		/* DP-stats cookie: copy the TLVs into pdev stats instead of
		 * printing them.
		 */
		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
			copy_stats = true;

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: a zero TLV length marks end of message. */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* TLV length excludes the header; account for it once
			 * when starting a fresh TLV.
			 */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3 */
				if (tlv_buf_head) {
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev,
							      tlv_type,
							      tlv_start);
				else
					dp_htt_stats_print_tag(pdev,
							       tlv_type,
							       tlv_start);

				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
					dp_peer_update_inactive_time(pdev,
								     tlv_type,
								     tlv_start);

				if (cookie_msb & DBG_STATS_COOKIE_HTT_OBSS)
					dp_htt_set_pdev_obss_stats(pdev,
								   tlv_type,
								   tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				/* Spanning TLV fully consumed: release the
				 * reassembly buffer.
				 */
				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				/* Buffer the partial TLV; the remainder
				 * arrives in the next HTT message buffer.
				 */
				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		/* indicate event completion in case the event is done */
		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
			dp_htt_stats_sysfs_set_event(soc, msg_word);

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* Abort: free the current buffer and drain the remaining queue. */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
2075 
void htt_t2h_stats_handler(void *context)
{
	struct dp_soc *soc = (struct dp_soc *)context;
	struct htt_stats_context htt_stats;
	uint32_t *msg_word;
	qdf_nbuf_t htt_msg = NULL;
	uint8_t done;
	uint32_t rem_stats;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc is NULL");
		return;
	}

	/* Ignore work scheduled before common init completed. */
	if (!qdf_atomic_read(&soc->cmn_init_done)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc: 0x%pK, init_done: %d", soc,
			  qdf_atomic_read(&soc->cmn_init_done));
		return;
	}

	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
	qdf_nbuf_queue_init(&htt_stats.msg);

	/* pull one completed stats from soc->htt_stats_msg and process */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	if (!soc->htt_stats.num_stats) {
		qdf_spin_unlock_bh(&soc->htt_stats.lock);
		return;
	}
	/* Move the buffers of exactly one response - up to and including the
	 * buffer carrying the DONE bit - onto the local queue.
	 */
	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
		/*
		 * Done bit signifies that this is the last T2H buffer in the
		 * stream of HTT EXT STATS message
		 */
		if (done)
			break;
	}
	rem_stats = --soc->htt_stats.num_stats;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	/* If there are more stats to process, schedule stats work again.
	 * Scheduling prior to processing ht_stats to queue with early
	 * index
	 */
	if (rem_stats)
		qdf_sched_work(0, &soc->htt_stats.work);

	dp_process_htt_stat_msg(&htt_stats, soc);
}
2131 
2132 /**
2133  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2134  * @soc: DP SOC handle
2135  * @htt_t2h_msg: HTT message nbuf
2136  *
2137  * return:void
2138  */
2139 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2140 					    qdf_nbuf_t htt_t2h_msg)
2141 {
2142 	uint8_t done;
2143 	qdf_nbuf_t msg_copy;
2144 	uint32_t *msg_word;
2145 
2146 	msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
2147 	msg_word = msg_word + 3;
2148 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2149 
2150 	/*
2151 	 * HTT EXT stats response comes as stream of TLVs which span over
2152 	 * multiple T2H messages.
2153 	 * The first message will carry length of the response.
2154 	 * For rest of the messages length will be zero.
2155 	 *
2156 	 * Clone the T2H message buffer and store it in a list to process
2157 	 * it later.
2158 	 *
2159 	 * The original T2H message buffers gets freed in the T2H HTT event
2160 	 * handler
2161 	 */
2162 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2163 
2164 	if (!msg_copy) {
2165 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2166 			  "T2H messge clone failed for HTT EXT STATS");
2167 		goto error;
2168 	}
2169 
2170 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2171 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2172 	/*
2173 	 * Done bit signifies that this is the last T2H buffer in the stream of
2174 	 * HTT EXT STATS message
2175 	 */
2176 	if (done) {
2177 		soc->htt_stats.num_stats++;
2178 		qdf_sched_work(0, &soc->htt_stats.work);
2179 	}
2180 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2181 
2182 	return;
2183 
2184 error:
2185 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2186 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2187 			!= NULL) {
2188 		qdf_nbuf_free(msg_copy);
2189 	}
2190 	soc->htt_stats.num_stats = 0;
2191 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2192 	return;
2193 }
2194 
2195 /*
2196  * htt_soc_attach_target() - SOC level HTT setup
2197  * @htt_soc:	HTT SOC handle
2198  *
2199  * Return: 0 on success; error code on failure
2200  */
int htt_soc_attach_target(struct htt_soc *htt_soc)
{
	/* The previous local `soc` was just the parameter cast to its own
	 * type; use the parameter directly and kick off the HTT H2T version
	 * request handshake with the target.
	 */
	return htt_h2t_ver_req_msg(htt_soc);
}
2207 
/* htt_set_htc_handle() - store the HTC handle used for HTT message TX */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}
2212 
/* htt_get_htc_handle() - return the HTC handle stored in the HTT soc */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
2217 
2218 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
2219 {
2220 	int i;
2221 	int j;
2222 	int umac_alloc_size = HTT_SW_UMAC_RING_IDX_MAX *
2223 			      sizeof(struct bp_handler);
2224 	int lmac_alloc_size = HTT_SW_LMAC_RING_IDX_MAX *
2225 			      sizeof(struct bp_handler);
2226 	struct htt_soc *htt_soc = NULL;
2227 
2228 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
2229 	if (!htt_soc) {
2230 		dp_err("HTT attach failed");
2231 		return NULL;
2232 	}
2233 
2234 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2235 		htt_soc->pdevid_tt[i].umac_path =
2236 			qdf_mem_malloc(umac_alloc_size);
2237 		if (!htt_soc->pdevid_tt[i].umac_path)
2238 			break;
2239 		for (j = 0; j < HTT_SW_UMAC_RING_IDX_MAX; j++)
2240 			htt_soc->pdevid_tt[i].umac_path[j].bp_start_tt = -1;
2241 		htt_soc->pdevid_tt[i].lmac_path =
2242 			qdf_mem_malloc(lmac_alloc_size);
2243 		if (!htt_soc->pdevid_tt[i].lmac_path) {
2244 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_path);
2245 			break;
2246 		}
2247 		for (j = 0; j < HTT_SW_LMAC_RING_IDX_MAX ; j++)
2248 			htt_soc->pdevid_tt[i].lmac_path[j].bp_start_tt = -1;
2249 	}
2250 
2251 	if (i != MAX_PDEV_CNT) {
2252 		for (j = 0; j < i; j++) {
2253 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_path);
2254 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_path);
2255 		}
2256 		qdf_mem_free(htt_soc);
2257 		return NULL;
2258 	}
2259 
2260 	htt_soc->dp_soc = soc;
2261 	htt_soc->htc_soc = htc_handle;
2262 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
2263 
2264 	return htt_soc;
2265 }
2266 
2267 #if defined(WDI_EVENT_ENABLE) && \
2268 	!defined(REMOVE_PKT_LOG)
2269 /*
2270  * dp_pktlog_msg_handler() - Pktlog msg handler
2271  * @htt_soc:	 HTT SOC handle
2272  * @msg_word:    Pointer to payload
2273  *
2274  * Return: None
2275  */
2276 static void
2277 dp_pktlog_msg_handler(struct htt_soc *soc,
2278 		      uint32_t *msg_word)
2279 {
2280 	uint8_t pdev_id;
2281 	uint8_t target_pdev_id;
2282 	uint32_t *pl_hdr;
2283 
2284 	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2285 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2286 							 target_pdev_id);
2287 	pl_hdr = (msg_word + 1);
2288 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2289 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2290 		pdev_id);
2291 }
2292 #else
/* Stub when pktlog/WDI events are compiled out: pktlog messages dropped. */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
2298 #endif
2299 
2300 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
2301 /*
2302  * dp_vdev_txrx_hw_stats_handler - Handle vdev stats received from FW
2303  * @soc - htt soc handle
2304  * @ msg_word - buffer containing stats
2305  *
2306  * Return: void
2307  */
2308 static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
2309 					  uint32_t *msg_word)
2310 {
2311 	struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
2312 	uint8_t pdev_id;
2313 	uint8_t vdev_id;
2314 	uint8_t target_pdev_id;
2315 	uint16_t payload_size;
2316 	struct dp_pdev *pdev;
2317 	struct dp_vdev *vdev;
2318 	uint8_t *tlv_buf;
2319 	uint32_t *tlv_buf_temp;
2320 	uint32_t *tag_buf;
2321 	htt_tlv_tag_t tlv_type;
2322 	uint16_t tlv_length;
2323 	uint64_t pkt_count = 0;
2324 	uint64_t byte_count = 0;
2325 	uint64_t soc_drop_cnt = 0;
2326 	struct cdp_pkt_info tx_comp = { 0 };
2327 	struct cdp_pkt_info tx_failed =  { 0 };
2328 
2329 	target_pdev_id =
2330 		HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PDEV_ID_GET(*msg_word);
2331 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(dpsoc,
2332 							 target_pdev_id);
2333 
2334 	if (pdev_id >= MAX_PDEV_CNT)
2335 		return;
2336 
2337 	pdev = dpsoc->pdev_list[pdev_id];
2338 	if (!pdev) {
2339 		dp_err("PDEV is NULL for pdev_id:%d", pdev_id);
2340 		return;
2341 	}
2342 
2343 	payload_size =
2344 	HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PAYLOAD_SIZE_GET(*msg_word);
2345 
2346 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2347 			   (void *)msg_word, payload_size + 16);
2348 
2349 	/* Adjust msg_word to point to the first TLV in buffer */
2350 	msg_word = msg_word + 4;
2351 
2352 	/* Parse the received buffer till payload size reaches 0 */
2353 	while (payload_size > 0) {
2354 		tlv_buf = (uint8_t *)msg_word;
2355 		tlv_buf_temp = msg_word;
2356 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
2357 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
2358 
2359 		/* Add header size to tlv length*/
2360 		tlv_length += 4;
2361 
2362 		switch (tlv_type) {
2363 		case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
2364 		{
2365 			tag_buf = tlv_buf_temp +
2366 					HTT_VDEV_STATS_GET_INDEX(SOC_DROP_CNT);
2367 			soc_drop_cnt = HTT_VDEV_GET_STATS_U64(tag_buf);
2368 			DP_STATS_UPD(dpsoc, tx.tqm_drop_no_peer, soc_drop_cnt);
2369 			break;
2370 		}
2371 		case HTT_STATS_VDEV_TXRX_STATS_HW_STATS_TAG:
2372 		{
2373 			tag_buf = tlv_buf_temp +
2374 					HTT_VDEV_STATS_GET_INDEX(VDEV_ID);
2375 			vdev_id = (uint8_t)(*tag_buf);
2376 			vdev = dp_vdev_get_ref_by_id(dpsoc, vdev_id,
2377 						     DP_MOD_ID_HTT);
2378 
2379 			if (!vdev)
2380 				goto invalid_vdev;
2381 
2382 			/* Extract received packet count from buffer */
2383 			tag_buf = tlv_buf_temp +
2384 					HTT_VDEV_STATS_GET_INDEX(RX_PKT_CNT);
2385 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2386 			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.num, pkt_count);
2387 
2388 			/* Extract received packet byte count from buffer */
2389 			tag_buf = tlv_buf_temp +
2390 					HTT_VDEV_STATS_GET_INDEX(RX_BYTE_CNT);
2391 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2392 			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.bytes, byte_count);
2393 
2394 			/* Extract tx success packet count from buffer */
2395 			tag_buf = tlv_buf_temp +
2396 				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_PKT_CNT);
2397 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2398 			tx_comp.num = pkt_count;
2399 
2400 			/* Extract tx success packet byte count from buffer */
2401 			tag_buf = tlv_buf_temp +
2402 				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_BYTE_CNT);
2403 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2404 			tx_comp.bytes = byte_count;
2405 
2406 			/* Extract tx retry packet count from buffer */
2407 			tag_buf = tlv_buf_temp +
2408 				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_PKT_CNT);
2409 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2410 			tx_comp.num += pkt_count;
2411 			tx_failed.num = pkt_count;
2412 
2413 			/* Extract tx retry packet byte count from buffer */
2414 			tag_buf = tlv_buf_temp +
2415 				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_BYTE_CNT);
2416 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2417 			tx_comp.bytes += byte_count;
2418 			tx_failed.bytes = byte_count;
2419 
2420 			/* Extract tx drop packet count from buffer */
2421 			tag_buf = tlv_buf_temp +
2422 				HTT_VDEV_STATS_GET_INDEX(TX_DROP_PKT_CNT);
2423 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2424 			tx_comp.num += pkt_count;
2425 			tx_failed.num += pkt_count;
2426 
2427 			/* Extract tx drop packet byte count from buffer */
2428 			tag_buf = tlv_buf_temp +
2429 				HTT_VDEV_STATS_GET_INDEX(TX_DROP_BYTE_CNT);
2430 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2431 			tx_comp.bytes += byte_count;
2432 			tx_failed.bytes += byte_count;
2433 
2434 			/* Extract tx age-out packet count from buffer */
2435 			tag_buf = tlv_buf_temp +
2436 				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_PKT_CNT);
2437 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2438 			tx_comp.num += pkt_count;
2439 			tx_failed.num += pkt_count;
2440 
2441 			/* Extract tx age-out packet byte count from buffer */
2442 			tag_buf = tlv_buf_temp +
2443 				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_BYTE_CNT);
2444 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2445 			tx_comp.bytes += byte_count;
2446 			tx_failed.bytes += byte_count;
2447 
2448 			/* Extract tqm bypass packet count from buffer */
2449 			tag_buf = tlv_buf_temp +
2450 				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_PKT_CNT);
2451 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2452 			tx_comp.num += pkt_count;
2453 
2454 			/* Extract tx bypass packet byte count from buffer */
2455 			tag_buf = tlv_buf_temp +
2456 				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_BYTE_CNT);
2457 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2458 			tx_comp.bytes += byte_count;
2459 
2460 			DP_STATS_UPD(vdev, tx.comp_pkt.num, tx_comp.num);
2461 			DP_STATS_UPD(vdev, tx.comp_pkt.bytes, tx_comp.bytes);
2462 
2463 			DP_STATS_UPD(vdev, tx.tx_failed, tx_failed.num);
2464 
2465 			dp_vdev_unref_delete(dpsoc, vdev, DP_MOD_ID_HTT);
2466 			break;
2467 		}
2468 		default:
2469 			qdf_assert(0);
2470 		}
2471 invalid_vdev:
2472 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2473 		payload_size -= tlv_length;
2474 	}
2475 }
2476 #else
2477 static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
2478 					  uint32_t *msg_word)
2479 {}
2480 #endif
2481 
2482 #ifdef CONFIG_SAWF_DEF_QUEUES
/* Forward the SAWF default-queues map report configuration message to the
 * SAWF HTT handler.
 */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_def_queues_map_report_conf(soc, msg_word, htt_t2h_msg);
}
2489 #else
/* Stub when CONFIG_SAWF_DEF_QUEUES is disabled: message is ignored. */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{}
2494 #endif
2495 
2496 #ifdef CONFIG_SAWF
2497 /*
2498  * dp_sawf_msduq_map() - Msdu queue creation information received
2499  * from target
2500  * @soc: soc handle.
2501  * @msg_word: Pointer to htt msg word.
2502  * @htt_t2h_msg: HTT message nbuf
2503  *
2504  * @return: void
2505  */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{
	/* Thin wrapper: hand the MSDU-queue map message to the SAWF layer. */
	dp_htt_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
}
2511 
2512 /*
2513  * dp_sawf_mpdu_stats_handler() - HTT message handler for MPDU stats
2514  * @soc: soc handle.
2515  * @htt_t2h_msg: HTT message nbuf
2516  *
2517  * @return: void
2518  */
static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
				       qdf_nbuf_t htt_t2h_msg)
{
	/* Thin wrapper: hand the MPDU stats message to the SAWF layer. */
	dp_sawf_htt_mpdu_stats_handler(soc, htt_t2h_msg);
}
2524 #else
/* Stub when CONFIG_SAWF is disabled: MSDU-queue map messages are ignored. */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{}
2528 
/* Stub when CONFIG_SAWF is disabled: MPDU stats messages are ignored. */
static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
				       qdf_nbuf_t htt_t2h_msg)
{}
2532 #endif
2533 
/*
 * time_allow_print() - rate-limit backpressure prints for a ring
 * @htt_bp_handler:	per-ring backpressure tracking array
 * @ring_id:		ring_id (index into the array)
 * @th_time:		threshold duration after which printing is re-allowed
 *
 * Return: 1 for successfully saving timestamp in array
 *	and 0 for timestamp falling within the suppression window
 */
2542 static bool time_allow_print(struct bp_handler *htt_bp_handler,
2543 			     u_int8_t ring_id, u_int32_t th_time)
2544 {
2545 	unsigned long tstamp;
2546 	struct bp_handler *path = &htt_bp_handler[ring_id];
2547 
2548 	tstamp = qdf_get_system_timestamp();
2549 
2550 	if (!path)
2551 		return 0; //unable to print backpressure messages
2552 
2553 	if (path->bp_start_tt == -1) {
2554 		path->bp_start_tt = tstamp;
2555 		path->bp_duration = 0;
2556 		path->bp_last_tt = tstamp;
2557 		path->bp_counter = 1;
2558 		return 1;
2559 	}
2560 
2561 	path->bp_duration = tstamp - path->bp_start_tt;
2562 	path->bp_last_tt = tstamp;
2563 	path->bp_counter++;
2564 
2565 	if (path->bp_duration >= th_time) {
2566 		path->bp_start_tt = -1;
2567 		return 1;
2568 	}
2569 
2570 	return 0;
2571 }
2572 
/* dp_htt_alert_print() - emit the backpressure alert lines for one ring,
 * including the tracking counters recorded by time_allow_print().
 */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       struct dp_pdev *pdev, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time,
			       struct bp_handler *htt_bp_handler,
			       char *ring_stype)
{
	dp_alert("seq_num %u msg_type: %d pdev_id: %d ring_type: %s ",
		 pdev->bkp_stats.seq_num, msg_type, pdev->pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
	dp_alert("last_bp_event: %ld, total_bp_duration: %ld, bp_counter: %ld",
		 htt_bp_handler[ring_id].bp_last_tt,
		 htt_bp_handler[ring_id].bp_duration,
		 htt_bp_handler[ring_id].bp_counter);
}
2589 
/**
 * dp_get_srng_ring_state_from_hal(): Get hal level ring stats
 * @soc: DP_SOC handle
 * @pdev: DP_pdev handle
 * @srng: DP_SRNG handle
 * @ring_type: srng src/dst ring
 * @state: ring state buffer to fill
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL on bad input
 */
2598 static QDF_STATUS
2599 dp_get_srng_ring_state_from_hal(struct dp_soc *soc,
2600 				struct dp_pdev *pdev,
2601 				struct dp_srng *srng,
2602 				enum hal_ring_type ring_type,
2603 				struct dp_srng_ring_state *state)
2604 {
2605 	struct hal_soc *hal_soc;
2606 
2607 	if (!soc || !srng || !srng->hal_srng || !state)
2608 		return QDF_STATUS_E_INVAL;
2609 
2610 	hal_soc = (struct hal_soc *)soc->hal_soc;
2611 
2612 	hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &state->sw_tail,
2613 			&state->sw_head);
2614 
2615 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &state->hw_head,
2616 			&state->hw_tail, ring_type);
2617 
2618 	state->ring_type = ring_type;
2619 
2620 	return QDF_STATUS_SUCCESS;
2621 }
2622 
2623 #ifdef QCA_MONITOR_PKT_SUPPORT
/* dp_queue_mon_ring_stats() - append monitor-ring state snapshots to the
 * soc_srngs_state array, advancing *num_srng for each successful capture.
 */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
	QDF_STATUS status;

	/* Monitor rings are only present when RXDMA1 is enabled. */
	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_buf_ring[lmac_id],
			 RXDMA_MONITOR_BUF,
			 &soc_srngs_state->ring_state[*num_srng]);

		/* NOTE(review): the increment lives inside qdf_assert_always;
		 * assumes that macro always evaluates its expression (its
		 * name suggests so) - confirm against the QDF headers.
		 */
		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);

		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_dst_ring[lmac_id],
			 RXDMA_MONITOR_DST,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);

		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_desc_ring[lmac_id],
			 RXDMA_MONITOR_DESC,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
	}
}
2660 #else
/* Stub when QCA_MONITOR_PKT_SUPPORT is disabled: no monitor rings to queue. */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
}
2667 #endif
2668 
2669 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
/* Capture the TCL command/credit ring state (ring present on this config). */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_cmd_credit_ring,
					       TCL_CMD_CREDIT, ring_state);
}
2678 #else
/* TCL command/credit SRNG disabled on this config: nothing to capture. */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
2685 #endif
2686 
2687 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
/* Capture the TCL status ring state (ring present on this config). */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_status_ring,
					       TCL_STATUS, ring_state);
}
2696 #else
/* TCL status SRNG disabled on this config: nothing to capture. */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
2703 #endif
2704 
/**
 * dp_queue_ring_stats(): Enqueue pdev hal level ring stats for the
 * backpressure stats work
 * @pdev: DP_pdev handle
 *
 * Return: void
 */
2711 static void dp_queue_ring_stats(struct dp_pdev *pdev)
2712 {
2713 	uint32_t i;
2714 	int mac_id;
2715 	int lmac_id;
2716 	uint32_t j = 0;
2717 	struct dp_soc *soc = pdev->soc;
2718 	struct dp_soc_srngs_state * soc_srngs_state = NULL;
2719 	struct dp_soc_srngs_state *drop_srngs_state = NULL;
2720 	QDF_STATUS status;
2721 
2722 	soc_srngs_state = qdf_mem_malloc(sizeof(struct dp_soc_srngs_state));
2723 	if (!soc_srngs_state) {
2724 		dp_htt_alert("Memory alloc failed for back pressure event");
2725 		return;
2726 	}
2727 
2728 	status = dp_get_srng_ring_state_from_hal
2729 				(pdev->soc, pdev,
2730 				 &pdev->soc->reo_exception_ring,
2731 				 REO_EXCEPTION,
2732 				 &soc_srngs_state->ring_state[j]);
2733 
2734 	if (status == QDF_STATUS_SUCCESS)
2735 		qdf_assert_always(++j < DP_MAX_SRNGS);
2736 
2737 	status = dp_get_srng_ring_state_from_hal
2738 				(pdev->soc, pdev,
2739 				 &pdev->soc->reo_reinject_ring,
2740 				 REO_REINJECT,
2741 				 &soc_srngs_state->ring_state[j]);
2742 
2743 	if (status == QDF_STATUS_SUCCESS)
2744 		qdf_assert_always(++j < DP_MAX_SRNGS);
2745 
2746 	status = dp_get_srng_ring_state_from_hal
2747 				(pdev->soc, pdev,
2748 				 &pdev->soc->reo_cmd_ring,
2749 				 REO_CMD,
2750 				 &soc_srngs_state->ring_state[j]);
2751 
2752 	if (status == QDF_STATUS_SUCCESS)
2753 		qdf_assert_always(++j < DP_MAX_SRNGS);
2754 
2755 	status = dp_get_srng_ring_state_from_hal
2756 				(pdev->soc, pdev,
2757 				 &pdev->soc->reo_status_ring,
2758 				 REO_STATUS,
2759 				 &soc_srngs_state->ring_state[j]);
2760 
2761 	if (status == QDF_STATUS_SUCCESS)
2762 		qdf_assert_always(++j < DP_MAX_SRNGS);
2763 
2764 	status = dp_get_srng_ring_state_from_hal
2765 				(pdev->soc, pdev,
2766 				 &pdev->soc->rx_rel_ring,
2767 				 WBM2SW_RELEASE,
2768 				 &soc_srngs_state->ring_state[j]);
2769 
2770 	if (status == QDF_STATUS_SUCCESS)
2771 		qdf_assert_always(++j < DP_MAX_SRNGS);
2772 
2773 	status = dp_get_tcl_cmd_cred_ring_state_from_hal
2774 				(pdev, &soc_srngs_state->ring_state[j]);
2775 	if (status == QDF_STATUS_SUCCESS)
2776 		qdf_assert_always(++j < DP_MAX_SRNGS);
2777 
2778 	status = dp_get_tcl_status_ring_state_from_hal
2779 				(pdev, &soc_srngs_state->ring_state[j]);
2780 	if (status == QDF_STATUS_SUCCESS)
2781 		qdf_assert_always(++j < DP_MAX_SRNGS);
2782 
2783 	status = dp_get_srng_ring_state_from_hal
2784 				(pdev->soc, pdev,
2785 				 &pdev->soc->wbm_desc_rel_ring,
2786 				 SW2WBM_RELEASE,
2787 				 &soc_srngs_state->ring_state[j]);
2788 
2789 	if (status == QDF_STATUS_SUCCESS)
2790 		qdf_assert_always(++j < DP_MAX_SRNGS);
2791 
2792 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
2793 		status = dp_get_srng_ring_state_from_hal
2794 				(pdev->soc, pdev,
2795 				 &pdev->soc->reo_dest_ring[i],
2796 				 REO_DST,
2797 				 &soc_srngs_state->ring_state[j]);
2798 
2799 		if (status == QDF_STATUS_SUCCESS)
2800 			qdf_assert_always(++j < DP_MAX_SRNGS);
2801 	}
2802 
2803 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
2804 		status = dp_get_srng_ring_state_from_hal
2805 				(pdev->soc, pdev,
2806 				 &pdev->soc->tcl_data_ring[i],
2807 				 TCL_DATA,
2808 				 &soc_srngs_state->ring_state[j]);
2809 
2810 		if (status == QDF_STATUS_SUCCESS)
2811 			qdf_assert_always(++j < DP_MAX_SRNGS);
2812 	}
2813 
2814 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
2815 		status = dp_get_srng_ring_state_from_hal
2816 				(pdev->soc, pdev,
2817 				 &pdev->soc->tx_comp_ring[i],
2818 				 WBM2SW_RELEASE,
2819 				 &soc_srngs_state->ring_state[j]);
2820 
2821 		if (status == QDF_STATUS_SUCCESS)
2822 			qdf_assert_always(++j < DP_MAX_SRNGS);
2823 	}
2824 
2825 	lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id);
2826 	status = dp_get_srng_ring_state_from_hal
2827 				(pdev->soc, pdev,
2828 				 &pdev->soc->rx_refill_buf_ring
2829 				 [lmac_id],
2830 				 RXDMA_BUF,
2831 				 &soc_srngs_state->ring_state[j]);
2832 
2833 	if (status == QDF_STATUS_SUCCESS)
2834 		qdf_assert_always(++j < DP_MAX_SRNGS);
2835 
2836 	status = dp_get_srng_ring_state_from_hal
2837 				(pdev->soc, pdev,
2838 				 &pdev->rx_refill_buf_ring2,
2839 				 RXDMA_BUF,
2840 				 &soc_srngs_state->ring_state[j]);
2841 
2842 	if (status == QDF_STATUS_SUCCESS)
2843 		qdf_assert_always(++j < DP_MAX_SRNGS);
2844 
2845 
2846 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2847 		dp_get_srng_ring_state_from_hal
2848 				(pdev->soc, pdev,
2849 				 &pdev->rx_mac_buf_ring[i],
2850 				 RXDMA_BUF,
2851 				 &soc_srngs_state->ring_state[j]);
2852 
2853 		if (status == QDF_STATUS_SUCCESS)
2854 			qdf_assert_always(++j < DP_MAX_SRNGS);
2855 	}
2856 
2857 	for (mac_id = 0;
2858 	     mac_id  < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
2859 	     mac_id++) {
2860 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2861 						     mac_id, pdev->pdev_id);
2862 
2863 		dp_queue_mon_ring_stats(pdev, lmac_id, &j,
2864 					soc_srngs_state);
2865 
2866 		status = dp_get_srng_ring_state_from_hal
2867 			(pdev->soc, pdev,
2868 			 &pdev->soc->rxdma_mon_status_ring[lmac_id],
2869 			 RXDMA_MONITOR_STATUS,
2870 			 &soc_srngs_state->ring_state[j]);
2871 
2872 		if (status == QDF_STATUS_SUCCESS)
2873 			qdf_assert_always(++j < DP_MAX_SRNGS);
2874 	}
2875 
2876 	for (i = 0; i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
2877 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2878 						     i, pdev->pdev_id);
2879 
2880 		status = dp_get_srng_ring_state_from_hal
2881 				(pdev->soc, pdev,
2882 				 &pdev->soc->rxdma_err_dst_ring
2883 				 [lmac_id],
2884 				 RXDMA_DST,
2885 				 &soc_srngs_state->ring_state[j]);
2886 
2887 		if (status == QDF_STATUS_SUCCESS)
2888 			qdf_assert_always(++j < DP_MAX_SRNGS);
2889 	}
2890 	soc_srngs_state->max_ring_id = j;
2891 
2892 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
2893 
2894 	soc_srngs_state->seq_num = pdev->bkp_stats.seq_num;
2895 
2896 	if (pdev->bkp_stats.queue_depth >= HTT_BKP_STATS_MAX_QUEUE_DEPTH) {
2897 		drop_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
2898 		qdf_assert_always(drop_srngs_state);
2899 		TAILQ_REMOVE(&pdev->bkp_stats.list, drop_srngs_state,
2900 			     list_elem);
2901 		qdf_mem_free(drop_srngs_state);
2902 		pdev->bkp_stats.queue_depth--;
2903 	}
2904 
2905 	pdev->bkp_stats.queue_depth++;
2906 	TAILQ_INSERT_TAIL(&pdev->bkp_stats.list, soc_srngs_state,
2907 			  list_elem);
2908 	pdev->bkp_stats.seq_num++;
2909 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
2910 
2911 	qdf_queue_work(0, pdev->bkp_stats.work_queue,
2912 		       &pdev->bkp_stats.work);
2913 }
2914 
2915 /*
2916  * dp_htt_bkp_event_alert() - htt backpressure event alert
2917  * @msg_word:	htt packet context
2918  * @htt_soc:	HTT SOC handle
2919  *
2920  * Return: after attempting to print stats
2921  */
2922 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
2923 {
2924 	u_int8_t ring_type;
2925 	u_int8_t pdev_id;
2926 	uint8_t target_pdev_id;
2927 	u_int8_t ring_id;
2928 	u_int16_t hp_idx;
2929 	u_int16_t tp_idx;
2930 	u_int32_t bkp_time;
2931 	u_int32_t th_time;
2932 	enum htt_t2h_msg_type msg_type;
2933 	struct dp_soc *dpsoc;
2934 	struct dp_pdev *pdev;
2935 	struct dp_htt_timestamp *radio_tt;
2936 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2937 
2938 
2939 	if (!soc)
2940 		return;
2941 
2942 	dpsoc = (struct dp_soc *)soc->dp_soc;
2943 	soc_cfg_ctx = dpsoc->wlan_cfg_ctx;
2944 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2945 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
2946 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
2947 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2948 							 target_pdev_id);
2949 	if (pdev_id >= MAX_PDEV_CNT) {
2950 		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
2951 		return;
2952 	}
2953 
2954 	th_time = wlan_cfg_time_control_bp(soc_cfg_ctx);
2955 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
2956 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
2957 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
2958 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
2959 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
2960 	radio_tt = &soc->pdevid_tt[pdev_id];
2961 
2962 	switch (ring_type) {
2963 	case HTT_SW_RING_TYPE_UMAC:
2964 		if (!time_allow_print(radio_tt->umac_path, ring_id, th_time))
2965 			return;
2966 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2967 				   bkp_time, radio_tt->umac_path,
2968 				   "HTT_SW_RING_TYPE_UMAC");
2969 	break;
2970 	case HTT_SW_RING_TYPE_LMAC:
2971 		if (!time_allow_print(radio_tt->lmac_path, ring_id, th_time))
2972 			return;
2973 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2974 				   bkp_time, radio_tt->lmac_path,
2975 				   "HTT_SW_RING_TYPE_LMAC");
2976 	break;
2977 	default:
2978 		dp_alert("Invalid ring type: %d", ring_type);
2979 	break;
2980 	}
2981 
2982 	dp_queue_ring_stats(pdev);
2983 }
2984 
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/*
 * dp_offload_ind_handler() - offload msg handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Translates the target pdev id carried in the message into the host pdev
 * id and forwards the payload to the packet-capture offload WDI event.
 *
 * Return: None
 */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
	u_int8_t target_pdev_id;
	u_int8_t host_pdev_id;

	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	host_pdev_id =
		dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
						       target_pdev_id);

	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA,
			     soc->dp_soc, msg_word, HTT_INVALID_VDEV,
			     WDI_NO_VAL, host_pdev_id);
}
#else
/* Packet capture v2 not compiled in: offload indications are ignored */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
}
#endif
3012 
3013 #ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
/*
 * dp_update_mlo_ts_offset() - publish the MLO timestamp offset
 * @soc: DP SOC handle
 * @ts_lo: lower 32 bits of the offset
 * @ts_hi: upper 32 bits of the offset
 *
 * Combines the two 32-bit halves into one 64-bit offset and hands it to
 * the cdp MLO ops callback, if registered.
 *
 * Return: None
 */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{
	uint64_t mlo_offset;

	mlo_offset = ((uint64_t)(ts_hi) << 32 | ts_lo);

	/*
	 * mlo_ops and its callback are registered separately from the rest
	 * of the cdp ops; guard against a partially initialized ops table
	 * to avoid a NULL pointer dereference.
	 */
	if (!soc->cdp_soc.ops->mlo_ops ||
	    !soc->cdp_soc.ops->mlo_ops->mlo_update_mlo_ts_offset)
		return;

	soc->cdp_soc.ops->mlo_ops->mlo_update_mlo_ts_offset
		((struct cdp_soc_t *)soc, mlo_offset);
}
#else
/* Single-chip build: there is no cross-chip MLO timestamp offset to push */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{}
#endif
/*
 * dp_htt_mlo_peer_map_handler() - handle HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP
 * @soc: HTT SOC handle
 * @msg_word: Pointer to the message payload (first word)
 *
 * Extracts the MLO peer id, deswizzled MAC address, per-flow AST override
 * info and the per-link TLVs from the message, then forwards everything to
 * dp_rx_mlo_peer_map_handler().
 *
 * Return: None
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
	uint8_t *mlo_peer_mac_addr;
	uint16_t mlo_peer_id;
	uint8_t num_links;
	struct dp_mlo_flow_override_info mlo_flow_info[DP_MLO_FLOW_INFO_MAX];
	struct dp_mlo_link_info mlo_link_info[DP_MAX_MLO_LINKS];
	MLO_PEER_MAP_TLV_TAG_ID tlv_type = 0xff;
	uint16_t tlv_len = 0;
	int i = 0;

	mlo_peer_id = HTT_RX_MLO_PEER_MAP_MLO_PEER_ID_GET(*msg_word);
	num_links =
		HTT_RX_MLO_PEER_MAP_NUM_LOGICAL_LINKS_GET(*msg_word);
	/* MAC address occupies words 1-2; deswizzle into the local buffer */
	mlo_peer_mac_addr =
	htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
				   &mac_addr_deswizzle_buf[0]);

	mlo_flow_info[0].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[0].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	/*
	 * NOTE(review): flow entries [1] and [2] below read the same word
	 * (msg_word + 3) as entry [0], so all three flows end up with
	 * identical values. This looks like a copy-paste of the word offset
	 * (subsequent words would be expected for subsequent flows) — TODO
	 * confirm against the HTT MLO_RX_PEER_MAP message definition.
	 */
	mlo_flow_info[1].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[1].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	mlo_flow_info[2].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[2].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	/* Per-link info is carried as TLVs starting at word 8 */
	msg_word = msg_word + 8;
	while (msg_word && (i < DP_MAX_MLO_LINKS)) {
		/* Defaults for links not described by a TLV */
		mlo_link_info[i].peer_chip_id = 0xFF;
		mlo_link_info[i].vdev_id = 0xFF;

		tlv_type = HTT_RX_MLO_PEER_MAP_TLV_TAG_GET(*msg_word);
		tlv_len = HTT_RX_MLO_PEER_MAP_TLV_LENGTH_GET(*msg_word);

		if (tlv_len == 0) {
			dp_err("TLV Length is 0");
			break;
		}

		if (tlv_type == MLO_PEER_MAP_TLV_STRUCT_SOC_VDEV_PEER_IDS) {
			mlo_link_info[i].peer_chip_id =
				HTT_RX_MLO_PEER_MAP_CHIP_ID_GET(
							*(msg_word + 1));
			mlo_link_info[i].vdev_id =
				HTT_RX_MLO_PEER_MAP_VDEV_ID_GET(
							*(msg_word + 1));
		}
		/* Add header size to tlv length */
		tlv_len = tlv_len + HTT_TLV_HDR_LEN;
		msg_word = (uint32_t *)(((uint8_t *)msg_word) + tlv_len);
		i++;
	}

	dp_rx_mlo_peer_map_handler(soc->dp_soc, mlo_peer_id,
				   mlo_peer_mac_addr,
				   mlo_flow_info, mlo_link_info);
}
3113 
3114 static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
3115 					  uint32_t *msg_word)
3116 {
3117 	uint16_t mlo_peer_id;
3118 
3119 	mlo_peer_id = HTT_RX_MLO_PEER_UNMAP_MLO_PEER_ID_GET(*msg_word);
3120 	dp_rx_mlo_peer_unmap_handler(soc->dp_soc, mlo_peer_id);
3121 }
3122 
/*
 * dp_rx_mlo_timestamp_ind_handler() - handle MLO_TIMESTAMP_OFFSET_IND
 * @soc: DP SOC handle
 * @msg_word: Pointer to the message payload
 *
 * Validates the pdev carried in the message, raises the WDI_EVENT_MLO_TSTMP
 * event, records all timestamp/offset fields into pdev->timestamp under
 * the htt_stats lock, and finally publishes the combined 64-bit MLO offset
 * via dp_update_mlo_ts_offset().
 *
 * Return: None
 */
static void
dp_rx_mlo_timestamp_ind_handler(struct dp_soc *soc,
				uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint8_t target_pdev_id;
	struct dp_pdev *pdev;

	if (!soc)
		return;

	target_pdev_id = HTT_T2H_MLO_TIMESTAMP_OFFSET_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT) {
		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
		return;
	}

	pdev = (struct dp_pdev *)soc->pdev_list[pdev_id];

	if (!pdev) {
		dp_err("Invalid pdev");
		return;
	}
	dp_wdi_event_handler(WDI_EVENT_MLO_TSTMP, soc,
			     msg_word, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);

	/* pdev->timestamp is updated atomically w.r.t. readers of the same
	 * lock; all fields come straight out of the message words below.
	 */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	pdev->timestamp.msg_type =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MSG_TYPE_GET(*msg_word);
	pdev->timestamp.pdev_id = pdev_id;
	pdev->timestamp.chip_id =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_CHIP_ID_GET(*msg_word);
	pdev->timestamp.mac_clk_freq =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MAC_CLK_FREQ_MHZ_GET(*msg_word);
	/* Words 1-5 carry full 32-bit values, no bitfield extraction needed */
	pdev->timestamp.sync_tstmp_lo_us = *(msg_word + 1);
	pdev->timestamp.sync_tstmp_hi_us = *(msg_word + 2);
	pdev->timestamp.mlo_offset_lo_us = *(msg_word + 3);
	pdev->timestamp.mlo_offset_hi_us = *(msg_word + 4);
	pdev->timestamp.mlo_offset_clks  = *(msg_word + 5);
	pdev->timestamp.mlo_comp_us =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_US_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_clks =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_CLKS_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_timer =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_PERIOD_US_GET(
							*(msg_word + 7));

	dp_htt_debug("tsf_lo=%d tsf_hi=%d, mlo_ofst_lo=%d, mlo_ofst_hi=%d\n",
		     pdev->timestamp.sync_tstmp_lo_us,
		     pdev->timestamp.sync_tstmp_hi_us,
		     pdev->timestamp.mlo_offset_lo_us,
		     pdev->timestamp.mlo_offset_hi_us);

	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	/* Push the new offset to the MLO manager (multi-chip builds only) */
	dp_update_mlo_ts_offset(soc,
				pdev->timestamp.mlo_offset_lo_us,
				pdev->timestamp.mlo_offset_hi_us);
}
#else
/*
 * Stubs for builds without WLAN_FEATURE_11BE_MLO: the firmware must never
 * send MLO peer map/unmap or timestamp-offset messages in this
 * configuration, so receiving one indicates a host/target feature
 * mismatch and is treated as fatal.
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
					 uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void
dp_rx_mlo_timestamp_ind_handler(void *soc_handle,
				uint32_t *msg_word)
{
	qdf_assert_always(0);
}
#endif
3208 
3209 /*
3210  * dp_htt_rx_addba_handler() - RX Addba HTT msg handler
3211  * @soc: DP Soc handler
3212  * @peer_id: ID of peer
3213  * @tid: TID number
3214  * @win_sz: BA window size
3215  *
3216  * Return: None
3217  */
3218 static void
3219 dp_htt_rx_addba_handler(struct dp_soc *soc, uint16_t peer_id,
3220 			uint8_t tid, uint16_t win_sz)
3221 {
3222 	uint16_t status;
3223 	struct dp_peer *peer;
3224 
3225 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3226 
3227 	if (!peer) {
3228 		dp_err("Peer not found peer id %d", peer_id);
3229 		return;
3230 	}
3231 
3232 	status = dp_addba_requestprocess_wifi3((struct cdp_soc_t *)soc,
3233 					       peer->mac_addr.raw,
3234 					       peer->vdev->vdev_id, 0,
3235 					       tid, 0, win_sz, 0xffff);
3236 
3237 	dp_addba_resp_tx_completion_wifi3(
3238 		(struct cdp_soc_t *)soc,
3239 		peer->mac_addr.raw, peer->vdev->vdev_id,
3240 		tid,
3241 		status);
3242 
3243 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3244 
3245 	dp_info("PeerID %d BAW %d TID %d stat %d",
3246 		peer_id, win_sz, tid, status);
3247 }
3248 
3249 /*
3250  * dp_htt_ppdu_id_fmt_handler() - PPDU ID Format handler
3251  * @htt_soc: HTT SOC handle
3252  * @msg_word: Pointer to payload
3253  *
3254  * Return: None
3255  */
3256 static void
3257 dp_htt_ppdu_id_fmt_handler(struct dp_soc *soc, uint32_t *msg_word)
3258 {
3259 	uint8_t msg_type, valid, bits, offset;
3260 
3261 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3262 
3263 	msg_word += HTT_PPDU_ID_FMT_IND_LINK_ID_OFFSET;
3264 	valid = HTT_PPDU_ID_FMT_IND_VALID_GET_BITS31_16(*msg_word);
3265 	bits = HTT_PPDU_ID_FMT_IND_BITS_GET_BITS31_16(*msg_word);
3266 	offset = HTT_PPDU_ID_FMT_IND_OFFSET_GET_BITS31_16(*msg_word);
3267 
3268 	dp_info("link_id: valid %u bits %u offset %u", valid, bits, offset);
3269 
3270 	if (valid) {
3271 		soc->link_id_offset = offset;
3272 		soc->link_id_bits = bits;
3273 	}
3274 }
3275 
3276 /*
3277  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
3278  * @context:	Opaque context (HTT SOC handle)
3279  * @pkt:	HTC packet
3280  */
3281 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
3282 {
3283 	struct htt_soc *soc = (struct htt_soc *) context;
3284 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
3285 	u_int32_t *msg_word;
3286 	enum htt_t2h_msg_type msg_type;
3287 	bool free_buf = true;
3288 
3289 	/* check for successful message reception */
3290 	if (pkt->Status != QDF_STATUS_SUCCESS) {
3291 		if (pkt->Status != QDF_STATUS_E_CANCELED)
3292 			soc->stats.htc_err_cnt++;
3293 
3294 		qdf_nbuf_free(htt_t2h_msg);
3295 		return;
3296 	}
3297 
3298 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
3299 
3300 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
3301 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3302 	htt_event_record(soc->htt_logger_handle,
3303 			 msg_type, (uint8_t *)msg_word);
3304 	switch (msg_type) {
3305 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
3306 	{
3307 		dp_htt_bkp_event_alert(msg_word, soc);
3308 		break;
3309 	}
3310 	case HTT_T2H_MSG_TYPE_PEER_MAP:
3311 		{
3312 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3313 			u_int8_t *peer_mac_addr;
3314 			u_int16_t peer_id;
3315 			u_int16_t hw_peer_id;
3316 			u_int8_t vdev_id;
3317 			u_int8_t is_wds;
3318 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
3319 
3320 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
3321 			hw_peer_id =
3322 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
3323 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
3324 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
3325 				(u_int8_t *) (msg_word+1),
3326 				&mac_addr_deswizzle_buf[0]);
3327 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3328 				QDF_TRACE_LEVEL_DEBUG,
3329 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3330 				peer_id, vdev_id);
3331 
3332 			/*
3333 			 * check if peer already exists for this peer_id, if so
3334 			 * this peer map event is in response for a wds peer add
3335 			 * wmi command sent during wds source port learning.
3336 			 * in this case just add the ast entry to the existing
3337 			 * peer ast_list.
3338 			 */
3339 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
3340 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
3341 					       vdev_id, peer_mac_addr, 0,
3342 					       is_wds);
3343 			break;
3344 		}
3345 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
3346 		{
3347 			u_int16_t peer_id;
3348 			u_int8_t vdev_id;
3349 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3350 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
3351 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
3352 
3353 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3354 						 vdev_id, mac_addr, 0,
3355 						 DP_PEER_WDS_COUNT_INVALID);
3356 			break;
3357 		}
3358 	case HTT_T2H_MSG_TYPE_SEC_IND:
3359 		{
3360 			u_int16_t peer_id;
3361 			enum cdp_sec_type sec_type;
3362 			int is_unicast;
3363 
3364 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
3365 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
3366 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
3367 			/* point to the first part of the Michael key */
3368 			msg_word++;
3369 			dp_rx_sec_ind_handler(
3370 				soc->dp_soc, peer_id, sec_type, is_unicast,
3371 				msg_word, msg_word + 2);
3372 			break;
3373 		}
3374 
3375 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
3376 		{
3377 			free_buf =
3378 				dp_monitor_ppdu_stats_ind_handler(soc,
3379 								  msg_word,
3380 								  htt_t2h_msg);
3381 			break;
3382 		}
3383 
3384 	case HTT_T2H_MSG_TYPE_PKTLOG:
3385 		{
3386 			dp_pktlog_msg_handler(soc, msg_word);
3387 			break;
3388 		}
3389 
3390 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3391 		{
3392 			/*
3393 			 * HTC maintains runtime pm count for H2T messages that
3394 			 * have a response msg from FW. This count ensures that
3395 			 * in the case FW does not sent out the response or host
3396 			 * did not process this indication runtime_put happens
3397 			 * properly in the cleanup path.
3398 			 */
3399 			if (htc_dec_return_runtime_cnt(soc->htc_soc) >= 0)
3400 				htc_pm_runtime_put(soc->htc_soc);
3401 			else
3402 				soc->stats.htt_ver_req_put_skip++;
3403 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3404 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3405 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
3406 				"target uses HTT version %d.%d; host uses %d.%d",
3407 				soc->tgt_ver.major, soc->tgt_ver.minor,
3408 				HTT_CURRENT_VERSION_MAJOR,
3409 				HTT_CURRENT_VERSION_MINOR);
3410 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3411 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3412 					QDF_TRACE_LEVEL_WARN,
3413 					"*** Incompatible host/target HTT versions!");
3414 			}
3415 			/* abort if the target is incompatible with the host */
3416 			qdf_assert(soc->tgt_ver.major ==
3417 				HTT_CURRENT_VERSION_MAJOR);
3418 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3419 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3420 					QDF_TRACE_LEVEL_INFO_LOW,
3421 					"*** Warning: host/target HTT versions"
3422 					" are different, though compatible!");
3423 			}
3424 			break;
3425 		}
3426 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3427 		{
3428 			uint16_t peer_id;
3429 			uint8_t tid;
3430 			uint16_t win_sz;
3431 
3432 			/*
3433 			 * Update REO Queue Desc with new values
3434 			 */
3435 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3436 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3437 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3438 
3439 			/*
3440 			 * Window size needs to be incremented by 1
3441 			 * since fw needs to represent a value of 256
3442 			 * using just 8 bits
3443 			 */
3444 			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
3445 						tid, win_sz + 1);
3446 			break;
3447 		}
3448 	case HTT_T2H_MSG_TYPE_RX_ADDBA_EXTN:
3449 		{
3450 			uint16_t peer_id;
3451 			uint8_t tid;
3452 			uint16_t win_sz;
3453 
3454 			peer_id = HTT_RX_ADDBA_EXTN_PEER_ID_GET(*msg_word);
3455 			tid = HTT_RX_ADDBA_EXTN_TID_GET(*msg_word);
3456 
3457 			msg_word++;
3458 			win_sz = HTT_RX_ADDBA_EXTN_WIN_SIZE_GET(*msg_word);
3459 
3460 			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
3461 						tid, win_sz);
3462 			break;
3463 		}
3464 	case HTT_T2H_PPDU_ID_FMT_IND:
3465 		{
3466 			dp_htt_ppdu_id_fmt_handler(soc->dp_soc, msg_word);
3467 			break;
3468 		}
3469 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3470 		{
3471 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3472 			break;
3473 		}
3474 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
3475 		{
3476 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3477 			u_int8_t *peer_mac_addr;
3478 			u_int16_t peer_id;
3479 			u_int16_t hw_peer_id;
3480 			u_int8_t vdev_id;
3481 			bool is_wds;
3482 			u_int16_t ast_hash;
3483 			struct dp_ast_flow_override_info ast_flow_info;
3484 
3485 			qdf_mem_set(&ast_flow_info, 0,
3486 					    sizeof(struct dp_ast_flow_override_info));
3487 
3488 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
3489 			hw_peer_id =
3490 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
3491 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
3492 			peer_mac_addr =
3493 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3494 						   &mac_addr_deswizzle_buf[0]);
3495 			is_wds =
3496 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
3497 			ast_hash =
3498 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
3499 			/*
3500 			 * Update 4 ast_index per peer, ast valid mask
3501 			 * and TID flow valid mask.
3502 			 * AST valid mask is 3 bit field corresponds to
3503 			 * ast_index[3:1]. ast_index 0 is always valid.
3504 			 */
3505 			ast_flow_info.ast_valid_mask =
3506 			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
3507 			ast_flow_info.ast_idx[0] = hw_peer_id;
3508 			ast_flow_info.ast_flow_mask[0] =
3509 			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
3510 			ast_flow_info.ast_idx[1] =
3511 			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
3512 			ast_flow_info.ast_flow_mask[1] =
3513 			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
3514 			ast_flow_info.ast_idx[2] =
3515 			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
3516 			ast_flow_info.ast_flow_mask[2] =
3517 			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
3518 			ast_flow_info.ast_idx[3] =
3519 			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
3520 			ast_flow_info.ast_flow_mask[3] =
3521 			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
3522 			/*
3523 			 * TID valid mask is applicable only
3524 			 * for HI and LOW priority flows.
3525 			 * tid_valid_mas is 8 bit field corresponds
3526 			 * to TID[7:0]
3527 			 */
3528 			ast_flow_info.tid_valid_low_pri_mask =
3529 			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
3530 			ast_flow_info.tid_valid_hi_pri_mask =
3531 			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));
3532 
3533 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3534 				  QDF_TRACE_LEVEL_DEBUG,
3535 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3536 				  peer_id, vdev_id);
3537 
3538 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3539 				  QDF_TRACE_LEVEL_INFO,
3540 				  "ast_idx[0] %d ast_idx[1] %d ast_idx[2] %d ast_idx[3] %d n",
3541 				  ast_flow_info.ast_idx[0],
3542 				  ast_flow_info.ast_idx[1],
3543 				  ast_flow_info.ast_idx[2],
3544 				  ast_flow_info.ast_idx[3]);
3545 
3546 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3547 					       hw_peer_id, vdev_id,
3548 					       peer_mac_addr, ast_hash,
3549 					       is_wds);
3550 
3551 			/*
3552 			 * Update ast indexes for flow override support
3553 			 * Applicable only for non wds peers
3554 			 */
3555 			if (!soc->dp_soc->ast_offload_support)
3556 				dp_peer_ast_index_flow_queue_map_create(
3557 						soc->dp_soc, is_wds,
3558 						peer_id, peer_mac_addr,
3559 						&ast_flow_info);
3560 
3561 			break;
3562 		}
3563 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
3564 		{
3565 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3566 			u_int8_t *mac_addr;
3567 			u_int16_t peer_id;
3568 			u_int8_t vdev_id;
3569 			u_int8_t is_wds;
3570 			u_int32_t free_wds_count;
3571 
3572 			peer_id =
3573 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
3574 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
3575 			mac_addr =
3576 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3577 						   &mac_addr_deswizzle_buf[0]);
3578 			is_wds =
3579 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
3580 			free_wds_count =
3581 			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));
3582 
3583 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3584 				  QDF_TRACE_LEVEL_INFO,
3585 				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
3586 				  peer_id, vdev_id);
3587 
3588 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3589 						 vdev_id, mac_addr,
3590 						 is_wds, free_wds_count);
3591 			break;
3592 		}
3593 	case HTT_T2H_MSG_TYPE_RX_DELBA:
3594 		{
3595 			uint16_t peer_id;
3596 			uint8_t tid;
3597 			uint8_t win_sz;
3598 			QDF_STATUS status;
3599 
3600 			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
3601 			tid = HTT_RX_DELBA_TID_GET(*msg_word);
3602 			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);
3603 
3604 			status = dp_rx_delba_ind_handler(
3605 				soc->dp_soc,
3606 				peer_id, tid, win_sz);
3607 
3608 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3609 				  QDF_TRACE_LEVEL_INFO,
3610 				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
3611 				  peer_id, win_sz, tid, status);
3612 			break;
3613 		}
3614 	case HTT_T2H_MSG_TYPE_RX_DELBA_EXTN:
3615 		{
3616 			uint16_t peer_id;
3617 			uint8_t tid;
3618 			uint16_t win_sz;
3619 			QDF_STATUS status;
3620 
3621 			peer_id = HTT_RX_DELBA_EXTN_PEER_ID_GET(*msg_word);
3622 			tid = HTT_RX_DELBA_EXTN_TID_GET(*msg_word);
3623 
3624 			msg_word++;
3625 			win_sz = HTT_RX_DELBA_EXTN_WIN_SIZE_GET(*msg_word);
3626 
3627 			status = dp_rx_delba_ind_handler(soc->dp_soc,
3628 							 peer_id, tid,
3629 							 win_sz);
3630 
3631 			dp_info("DELBA PeerID %d BAW %d TID %d stat %d",
3632 				peer_id, win_sz, tid, status);
3633 			break;
3634 		}
3635 	case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
3636 		{
3637 			uint16_t num_entries;
3638 			uint32_t cmem_ba_lo;
3639 			uint32_t cmem_ba_hi;
3640 
3641 			num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
3642 			cmem_ba_lo = *(msg_word + 1);
3643 			cmem_ba_hi = *(msg_word + 2);
3644 
3645 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3646 				  FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
3647 				  num_entries, cmem_ba_lo, cmem_ba_hi);
3648 
3649 			dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
3650 						     cmem_ba_lo, cmem_ba_hi);
3651 			break;
3652 		}
3653 	case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
3654 		{
3655 			dp_offload_ind_handler(soc, msg_word);
3656 			break;
3657 		}
3658 	case HTT_T2H_MSG_TYPE_PEER_MAP_V3:
3659 	{
3660 		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3661 		u_int8_t *peer_mac_addr;
3662 		u_int16_t peer_id;
3663 		u_int16_t hw_peer_id;
3664 		u_int8_t vdev_id;
3665 		uint8_t is_wds;
3666 		u_int16_t ast_hash = 0;
3667 
3668 		peer_id = HTT_RX_PEER_MAP_V3_SW_PEER_ID_GET(*msg_word);
3669 		vdev_id = HTT_RX_PEER_MAP_V3_VDEV_ID_GET(*msg_word);
3670 		peer_mac_addr =
3671 		htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3672 					   &mac_addr_deswizzle_buf[0]);
3673 		hw_peer_id = HTT_RX_PEER_MAP_V3_HW_PEER_ID_GET(*(msg_word + 3));
3674 		ast_hash = HTT_RX_PEER_MAP_V3_CACHE_SET_NUM_GET(*(msg_word + 3));
3675 		is_wds = HTT_RX_PEER_MAP_V3_NEXT_HOP_GET(*(msg_word + 4));
3676 
3677 		dp_htt_info("HTT_T2H_MSG_TYPE_PEER_MAP_V3 msg for peer id %d vdev id %d n",
3678 			    peer_id, vdev_id);
3679 
3680 		dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3681 				       hw_peer_id, vdev_id,
3682 				       peer_mac_addr, ast_hash,
3683 				       is_wds);
3684 
3685 		break;
3686 	}
3687 	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP:
3688 	{
3689 		dp_htt_mlo_peer_map_handler(soc, msg_word);
3690 		break;
3691 	}
3692 	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_UNMAP:
3693 	{
3694 		dp_htt_mlo_peer_unmap_handler(soc, msg_word);
3695 		break;
3696 	}
3697 	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
3698 	{
3699 		dp_rx_mlo_timestamp_ind_handler(soc->dp_soc, msg_word);
3700 		break;
3701 	}
3702 	case HTT_T2H_MSG_TYPE_VDEVS_TXRX_STATS_PERIODIC_IND:
3703 	{
3704 		dp_vdev_txrx_hw_stats_handler(soc, msg_word);
3705 		break;
3706 	}
3707 	case HTT_T2H_SAWF_DEF_QUEUES_MAP_REPORT_CONF:
3708 	{
3709 		dp_sawf_def_queues_update_map_report_conf(soc, msg_word,
3710 							  htt_t2h_msg);
3711 		break;
3712 	}
3713 	case HTT_T2H_SAWF_MSDUQ_INFO_IND:
3714 	{
3715 		dp_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
3716 		break;
3717 	}
3718 	case HTT_T2H_MSG_TYPE_STREAMING_STATS_IND:
3719 	{
3720 		dp_sawf_mpdu_stats_handler(soc, htt_t2h_msg);
3721 		break;
3722 	}
3723 
3724 	default:
3725 		break;
3726 	};
3727 
3728 	/* Free the indication buffer */
3729 	if (free_buf)
3730 		qdf_nbuf_free(htt_t2h_msg);
3731 }
3732 
3733 /*
3734  * dp_htt_h2t_full() - Send full handler (called from HTC)
3735  * @context:	Opaque context (HTT SOC handle)
3736  * @pkt:	HTC packet
3737  *
3738  * Return: enum htc_send_full_action
3739  */
3740 static enum htc_send_full_action
3741 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
3742 {
3743 	return HTC_SEND_FULL_KEEP;
3744 }
3745 
3746 /*
3747  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3748  * @context:	Opaque context (HTT SOC handle)
3749  * @nbuf:	nbuf containing T2H message
3750  * @pipe_id:	HIF pipe ID
3751  *
3752  * Return: QDF_STATUS
3753  *
3754  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3755  * will be used for packet log and other high-priority HTT messages. Proper
3756  * HTC connection to be added later once required FW changes are available
3757  */
3758 static QDF_STATUS
3759 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3760 {
3761 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3762 	HTC_PACKET htc_pkt;
3763 
3764 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3765 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3766 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3767 	htc_pkt.pPktContext = (void *)nbuf;
3768 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3769 
3770 	return rc;
3771 }
3772 
3773 /*
3774  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3775  * @htt_soc:	HTT SOC handle
3776  *
3777  * Return: QDF_STATUS
3778  */
3779 static QDF_STATUS
3780 htt_htc_soc_attach(struct htt_soc *soc)
3781 {
3782 	struct htc_service_connect_req connect;
3783 	struct htc_service_connect_resp response;
3784 	QDF_STATUS status;
3785 	struct dp_soc *dpsoc = soc->dp_soc;
3786 
3787 	qdf_mem_zero(&connect, sizeof(connect));
3788 	qdf_mem_zero(&response, sizeof(response));
3789 
3790 	connect.pMetaData = NULL;
3791 	connect.MetaDataLength = 0;
3792 	connect.EpCallbacks.pContext = soc;
3793 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3794 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3795 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3796 
3797 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3798 	connect.EpCallbacks.EpRecvRefill = NULL;
3799 
3800 	/* N/A, fill is done by HIF */
3801 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3802 
3803 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3804 	/*
3805 	 * Specify how deep to let a queue get before htc_send_pkt will
3806 	 * call the EpSendFull function due to excessive send queue depth.
3807 	 */
3808 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3809 
3810 	/* disable flow control for HTT data message service */
3811 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3812 
3813 	/* connect to control service */
3814 	connect.service_id = HTT_DATA_MSG_SVC;
3815 
3816 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3817 
3818 	if (status != QDF_STATUS_SUCCESS)
3819 		return status;
3820 
3821 	soc->htc_endpoint = response.Endpoint;
3822 
3823 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3824 
3825 	htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc);
3826 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3827 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3828 
3829 	return QDF_STATUS_SUCCESS; /* success */
3830 }
3831 
3832 /*
3833  * htt_soc_initialize() - SOC level HTT initialization
3834  * @htt_soc: Opaque htt SOC handle
3835  * @ctrl_psoc: Opaque ctrl SOC handle
3836  * @htc_soc: SOC level HTC handle
3837  * @hal_soc: Opaque HAL SOC handle
3838  * @osdev: QDF device
3839  *
3840  * Return: HTT handle on success; NULL on failure
3841  */
3842 void *
3843 htt_soc_initialize(struct htt_soc *htt_soc,
3844 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
3845 		   HTC_HANDLE htc_soc,
3846 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
3847 {
3848 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3849 
3850 	soc->osdev = osdev;
3851 	soc->ctrl_psoc = ctrl_psoc;
3852 	soc->htc_soc = htc_soc;
3853 	soc->hal_soc = hal_soc_hdl;
3854 
3855 	if (htt_htc_soc_attach(soc))
3856 		goto fail2;
3857 
3858 	return soc;
3859 
3860 fail2:
3861 	return NULL;
3862 }
3863 
/* htt_soc_htc_dealloc() - release HTC-related HTT SOC state:
 * the HTT event logger context, the misc HTC packet list, and the
 * preallocated HTC packet free pool.
 * @htt_handle: SOC level HTT handle
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
3870 
3871 /*
3872  * htt_soc_htc_prealloc() - HTC memory prealloc
3873  * @htt_soc: SOC level HTT handle
3874  *
3875  * Return: QDF_STATUS_SUCCESS on Success or
3876  * QDF_STATUS_E_NOMEM on allocation failure
3877  */
3878 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
3879 {
3880 	int i;
3881 
3882 	soc->htt_htc_pkt_freelist = NULL;
3883 	/* pre-allocate some HTC_PACKET objects */
3884 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3885 		struct dp_htt_htc_pkt_union *pkt;
3886 		pkt = qdf_mem_malloc(sizeof(*pkt));
3887 		if (!pkt)
3888 			return QDF_STATUS_E_NOMEM;
3889 
3890 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3891 	}
3892 	return QDF_STATUS_SUCCESS;
3893 }
3894 
3895 /*
3896  * htt_soc_detach() - Free SOC level HTT handle
3897  * @htt_hdl: HTT SOC handle
3898  */
3899 void htt_soc_detach(struct htt_soc *htt_hdl)
3900 {
3901 	int i;
3902 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
3903 
3904 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3905 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_path);
3906 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_path);
3907 	}
3908 
3909 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
3910 	qdf_mem_free(htt_handle);
3911 
3912 }
3913 
/**
 * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @stats_type_upload_mask: stats type requested by user
 * @config_param_0: extra configuration parameters
 * @config_param_1: extra configuration parameters
 * @config_param_2: extra configuration parameters
 * @config_param_3: extra configuration parameters
 * @cookie_val: cookie value carried in word 6 of the request
 * @cookie_msb: cookie MSB; the low bits are overwritten with the pdev id
 * @mac_id: mac number
 *
 * return: QDF STATUS
 */
QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint32_t config_param_0,
		uint32_t config_param_1, uint32_t config_param_2,
		uint32_t config_param_3, int cookie_val, int cookie_msb,
		uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask = 0;
	uint8_t *htt_logger_bufp;
	int mac_for_pdev;
	int target_pdev_id;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);

	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
	target_pdev_id =
	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);

	pdev_mask = 1 << target_pdev_id;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand head for HTT_EXT_STATS");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* keep a pointer to the HTT payload for the HTT event logger */
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);

	/* word 2 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);

	/* word 3 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);

	/* word 4 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);

	/* NOTE(review): this second SET ORs 0 into word 4 and is a no-op */
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);

	/* word 5 */
	/* NOTE(review): word 5 is skipped without being zeroed, so it
	 * carries whatever bytes were already in the nbuf; confirm the
	 * firmware ignores this word.
	 */
	msg_word++;

	/* word 6 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);

	/* word 7 */
	msg_word++;
	*msg_word = 0;
	/* Currently Using last 2 bits for pdev_id
	 * For future reference, reserving 3 bits in cookie_msb for pdev_id
	 */
	cookie_msb = (cookie_msb | pdev->pdev_id);
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for FW response msg not guaranteed */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
				     htt_logger_bufp);

	/* on send failure the netbuf/HTC pkt are not consumed; free them */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4049 
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/* Masks and shift used to split the 64-bit vdev stats reset bitmask
 * into the two 32-bit words carried by the HTT message.
 */
#define HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK 0xFFFFFFFF
#define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK 0xFFFFFFFF00000000
#define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT 32

/**
 * dp_h2t_hw_vdev_stats_config_send() - send VDEVS_TXRX_STATS_CFG to FW
 * @dpsoc: DP SOC handle
 * @pdev_id: host pdev id, or INVALID_PDEV_ID (target pdev id 0 is sent)
 * @enable: enable/disable the HW vdev txrx stats offload
 * @reset: request a reset of the offloaded stats
 * @reset_bitmask: 64-bit bitmask selecting which stats bits to reset
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	struct htt_soc *soc = dpsoc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;
	int duration;
	uint32_t bitmask;
	int target_pdev_id;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_vdevs_txrx_stats_cfg)),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate "
		"HTT_H2T_HW_VDEV_TXRX_STATS_CFG_MSG_SZ msg buffer", dpsoc);
		return QDF_STATUS_E_NOMEM;
	}

	if (pdev_id != INVALID_PDEV_ID)
		target_pdev_id = DP_SW2HW_MACID(pdev_id);
	else
		target_pdev_id = 0;

	/* periodic reporting interval comes from the wlan cfg context */
	duration =
	wlan_cfg_get_vdev_stats_hw_offload_timer(dpsoc->wlan_cfg_ctx);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_vdevs_txrx_stats_cfg))) {
		dp_htt_err("%pK: Failed to expand head for HTT_HW_VDEV_STATS"
			   , dpsoc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* keep a pointer to the HTT payload for the HTT event logger */
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;

	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG);
	HTT_RX_VDEVS_TXRX_STATS_PDEV_ID_SET(*msg_word, target_pdev_id);

	HTT_RX_VDEVS_TXRX_STATS_ENABLE_SET(*msg_word, enable);

	/* interval is scaled down by 8 before being sent — assumes the
	 * HTT field is in units of 8 of the cfg timer; TODO confirm
	 */
	HTT_RX_VDEVS_TXRX_STATS_PERIODIC_INTERVAL_SET(*msg_word,
						      (duration >> 3));

	HTT_RX_VDEVS_TXRX_STATS_RESET_STATS_BITS_SET(*msg_word, reset);

	/* word 1: low 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask = (reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK);
	*msg_word = bitmask;

	/* word 2: high 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask =
		((reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK) >>
		 HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT);
	*msg_word = bitmask;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer",
			   dpsoc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG,
				     htt_logger_bufp);

	/* on send failure the netbuf/HTC pkt are not consumed; free them */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
#else
/* Stub when HW vdev stats offload support is not compiled in */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	return QDF_STATUS_SUCCESS;
}
#endif
4168 
4169 /**
4170  * dp_h2t_3tuple_config_send(): function to contruct 3 tuple configuration
4171  * HTT message to pass to FW
4172  * @pdev: DP PDEV handle
4173  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
4174  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
4175  *
4176  * tuple_mask[1:0]:
4177  *   00 - Do not report 3 tuple hash value
4178  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
4179  *   01 - Report 3 tuple hash value in flow_id_toeplitz
4180  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
4181  *
4182  * return: QDF STATUS
4183  */
4184 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
4185 				     uint32_t tuple_mask, uint8_t mac_id)
4186 {
4187 	struct htt_soc *soc = pdev->soc->htt_handle;
4188 	struct dp_htt_htc_pkt *pkt;
4189 	qdf_nbuf_t msg;
4190 	uint32_t *msg_word;
4191 	uint8_t *htt_logger_bufp;
4192 	int mac_for_pdev;
4193 	int target_pdev_id;
4194 
4195 	msg = qdf_nbuf_alloc(
4196 			soc->osdev,
4197 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
4198 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4199 
4200 	if (!msg)
4201 		return QDF_STATUS_E_NOMEM;
4202 
4203 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4204 	target_pdev_id =
4205 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
4206 
4207 	/*
4208 	 * Set the length of the message.
4209 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4210 	 * separately during the below call to qdf_nbuf_push_head.
4211 	 * The contribution from the HTC header is added separately inside HTC.
4212 	 */
4213 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
4214 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4215 			  "Failed to expand head for HTT_3TUPLE_CONFIG");
4216 		qdf_nbuf_free(msg);
4217 		return QDF_STATUS_E_FAILURE;
4218 	}
4219 
4220 	dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------",
4221 		    pdev->soc, tuple_mask, target_pdev_id);
4222 
4223 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
4224 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4225 	htt_logger_bufp = (uint8_t *)msg_word;
4226 
4227 	*msg_word = 0;
4228 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
4229 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
4230 
4231 	msg_word++;
4232 	*msg_word = 0;
4233 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4234 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4235 
4236 	pkt = htt_htc_pkt_alloc(soc);
4237 	if (!pkt) {
4238 		qdf_nbuf_free(msg);
4239 		return QDF_STATUS_E_NOMEM;
4240 	}
4241 
4242 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4243 
4244 	SET_HTC_PACKET_INFO_TX(
4245 			&pkt->htc_pkt,
4246 			dp_htt_h2t_send_complete_free_netbuf,
4247 			qdf_nbuf_data(msg),
4248 			qdf_nbuf_len(msg),
4249 			soc->htc_endpoint,
4250 			/* tag for no FW response msg */
4251 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4252 
4253 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4254 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
4255 			    htt_logger_bufp);
4256 
4257 	return QDF_STATUS_SUCCESS;
4258 }
4259 
/* This macro will be reverted once a proper HTT header defines
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the htt.h file.
 */
#if defined(WDI_EVENT_ENABLE)
/**
 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @stats_type_upload_mask: stats type requested by user
 * @mac_id: Mac id number
 *
 * return: QDF STATUS
 */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer"
			   , pdev->soc);
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
								mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS"
			   , pdev->soc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
			stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer", pdev->soc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for no FW response msg */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	/* msg_word still points at the single-word HTT payload; it doubles
	 * as the event logger buffer here
	 */
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
				     (uint8_t *)msg_word);

	/* on send failure the netbuf/HTC pkt are not consumed; free them */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

qdf_export_symbol(dp_h2t_cfg_stats_msg_send);
#endif
4356 
4357 void
4358 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
4359 			     uint32_t *tag_buf)
4360 {
4361 	struct dp_peer *peer = NULL;
4362 	switch (tag_type) {
4363 	case HTT_STATS_PEER_DETAILS_TAG:
4364 	{
4365 		htt_peer_details_tlv *dp_stats_buf =
4366 			(htt_peer_details_tlv *)tag_buf;
4367 
4368 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
4369 	}
4370 	break;
4371 	case HTT_STATS_PEER_STATS_CMN_TAG:
4372 	{
4373 		htt_peer_stats_cmn_tlv *dp_stats_buf =
4374 			(htt_peer_stats_cmn_tlv *)tag_buf;
4375 
4376 		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
4377 					     DP_MOD_ID_HTT);
4378 
4379 		if (peer && !peer->bss_peer) {
4380 			peer->stats.tx.inactive_time =
4381 				dp_stats_buf->inactive_time;
4382 			qdf_event_set(&pdev->fw_peer_stats_event);
4383 		}
4384 		if (peer)
4385 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4386 	}
4387 	break;
4388 	default:
4389 		qdf_err("Invalid tag_type");
4390 	}
4391 }
4392 
4393 /**
4394  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
4395  * @pdev: DP pdev handle
4396  * @fse_setup_info: FST setup parameters
4397  *
4398  * Return: Success when HTT message is sent, error on failure
4399  */
4400 QDF_STATUS
4401 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
4402 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
4403 {
4404 	struct htt_soc *soc = pdev->soc->htt_handle;
4405 	struct dp_htt_htc_pkt *pkt;
4406 	qdf_nbuf_t msg;
4407 	u_int32_t *msg_word;
4408 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4409 	uint8_t *htt_logger_bufp;
4410 	u_int32_t *key;
4411 	QDF_STATUS status;
4412 
4413 	msg = qdf_nbuf_alloc(
4414 		soc->osdev,
4415 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4416 		/* reserve room for the HTC header */
4417 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4418 
4419 	if (!msg)
4420 		return QDF_STATUS_E_NOMEM;
4421 
4422 	/*
4423 	 * Set the length of the message.
4424 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4425 	 * separately during the below call to qdf_nbuf_push_head.
4426 	 * The contribution from the HTC header is added separately inside HTC.
4427 	 */
4428 	if (!qdf_nbuf_put_tail(msg,
4429 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4430 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
4431 		return QDF_STATUS_E_FAILURE;
4432 	}
4433 
4434 	/* fill in the message contents */
4435 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4436 
4437 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4438 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4439 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4440 	htt_logger_bufp = (uint8_t *)msg_word;
4441 
4442 	*msg_word = 0;
4443 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4444 
4445 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4446 
4447 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4448 
4449 	msg_word++;
4450 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4451 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4452 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4453 					     fse_setup_info->ip_da_sa_prefix);
4454 
4455 	msg_word++;
4456 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4457 					  fse_setup_info->base_addr_lo);
4458 	msg_word++;
4459 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4460 					  fse_setup_info->base_addr_hi);
4461 
4462 	key = (u_int32_t *)fse_setup_info->hash_key;
4463 	fse_setup->toeplitz31_0 = *key++;
4464 	fse_setup->toeplitz63_32 = *key++;
4465 	fse_setup->toeplitz95_64 = *key++;
4466 	fse_setup->toeplitz127_96 = *key++;
4467 	fse_setup->toeplitz159_128 = *key++;
4468 	fse_setup->toeplitz191_160 = *key++;
4469 	fse_setup->toeplitz223_192 = *key++;
4470 	fse_setup->toeplitz255_224 = *key++;
4471 	fse_setup->toeplitz287_256 = *key++;
4472 	fse_setup->toeplitz314_288 = *key;
4473 
4474 	msg_word++;
4475 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4476 	msg_word++;
4477 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4478 	msg_word++;
4479 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4480 	msg_word++;
4481 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4482 	msg_word++;
4483 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4484 	msg_word++;
4485 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4486 	msg_word++;
4487 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4488 	msg_word++;
4489 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4490 	msg_word++;
4491 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4492 	msg_word++;
4493 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4494 					  fse_setup->toeplitz314_288);
4495 
4496 	pkt = htt_htc_pkt_alloc(soc);
4497 	if (!pkt) {
4498 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4499 		qdf_assert(0);
4500 		qdf_nbuf_free(msg);
4501 		return QDF_STATUS_E_RESOURCES; /* failure */
4502 	}
4503 
4504 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4505 
4506 	SET_HTC_PACKET_INFO_TX(
4507 		&pkt->htc_pkt,
4508 		dp_htt_h2t_send_complete_free_netbuf,
4509 		qdf_nbuf_data(msg),
4510 		qdf_nbuf_len(msg),
4511 		soc->htc_endpoint,
4512 		/* tag for no FW response msg */
4513 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4514 
4515 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4516 
4517 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4518 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4519 				     htt_logger_bufp);
4520 
4521 	if (status == QDF_STATUS_SUCCESS) {
4522 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4523 			fse_setup_info->pdev_id);
4524 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4525 				   (void *)fse_setup_info->hash_key,
4526 				   fse_setup_info->hash_key_len);
4527 	} else {
4528 		qdf_nbuf_free(msg);
4529 		htt_htc_pkt_free(soc, pkt);
4530 	}
4531 
4532 	return status;
4533 }
4534 
/**
 * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
 * add/del a flow in HW
 * @pdev: DP pdev handle
 * @fse_op_info: Flow entry parameters
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);

	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;

	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
	msg_word++;
	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
		/* per-entry invalidation carries the full 5-tuple:
		 * src/dest IP (4 words each, network byte order),
		 * ports and L4 protocol
		 */
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(
		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
		msg_word++;
		HTT_RX_FSE_SOURCEPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.src_port);
		HTT_RX_FSE_DESTPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.dest_port);
		msg_word++;
		HTT_RX_FSE_L4_PROTO_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
	}

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(msg),
		qdf_nbuf_len(msg),
		soc->htc_endpoint,
		/* tag for no FW response msg */
		HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
				     htt_logger_bufp);

	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
			fse_op_info->pdev_id);
	} else {
		/* on send failure the netbuf/HTC pkt are not consumed */
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4683 
4684 /**
4685  * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
4686  * @pdev: DP pdev handle
4687  * @fse_op_info: Flow entry parameters
4688  *
4689  * Return: Success when HTT message is sent, error on failure
4690  */
4691 QDF_STATUS
4692 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
4693 		      struct dp_htt_rx_fisa_cfg *fisa_config)
4694 {
4695 	struct htt_soc *soc = pdev->soc->htt_handle;
4696 	struct dp_htt_htc_pkt *pkt;
4697 	qdf_nbuf_t msg;
4698 	u_int32_t *msg_word;
4699 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
4700 	uint8_t *htt_logger_bufp;
4701 	uint32_t len;
4702 	QDF_STATUS status;
4703 
4704 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
4705 
4706 	msg = qdf_nbuf_alloc(soc->osdev,
4707 			     len,
4708 			     /* reserve room for the HTC header */
4709 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
4710 			     4,
4711 			     TRUE);
4712 	if (!msg)
4713 		return QDF_STATUS_E_NOMEM;
4714 
4715 	/*
4716 	 * Set the length of the message.
4717 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4718 	 * separately during the below call to qdf_nbuf_push_head.
4719 	 * The contribution from the HTC header is added separately inside HTC.
4720 	 */
4721 	if (!qdf_nbuf_put_tail(msg,
4722 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
4723 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4724 		qdf_nbuf_free(msg);
4725 		return QDF_STATUS_E_FAILURE;
4726 	}
4727 
4728 	/* fill in the message contents */
4729 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4730 
4731 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
4732 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4733 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4734 	htt_logger_bufp = (uint8_t *)msg_word;
4735 
4736 	*msg_word = 0;
4737 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
4738 
4739 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
4740 
4741 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);
4742 
4743 	msg_word++;
4744 	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
4745 	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);
4746 
4747 	msg_word++;
4748 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
4749 
4750 	pkt = htt_htc_pkt_alloc(soc);
4751 	if (!pkt) {
4752 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4753 		qdf_assert(0);
4754 		qdf_nbuf_free(msg);
4755 		return QDF_STATUS_E_RESOURCES; /* failure */
4756 	}
4757 
4758 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4759 
4760 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4761 			       dp_htt_h2t_send_complete_free_netbuf,
4762 			       qdf_nbuf_data(msg),
4763 			       qdf_nbuf_len(msg),
4764 			       soc->htc_endpoint,
4765 			       /* tag for no FW response msg */
4766 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
4767 
4768 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4769 
4770 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
4771 				     htt_logger_bufp);
4772 
4773 	if (status == QDF_STATUS_SUCCESS) {
4774 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
4775 			fisa_config->pdev_id);
4776 	} else {
4777 		qdf_nbuf_free(msg);
4778 		htt_htc_pkt_free(soc, pkt);
4779 	}
4780 
4781 	return status;
4782 }
4783 
4784 #ifdef WLAN_SUPPORT_PPEDS
4785 /**
4786  * dp_htt_rxdma_rxole_ppe_cfg_set() - Send RxOLE and RxDMA PPE config
4787  * @dp_osc: Data path SoC handle
4788  * @cfg: RxDMA and RxOLE PPE config
4789  *
4790  * Return: Success when HTT message is sent, error on failure
4791  */
4792 QDF_STATUS
4793 dp_htt_rxdma_rxole_ppe_cfg_set(struct dp_soc *soc,
4794 			       struct dp_htt_rxdma_rxole_ppe_config *cfg)
4795 {
4796 	struct htt_soc *htt_handle = soc->htt_handle;
4797 	uint32_t len;
4798 	qdf_nbuf_t msg;
4799 	u_int32_t *msg_word;
4800 	QDF_STATUS status;
4801 	uint8_t *htt_logger_bufp;
4802 	struct dp_htt_htc_pkt *pkt;
4803 
4804 	len = HTT_MSG_BUF_SIZE(
4805 	      sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));
4806 
4807 	msg = qdf_nbuf_alloc(soc->osdev,
4808 			     len,
4809 			     /* reserve room for the HTC header */
4810 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
4811 			     4,
4812 			     TRUE);
4813 	if (!msg)
4814 		return QDF_STATUS_E_NOMEM;
4815 
4816 	/*
4817 	 * Set the length of the message.
4818 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4819 	 * separately during the below call to qdf_nbuf_push_head.
4820 	 * The contribution from the HTC header is added separately inside HTC.
4821 	 */
4822 	if (!qdf_nbuf_put_tail(
4823 		msg, sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t))) {
4824 		qdf_err("Failed to expand head for HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG msg");
4825 		qdf_nbuf_free(msg);
4826 		return QDF_STATUS_E_FAILURE;
4827 	}
4828 
4829 	/* fill in the message contents */
4830 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4831 
4832 	memset(msg_word, 0,
4833 	       sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));
4834 
4835 	/* Rewind beyond alignment pad to get to the HTC header reserved area */
4836 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4837 	htt_logger_bufp = (uint8_t *)msg_word;
4838 
4839 	*msg_word = 0;
4840 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG);
4841 	HTT_PPE_CFG_OVERRIDE_SET(*msg_word, cfg->override);
4842 	HTT_PPE_CFG_REO_DEST_IND_SET(
4843 			*msg_word, cfg->reo_destination_indication);
4844 	HTT_PPE_CFG_MULTI_BUF_MSDU_OVERRIDE_EN_SET(
4845 			*msg_word, cfg->multi_buffer_msdu_override_en);
4846 	HTT_PPE_CFG_INTRA_BSS_OVERRIDE_EN_SET(
4847 			*msg_word, cfg->intra_bss_override);
4848 	HTT_PPE_CFG_DECAP_RAW_OVERRIDE_EN_SET(
4849 			*msg_word, cfg->decap_raw_override);
4850 	HTT_PPE_CFG_DECAP_NWIFI_OVERRIDE_EN_SET(
4851 			*msg_word, cfg->decap_nwifi_override);
4852 	HTT_PPE_CFG_IP_FRAG_OVERRIDE_EN_SET(
4853 			*msg_word, cfg->ip_frag_override);
4854 
4855 	pkt = htt_htc_pkt_alloc(htt_handle);
4856 	if (!pkt) {
4857 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4858 		qdf_assert(0);
4859 		qdf_nbuf_free(msg);
4860 		return QDF_STATUS_E_RESOURCES; /* failure */
4861 	}
4862 
4863 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4864 
4865 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4866 			       dp_htt_h2t_send_complete_free_netbuf,
4867 			       qdf_nbuf_data(msg),
4868 			       qdf_nbuf_len(msg),
4869 			       htt_handle->htc_endpoint,
4870 			       /* tag for no FW response msg */
4871 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
4872 
4873 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4874 
4875 	status = DP_HTT_SEND_HTC_PKT(htt_handle, pkt,
4876 				     HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG,
4877 				     htt_logger_bufp);
4878 
4879 	if (status != QDF_STATUS_SUCCESS) {
4880 		qdf_nbuf_free(msg);
4881 		htt_htc_pkt_free(htt_handle, pkt);
4882 		return status;
4883 	}
4884 
4885 	dp_info("HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG sent");
4886 	return status;
4887 }
4888 #endif /* WLAN_SUPPORT_PPEDS */
4889 
4890 /**
4891  * dp_bk_pressure_stats_handler(): worker function to print back pressure
4892  *				   stats
4893  *
4894  * @context : argument to work function
4895  */
4896 static void dp_bk_pressure_stats_handler(void *context)
4897 {
4898 	struct dp_pdev *pdev = (struct dp_pdev *)context;
4899 	struct dp_soc_srngs_state *soc_srngs_state = NULL;
4900 	const char *ring_name;
4901 	int i;
4902 	struct dp_srng_ring_state *ring_state;
4903 	bool empty_flag;
4904 
4905 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4906 
4907 	/* Extract only first entry for printing in one work event */
4908 	if (pdev->bkp_stats.queue_depth &&
4909 	    !TAILQ_EMPTY(&pdev->bkp_stats.list)) {
4910 		soc_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
4911 		TAILQ_REMOVE(&pdev->bkp_stats.list, soc_srngs_state,
4912 			     list_elem);
4913 		pdev->bkp_stats.queue_depth--;
4914 	}
4915 
4916 	empty_flag = TAILQ_EMPTY(&pdev->bkp_stats.list);
4917 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4918 
4919 	if (soc_srngs_state) {
4920 		DP_PRINT_STATS("### BKP stats for seq_num %u START ###",
4921 			       soc_srngs_state->seq_num);
4922 		for (i = 0; i < soc_srngs_state->max_ring_id; i++) {
4923 			ring_state = &soc_srngs_state->ring_state[i];
4924 			ring_name = dp_srng_get_str_from_hal_ring_type
4925 						(ring_state->ring_type);
4926 			DP_PRINT_STATS("%s: SW:Head pointer = %d Tail Pointer = %d\n",
4927 				       ring_name,
4928 				       ring_state->sw_head,
4929 				       ring_state->sw_tail);
4930 
4931 			DP_PRINT_STATS("%s: HW:Head pointer = %d Tail Pointer = %d\n",
4932 				       ring_name,
4933 				       ring_state->hw_head,
4934 				       ring_state->hw_tail);
4935 		}
4936 
4937 		DP_PRINT_STATS("### BKP stats for seq_num %u COMPLETE ###",
4938 			       soc_srngs_state->seq_num);
4939 		qdf_mem_free(soc_srngs_state);
4940 	}
4941 	dp_print_napi_stats(pdev->soc);
4942 
4943 	/* Schedule work again if queue is not empty */
4944 	if (!empty_flag)
4945 		qdf_queue_work(0, pdev->bkp_stats.work_queue,
4946 			       &pdev->bkp_stats.work);
4947 }
4948 
4949 /*
4950  * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
4951  *				processing
4952  * @pdev: Datapath PDEV handle
4953  *
4954  */
4955 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev)
4956 {
4957 	struct dp_soc_srngs_state *ring_state, *ring_state_next;
4958 
4959 	if (!pdev->bkp_stats.work_queue)
4960 		return;
4961 
4962 	qdf_flush_workqueue(0, pdev->bkp_stats.work_queue);
4963 	qdf_destroy_workqueue(0, pdev->bkp_stats.work_queue);
4964 	qdf_flush_work(&pdev->bkp_stats.work);
4965 	qdf_disable_work(&pdev->bkp_stats.work);
4966 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4967 	TAILQ_FOREACH_SAFE(ring_state, &pdev->bkp_stats.list,
4968 			   list_elem, ring_state_next) {
4969 		TAILQ_REMOVE(&pdev->bkp_stats.list, ring_state,
4970 			     list_elem);
4971 		qdf_mem_free(ring_state);
4972 	}
4973 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4974 	qdf_spinlock_destroy(&pdev->bkp_stats.list_lock);
4975 }
4976 
4977 /*
4978  * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
4979  *				processing
4980  * @pdev: Datapath PDEV handle
4981  *
4982  * Return: QDF_STATUS_SUCCESS: Success
4983  *         QDF_STATUS_E_NOMEM: Error
4984  */
4985 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev)
4986 {
4987 	TAILQ_INIT(&pdev->bkp_stats.list);
4988 	pdev->bkp_stats.seq_num = 0;
4989 	pdev->bkp_stats.queue_depth = 0;
4990 
4991 	qdf_create_work(0, &pdev->bkp_stats.work,
4992 			dp_bk_pressure_stats_handler, pdev);
4993 
4994 	pdev->bkp_stats.work_queue =
4995 		qdf_alloc_unbound_workqueue("dp_bkp_work_queue");
4996 	if (!pdev->bkp_stats.work_queue)
4997 		goto fail;
4998 
4999 	qdf_spinlock_create(&pdev->bkp_stats.list_lock);
5000 	return QDF_STATUS_SUCCESS;
5001 
5002 fail:
5003 	dp_htt_alert("BKP stats attach failed");
5004 	qdf_flush_work(&pdev->bkp_stats.work);
5005 	qdf_disable_work(&pdev->bkp_stats.work);
5006 	return QDF_STATUS_E_FAILURE;
5007 }
5008 
5009 #ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_htt_umac_reset_send_setup_cmd() - Send UMAC hang recovery setup to FW
 * @soc: Data path SoC handle
 * @setup_params: MSI data and shared-memory address for the recovery channel
 *
 * Builds an HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP message
 * carrying the T2H/H2T signalling method, the MSI data, and the location
 * and size of the shared-memory region, then sends it over HTC.
 *
 * Return: QDF_STATUS_SUCCESS when the HTT message is sent, error otherwise
 */
QDF_STATUS dp_htt_umac_reset_send_setup_cmd(
		struct dp_soc *soc,
		const struct dp_htt_umac_reset_setup_cmd_params *setup_params)
{
	struct htt_soc *htt_handle = soc->htt_handle;
	uint32_t len;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	QDF_STATUS status;
	uint8_t *htt_logger_bufp;
	struct dp_htt_htc_pkt *pkt;

	len = HTT_MSG_BUF_SIZE(
		HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES);

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(
		msg, HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES)) {
		dp_htt_err("Failed to expand head");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	/* Rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* logger snapshot starts at the HTT payload, not the HTC header */
	htt_logger_bufp = (uint8_t *)msg_word;

	qdf_mem_zero(msg_word,
		     HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES);

	/* Word 0: message type plus T2H/H2T signalling methods */
	HTT_H2T_MSG_TYPE_SET(
		*msg_word,
		HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP);
	HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_T2H_MSG_METHOD_SET(
		*msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling);
	HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_H2T_MSG_METHOD_SET(
		*msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling);

	/* Word 1: MSI data for the T2H interrupt */
	msg_word++;
	*msg_word = setup_params->msi_data;

	/* Word 2: size of the shared-memory recovery structure */
	msg_word++;
	*msg_word = sizeof(htt_umac_hang_recovery_msg_shmem_t);

	/* Words 3-4: shared-memory physical address, low then high */
	msg_word++;
	*msg_word = setup_params->shmem_addr_low;

	msg_word++;
	*msg_word = setup_params->shmem_addr_high;

	pkt = htt_htc_pkt_alloc(htt_handle);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       htt_handle->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(
			htt_handle, pkt,
			HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP,
			htt_logger_bufp);

	/* On send failure the completion callback will not run; free here */
	if (QDF_IS_STATUS_ERROR(status)) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(htt_handle, pkt);
		return status;
	}

	dp_info("HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP sent");
	return status;
}
5111 #endif
5112