xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <htt.h>
21 #include <hal_hw_headers.h>
22 #include <hal_api.h>
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "dp_htt.h"
30 #ifdef WIFI_MONITOR_SUPPORT
31 #include <dp_mon.h>
32 #endif
33 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
34 #include "cdp_txrx_cmn_struct.h"
35 
36 #ifdef FEATURE_PERPKT_INFO
37 #include "dp_ratetable.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef CONFIG_SAWF_DEF_QUEUES
41 #include <dp_sawf_htt.h>
42 #endif
43 
44 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
45 
46 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
47 
48 #define HTT_MSG_BUF_SIZE(msg_bytes) \
49 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
50 
51 #define HTT_PID_BIT_MASK 0x3
52 
53 #define DP_EXT_MSG_LENGTH 2048
54 #define HTT_HEADER_LEN 16
55 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
56 
57 #define HTT_SHIFT_UPPER_TIMESTAMP 32
58 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
59 #define HTT_BKP_STATS_MAX_QUEUE_DEPTH 16
60 
61 struct dp_htt_htc_pkt *
62 htt_htc_pkt_alloc(struct htt_soc *soc)
63 {
64 	struct dp_htt_htc_pkt_union *pkt = NULL;
65 
66 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
67 	if (soc->htt_htc_pkt_freelist) {
68 		pkt = soc->htt_htc_pkt_freelist;
69 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
70 	}
71 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
72 
73 	if (!pkt)
74 		pkt = qdf_mem_malloc(sizeof(*pkt));
75 
76 	if (!pkt)
77 		return NULL;
78 
79 	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
80 
81 	return &pkt->u.pkt; /* not actually a dereference */
82 }
83 
84 qdf_export_symbol(htt_htc_pkt_alloc);
85 
86 void
87 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
88 {
89 	struct dp_htt_htc_pkt_union *u_pkt =
90 		(struct dp_htt_htc_pkt_union *)pkt;
91 
92 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
93 	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
94 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
95 	soc->htt_htc_pkt_freelist = u_pkt;
96 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
97 }
98 
99 qdf_export_symbol(htt_htc_pkt_free);
100 
101 /*
102  * htt_htc_pkt_pool_free() - Free HTC packet pool
103  * @htt_soc:	HTT SOC handle
104  */
105 void
106 htt_htc_pkt_pool_free(struct htt_soc *soc)
107 {
108 	struct dp_htt_htc_pkt_union *pkt, *next;
109 	pkt = soc->htt_htc_pkt_freelist;
110 	while (pkt) {
111 		next = pkt->u.next;
112 		qdf_mem_free(pkt);
113 		pkt = next;
114 	}
115 	soc->htt_htc_pkt_freelist = NULL;
116 }
117 
118 
119 #ifndef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
120 
/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no. of pkts in list
 *
 * Walks the misclist under the tx mutex and frees every entry beyond the
 * first @level entries (unmapping and freeing the nbuf each entry carries),
 * terminating the kept portion of the list at its last node.
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			/* release the nbuf held by this entry, then the
			 * entry itself
			 */
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			/* first trimmed node only: detach the freed tail
			 * from the portion of the list that is kept
			 */
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
153 
154 /*
155  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
156  * @htt_soc:	HTT SOC handle
157  * @dp_htt_htc_pkt: pkt to be added to list
158  */
159 void
160 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
161 {
162 	struct dp_htt_htc_pkt_union *u_pkt =
163 				(struct dp_htt_htc_pkt_union *)pkt;
164 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
165 							pkt->htc_pkt.Endpoint)
166 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
167 
168 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
169 	if (soc->htt_htc_pkt_misclist) {
170 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
171 		soc->htt_htc_pkt_misclist = u_pkt;
172 	} else {
173 		soc->htt_htc_pkt_misclist = u_pkt;
174 	}
175 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
176 
177 	/* only ce pipe size + tx_queue_depth could possibly be in use
178 	 * free older packets in the misclist
179 	 */
180 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
181 }
182 
183 qdf_export_symbol(htt_htc_misc_pkt_list_add);
184 #endif  /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
185 
/*
 * htt_htc_misc_pkt_pool_free() - free pkts in misc list
 * @soc: HTT SOC handle
 *
 * Walks the misclist under the tx mutex, unmapping and freeing the nbuf
 * carried by each valid entry and then the entry itself.  Entries whose
 * magic cookie does not match are skipped and only counted in
 * stats.skip_count.  NOTE(review): skipped entries are not freed here —
 * presumably they are reclaimed through another path; confirm.
 */
static void
htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt, *next;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;

	while (pkt) {
		next = pkt->u.next;
		/* only entries stamped with the HTC magic cookie carry a
		 * valid mapped nbuf
		 */
		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
		    HTC_PACKET_MAGIC_COOKIE) {
			pkt = next;
			soc->stats.skip_count++;
			continue;
		}
		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);

		soc->stats.htc_pkt_free++;
		dp_htt_info("%pK: Pkt free count %d",
			    soc->dp_soc, soc->stats.htc_pkt_free);

		qdf_nbuf_free(netbuf);
		qdf_mem_free(pkt);
		pkt = next;
	}
	soc->htt_htc_pkt_misclist = NULL;
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
	dp_info("HTC Packets, fail count = %d, skip count = %d",
		soc->stats.fail_count, soc->stats.skip_count);
}
223 
/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
 * @tgt_mac_addr: MAC address as carried in the T2H message
 * @buffer: caller-provided scratch buffer for the byte-swapped address
 *
 * Return: pointer to the MAC address in host byte order — @buffer on a
 * big-endian host, @tgt_mac_addr itself otherwise.
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness.
	 * To make u_int32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each u_int32_t element of the
	 * message.
	 * For byte-array message fields like the MAC address, this
	 * upload swizzling puts the bytes in the wrong order, and needs
	 * to be undone by reversing the bytes within each 4-byte word:
	 * the first word holds MAC bytes 0-3, the second holds bytes 4-5.
	 */
	buffer[0] = tgt_mac_addr[3];
	buffer[1] = tgt_mac_addr[2];
	buffer[2] = tgt_mac_addr[1];
	buffer[3] = tgt_mac_addr[0];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
257 
/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc: SOC handle (unused)
 * @status: Completion status (unused)
 * @netbuf: HTT buffer to release
 *
 * Send-done callback for H2T messages that need no completion processing
 * beyond releasing the network buffer.
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
270 
271 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
272 /*
273  * dp_htt_h2t_send_complete() - H2T completion handler
274  * @context:	Opaque context (HTT SOC handle)
275  * @htc_pkt:	HTC packet
276  */
277 static void
278 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
279 {
280 	struct htt_soc *soc =  (struct htt_soc *) context;
281 	struct dp_htt_htc_pkt *htt_pkt;
282 	qdf_nbuf_t netbuf;
283 
284 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
285 
286 	/* process (free or keep) the netbuf that held the message */
287 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
288 	/*
289 	 * adf sendcomplete is required for windows only
290 	 */
291 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
292 	/* free the htt_htc_pkt / HTC_PACKET object */
293 	qdf_nbuf_free(netbuf);
294 	htt_htc_pkt_free(soc, htt_pkt);
295 }
296 
297 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
298 
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context: Opaque context (HTT SOC handle)
 * @htc_pkt: HTC packet
 *
 * Invokes the optional per-message part-2 completion callback (stashed in
 * the packet's pPktContext by the sender) and returns the dp_htt_htc_pkt
 * wrapper to the SOC freelist.  The netbuf is handed to the callback,
 * which owns freeing it.
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*send_complete_part2)(
	     void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	send_complete_part2 = htc_pkt->pPktContext;

	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2){
		send_complete_part2(
		    htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}
330 
331 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
332 
333 /*
334  * dp_htt_h2t_add_tcl_metadata_ver_v1() - Add tcl_metadata version V1
335  * @htt_soc:	HTT SOC handle
336  * @msg:	Pointer to nbuf
337  *
338  * Return: 0 on success; error code on failure
339  */
340 static int dp_htt_h2t_add_tcl_metadata_ver_v1(struct htt_soc *soc,
341 					      qdf_nbuf_t *msg)
342 {
343 	uint32_t *msg_word;
344 
345 	*msg = qdf_nbuf_alloc(
346 		soc->osdev,
347 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
348 		/* reserve room for the HTC header */
349 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
350 	if (!*msg)
351 		return QDF_STATUS_E_NOMEM;
352 
353 	/*
354 	 * Set the length of the message.
355 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
356 	 * separately during the below call to qdf_nbuf_push_head.
357 	 * The contribution from the HTC header is added separately inside HTC.
358 	 */
359 	if (!qdf_nbuf_put_tail(*msg, HTT_VER_REQ_BYTES)) {
360 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
361 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
362 			  __func__);
363 		return QDF_STATUS_E_FAILURE;
364 	}
365 
366 	/* fill in the message contents */
367 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
368 
369 	/* rewind beyond alignment pad to get to the HTC header reserved area */
370 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
371 
372 	*msg_word = 0;
373 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
374 
375 	return QDF_STATUS_SUCCESS;
376 }
377 
378 #ifdef QCA_DP_TX_FW_METADATA_V2
379 /*
380  * dp_htt_h2t_add_tcl_metadata_ver_v2() - Add tcl_metadata version V2
381  * @htt_soc:	HTT SOC handle
382  * @msg:	Pointer to nbuf
383  *
384  * Return: 0 on success; error code on failure
385  */
386 static int dp_htt_h2t_add_tcl_metadata_ver_v2(struct htt_soc *soc,
387 					      qdf_nbuf_t *msg)
388 {
389 	uint32_t *msg_word;
390 
391 	*msg = qdf_nbuf_alloc(
392 		soc->osdev,
393 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ),
394 		/* reserve room for the HTC header */
395 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
396 	if (!*msg)
397 		return QDF_STATUS_E_NOMEM;
398 
399 	/*
400 	 * Set the length of the message.
401 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
402 	 * separately during the below call to qdf_nbuf_push_head.
403 	 * The contribution from the HTC header is added separately inside HTC.
404 	 */
405 	if (!qdf_nbuf_put_tail(*msg,
406 			       HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ)) {
407 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
408 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
409 			  __func__);
410 		return QDF_STATUS_E_FAILURE;
411 	}
412 
413 	/* fill in the message contents */
414 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
415 
416 	/* rewind beyond alignment pad to get to the HTC header reserved area */
417 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
418 
419 	*msg_word = 0;
420 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
421 
422 	/* word 1 */
423 	msg_word++;
424 	*msg_word = 0;
425 	HTT_OPTION_TLV_TAG_SET(*msg_word, HTT_OPTION_TLV_TAG_TCL_METADATA_VER);
426 	HTT_OPTION_TLV_LENGTH_SET(*msg_word, HTT_TCL_METADATA_VER_SZ);
427 	HTT_OPTION_TLV_TCL_METADATA_VER_SET(*msg_word,
428 					    HTT_OPTION_TLV_TCL_METADATA_V2);
429 
430 	return QDF_STATUS_SUCCESS;
431 }
432 
433 /*
434  * dp_htt_h2t_add_tcl_metadata_ver() - Add tcl_metadata version
435  * @htt_soc:	HTT SOC handle
436  * @msg:	Pointer to nbuf
437  *
438  * Return: 0 on success; error code on failure
439  */
440 static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
441 {
442 	/* Use tcl_metadata_v1 when NSS offload is enabled */
443 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->dp_soc->wlan_cfg_ctx) ||
444 	    soc->dp_soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
445 		return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
446 	else
447 		return dp_htt_h2t_add_tcl_metadata_ver_v2(soc, msg);
448 }
449 #else
/* QCA_DP_TX_FW_METADATA_V2 not defined: only tcl_metadata V1 is supported */
static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
{
	return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
}
454 #endif
455 
456 /*
457  * htt_h2t_ver_req_msg() - Send HTT version request message to target
458  * @htt_soc:	HTT SOC handle
459  *
460  * Return: 0 on success; error code on failure
461  */
462 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
463 {
464 	struct dp_htt_htc_pkt *pkt;
465 	qdf_nbuf_t msg = NULL;
466 	QDF_STATUS status;
467 
468 	status = dp_htt_h2t_add_tcl_metadata_ver(soc, &msg);
469 	if (status != QDF_STATUS_SUCCESS)
470 		return status;
471 
472 	pkt = htt_htc_pkt_alloc(soc);
473 	if (!pkt) {
474 		qdf_nbuf_free(msg);
475 		return QDF_STATUS_E_FAILURE;
476 	}
477 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
478 
479 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
480 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
481 		qdf_nbuf_len(msg), soc->htc_endpoint,
482 		HTC_TX_PACKET_TAG_RTPM_PUT_RC);
483 
484 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
485 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
486 				     NULL);
487 
488 	if (status != QDF_STATUS_SUCCESS) {
489 		qdf_nbuf_free(msg);
490 		htt_htc_pkt_free(soc, pkt);
491 	}
492 
493 	return status;
494 }
495 
496 /*
497  * htt_srng_setup() - Send SRNG setup message to target
498  * @htt_soc:	HTT SOC handle
499  * @mac_id:	MAC Id
500  * @hal_srng:	Opaque HAL SRNG pointer
501  * @hal_ring_type:	SRNG ring type
502  *
503  * Return: 0 on success; error code on failure
504  */
505 int htt_srng_setup(struct htt_soc *soc, int mac_id,
506 		   hal_ring_handle_t hal_ring_hdl,
507 		   int hal_ring_type)
508 {
509 	struct dp_htt_htc_pkt *pkt;
510 	qdf_nbuf_t htt_msg;
511 	uint32_t *msg_word;
512 	struct hal_srng_params srng_params;
513 	qdf_dma_addr_t hp_addr, tp_addr;
514 	uint32_t ring_entry_size =
515 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
516 	int htt_ring_type, htt_ring_id;
517 	uint8_t *htt_logger_bufp;
518 	int target_pdev_id;
519 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
520 	QDF_STATUS status;
521 
522 	/* Sizes should be set in 4-byte words */
523 	ring_entry_size = ring_entry_size >> 2;
524 
525 	htt_msg = qdf_nbuf_alloc(soc->osdev,
526 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
527 		/* reserve room for the HTC header */
528 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
529 	if (!htt_msg) {
530 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
531 		goto fail0;
532 	}
533 
534 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
535 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
536 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
537 
538 	switch (hal_ring_type) {
539 	case RXDMA_BUF:
540 #ifdef QCA_HOST2FW_RXBUF_RING
541 		if (srng_params.ring_id ==
542 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
543 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
544 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
545 			htt_ring_type = HTT_SW_TO_SW_RING;
546 #ifdef IPA_OFFLOAD
547 		} else if (srng_params.ring_id ==
548 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 +
549 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
550 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
551 			htt_ring_type = HTT_SW_TO_SW_RING;
552 #ifdef IPA_WDI3_VLAN_SUPPORT
553 		} else if (srng_params.ring_id ==
554 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF3 +
555 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
556 			htt_ring_id = HTT_HOST3_TO_FW_RXBUF_RING;
557 			htt_ring_type = HTT_SW_TO_SW_RING;
558 #endif
559 #endif
560 #else
561 		if (srng_params.ring_id ==
562 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
563 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
564 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
565 			htt_ring_type = HTT_SW_TO_HW_RING;
566 #endif
567 		} else if (srng_params.ring_id ==
568 #ifdef IPA_OFFLOAD
569 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
570 #else
571 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
572 #endif
573 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
574 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
575 			htt_ring_type = HTT_SW_TO_HW_RING;
576 #ifdef FEATURE_DIRECT_LINK
577 		} else if (srng_params.ring_id ==
578 			   (HAL_SRNG_WMAC1_RX_DIRECT_LINK_SW_REFILL_RING +
579 			    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
580 			htt_ring_id = HTT_LPASS_TO_FW_RXBUF_RING;
581 			htt_ring_type = HTT_SW_TO_SW_RING;
582 #endif
583 		} else {
584 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
585 				   "%s: Ring %d currently not supported",
586 				   __func__, srng_params.ring_id);
587 			goto fail1;
588 		}
589 
590 		break;
591 	case RXDMA_MONITOR_BUF:
592 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
593 							 RXDMA_MONITOR_BUF);
594 		htt_ring_type = HTT_SW_TO_HW_RING;
595 		break;
596 	case RXDMA_MONITOR_STATUS:
597 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
598 		htt_ring_type = HTT_SW_TO_HW_RING;
599 		break;
600 	case RXDMA_MONITOR_DST:
601 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
602 							 RXDMA_MONITOR_DST);
603 		htt_ring_type = HTT_HW_TO_SW_RING;
604 		break;
605 	case RXDMA_MONITOR_DESC:
606 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
607 		htt_ring_type = HTT_SW_TO_HW_RING;
608 		break;
609 	case RXDMA_DST:
610 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
611 		htt_ring_type = HTT_HW_TO_SW_RING;
612 		break;
613 	case TX_MONITOR_BUF:
614 		htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
615 		htt_ring_type = HTT_SW_TO_HW_RING;
616 		break;
617 	case TX_MONITOR_DST:
618 		htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
619 		htt_ring_type = HTT_HW_TO_SW_RING;
620 		break;
621 
622 	default:
623 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
624 			"%s: Ring currently not supported", __func__);
625 			goto fail1;
626 	}
627 
628 	dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
629 		hal_ring_type, srng_params.ring_id, htt_ring_id,
630 		(uint64_t)hp_addr,
631 		(uint64_t)tp_addr);
632 	/*
633 	 * Set the length of the message.
634 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
635 	 * separately during the below call to qdf_nbuf_push_head.
636 	 * The contribution from the HTC header is added separately inside HTC.
637 	 */
638 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
639 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
640 			"%s: Failed to expand head for SRING_SETUP msg",
641 			__func__);
642 		return QDF_STATUS_E_FAILURE;
643 	}
644 
645 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
646 
647 	/* rewind beyond alignment pad to get to the HTC header reserved area */
648 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
649 
650 	/* word 0 */
651 	*msg_word = 0;
652 	htt_logger_bufp = (uint8_t *)msg_word;
653 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
654 	target_pdev_id =
655 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
656 
657 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
658 			(htt_ring_type == HTT_HW_TO_SW_RING))
659 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
660 	else
661 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
662 
663 	dp_info("mac_id %d", mac_id);
664 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
665 	/* TODO: Discuss with FW on changing this to unique ID and using
666 	 * htt_ring_type to send the type of ring
667 	 */
668 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
669 
670 	/* word 1 */
671 	msg_word++;
672 	*msg_word = 0;
673 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
674 		srng_params.ring_base_paddr & 0xffffffff);
675 
676 	/* word 2 */
677 	msg_word++;
678 	*msg_word = 0;
679 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
680 		(uint64_t)srng_params.ring_base_paddr >> 32);
681 
682 	/* word 3 */
683 	msg_word++;
684 	*msg_word = 0;
685 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
686 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
687 		(ring_entry_size * srng_params.num_entries));
688 	dp_info("entry_size %d", ring_entry_size);
689 	dp_info("num_entries %d", srng_params.num_entries);
690 	dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
691 	if (htt_ring_type == HTT_SW_TO_HW_RING)
692 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
693 						*msg_word, 1);
694 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
695 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
696 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
697 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
698 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
699 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
700 
701 	/* word 4 */
702 	msg_word++;
703 	*msg_word = 0;
704 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
705 		hp_addr & 0xffffffff);
706 
707 	/* word 5 */
708 	msg_word++;
709 	*msg_word = 0;
710 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
711 		(uint64_t)hp_addr >> 32);
712 
713 	/* word 6 */
714 	msg_word++;
715 	*msg_word = 0;
716 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
717 		tp_addr & 0xffffffff);
718 
719 	/* word 7 */
720 	msg_word++;
721 	*msg_word = 0;
722 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
723 		(uint64_t)tp_addr >> 32);
724 
725 	/* word 8 */
726 	msg_word++;
727 	*msg_word = 0;
728 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
729 		srng_params.msi_addr & 0xffffffff);
730 
731 	/* word 9 */
732 	msg_word++;
733 	*msg_word = 0;
734 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
735 		(uint64_t)(srng_params.msi_addr) >> 32);
736 
737 	/* word 10 */
738 	msg_word++;
739 	*msg_word = 0;
740 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
741 		qdf_cpu_to_le32(srng_params.msi_data));
742 
743 	/* word 11 */
744 	msg_word++;
745 	*msg_word = 0;
746 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
747 		srng_params.intr_batch_cntr_thres_entries *
748 		ring_entry_size);
749 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
750 		srng_params.intr_timer_thres_us >> 3);
751 
752 	/* word 12 */
753 	msg_word++;
754 	*msg_word = 0;
755 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
756 		/* TODO: Setting low threshold to 1/8th of ring size - see
757 		 * if this needs to be configurable
758 		 */
759 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
760 			srng_params.low_threshold);
761 	}
762 	/* "response_required" field should be set if a HTT response message is
763 	 * required after setting up the ring.
764 	 */
765 	pkt = htt_htc_pkt_alloc(soc);
766 	if (!pkt) {
767 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
768 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
769 		goto fail1;
770 	}
771 
772 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
773 
774 	SET_HTC_PACKET_INFO_TX(
775 		&pkt->htc_pkt,
776 		dp_htt_h2t_send_complete_free_netbuf,
777 		qdf_nbuf_data(htt_msg),
778 		qdf_nbuf_len(htt_msg),
779 		soc->htc_endpoint,
780 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
781 
782 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
783 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
784 				     htt_logger_bufp);
785 
786 	if (status != QDF_STATUS_SUCCESS) {
787 		qdf_nbuf_free(htt_msg);
788 		htt_htc_pkt_free(soc, pkt);
789 	}
790 
791 	return status;
792 
793 fail1:
794 	qdf_nbuf_free(htt_msg);
795 fail0:
796 	return QDF_STATUS_E_FAILURE;
797 }
798 
799 qdf_export_symbol(htt_srng_setup);
800 
801 #ifdef QCA_SUPPORT_FULL_MON
802 /**
803  * htt_h2t_full_mon_cfg() - Send full monitor configuration msg to FW
804  *
805  * @htt_soc: HTT Soc handle
806  * @pdev_id: Radio id
807  * @dp_full_mon_config: enabled/disable configuration
808  *
809  * Return: Success when HTT message is sent, error on failure
810  */
811 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
812 			 uint8_t pdev_id,
813 			 enum dp_full_mon_config config)
814 {
815 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
816 	struct dp_htt_htc_pkt *pkt;
817 	qdf_nbuf_t htt_msg;
818 	uint32_t *msg_word;
819 	uint8_t *htt_logger_bufp;
820 
821 	htt_msg = qdf_nbuf_alloc(soc->osdev,
822 				 HTT_MSG_BUF_SIZE(
823 				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
824 				 /* reserve room for the HTC header */
825 				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
826 				 4,
827 				 TRUE);
828 	if (!htt_msg)
829 		return QDF_STATUS_E_FAILURE;
830 
831 	/*
832 	 * Set the length of the message.
833 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
834 	 * separately during the below call to qdf_nbuf_push_head.
835 	 * The contribution from the HTC header is added separately inside HTC.
836 	 */
837 	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
838 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
839 			  "%s: Failed to expand head for RX Ring Cfg msg",
840 			  __func__);
841 		goto fail1;
842 	}
843 
844 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
845 
846 	/* rewind beyond alignment pad to get to the HTC header reserved area */
847 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
848 
849 	/* word 0 */
850 	*msg_word = 0;
851 	htt_logger_bufp = (uint8_t *)msg_word;
852 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
853 	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
854 			*msg_word, DP_SW2HW_MACID(pdev_id));
855 
856 	msg_word++;
857 	*msg_word = 0;
858 	/* word 1 */
859 	if (config == DP_FULL_MON_ENABLE) {
860 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
861 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
862 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
863 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
864 	} else if (config == DP_FULL_MON_DISABLE) {
865 		/* As per MAC team's suggestion, While disabling full monitor
866 		 * mode, Set 'en' bit to true in full monitor mode register.
867 		 */
868 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
869 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
870 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
871 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
872 	}
873 
874 	pkt = htt_htc_pkt_alloc(soc);
875 	if (!pkt) {
876 		qdf_err("HTC packet allocation failed");
877 		goto fail1;
878 	}
879 
880 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
881 
882 	SET_HTC_PACKET_INFO_TX(
883 		&pkt->htc_pkt,
884 		dp_htt_h2t_send_complete_free_netbuf,
885 		qdf_nbuf_data(htt_msg),
886 		qdf_nbuf_len(htt_msg),
887 		soc->htc_endpoint,
888 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
889 
890 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
891 	qdf_debug("config: %d", config);
892 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
893 			    htt_logger_bufp);
894 	return QDF_STATUS_SUCCESS;
895 fail1:
896 	qdf_nbuf_free(htt_msg);
897 	return QDF_STATUS_E_FAILURE;
898 }
899 
900 qdf_export_symbol(htt_h2t_full_mon_cfg);
901 #else
/* QCA_SUPPORT_FULL_MON disabled: full monitor configuration is a no-op */
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	return 0;
}
908 
909 qdf_export_symbol(htt_h2t_full_mon_cfg);
910 #endif
911 
912 #ifdef QCA_UNDECODED_METADATA_SUPPORT
913 static inline void
914 dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
915 			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
916 {
917 	if (htt_tlv_filter->phy_err_filter_valid) {
918 		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_SET
919 			(*msg_word, htt_tlv_filter->fp_phy_err);
920 		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_SRC_SET
921 			(*msg_word, htt_tlv_filter->fp_phy_err_buf_src);
922 		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_DEST_SET
923 			(*msg_word, htt_tlv_filter->fp_phy_err_buf_dest);
924 
925 		/* word 12*/
926 		msg_word++;
927 		*msg_word = 0;
928 		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_SET
929 			(*msg_word, htt_tlv_filter->phy_err_mask);
930 
931 		/* word 13*/
932 		msg_word++;
933 		*msg_word = 0;
934 		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_CONT_SET
935 			(*msg_word, htt_tlv_filter->phy_err_mask_cont);
936 	}
937 }
938 #else
/* QCA_UNDECODED_METADATA_SUPPORT disabled: no PHY error fields to program */
static inline void
dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
}
944 #endif
945 
946 /*
947  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
948  * config message to target
949  * @htt_soc:	HTT SOC handle
950  * @pdev_id:	WIN- PDEV Id, MCL- mac id
951  * @hal_srng:	Opaque HAL SRNG pointer
952  * @hal_ring_type:	SRNG ring type
953  * @ring_buf_size:	SRNG buffer size
954  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
955  * Return: 0 on success; error code on failure
956  */
957 int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
958 			hal_ring_handle_t hal_ring_hdl,
959 			int hal_ring_type, int ring_buf_size,
960 			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
961 {
962 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
963 	struct dp_htt_htc_pkt *pkt;
964 	qdf_nbuf_t htt_msg;
965 	uint32_t *msg_word;
966 	uint32_t *msg_word_data;
967 	struct hal_srng_params srng_params;
968 	uint32_t htt_ring_type, htt_ring_id;
969 	uint32_t tlv_filter;
970 	uint8_t *htt_logger_bufp;
971 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
972 	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
973 	int target_pdev_id;
974 	QDF_STATUS status;
975 
976 	htt_msg = qdf_nbuf_alloc(soc->osdev,
977 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
978 	/* reserve room for the HTC header */
979 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
980 	if (!htt_msg) {
981 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
982 		goto fail0;
983 	}
984 
985 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
986 
987 	switch (hal_ring_type) {
988 	case RXDMA_BUF:
989 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
990 		htt_ring_type = HTT_SW_TO_HW_RING;
991 		break;
992 	case RXDMA_MONITOR_BUF:
993 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
994 							 RXDMA_MONITOR_BUF);
995 		htt_ring_type = HTT_SW_TO_HW_RING;
996 		break;
997 	case RXDMA_MONITOR_STATUS:
998 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
999 		htt_ring_type = HTT_SW_TO_HW_RING;
1000 		break;
1001 	case RXDMA_MONITOR_DST:
1002 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
1003 							 RXDMA_MONITOR_DST);
1004 		htt_ring_type = HTT_HW_TO_SW_RING;
1005 		break;
1006 	case RXDMA_MONITOR_DESC:
1007 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
1008 		htt_ring_type = HTT_SW_TO_HW_RING;
1009 		break;
1010 	case RXDMA_DST:
1011 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
1012 		htt_ring_type = HTT_HW_TO_SW_RING;
1013 		break;
1014 
1015 	default:
1016 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1017 			"%s: Ring currently not supported", __func__);
1018 		goto fail1;
1019 	}
1020 
1021 	dp_info("ring_type %d ring_id %d htt_ring_id %d",
1022 		hal_ring_type, srng_params.ring_id, htt_ring_id);
1023 
1024 	/*
1025 	 * Set the length of the message.
1026 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1027 	 * separately during the below call to qdf_nbuf_push_head.
1028 	 * The contribution from the HTC header is added separately inside HTC.
1029 	 */
1030 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
1031 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1032 			"%s: Failed to expand head for RX Ring Cfg msg",
1033 			__func__);
1034 		goto fail1; /* failure */
1035 	}
1036 
1037 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1038 
1039 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1040 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1041 
1042 	/* word 0 */
1043 	htt_logger_bufp = (uint8_t *)msg_word;
1044 	*msg_word = 0;
1045 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
1046 
1047 	/* applicable only for post Li */
1048 	dp_rx_mon_enable(soc->dp_soc, msg_word, htt_tlv_filter);
1049 
1050 	/*
1051 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
1052 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
1053 	 */
1054 	target_pdev_id =
1055 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);
1056 
1057 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
1058 			htt_ring_type == HTT_SW_TO_HW_RING ||
1059 			htt_ring_type == HTT_HW_TO_SW_RING)
1060 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
1061 						      target_pdev_id);
1062 
1063 	/* TODO: Discuss with FW on changing this to unique ID and using
1064 	 * htt_ring_type to send the type of ring
1065 	 */
1066 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
1067 
1068 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
1069 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
1070 
1071 	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
1072 						htt_tlv_filter->offset_valid);
1073 
1074 	if (mon_drop_th > 0)
1075 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1076 								   1);
1077 	else
1078 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1079 								   0);
1080 
1081 	/* word 1 */
1082 	msg_word++;
1083 	*msg_word = 0;
1084 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
1085 		ring_buf_size);
1086 
1087 	dp_mon_rx_packet_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
1088 	dp_mon_rx_hdr_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
1089 	dp_mon_rx_mac_filter_set(soc->dp_soc, msg_word, htt_tlv_filter);
1090 
1091 	/* word 2 */
1092 	msg_word++;
1093 	*msg_word = 0;
1094 
1095 	if (htt_tlv_filter->enable_fp) {
1096 		/* TYPE: MGMT */
1097 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1098 			FP, MGMT, 0000,
1099 			(htt_tlv_filter->fp_mgmt_filter &
1100 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1101 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1102 			FP, MGMT, 0001,
1103 			(htt_tlv_filter->fp_mgmt_filter &
1104 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1105 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1106 			FP, MGMT, 0010,
1107 			(htt_tlv_filter->fp_mgmt_filter &
1108 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1109 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1110 			FP, MGMT, 0011,
1111 			(htt_tlv_filter->fp_mgmt_filter &
1112 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1113 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1114 			FP, MGMT, 0100,
1115 			(htt_tlv_filter->fp_mgmt_filter &
1116 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1117 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1118 			FP, MGMT, 0101,
1119 			(htt_tlv_filter->fp_mgmt_filter &
1120 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1121 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1122 			FP, MGMT, 0110,
1123 			(htt_tlv_filter->fp_mgmt_filter &
1124 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1125 		/* reserved */
1126 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
1127 			MGMT, 0111,
1128 			(htt_tlv_filter->fp_mgmt_filter &
1129 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1130 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1131 			FP, MGMT, 1000,
1132 			(htt_tlv_filter->fp_mgmt_filter &
1133 			FILTER_MGMT_BEACON) ? 1 : 0);
1134 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1135 			FP, MGMT, 1001,
1136 			(htt_tlv_filter->fp_mgmt_filter &
1137 			FILTER_MGMT_ATIM) ? 1 : 0);
1138 	}
1139 
1140 	if (htt_tlv_filter->enable_md) {
1141 			/* TYPE: MGMT */
1142 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1143 			MD, MGMT, 0000,
1144 			(htt_tlv_filter->md_mgmt_filter &
1145 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1146 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1147 			MD, MGMT, 0001,
1148 			(htt_tlv_filter->md_mgmt_filter &
1149 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1150 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1151 			MD, MGMT, 0010,
1152 			(htt_tlv_filter->md_mgmt_filter &
1153 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1154 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1155 			MD, MGMT, 0011,
1156 			(htt_tlv_filter->md_mgmt_filter &
1157 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1158 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1159 			MD, MGMT, 0100,
1160 			(htt_tlv_filter->md_mgmt_filter &
1161 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1162 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1163 			MD, MGMT, 0101,
1164 			(htt_tlv_filter->md_mgmt_filter &
1165 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1166 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1167 			MD, MGMT, 0110,
1168 			(htt_tlv_filter->md_mgmt_filter &
1169 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1170 		/* reserved */
1171 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1172 			MGMT, 0111,
1173 			(htt_tlv_filter->md_mgmt_filter &
1174 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1175 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1176 			MD, MGMT, 1000,
1177 			(htt_tlv_filter->md_mgmt_filter &
1178 			FILTER_MGMT_BEACON) ? 1 : 0);
1179 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1180 			MD, MGMT, 1001,
1181 			(htt_tlv_filter->md_mgmt_filter &
1182 			FILTER_MGMT_ATIM) ? 1 : 0);
1183 	}
1184 
1185 	if (htt_tlv_filter->enable_mo) {
1186 		/* TYPE: MGMT */
1187 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1188 			MO, MGMT, 0000,
1189 			(htt_tlv_filter->mo_mgmt_filter &
1190 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1191 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1192 			MO, MGMT, 0001,
1193 			(htt_tlv_filter->mo_mgmt_filter &
1194 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1195 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1196 			MO, MGMT, 0010,
1197 			(htt_tlv_filter->mo_mgmt_filter &
1198 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1199 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1200 			MO, MGMT, 0011,
1201 			(htt_tlv_filter->mo_mgmt_filter &
1202 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1203 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1204 			MO, MGMT, 0100,
1205 			(htt_tlv_filter->mo_mgmt_filter &
1206 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1207 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1208 			MO, MGMT, 0101,
1209 			(htt_tlv_filter->mo_mgmt_filter &
1210 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1211 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1212 			MO, MGMT, 0110,
1213 			(htt_tlv_filter->mo_mgmt_filter &
1214 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1215 		/* reserved */
1216 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1217 			MGMT, 0111,
1218 			(htt_tlv_filter->mo_mgmt_filter &
1219 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1220 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1221 			MO, MGMT, 1000,
1222 			(htt_tlv_filter->mo_mgmt_filter &
1223 			FILTER_MGMT_BEACON) ? 1 : 0);
1224 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1225 			MO, MGMT, 1001,
1226 			(htt_tlv_filter->mo_mgmt_filter &
1227 			FILTER_MGMT_ATIM) ? 1 : 0);
1228 	}
1229 
1230 	/* word 3 */
1231 	msg_word++;
1232 	*msg_word = 0;
1233 
1234 	if (htt_tlv_filter->enable_fp) {
1235 		/* TYPE: MGMT */
1236 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1237 			FP, MGMT, 1010,
1238 			(htt_tlv_filter->fp_mgmt_filter &
1239 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1240 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1241 			FP, MGMT, 1011,
1242 			(htt_tlv_filter->fp_mgmt_filter &
1243 			FILTER_MGMT_AUTH) ? 1 : 0);
1244 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1245 			FP, MGMT, 1100,
1246 			(htt_tlv_filter->fp_mgmt_filter &
1247 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1248 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1249 			FP, MGMT, 1101,
1250 			(htt_tlv_filter->fp_mgmt_filter &
1251 			FILTER_MGMT_ACTION) ? 1 : 0);
1252 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1253 			FP, MGMT, 1110,
1254 			(htt_tlv_filter->fp_mgmt_filter &
1255 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1256 		/* reserved*/
1257 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1258 			MGMT, 1111,
1259 			(htt_tlv_filter->fp_mgmt_filter &
1260 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1261 	}
1262 
1263 	if (htt_tlv_filter->enable_md) {
1264 			/* TYPE: MGMT */
1265 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1266 			MD, MGMT, 1010,
1267 			(htt_tlv_filter->md_mgmt_filter &
1268 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1269 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1270 			MD, MGMT, 1011,
1271 			(htt_tlv_filter->md_mgmt_filter &
1272 			FILTER_MGMT_AUTH) ? 1 : 0);
1273 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1274 			MD, MGMT, 1100,
1275 			(htt_tlv_filter->md_mgmt_filter &
1276 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1277 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1278 			MD, MGMT, 1101,
1279 			(htt_tlv_filter->md_mgmt_filter &
1280 			FILTER_MGMT_ACTION) ? 1 : 0);
1281 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1282 			MD, MGMT, 1110,
1283 			(htt_tlv_filter->md_mgmt_filter &
1284 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1285 	}
1286 
1287 	if (htt_tlv_filter->enable_mo) {
1288 		/* TYPE: MGMT */
1289 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1290 			MO, MGMT, 1010,
1291 			(htt_tlv_filter->mo_mgmt_filter &
1292 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1293 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1294 			MO, MGMT, 1011,
1295 			(htt_tlv_filter->mo_mgmt_filter &
1296 			FILTER_MGMT_AUTH) ? 1 : 0);
1297 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1298 			MO, MGMT, 1100,
1299 			(htt_tlv_filter->mo_mgmt_filter &
1300 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1301 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1302 			MO, MGMT, 1101,
1303 			(htt_tlv_filter->mo_mgmt_filter &
1304 			FILTER_MGMT_ACTION) ? 1 : 0);
1305 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1306 			MO, MGMT, 1110,
1307 			(htt_tlv_filter->mo_mgmt_filter &
1308 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1309 		/* reserved*/
1310 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1311 			MGMT, 1111,
1312 			(htt_tlv_filter->mo_mgmt_filter &
1313 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1314 	}
1315 
1316 	/* word 4 */
1317 	msg_word++;
1318 	*msg_word = 0;
1319 
1320 	if (htt_tlv_filter->enable_fp) {
1321 		/* TYPE: CTRL */
1322 		/* reserved */
1323 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1324 			CTRL, 0000,
1325 			(htt_tlv_filter->fp_ctrl_filter &
1326 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1327 		/* reserved */
1328 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1329 			CTRL, 0001,
1330 			(htt_tlv_filter->fp_ctrl_filter &
1331 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1332 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1333 			CTRL, 0010,
1334 			(htt_tlv_filter->fp_ctrl_filter &
1335 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1336 		/* reserved */
1337 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1338 			CTRL, 0011,
1339 			(htt_tlv_filter->fp_ctrl_filter &
1340 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1341 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1342 			CTRL, 0100,
1343 			(htt_tlv_filter->fp_ctrl_filter &
1344 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1345 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1346 			CTRL, 0101,
1347 			(htt_tlv_filter->fp_ctrl_filter &
1348 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1349 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1350 			CTRL, 0110,
1351 			(htt_tlv_filter->fp_ctrl_filter &
1352 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1353 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1354 			CTRL, 0111,
1355 			(htt_tlv_filter->fp_ctrl_filter &
1356 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1357 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1358 			CTRL, 1000,
1359 			(htt_tlv_filter->fp_ctrl_filter &
1360 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1361 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1362 			CTRL, 1001,
1363 			(htt_tlv_filter->fp_ctrl_filter &
1364 			FILTER_CTRL_BA) ? 1 : 0);
1365 	}
1366 
1367 	if (htt_tlv_filter->enable_md) {
1368 		/* TYPE: CTRL */
1369 		/* reserved */
1370 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1371 			CTRL, 0000,
1372 			(htt_tlv_filter->md_ctrl_filter &
1373 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1374 		/* reserved */
1375 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1376 			CTRL, 0001,
1377 			(htt_tlv_filter->md_ctrl_filter &
1378 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1379 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1380 			CTRL, 0010,
1381 			(htt_tlv_filter->md_ctrl_filter &
1382 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1383 		/* reserved */
1384 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1385 			CTRL, 0011,
1386 			(htt_tlv_filter->md_ctrl_filter &
1387 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1388 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1389 			CTRL, 0100,
1390 			(htt_tlv_filter->md_ctrl_filter &
1391 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1392 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1393 			CTRL, 0101,
1394 			(htt_tlv_filter->md_ctrl_filter &
1395 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1396 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1397 			CTRL, 0110,
1398 			(htt_tlv_filter->md_ctrl_filter &
1399 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1400 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1401 			CTRL, 0111,
1402 			(htt_tlv_filter->md_ctrl_filter &
1403 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1404 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1405 			CTRL, 1000,
1406 			(htt_tlv_filter->md_ctrl_filter &
1407 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1408 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1409 			CTRL, 1001,
1410 			(htt_tlv_filter->md_ctrl_filter &
1411 			FILTER_CTRL_BA) ? 1 : 0);
1412 	}
1413 
1414 	if (htt_tlv_filter->enable_mo) {
1415 		/* TYPE: CTRL */
1416 		/* reserved */
1417 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1418 			CTRL, 0000,
1419 			(htt_tlv_filter->mo_ctrl_filter &
1420 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1421 		/* reserved */
1422 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1423 			CTRL, 0001,
1424 			(htt_tlv_filter->mo_ctrl_filter &
1425 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1426 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1427 			CTRL, 0010,
1428 			(htt_tlv_filter->mo_ctrl_filter &
1429 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1430 		/* reserved */
1431 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1432 			CTRL, 0011,
1433 			(htt_tlv_filter->mo_ctrl_filter &
1434 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1435 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1436 			CTRL, 0100,
1437 			(htt_tlv_filter->mo_ctrl_filter &
1438 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1439 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1440 			CTRL, 0101,
1441 			(htt_tlv_filter->mo_ctrl_filter &
1442 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1443 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1444 			CTRL, 0110,
1445 			(htt_tlv_filter->mo_ctrl_filter &
1446 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1447 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1448 			CTRL, 0111,
1449 			(htt_tlv_filter->mo_ctrl_filter &
1450 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1451 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1452 			CTRL, 1000,
1453 			(htt_tlv_filter->mo_ctrl_filter &
1454 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1455 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1456 			CTRL, 1001,
1457 			(htt_tlv_filter->mo_ctrl_filter &
1458 			FILTER_CTRL_BA) ? 1 : 0);
1459 	}
1460 
1461 	/* word 5 */
1462 	msg_word++;
1463 	*msg_word = 0;
1464 	if (htt_tlv_filter->enable_fp) {
1465 		/* TYPE: CTRL */
1466 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1467 			CTRL, 1010,
1468 			(htt_tlv_filter->fp_ctrl_filter &
1469 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1470 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1471 			CTRL, 1011,
1472 			(htt_tlv_filter->fp_ctrl_filter &
1473 			FILTER_CTRL_RTS) ? 1 : 0);
1474 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1475 			CTRL, 1100,
1476 			(htt_tlv_filter->fp_ctrl_filter &
1477 			FILTER_CTRL_CTS) ? 1 : 0);
1478 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1479 			CTRL, 1101,
1480 			(htt_tlv_filter->fp_ctrl_filter &
1481 			FILTER_CTRL_ACK) ? 1 : 0);
1482 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1483 			CTRL, 1110,
1484 			(htt_tlv_filter->fp_ctrl_filter &
1485 			FILTER_CTRL_CFEND) ? 1 : 0);
1486 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1487 			CTRL, 1111,
1488 			(htt_tlv_filter->fp_ctrl_filter &
1489 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1490 		/* TYPE: DATA */
1491 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1492 			DATA, MCAST,
1493 			(htt_tlv_filter->fp_data_filter &
1494 			FILTER_DATA_MCAST) ? 1 : 0);
1495 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1496 			DATA, UCAST,
1497 			(htt_tlv_filter->fp_data_filter &
1498 			FILTER_DATA_UCAST) ? 1 : 0);
1499 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1500 			DATA, NULL,
1501 			(htt_tlv_filter->fp_data_filter &
1502 			FILTER_DATA_NULL) ? 1 : 0);
1503 	}
1504 
1505 	if (htt_tlv_filter->enable_md) {
1506 		/* TYPE: CTRL */
1507 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1508 			CTRL, 1010,
1509 			(htt_tlv_filter->md_ctrl_filter &
1510 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1511 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1512 			CTRL, 1011,
1513 			(htt_tlv_filter->md_ctrl_filter &
1514 			FILTER_CTRL_RTS) ? 1 : 0);
1515 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1516 			CTRL, 1100,
1517 			(htt_tlv_filter->md_ctrl_filter &
1518 			FILTER_CTRL_CTS) ? 1 : 0);
1519 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1520 			CTRL, 1101,
1521 			(htt_tlv_filter->md_ctrl_filter &
1522 			FILTER_CTRL_ACK) ? 1 : 0);
1523 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1524 			CTRL, 1110,
1525 			(htt_tlv_filter->md_ctrl_filter &
1526 			FILTER_CTRL_CFEND) ? 1 : 0);
1527 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1528 			CTRL, 1111,
1529 			(htt_tlv_filter->md_ctrl_filter &
1530 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1531 		/* TYPE: DATA */
1532 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1533 			DATA, MCAST,
1534 			(htt_tlv_filter->md_data_filter &
1535 			FILTER_DATA_MCAST) ? 1 : 0);
1536 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1537 			DATA, UCAST,
1538 			(htt_tlv_filter->md_data_filter &
1539 			FILTER_DATA_UCAST) ? 1 : 0);
1540 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1541 			DATA, NULL,
1542 			(htt_tlv_filter->md_data_filter &
1543 			FILTER_DATA_NULL) ? 1 : 0);
1544 	}
1545 
1546 	if (htt_tlv_filter->enable_mo) {
1547 		/* TYPE: CTRL */
1548 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1549 			CTRL, 1010,
1550 			(htt_tlv_filter->mo_ctrl_filter &
1551 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1552 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1553 			CTRL, 1011,
1554 			(htt_tlv_filter->mo_ctrl_filter &
1555 			FILTER_CTRL_RTS) ? 1 : 0);
1556 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1557 			CTRL, 1100,
1558 			(htt_tlv_filter->mo_ctrl_filter &
1559 			FILTER_CTRL_CTS) ? 1 : 0);
1560 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1561 			CTRL, 1101,
1562 			(htt_tlv_filter->mo_ctrl_filter &
1563 			FILTER_CTRL_ACK) ? 1 : 0);
1564 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1565 			CTRL, 1110,
1566 			(htt_tlv_filter->mo_ctrl_filter &
1567 			FILTER_CTRL_CFEND) ? 1 : 0);
1568 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1569 			CTRL, 1111,
1570 			(htt_tlv_filter->mo_ctrl_filter &
1571 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1572 		/* TYPE: DATA */
1573 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1574 			DATA, MCAST,
1575 			(htt_tlv_filter->mo_data_filter &
1576 			FILTER_DATA_MCAST) ? 1 : 0);
1577 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1578 			DATA, UCAST,
1579 			(htt_tlv_filter->mo_data_filter &
1580 			FILTER_DATA_UCAST) ? 1 : 0);
1581 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1582 			DATA, NULL,
1583 			(htt_tlv_filter->mo_data_filter &
1584 			FILTER_DATA_NULL) ? 1 : 0);
1585 	}
1586 
1587 	/* word 6 */
1588 	msg_word++;
1589 	*msg_word = 0;
1590 	tlv_filter = 0;
1591 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1592 		htt_tlv_filter->mpdu_start);
1593 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1594 		htt_tlv_filter->msdu_start);
1595 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1596 		htt_tlv_filter->packet);
1597 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1598 		htt_tlv_filter->msdu_end);
1599 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1600 		htt_tlv_filter->mpdu_end);
1601 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1602 		htt_tlv_filter->packet_header);
1603 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1604 		htt_tlv_filter->attention);
1605 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1606 		htt_tlv_filter->ppdu_start);
1607 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1608 		htt_tlv_filter->ppdu_end);
1609 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1610 		htt_tlv_filter->ppdu_end_user_stats);
1611 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1612 		PPDU_END_USER_STATS_EXT,
1613 		htt_tlv_filter->ppdu_end_user_stats_ext);
1614 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1615 		htt_tlv_filter->ppdu_end_status_done);
1616 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START_USER_INFO,
1617 		htt_tlv_filter->ppdu_start_user_info);
1618 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1619 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1620 		 htt_tlv_filter->header_per_msdu);
1621 
1622 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1623 
1624 	msg_word_data = (uint32_t *)qdf_nbuf_data(htt_msg);
1625 	dp_info("config_data: [0x%x][0x%x][0x%x][0x%x][0x%x][0x%x][0x%x]",
1626 		msg_word_data[0], msg_word_data[1], msg_word_data[2],
1627 		msg_word_data[3], msg_word_data[4], msg_word_data[5],
1628 		msg_word_data[6]);
1629 
1630 	/* word 7 */
1631 	msg_word++;
1632 	*msg_word = 0;
1633 	if (htt_tlv_filter->offset_valid) {
1634 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1635 					htt_tlv_filter->rx_packet_offset);
1636 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1637 					htt_tlv_filter->rx_header_offset);
1638 
1639 		/* word 8 */
1640 		msg_word++;
1641 		*msg_word = 0;
1642 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1643 					htt_tlv_filter->rx_mpdu_end_offset);
1644 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1645 					htt_tlv_filter->rx_mpdu_start_offset);
1646 
1647 		/* word 9 */
1648 		msg_word++;
1649 		*msg_word = 0;
1650 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1651 					htt_tlv_filter->rx_msdu_end_offset);
1652 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1653 					htt_tlv_filter->rx_msdu_start_offset);
1654 
1655 		/* word 10 */
1656 		msg_word++;
1657 		*msg_word = 0;
1658 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1659 					htt_tlv_filter->rx_attn_offset);
1660 
1661 		/* word 11 */
1662 		msg_word++;
1663 		*msg_word = 0;
1664 	} else {
1665 		/* word 11 */
1666 		msg_word += 4;
1667 		*msg_word = 0;
1668 	}
1669 
1670 	soc->dp_soc->arch_ops.dp_rx_word_mask_subscribe(
1671 						soc->dp_soc,
1672 						msg_word,
1673 						(void *)htt_tlv_filter);
1674 
1675 	if (mon_drop_th > 0)
1676 		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1677 								mon_drop_th);
1678 	dp_mon_rx_enable_mpdu_logging(soc->dp_soc, msg_word, htt_tlv_filter);
1679 
1680 	dp_mon_rx_enable_phy_errors(msg_word, htt_tlv_filter);
1681 
1682 	/* word 14*/
1683 	msg_word += 3;
1684 	/* word 15*/
1685 	msg_word++;
1686 
1687 #ifdef FW_SUPPORT_NOT_YET
1688 	/* word 17*/
1689 	msg_word += 3;
1690 	*msg_word = 0;
1691 
1692 	dp_mon_rx_enable_fpmo(soc->dp_soc, msg_word, htt_tlv_filter);
1693 #endif/* FW_SUPPORT_NOT_YET */
1694 
1695 	/* "response_required" field should be set if a HTT response message is
1696 	 * required after setting up the ring.
1697 	 */
1698 	pkt = htt_htc_pkt_alloc(soc);
1699 	if (!pkt) {
1700 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
1701 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
1702 		goto fail1;
1703 	}
1704 
1705 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1706 
1707 	SET_HTC_PACKET_INFO_TX(
1708 		&pkt->htc_pkt,
1709 		dp_htt_h2t_send_complete_free_netbuf,
1710 		qdf_nbuf_data(htt_msg),
1711 		qdf_nbuf_len(htt_msg),
1712 		soc->htc_endpoint,
1713 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1714 
1715 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1716 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
1717 				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1718 				     htt_logger_bufp);
1719 
1720 	if (status != QDF_STATUS_SUCCESS) {
1721 		qdf_nbuf_free(htt_msg);
1722 		htt_htc_pkt_free(soc, pkt);
1723 	}
1724 
1725 	return status;
1726 
1727 fail1:
1728 	qdf_nbuf_free(htt_msg);
1729 fail0:
1730 	return QDF_STATUS_E_FAILURE;
1731 }
1732 
1733 qdf_export_symbol(htt_h2t_rx_ring_cfg);
1734 
1735 #if defined(HTT_STATS_ENABLE)
1736 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1737 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1738 
1739 {
1740 	uint32_t pdev_id;
1741 	uint32_t *msg_word = NULL;
1742 	uint32_t msg_remain_len = 0;
1743 
1744 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1745 
1746 	/*COOKIE MSB*/
1747 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1748 
1749 	/* stats message length + 16 size of HTT header*/
1750 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1751 				(uint32_t)DP_EXT_MSG_LENGTH);
1752 
1753 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1754 			msg_word,  msg_remain_len,
1755 			WDI_NO_VAL, pdev_id);
1756 
1757 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1758 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1759 	}
1760 	/* Need to be freed here as WDI handler will
1761 	 * make a copy of pkt to send data to application
1762 	 */
1763 	qdf_nbuf_free(htt_msg);
1764 	return QDF_STATUS_SUCCESS;
1765 }
1766 #else
/* Stub when HTT_STATS_ENABLE is not compiled in: report unsupported
 * without touching the nbuf (caller retains ownership on this path)
 */
static inline QDF_STATUS
dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	return QDF_STATUS_E_NOSUPPORT;
}
1773 #endif
1774 
1775 #ifdef HTT_STATS_DEBUGFS_SUPPORT
/* dp_htt_stats_dbgfs_send_msg() - Function to send htt data to upper layer.
 * @pdev: dp pdev handle
 * @msg_word: HTT msg
 * @msg_len: Length of HTT msg sent
 *
 * Return: none
 */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
	struct htt_dbgfs_cfg dbgfs_cfg;
	int done = 0;

	/* send 5th word of HTT msg to upper layer */
	dbgfs_cfg.msg_word = (msg_word + 4);
	dbgfs_cfg.m = pdev->dbgfs_cfg->m;

	/* stats message length + HTT header, capped to one ext message */
	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);

	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
							     (msg_len - HTT_HEADER_LEN));

	/* Get TLV Done bit from 4th msg word */
	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
	if (done) {
		/* last TLV received: wake the waiter blocked on debugfs read */
		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
			dp_htt_err("%pK: Failed to set event for debugfs htt stats"
				   , pdev->soc);
	}
}
1809 #else
/* Stub when HTT_STATS_DEBUGFS_SUPPORT is not compiled in */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
}
1815 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
1816 
1817 #ifdef WLAN_SYSFS_DP_STATS
1818 /* dp_htt_stats_sysfs_update_config() - Function to send htt data to upper layer.
1819  * @pdev: dp pdev handle
1820  *
1821  * This function sets the process id and printing mode within the sysfs config
1822  * struct. which enables DP_PRINT statements within this process to write to the
1823  * console buffer provided by the user space.
1824  *
1825  * Return: None
1826  */
1827 static inline void
1828 dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
1829 {
1830 	struct dp_soc *soc = pdev->soc;
1831 
1832 	if (!soc) {
1833 		dp_htt_err("soc is null");
1834 		return;
1835 	}
1836 
1837 	if (!soc->sysfs_config) {
1838 		dp_htt_err("soc->sysfs_config is NULL");
1839 		return;
1840 	}
1841 
1842 	/* set sysfs config parameters */
1843 	soc->sysfs_config->process_id = qdf_get_current_pid();
1844 	soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
1845 }
1846 
1847 /*
1848  * dp_htt_stats_sysfs_set_event() - Set sysfs stats event.
1849  * @soc: soc handle.
1850  * @msg_word: Pointer to htt msg word.
1851  *
1852  * @return: void
1853  */
1854 static inline void
1855 dp_htt_stats_sysfs_set_event(struct dp_soc *soc, uint32_t *msg_word)
1856 {
1857 	int done = 0;
1858 
1859 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1860 	if (done) {
1861 		if (qdf_event_set(&soc->sysfs_config->sysfs_txrx_fw_request_done))
1862 			dp_htt_err("%pK:event compl Fail to set event ",
1863 				   soc);
1864 	}
1865 }
1866 #else /* WLAN_SYSFS_DP_STATS */
/* Stub when WLAN_SYSFS_DP_STATS is not compiled in */
static inline void
dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
{
}
1871 
1872 static inline void
1873 dp_htt_stats_sysfs_set_event(struct dp_soc *dp_soc, uint32_t *msg_word)
1874 {
1875 }
1876 #endif /* WLAN_SYSFS_DP_STATS */
1877 
1878 /* dp_htt_set_pdev_obss_stats() - Function to set pdev obss stats.
1879  * @pdev: dp pdev handle
1880  * @tag_type: HTT TLV tag type
1881  * @tag_buf: TLV buffer pointer
1882  *
1883  * Return: None
1884  */
1885 static inline void
1886 dp_htt_set_pdev_obss_stats(struct dp_pdev *pdev, uint32_t tag_type,
1887 			   uint32_t *tag_buf)
1888 {
1889 	if (tag_type != HTT_STATS_PDEV_OBSS_PD_TAG) {
1890 		dp_err("Tag mismatch");
1891 		return;
1892 	}
1893 	qdf_mem_copy(&pdev->stats.htt_tx_pdev_stats.obss_pd_stats_tlv,
1894 		     tag_buf, sizeof(struct cdp_pdev_obss_pd_stats_tlv));
1895 	qdf_event_set(&pdev->fw_obss_stats_event);
1896 }
1897 
1898 /**
1899  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1900  * @htt_stats: htt stats info
1901  *
1902  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1903  * contains sub messages which are identified by a TLV header.
1904  * In this function we will process the stream of T2H messages and read all the
1905  * TLV contained in the message.
1906  *
1907  * THe following cases have been taken care of
1908  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1909  *		In this case the buffer will contain multiple tlvs.
1910  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1911  *		Only one tlv will be contained in the HTT message and this tag
1912  *		will extend onto the next buffer.
1913  * Case 3: When the buffer is the continuation of the previous message
1914  * Case 4: tlv length is 0. which will indicate the end of message
1915  *
1916  * return: void
1917  */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	/* Reassembly buffer for a TLV that spans multiple T2H buffers
	 * (Case 2/3); NULL while no partial TLV is pending.
	 */
	uint8_t *tlv_buf_head = NULL;
	uint8_t *tlv_buf_tail = NULL;
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val = 0;
	int cookie_msb = 0;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		/* Word 1 carries the cookie LSB; non-zero means the request
		 * originated from a host requester that gets the raw resp.
		 */
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		/* Word 2: cookie MSB holds routing flags, low bits the pdev */
		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
						    htt_stats->msg_len);
			qdf_nbuf_free(htt_msg);
			continue;
		}

		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
			dp_htt_stats_sysfs_update_config(pdev);

		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
			copy_stats = true;

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero-length TLV terminates the stream;
			 * drop any partial reassembly buffer and bail out.
			 */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* TLV length field excludes its own header */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3 */
				if (tlv_buf_head) {
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev,
							      tlv_type,
							      tlv_start);
				else
					dp_htt_stats_print_tag(pdev,
							       tlv_type,
							       tlv_start);

				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
					dp_peer_update_inactive_time(pdev,
								     tlv_type,
								     tlv_start);

				if (cookie_msb & DBG_STATS_COOKIE_HTT_OBSS)
					dp_htt_set_pdev_obss_stats(pdev,
								   tlv_type,
								   tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				/* TLV fully consumed: release reassembly buf */
				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				/* Stash this fragment; the remainder arrives
				 * in the next buffer of the stream.
				 */
				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		/* indicate event completion in case the event is done */
		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
			dp_htt_stats_sysfs_set_event(soc, msg_word);

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* Abort: free the current buffer and drain the remaining queue */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
2087 
/* htt_t2h_stats_handler() - Work handler: pull one completed HTT EXT stats
 * stream from soc->htt_stats.msg and process its TLVs.
 * @context: opaque dp_soc pointer supplied at work-queue creation
 */
void htt_t2h_stats_handler(void *context)
{
	struct dp_soc *soc = (struct dp_soc *)context;
	struct htt_stats_context htt_stats;
	uint32_t *msg_word;
	qdf_nbuf_t htt_msg = NULL;
	uint8_t done;
	uint32_t rem_stats;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc is NULL");
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc: 0x%pK, init_done: %d", soc,
			  qdf_atomic_read(&soc->cmn_init_done));
		return;
	}

	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
	qdf_nbuf_queue_init(&htt_stats.msg);

	/* pull one completed stats from soc->htt_stats_msg and process */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	if (!soc->htt_stats.num_stats) {
		qdf_spin_unlock_bh(&soc->htt_stats.lock);
		return;
	}
	/* Move buffers to the local queue up to (and including) the buffer
	 * whose DONE bit is set, i.e. one complete stats stream.
	 */
	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
		/*
		 * Done bit signifies that this is the last T2H buffer in the
		 * stream of HTT EXT STATS message
		 */
		if (done)
			break;
	}
	/* One stream consumed; remember how many remain, under the lock */
	rem_stats = --soc->htt_stats.num_stats;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	/* If there are more stats to process, schedule stats work again.
	 * Scheduling prior to processing ht_stats to queue with early
	 * index
	 */
	if (rem_stats)
		qdf_sched_work(0, &soc->htt_stats.work);

	dp_process_htt_stat_msg(&htt_stats, soc);
}
2143 
2144 /**
2145  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2146  * @soc: DP SOC handle
2147  * @htt_t2h_msg: HTT message nbuf
2148  *
2149  * return:void
2150  */
static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
					    qdf_nbuf_t htt_t2h_msg)
{
	uint8_t done;
	qdf_nbuf_t msg_copy;
	uint32_t *msg_word;

	/* DONE bit lives in word 3 of the T2H header */
	msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
	msg_word = msg_word + 3;
	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);

	/*
	 * HTT EXT stats response comes as stream of TLVs which span over
	 * multiple T2H messages.
	 * The first message will carry length of the response.
	 * For rest of the messages length will be zero.
	 *
	 * Clone the T2H message buffer and store it in a list to process
	 * it later.
	 *
	 * The original T2H message buffers gets freed in the T2H HTT event
	 * handler
	 */
	msg_copy = qdf_nbuf_clone(htt_t2h_msg);

	if (!msg_copy) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "T2H message clone failed for HTT EXT STATS");
		goto error;
	}

	qdf_spin_lock_bh(&soc->htt_stats.lock);
	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
	/*
	 * Done bit signifies that this is the last T2H buffer in the stream of
	 * HTT EXT STATS message
	 */
	if (done) {
		/* A complete stream is queued: count it and kick the worker */
		soc->htt_stats.num_stats++;
		qdf_sched_work(0, &soc->htt_stats.work);
	}
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	return;

error:
	/* Clone failed mid-stream: the queued partial stream is unusable,
	 * so drop everything accumulated so far.
	 */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
			!= NULL) {
		qdf_nbuf_free(msg_copy);
	}
	soc->htt_stats.num_stats = 0;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);
	return;
}
2206 
2207 /*
2208  * htt_soc_attach_target() - SOC level HTT setup
2209  * @htt_soc:	HTT SOC handle
2210  *
2211  * Return: 0 on success; error code on failure
2212  */
int htt_soc_attach_target(struct htt_soc *htt_soc)
{
	/* Kick off the HTT version request/response handshake with the FW */
	return htt_h2t_ver_req_msg(htt_soc);
}
2219 
/* htt_set_htc_handle() - attach the HTC handle used for HTT message TX */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}

/* htt_get_htc_handle() - return the HTC handle stored on the HTT soc */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
2229 
2230 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
2231 {
2232 	int i;
2233 	int j;
2234 	int umac_alloc_size = HTT_SW_UMAC_RING_IDX_MAX *
2235 			      sizeof(struct bp_handler);
2236 	int lmac_alloc_size = HTT_SW_LMAC_RING_IDX_MAX *
2237 			      sizeof(struct bp_handler);
2238 	struct htt_soc *htt_soc = NULL;
2239 
2240 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
2241 	if (!htt_soc) {
2242 		dp_err("HTT attach failed");
2243 		return NULL;
2244 	}
2245 
2246 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2247 		htt_soc->pdevid_tt[i].umac_path =
2248 			qdf_mem_malloc(umac_alloc_size);
2249 		if (!htt_soc->pdevid_tt[i].umac_path)
2250 			break;
2251 		for (j = 0; j < HTT_SW_UMAC_RING_IDX_MAX; j++)
2252 			htt_soc->pdevid_tt[i].umac_path[j].bp_start_tt = -1;
2253 		htt_soc->pdevid_tt[i].lmac_path =
2254 			qdf_mem_malloc(lmac_alloc_size);
2255 		if (!htt_soc->pdevid_tt[i].lmac_path) {
2256 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_path);
2257 			break;
2258 		}
2259 		for (j = 0; j < HTT_SW_LMAC_RING_IDX_MAX ; j++)
2260 			htt_soc->pdevid_tt[i].lmac_path[j].bp_start_tt = -1;
2261 	}
2262 
2263 	if (i != MAX_PDEV_CNT) {
2264 		for (j = 0; j < i; j++) {
2265 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_path);
2266 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_path);
2267 		}
2268 		qdf_mem_free(htt_soc);
2269 		return NULL;
2270 	}
2271 
2272 	htt_soc->dp_soc = soc;
2273 	htt_soc->htc_soc = htc_handle;
2274 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
2275 
2276 	return htt_soc;
2277 }
2278 
2279 #if defined(WDI_EVENT_ENABLE) && \
2280 	!defined(REMOVE_PKT_LOG)
2281 /*
2282  * dp_pktlog_msg_handler() - Pktlog msg handler
2283  * @htt_soc:	 HTT SOC handle
2284  * @msg_word:    Pointer to payload
2285  *
2286  * Return: None
2287  */
2288 static void
2289 dp_pktlog_msg_handler(struct htt_soc *soc,
2290 		      uint32_t *msg_word)
2291 {
2292 	uint8_t pdev_id;
2293 	uint8_t target_pdev_id;
2294 	uint32_t *pl_hdr;
2295 
2296 	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2297 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2298 							 target_pdev_id);
2299 	pl_hdr = (msg_word + 1);
2300 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2301 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2302 		pdev_id);
2303 }
2304 #else
/* Stub when pktlog is compiled out (no WDI events or REMOVE_PKT_LOG set) */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
2310 #endif
2311 
2312 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
2313 /*
2314  * dp_vdev_txrx_hw_stats_handler - Handle vdev stats received from FW
2315  * @soc - htt soc handle
 * @msg_word - buffer containing stats
2317  *
2318  * Return: void
2319  */
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{
	struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
	uint8_t pdev_id;
	uint8_t vdev_id;
	uint8_t target_pdev_id;
	uint16_t payload_size;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	uint8_t *tlv_buf;
	uint32_t *tlv_buf_temp;
	uint32_t *tag_buf;
	htt_tlv_tag_t tlv_type;
	uint16_t tlv_length;
	uint64_t pkt_count = 0;
	uint64_t byte_count = 0;
	uint64_t soc_drop_cnt = 0;
	struct cdp_pkt_info tx_comp = { 0 };
	struct cdp_pkt_info tx_failed =  { 0 };

	target_pdev_id =
		HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(dpsoc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT)
		return;

	pdev = dpsoc->pdev_list[pdev_id];
	if (!pdev) {
		dp_err("PDEV is NULL for pdev_id:%d", pdev_id);
		return;
	}

	payload_size =
	HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PAYLOAD_SIZE_GET(*msg_word);

	qdf_trace_hex_dump(QDF_MODULE_ID_DP_HTT, QDF_TRACE_LEVEL_INFO,
			   (void *)msg_word, payload_size + 16);

	/* Adjust msg_word to point to the first TLV in buffer */
	msg_word = msg_word + 4;

	/* Parse the received buffer till payload size reaches 0 */
	while (payload_size > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_buf_temp = msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);

		/* Add header size to tlv length*/
		tlv_length += 4;

		switch (tlv_type) {
		case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
		{
			/* SOC-wide drop counter (no-peer TQM drops) */
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(SOC_DROP_CNT);
			soc_drop_cnt = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(dpsoc, tx.tqm_drop_no_peer, soc_drop_cnt);
			break;
		}
		case HTT_STATS_VDEV_TXRX_STATS_HW_STATS_TAG:
		{
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(VDEV_ID);
			vdev_id = (uint8_t)(*tag_buf);
			vdev = dp_vdev_get_ref_by_id(dpsoc, vdev_id,
						     DP_MOD_ID_HTT);

			/* Unknown/stale vdev id: skip this TLV entirely */
			if (!vdev)
				goto invalid_vdev;

			/* Extract received packet count from buffer */
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(RX_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.num, pkt_count);

			/* Extract received packet byte count from buffer */
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(RX_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.bytes, byte_count);

			/* Extract tx success packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num = pkt_count;

			/* Extract tx success packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes = byte_count;

			/* Extract tx retry packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num = pkt_count;

			/* Extract tx retry packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes = byte_count;

			/* Extract tx drop packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_DROP_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num += pkt_count;

			/* Extract tx drop packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_DROP_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes += byte_count;

			/* Extract tx age-out packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num += pkt_count;

			/* Extract tx age-out packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes += byte_count;

			/* Extract tqm bypass packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;

			/* Extract tx bypass packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;

			DP_STATS_UPD(vdev, tx.comp_pkt.num, tx_comp.num);
			DP_STATS_UPD(vdev, tx.comp_pkt.bytes, tx_comp.bytes);

			DP_STATS_UPD(vdev, tx.tx_failed, tx_failed.num);

			dp_vdev_unref_delete(dpsoc, vdev, DP_MOD_ID_HTT);
			break;
		}
		default:
			/* Unknown tag: asserts in debug builds, silently
			 * skipped (by TLV length) in production builds.
			 */
			qdf_assert(0);
		}
invalid_vdev:
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		/* NOTE(review): payload_size (uint16_t) would underflow and
		 * keep the loop running past the buffer if FW ever reported
		 * tlv_length > payload_size — assumes well-formed FW TLVs;
		 * TODO confirm.
		 */
		payload_size -= tlv_length;
	}
}
2488 #else
/* Stub when QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT is not compiled in */
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{}
2492 #endif
2493 
2494 #ifdef CONFIG_SAWF_DEF_QUEUES
/* Forward the SAWF default-queues map report configuration to the SAWF HTT
 * module for processing.
 */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_def_queues_map_report_conf(soc, msg_word, htt_t2h_msg);
}
2501 #else
/* Stub when CONFIG_SAWF_DEF_QUEUES is not compiled in */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{}
2506 #endif
2507 
2508 #ifdef CONFIG_SAWF
2509 /*
2510  * dp_sawf_msduq_map() - Msdu queue creation information received
2511  * from target
2512  * @soc: soc handle.
2513  * @msg_word: Pointer to htt msg word.
2514  * @htt_t2h_msg: HTT message nbuf
2515  *
2516  * @return: void
2517  */
/* Forward the MSDU-queue creation indication to the SAWF HTT module */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
}
2523 
2524 /*
2525  * dp_sawf_mpdu_stats_handler() - HTT message handler for MPDU stats
2526  * @soc: soc handle.
2527  * @htt_t2h_msg: HTT message nbuf
2528  *
2529  * @return: void
2530  */
/* Forward the SAWF MPDU stats T2H message to the SAWF HTT module */
static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
				       qdf_nbuf_t htt_t2h_msg)
{
	dp_sawf_htt_mpdu_stats_handler(soc, htt_t2h_msg);
}
2536 #else
/* Stubs when CONFIG_SAWF is not compiled in */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{}

static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
				       qdf_nbuf_t htt_t2h_msg)
{}
2544 #endif
2545 
2546 /*
2547  * time_allow_print() - time allow print
2548  * @htt_ring_tt:	ringi_id array of timestamps
2549  * @ring_id:		ring_id (index)
2550  *
2551  * Return: 1 for successfully saving timestamp in array
2552  *	and 0 for timestamp falling within 2 seconds after last one
2553  */
2554 static bool time_allow_print(struct bp_handler *htt_bp_handler,
2555 			     u_int8_t ring_id, u_int32_t th_time)
2556 {
2557 	unsigned long tstamp;
2558 	struct bp_handler *path = &htt_bp_handler[ring_id];
2559 
2560 	tstamp = qdf_get_system_timestamp();
2561 
2562 	if (!path)
2563 		return 0; //unable to print backpressure messages
2564 
2565 	if (path->bp_start_tt == -1) {
2566 		path->bp_start_tt = tstamp;
2567 		path->bp_duration = 0;
2568 		path->bp_last_tt = tstamp;
2569 		path->bp_counter = 1;
2570 		return 1;
2571 	}
2572 
2573 	path->bp_duration = tstamp - path->bp_start_tt;
2574 	path->bp_last_tt = tstamp;
2575 	path->bp_counter++;
2576 
2577 	if (path->bp_duration >= th_time) {
2578 		path->bp_start_tt = -1;
2579 		return 1;
2580 	}
2581 
2582 	return 0;
2583 }
2584 
/* dp_htt_alert_print() - emit the (rate-limited) backpressure alert lines
 * for one ring: identity, head/tail indices, and accumulated bp history.
 */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       struct dp_pdev *pdev, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time,
			       struct bp_handler *htt_bp_handler,
			       char *ring_stype)
{
	dp_alert("seq_num %u msg_type: %d pdev_id: %d ring_type: %s ",
		 pdev->bkp_stats.seq_num, msg_type, pdev->pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
	dp_alert("last_bp_event: %ld, total_bp_duration: %ld, bp_counter: %ld",
		 htt_bp_handler[ring_id].bp_last_tt,
		 htt_bp_handler[ring_id].bp_duration,
		 htt_bp_handler[ring_id].bp_counter);
}
2601 
2602 /**
2603  * dp_get_srng_ring_state_from_hal(): Get hal level ring stats
2604  * @soc: DP_SOC handle
2605  * @srng: DP_SRNG handle
2606  * @ring_type: srng src/dst ring
2607  *
2608  * Return: void
2609  */
2610 static QDF_STATUS
2611 dp_get_srng_ring_state_from_hal(struct dp_soc *soc,
2612 				struct dp_pdev *pdev,
2613 				struct dp_srng *srng,
2614 				enum hal_ring_type ring_type,
2615 				struct dp_srng_ring_state *state)
2616 {
2617 	struct hal_soc *hal_soc;
2618 
2619 	if (!soc || !srng || !srng->hal_srng || !state)
2620 		return QDF_STATUS_E_INVAL;
2621 
2622 	hal_soc = (struct hal_soc *)soc->hal_soc;
2623 
2624 	hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &state->sw_tail,
2625 			&state->sw_head);
2626 
2627 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &state->hw_head,
2628 			&state->hw_tail, ring_type);
2629 
2630 	state->ring_type = ring_type;
2631 
2632 	return QDF_STATUS_SUCCESS;
2633 }
2634 
2635 #ifdef QCA_MONITOR_PKT_SUPPORT
/* dp_queue_mon_ring_stats() - append monitor-ring (buf/dst/desc) state for
 * one lmac to the snapshot, advancing *num_srng for each ring captured.
 * Only applicable when rxdma1 is enabled in the soc config.
 */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
	QDF_STATUS status;

	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_buf_ring[lmac_id],
			 RXDMA_MONITOR_BUF,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);

		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_dst_ring[lmac_id],
			 RXDMA_MONITOR_DST,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);

		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_desc_ring[lmac_id],
			 RXDMA_MONITOR_DESC,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
	}
}
2672 #else
/* Stub when QCA_MONITOR_PKT_SUPPORT is not compiled in */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
}
2679 #endif
2680 
2681 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
/* Capture the TCL command/credit ring state (ring present in this build) */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_cmd_credit_ring,
					       TCL_CMD_CREDIT, ring_state);
}
2690 #else
/* TCL cmd/credit ring disabled in this build: report success with no state */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
2697 #endif
2698 
2699 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
/* Capture the TCL status ring state (ring present in this build) */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_status_ring,
					       TCL_STATUS, ring_state);
}
2708 #else
/* TCL status ring disabled in this build: report success with no state */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
2715 #endif
2716 
2717 /**
2718  * dp_queue_srng_ring_stats(): Print pdev hal level ring stats
2719  * @pdev: DP_pdev handle
2720  *
2721  * Return: void
2722  */
2723 static void dp_queue_ring_stats(struct dp_pdev *pdev)
2724 {
2725 	uint32_t i;
2726 	int mac_id;
2727 	int lmac_id;
2728 	uint32_t j = 0;
2729 	struct dp_soc *soc = pdev->soc;
2730 	struct dp_soc_srngs_state * soc_srngs_state = NULL;
2731 	struct dp_soc_srngs_state *drop_srngs_state = NULL;
2732 	QDF_STATUS status;
2733 
2734 	soc_srngs_state = qdf_mem_malloc(sizeof(struct dp_soc_srngs_state));
2735 	if (!soc_srngs_state) {
2736 		dp_htt_alert("Memory alloc failed for back pressure event");
2737 		return;
2738 	}
2739 
2740 	status = dp_get_srng_ring_state_from_hal
2741 				(pdev->soc, pdev,
2742 				 &pdev->soc->reo_exception_ring,
2743 				 REO_EXCEPTION,
2744 				 &soc_srngs_state->ring_state[j]);
2745 
2746 	if (status == QDF_STATUS_SUCCESS)
2747 		qdf_assert_always(++j < DP_MAX_SRNGS);
2748 
2749 	status = dp_get_srng_ring_state_from_hal
2750 				(pdev->soc, pdev,
2751 				 &pdev->soc->reo_reinject_ring,
2752 				 REO_REINJECT,
2753 				 &soc_srngs_state->ring_state[j]);
2754 
2755 	if (status == QDF_STATUS_SUCCESS)
2756 		qdf_assert_always(++j < DP_MAX_SRNGS);
2757 
2758 	status = dp_get_srng_ring_state_from_hal
2759 				(pdev->soc, pdev,
2760 				 &pdev->soc->reo_cmd_ring,
2761 				 REO_CMD,
2762 				 &soc_srngs_state->ring_state[j]);
2763 
2764 	if (status == QDF_STATUS_SUCCESS)
2765 		qdf_assert_always(++j < DP_MAX_SRNGS);
2766 
2767 	status = dp_get_srng_ring_state_from_hal
2768 				(pdev->soc, pdev,
2769 				 &pdev->soc->reo_status_ring,
2770 				 REO_STATUS,
2771 				 &soc_srngs_state->ring_state[j]);
2772 
2773 	if (status == QDF_STATUS_SUCCESS)
2774 		qdf_assert_always(++j < DP_MAX_SRNGS);
2775 
2776 	status = dp_get_srng_ring_state_from_hal
2777 				(pdev->soc, pdev,
2778 				 &pdev->soc->rx_rel_ring,
2779 				 WBM2SW_RELEASE,
2780 				 &soc_srngs_state->ring_state[j]);
2781 
2782 	if (status == QDF_STATUS_SUCCESS)
2783 		qdf_assert_always(++j < DP_MAX_SRNGS);
2784 
2785 	status = dp_get_tcl_cmd_cred_ring_state_from_hal
2786 				(pdev, &soc_srngs_state->ring_state[j]);
2787 	if (status == QDF_STATUS_SUCCESS)
2788 		qdf_assert_always(++j < DP_MAX_SRNGS);
2789 
2790 	status = dp_get_tcl_status_ring_state_from_hal
2791 				(pdev, &soc_srngs_state->ring_state[j]);
2792 	if (status == QDF_STATUS_SUCCESS)
2793 		qdf_assert_always(++j < DP_MAX_SRNGS);
2794 
2795 	status = dp_get_srng_ring_state_from_hal
2796 				(pdev->soc, pdev,
2797 				 &pdev->soc->wbm_desc_rel_ring,
2798 				 SW2WBM_RELEASE,
2799 				 &soc_srngs_state->ring_state[j]);
2800 
2801 	if (status == QDF_STATUS_SUCCESS)
2802 		qdf_assert_always(++j < DP_MAX_SRNGS);
2803 
2804 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
2805 		status = dp_get_srng_ring_state_from_hal
2806 				(pdev->soc, pdev,
2807 				 &pdev->soc->reo_dest_ring[i],
2808 				 REO_DST,
2809 				 &soc_srngs_state->ring_state[j]);
2810 
2811 		if (status == QDF_STATUS_SUCCESS)
2812 			qdf_assert_always(++j < DP_MAX_SRNGS);
2813 	}
2814 
2815 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
2816 		status = dp_get_srng_ring_state_from_hal
2817 				(pdev->soc, pdev,
2818 				 &pdev->soc->tcl_data_ring[i],
2819 				 TCL_DATA,
2820 				 &soc_srngs_state->ring_state[j]);
2821 
2822 		if (status == QDF_STATUS_SUCCESS)
2823 			qdf_assert_always(++j < DP_MAX_SRNGS);
2824 	}
2825 
2826 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
2827 		status = dp_get_srng_ring_state_from_hal
2828 				(pdev->soc, pdev,
2829 				 &pdev->soc->tx_comp_ring[i],
2830 				 WBM2SW_RELEASE,
2831 				 &soc_srngs_state->ring_state[j]);
2832 
2833 		if (status == QDF_STATUS_SUCCESS)
2834 			qdf_assert_always(++j < DP_MAX_SRNGS);
2835 	}
2836 
2837 	lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id);
2838 	status = dp_get_srng_ring_state_from_hal
2839 				(pdev->soc, pdev,
2840 				 &pdev->soc->rx_refill_buf_ring
2841 				 [lmac_id],
2842 				 RXDMA_BUF,
2843 				 &soc_srngs_state->ring_state[j]);
2844 
2845 	if (status == QDF_STATUS_SUCCESS)
2846 		qdf_assert_always(++j < DP_MAX_SRNGS);
2847 
2848 	status = dp_get_srng_ring_state_from_hal
2849 				(pdev->soc, pdev,
2850 				 &pdev->rx_refill_buf_ring2,
2851 				 RXDMA_BUF,
2852 				 &soc_srngs_state->ring_state[j]);
2853 
2854 	if (status == QDF_STATUS_SUCCESS)
2855 		qdf_assert_always(++j < DP_MAX_SRNGS);
2856 
2857 
2858 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2859 		dp_get_srng_ring_state_from_hal
2860 				(pdev->soc, pdev,
2861 				 &pdev->rx_mac_buf_ring[i],
2862 				 RXDMA_BUF,
2863 				 &soc_srngs_state->ring_state[j]);
2864 
2865 		if (status == QDF_STATUS_SUCCESS)
2866 			qdf_assert_always(++j < DP_MAX_SRNGS);
2867 	}
2868 
2869 	for (mac_id = 0;
2870 	     mac_id  < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
2871 	     mac_id++) {
2872 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2873 						     mac_id, pdev->pdev_id);
2874 
2875 		dp_queue_mon_ring_stats(pdev, lmac_id, &j,
2876 					soc_srngs_state);
2877 
2878 		status = dp_get_srng_ring_state_from_hal
2879 			(pdev->soc, pdev,
2880 			 &pdev->soc->rxdma_mon_status_ring[lmac_id],
2881 			 RXDMA_MONITOR_STATUS,
2882 			 &soc_srngs_state->ring_state[j]);
2883 
2884 		if (status == QDF_STATUS_SUCCESS)
2885 			qdf_assert_always(++j < DP_MAX_SRNGS);
2886 	}
2887 
2888 	for (i = 0; i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
2889 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2890 						     i, pdev->pdev_id);
2891 
2892 		status = dp_get_srng_ring_state_from_hal
2893 				(pdev->soc, pdev,
2894 				 &pdev->soc->rxdma_err_dst_ring
2895 				 [lmac_id],
2896 				 RXDMA_DST,
2897 				 &soc_srngs_state->ring_state[j]);
2898 
2899 		if (status == QDF_STATUS_SUCCESS)
2900 			qdf_assert_always(++j < DP_MAX_SRNGS);
2901 	}
2902 	soc_srngs_state->max_ring_id = j;
2903 
2904 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
2905 
2906 	soc_srngs_state->seq_num = pdev->bkp_stats.seq_num;
2907 
2908 	if (pdev->bkp_stats.queue_depth >= HTT_BKP_STATS_MAX_QUEUE_DEPTH) {
2909 		drop_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
2910 		qdf_assert_always(drop_srngs_state);
2911 		TAILQ_REMOVE(&pdev->bkp_stats.list, drop_srngs_state,
2912 			     list_elem);
2913 		qdf_mem_free(drop_srngs_state);
2914 		pdev->bkp_stats.queue_depth--;
2915 	}
2916 
2917 	pdev->bkp_stats.queue_depth++;
2918 	TAILQ_INSERT_TAIL(&pdev->bkp_stats.list, soc_srngs_state,
2919 			  list_elem);
2920 	pdev->bkp_stats.seq_num++;
2921 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
2922 
2923 	qdf_queue_work(0, pdev->bkp_stats.work_queue,
2924 		       &pdev->bkp_stats.work);
2925 }
2926 
2927 /*
2928  * dp_htt_bkp_event_alert() - htt backpressure event alert
2929  * @msg_word:	htt packet context
2930  * @htt_soc:	HTT SOC handle
2931  *
2932  * Return: after attempting to print stats
2933  */
2934 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
2935 {
2936 	u_int8_t ring_type;
2937 	u_int8_t pdev_id;
2938 	uint8_t target_pdev_id;
2939 	u_int8_t ring_id;
2940 	u_int16_t hp_idx;
2941 	u_int16_t tp_idx;
2942 	u_int32_t bkp_time;
2943 	u_int32_t th_time;
2944 	enum htt_t2h_msg_type msg_type;
2945 	struct dp_soc *dpsoc;
2946 	struct dp_pdev *pdev;
2947 	struct dp_htt_timestamp *radio_tt;
2948 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2949 
2950 
2951 	if (!soc)
2952 		return;
2953 
2954 	dpsoc = (struct dp_soc *)soc->dp_soc;
2955 	soc_cfg_ctx = dpsoc->wlan_cfg_ctx;
2956 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2957 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
2958 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
2959 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2960 							 target_pdev_id);
2961 	if (pdev_id >= MAX_PDEV_CNT) {
2962 		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
2963 		return;
2964 	}
2965 
2966 	th_time = wlan_cfg_time_control_bp(soc_cfg_ctx);
2967 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
2968 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
2969 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
2970 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
2971 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
2972 	radio_tt = &soc->pdevid_tt[pdev_id];
2973 
2974 	switch (ring_type) {
2975 	case HTT_SW_RING_TYPE_UMAC:
2976 		if (!time_allow_print(radio_tt->umac_path, ring_id, th_time))
2977 			return;
2978 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2979 				   bkp_time, radio_tt->umac_path,
2980 				   "HTT_SW_RING_TYPE_UMAC");
2981 	break;
2982 	case HTT_SW_RING_TYPE_LMAC:
2983 		if (!time_allow_print(radio_tt->lmac_path, ring_id, th_time))
2984 			return;
2985 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2986 				   bkp_time, radio_tt->lmac_path,
2987 				   "HTT_SW_RING_TYPE_LMAC");
2988 	break;
2989 	default:
2990 		dp_alert("Invalid ring type: %d", ring_type);
2991 	break;
2992 	}
2993 
2994 	dp_queue_ring_stats(pdev);
2995 }
2996 
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/*
 * dp_offload_ind_handler() - offload msg handler
 * @soc:	HTT SOC handle
 * @msg_word:	Pointer to payload
 *
 * Maps the target pdev id carried in the message to the host pdev id and
 * delivers the payload to WDI subscribers of offloaded TX data capture.
 *
 * Return: None
 */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
	u_int8_t pdev_id;
	u_int8_t target_pdev_id;

	/* pdev id field shares the PPDU stats message layout */
	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA, soc->dp_soc,
			     msg_word, HTT_INVALID_VDEV, WDI_NO_VAL,
			     pdev_id);
}
#else
/* Packet capture v2 compiled out: the indication is silently ignored */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
}
#endif
3024 
3025 #ifdef WLAN_FEATURE_11BE_MLO
3026 #ifdef WLAN_MLO_MULTI_CHIP
3027 static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
3028 					   uint32_t ts_lo, uint32_t ts_hi)
3029 {
3030 	uint64_t mlo_offset;
3031 
3032 	mlo_offset = ((uint64_t)(ts_hi) << 32 | ts_lo);
3033 	soc->cdp_soc.ops->mlo_ops->mlo_update_mlo_ts_offset
3034 		((struct cdp_soc_t *)soc, mlo_offset);
3035 }
3036 #else
3037 static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
3038 					   uint32_t ts_lo, uint32_t ts_hi)
3039 {}
3040 #endif
3041 static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
3042 					uint32_t *msg_word)
3043 {
3044 	uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3045 	uint8_t *mlo_peer_mac_addr;
3046 	uint16_t mlo_peer_id;
3047 	uint8_t num_links;
3048 	struct dp_mlo_flow_override_info mlo_flow_info[DP_MLO_FLOW_INFO_MAX];
3049 	struct dp_mlo_link_info mlo_link_info[DP_MAX_MLO_LINKS];
3050 	MLO_PEER_MAP_TLV_TAG_ID tlv_type = 0xff;
3051 	uint16_t tlv_len = 0;
3052 	int i = 0;
3053 
3054 	mlo_peer_id = HTT_RX_MLO_PEER_MAP_MLO_PEER_ID_GET(*msg_word);
3055 	num_links =
3056 		HTT_RX_MLO_PEER_MAP_NUM_LOGICAL_LINKS_GET(*msg_word);
3057 	mlo_peer_mac_addr =
3058 	htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3059 				   &mac_addr_deswizzle_buf[0]);
3060 
3061 	mlo_flow_info[0].ast_idx =
3062 		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
3063 	mlo_flow_info[0].ast_idx_valid =
3064 		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
3065 	mlo_flow_info[0].chip_id =
3066 		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
3067 	mlo_flow_info[0].tidmask =
3068 		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
3069 	mlo_flow_info[0].cache_set_num =
3070 	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));
3071 
3072 	mlo_flow_info[1].ast_idx =
3073 		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
3074 	mlo_flow_info[1].ast_idx_valid =
3075 		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
3076 	mlo_flow_info[1].chip_id =
3077 		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
3078 	mlo_flow_info[1].tidmask =
3079 		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
3080 	mlo_flow_info[1].cache_set_num =
3081 	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));
3082 
3083 	mlo_flow_info[2].ast_idx =
3084 		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
3085 	mlo_flow_info[2].ast_idx_valid =
3086 		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
3087 	mlo_flow_info[2].chip_id =
3088 		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
3089 	mlo_flow_info[2].tidmask =
3090 		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
3091 	mlo_flow_info[2].cache_set_num =
3092 	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));
3093 
3094 	msg_word = msg_word + 8;
3095 	while (msg_word && (i < DP_MAX_MLO_LINKS)) {
3096 		mlo_link_info[i].peer_chip_id = 0xFF;
3097 		mlo_link_info[i].vdev_id = 0xFF;
3098 
3099 		tlv_type = HTT_RX_MLO_PEER_MAP_TLV_TAG_GET(*msg_word);
3100 		tlv_len = HTT_RX_MLO_PEER_MAP_TLV_LENGTH_GET(*msg_word);
3101 
3102 		if (tlv_len == 0) {
3103 			dp_err("TLV Length is 0");
3104 			break;
3105 		}
3106 
3107 		if (tlv_type == MLO_PEER_MAP_TLV_STRUCT_SOC_VDEV_PEER_IDS) {
3108 			mlo_link_info[i].peer_chip_id =
3109 				HTT_RX_MLO_PEER_MAP_CHIP_ID_GET(
3110 							*(msg_word + 1));
3111 			mlo_link_info[i].vdev_id =
3112 				HTT_RX_MLO_PEER_MAP_VDEV_ID_GET(
3113 							*(msg_word + 1));
3114 		}
3115 		/* Add header size to tlv length */
3116 		tlv_len = tlv_len + HTT_TLV_HDR_LEN;
3117 		msg_word = (uint32_t *)(((uint8_t *)msg_word) + tlv_len);
3118 		i++;
3119 	}
3120 
3121 	dp_rx_mlo_peer_map_handler(soc->dp_soc, mlo_peer_id,
3122 				   mlo_peer_mac_addr,
3123 				   mlo_flow_info, mlo_link_info);
3124 }
3125 
3126 static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
3127 					  uint32_t *msg_word)
3128 {
3129 	uint16_t mlo_peer_id;
3130 
3131 	mlo_peer_id = HTT_RX_MLO_PEER_UNMAP_MLO_PEER_ID_GET(*msg_word);
3132 	dp_rx_mlo_peer_unmap_handler(soc->dp_soc, mlo_peer_id);
3133 }
3134 
/*
 * dp_rx_mlo_timestamp_ind_handler() - MLO timestamp offset indication handler
 * @soc: DP SOC handle
 * @msg_word: Pointer to T2H message payload
 *
 * Notifies WDI subscribers, caches the per-pdev MLO timestamp/offset
 * fields (decoded from payload words 0..7) under the htt_stats lock, and
 * finally pushes the new offset to the control plane via
 * dp_update_mlo_ts_offset().
 *
 * Return: None
 */
static void
dp_rx_mlo_timestamp_ind_handler(struct dp_soc *soc,
				uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint8_t target_pdev_id;
	struct dp_pdev *pdev;

	if (!soc)
		return;

	/* Translate the target's pdev numbering to the host's */
	target_pdev_id = HTT_T2H_MLO_TIMESTAMP_OFFSET_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT) {
		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
		return;
	}

	pdev = (struct dp_pdev *)soc->pdev_list[pdev_id];

	if (!pdev) {
		dp_err("Invalid pdev");
		return;
	}
	dp_wdi_event_handler(WDI_EVENT_MLO_TSTMP, soc,
			     msg_word, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);

	/* Serialize updates to pdev->timestamp against readers */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	pdev->timestamp.msg_type =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MSG_TYPE_GET(*msg_word);
	pdev->timestamp.pdev_id = pdev_id;
	pdev->timestamp.chip_id =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_CHIP_ID_GET(*msg_word);
	pdev->timestamp.mac_clk_freq =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MAC_CLK_FREQ_MHZ_GET(*msg_word);
	/* Words 1..5 are raw 32-bit timestamp/offset values */
	pdev->timestamp.sync_tstmp_lo_us = *(msg_word + 1);
	pdev->timestamp.sync_tstmp_hi_us = *(msg_word + 2);
	pdev->timestamp.mlo_offset_lo_us = *(msg_word + 3);
	pdev->timestamp.mlo_offset_hi_us = *(msg_word + 4);
	pdev->timestamp.mlo_offset_clks  = *(msg_word + 5);
	pdev->timestamp.mlo_comp_us =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_US_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_clks =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_CLKS_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_timer =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_PERIOD_US_GET(
							*(msg_word + 7));

	dp_htt_debug("tsf_lo=%d tsf_hi=%d, mlo_ofst_lo=%d, mlo_ofst_hi=%d\n",
		     pdev->timestamp.sync_tstmp_lo_us,
		     pdev->timestamp.sync_tstmp_hi_us,
		     pdev->timestamp.mlo_offset_lo_us,
		     pdev->timestamp.mlo_offset_hi_us);

	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	dp_update_mlo_ts_offset(soc,
				pdev->timestamp.mlo_offset_lo_us,
				pdev->timestamp.mlo_offset_hi_us);
}
#else
/*
 * 11BE MLO support compiled out: the target must never send MLO peer
 * map/unmap or MLO timestamp indications in this configuration, so each
 * stub asserts unconditionally to catch a host/target feature mismatch.
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
					 uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void
dp_rx_mlo_timestamp_ind_handler(void *soc_handle,
				uint32_t *msg_word)
{
	qdf_assert_always(0);
}
#endif
3220 
3221 /*
3222  * dp_htt_rx_addba_handler() - RX Addba HTT msg handler
3223  * @soc: DP Soc handler
3224  * @peer_id: ID of peer
3225  * @tid: TID number
3226  * @win_sz: BA window size
3227  *
3228  * Return: None
3229  */
3230 static void
3231 dp_htt_rx_addba_handler(struct dp_soc *soc, uint16_t peer_id,
3232 			uint8_t tid, uint16_t win_sz)
3233 {
3234 	uint16_t status;
3235 	struct dp_peer *peer;
3236 
3237 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3238 
3239 	if (!peer) {
3240 		dp_err("Peer not found peer id %d", peer_id);
3241 		return;
3242 	}
3243 
3244 	status = dp_addba_requestprocess_wifi3((struct cdp_soc_t *)soc,
3245 					       peer->mac_addr.raw,
3246 					       peer->vdev->vdev_id, 0,
3247 					       tid, 0, win_sz, 0xffff);
3248 
3249 	dp_addba_resp_tx_completion_wifi3(
3250 		(struct cdp_soc_t *)soc,
3251 		peer->mac_addr.raw, peer->vdev->vdev_id,
3252 		tid,
3253 		status);
3254 
3255 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3256 
3257 	dp_info("PeerID %d BAW %d TID %d stat %d",
3258 		peer_id, win_sz, tid, status);
3259 }
3260 
3261 /*
3262  * dp_htt_ppdu_id_fmt_handler() - PPDU ID Format handler
3263  * @htt_soc: HTT SOC handle
3264  * @msg_word: Pointer to payload
3265  *
3266  * Return: None
3267  */
3268 static void
3269 dp_htt_ppdu_id_fmt_handler(struct dp_soc *soc, uint32_t *msg_word)
3270 {
3271 	uint8_t msg_type, valid, bits, offset;
3272 
3273 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3274 
3275 	msg_word += HTT_PPDU_ID_FMT_IND_LINK_ID_OFFSET;
3276 	valid = HTT_PPDU_ID_FMT_IND_VALID_GET_BITS31_16(*msg_word);
3277 	bits = HTT_PPDU_ID_FMT_IND_BITS_GET_BITS31_16(*msg_word);
3278 	offset = HTT_PPDU_ID_FMT_IND_OFFSET_GET_BITS31_16(*msg_word);
3279 
3280 	dp_info("link_id: valid %u bits %u offset %u", valid, bits, offset);
3281 
3282 	if (valid) {
3283 		soc->link_id_offset = offset;
3284 		soc->link_id_bits = bits;
3285 	}
3286 }
3287 
3288 /*
3289  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
3290  * @context:	Opaque context (HTT SOC handle)
3291  * @pkt:	HTC packet
3292  */
3293 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
3294 {
3295 	struct htt_soc *soc = (struct htt_soc *) context;
3296 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
3297 	u_int32_t *msg_word;
3298 	enum htt_t2h_msg_type msg_type;
3299 	bool free_buf = true;
3300 
3301 	/* check for successful message reception */
3302 	if (pkt->Status != QDF_STATUS_SUCCESS) {
3303 		if (pkt->Status != QDF_STATUS_E_CANCELED)
3304 			soc->stats.htc_err_cnt++;
3305 
3306 		qdf_nbuf_free(htt_t2h_msg);
3307 		return;
3308 	}
3309 
3310 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
3311 
3312 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
3313 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3314 	htt_event_record(soc->htt_logger_handle,
3315 			 msg_type, (uint8_t *)msg_word);
3316 	switch (msg_type) {
3317 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
3318 	{
3319 		dp_htt_bkp_event_alert(msg_word, soc);
3320 		break;
3321 	}
3322 	case HTT_T2H_MSG_TYPE_PEER_MAP:
3323 		{
3324 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3325 			u_int8_t *peer_mac_addr;
3326 			u_int16_t peer_id;
3327 			u_int16_t hw_peer_id;
3328 			u_int8_t vdev_id;
3329 			u_int8_t is_wds;
3330 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
3331 
3332 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
3333 			hw_peer_id =
3334 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
3335 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
3336 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
3337 				(u_int8_t *) (msg_word+1),
3338 				&mac_addr_deswizzle_buf[0]);
3339 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3340 				QDF_TRACE_LEVEL_DEBUG,
3341 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3342 				peer_id, vdev_id);
3343 
3344 			/*
3345 			 * check if peer already exists for this peer_id, if so
3346 			 * this peer map event is in response for a wds peer add
3347 			 * wmi command sent during wds source port learning.
3348 			 * in this case just add the ast entry to the existing
3349 			 * peer ast_list.
3350 			 */
3351 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
3352 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
3353 					       vdev_id, peer_mac_addr, 0,
3354 					       is_wds);
3355 			break;
3356 		}
3357 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
3358 		{
3359 			u_int16_t peer_id;
3360 			u_int8_t vdev_id;
3361 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3362 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
3363 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
3364 
3365 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3366 						 vdev_id, mac_addr, 0,
3367 						 DP_PEER_WDS_COUNT_INVALID);
3368 			break;
3369 		}
3370 	case HTT_T2H_MSG_TYPE_SEC_IND:
3371 		{
3372 			u_int16_t peer_id;
3373 			enum cdp_sec_type sec_type;
3374 			int is_unicast;
3375 
3376 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
3377 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
3378 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
3379 			/* point to the first part of the Michael key */
3380 			msg_word++;
3381 			dp_rx_sec_ind_handler(
3382 				soc->dp_soc, peer_id, sec_type, is_unicast,
3383 				msg_word, msg_word + 2);
3384 			break;
3385 		}
3386 
3387 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
3388 		{
3389 			free_buf =
3390 				dp_monitor_ppdu_stats_ind_handler(soc,
3391 								  msg_word,
3392 								  htt_t2h_msg);
3393 			break;
3394 		}
3395 
3396 	case HTT_T2H_MSG_TYPE_PKTLOG:
3397 		{
3398 			dp_pktlog_msg_handler(soc, msg_word);
3399 			break;
3400 		}
3401 
3402 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3403 		{
3404 			/*
3405 			 * HTC maintains runtime pm count for H2T messages that
3406 			 * have a response msg from FW. This count ensures that
3407 			 * in the case FW does not sent out the response or host
3408 			 * did not process this indication runtime_put happens
3409 			 * properly in the cleanup path.
3410 			 */
3411 			if (htc_dec_return_htt_runtime_cnt(soc->htc_soc) >= 0)
3412 				htc_pm_runtime_put(soc->htc_soc);
3413 			else
3414 				soc->stats.htt_ver_req_put_skip++;
3415 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3416 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3417 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
3418 				"target uses HTT version %d.%d; host uses %d.%d",
3419 				soc->tgt_ver.major, soc->tgt_ver.minor,
3420 				HTT_CURRENT_VERSION_MAJOR,
3421 				HTT_CURRENT_VERSION_MINOR);
3422 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3423 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3424 					QDF_TRACE_LEVEL_WARN,
3425 					"*** Incompatible host/target HTT versions!");
3426 			}
3427 			/* abort if the target is incompatible with the host */
3428 			qdf_assert(soc->tgt_ver.major ==
3429 				HTT_CURRENT_VERSION_MAJOR);
3430 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3431 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3432 					QDF_TRACE_LEVEL_INFO_LOW,
3433 					"*** Warning: host/target HTT versions"
3434 					" are different, though compatible!");
3435 			}
3436 			break;
3437 		}
3438 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3439 		{
3440 			uint16_t peer_id;
3441 			uint8_t tid;
3442 			uint16_t win_sz;
3443 
3444 			/*
3445 			 * Update REO Queue Desc with new values
3446 			 */
3447 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3448 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3449 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3450 
3451 			/*
3452 			 * Window size needs to be incremented by 1
3453 			 * since fw needs to represent a value of 256
3454 			 * using just 8 bits
3455 			 */
3456 			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
3457 						tid, win_sz + 1);
3458 			break;
3459 		}
3460 	case HTT_T2H_MSG_TYPE_RX_ADDBA_EXTN:
3461 		{
3462 			uint16_t peer_id;
3463 			uint8_t tid;
3464 			uint16_t win_sz;
3465 
3466 			peer_id = HTT_RX_ADDBA_EXTN_PEER_ID_GET(*msg_word);
3467 			tid = HTT_RX_ADDBA_EXTN_TID_GET(*msg_word);
3468 
3469 			msg_word++;
3470 			win_sz = HTT_RX_ADDBA_EXTN_WIN_SIZE_GET(*msg_word);
3471 
3472 			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
3473 						tid, win_sz);
3474 			break;
3475 		}
3476 	case HTT_T2H_PPDU_ID_FMT_IND:
3477 		{
3478 			dp_htt_ppdu_id_fmt_handler(soc->dp_soc, msg_word);
3479 			break;
3480 		}
3481 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3482 		{
3483 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3484 			break;
3485 		}
3486 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
3487 		{
3488 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3489 			u_int8_t *peer_mac_addr;
3490 			u_int16_t peer_id;
3491 			u_int16_t hw_peer_id;
3492 			u_int8_t vdev_id;
3493 			bool is_wds;
3494 			u_int16_t ast_hash;
3495 			struct dp_ast_flow_override_info ast_flow_info;
3496 
3497 			qdf_mem_set(&ast_flow_info, 0,
3498 					    sizeof(struct dp_ast_flow_override_info));
3499 
3500 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
3501 			hw_peer_id =
3502 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
3503 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
3504 			peer_mac_addr =
3505 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3506 						   &mac_addr_deswizzle_buf[0]);
3507 			is_wds =
3508 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
3509 			ast_hash =
3510 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
3511 			/*
3512 			 * Update 4 ast_index per peer, ast valid mask
3513 			 * and TID flow valid mask.
3514 			 * AST valid mask is 3 bit field corresponds to
3515 			 * ast_index[3:1]. ast_index 0 is always valid.
3516 			 */
3517 			ast_flow_info.ast_valid_mask =
3518 			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
3519 			ast_flow_info.ast_idx[0] = hw_peer_id;
3520 			ast_flow_info.ast_flow_mask[0] =
3521 			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
3522 			ast_flow_info.ast_idx[1] =
3523 			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
3524 			ast_flow_info.ast_flow_mask[1] =
3525 			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
3526 			ast_flow_info.ast_idx[2] =
3527 			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
3528 			ast_flow_info.ast_flow_mask[2] =
3529 			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
3530 			ast_flow_info.ast_idx[3] =
3531 			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
3532 			ast_flow_info.ast_flow_mask[3] =
3533 			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
3534 			/*
3535 			 * TID valid mask is applicable only
3536 			 * for HI and LOW priority flows.
3537 			 * tid_valid_mas is 8 bit field corresponds
3538 			 * to TID[7:0]
3539 			 */
3540 			ast_flow_info.tid_valid_low_pri_mask =
3541 			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
3542 			ast_flow_info.tid_valid_hi_pri_mask =
3543 			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));
3544 
3545 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3546 				  QDF_TRACE_LEVEL_DEBUG,
3547 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3548 				  peer_id, vdev_id);
3549 
3550 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3551 				  QDF_TRACE_LEVEL_INFO,
3552 				  "ast_idx[0] %d ast_idx[1] %d ast_idx[2] %d ast_idx[3] %d n",
3553 				  ast_flow_info.ast_idx[0],
3554 				  ast_flow_info.ast_idx[1],
3555 				  ast_flow_info.ast_idx[2],
3556 				  ast_flow_info.ast_idx[3]);
3557 
3558 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3559 					       hw_peer_id, vdev_id,
3560 					       peer_mac_addr, ast_hash,
3561 					       is_wds);
3562 
3563 			/*
3564 			 * Update ast indexes for flow override support
3565 			 * Applicable only for non wds peers
3566 			 */
3567 			if (!soc->dp_soc->ast_offload_support)
3568 				dp_peer_ast_index_flow_queue_map_create(
3569 						soc->dp_soc, is_wds,
3570 						peer_id, peer_mac_addr,
3571 						&ast_flow_info);
3572 
3573 			break;
3574 		}
3575 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
3576 		{
3577 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3578 			u_int8_t *mac_addr;
3579 			u_int16_t peer_id;
3580 			u_int8_t vdev_id;
3581 			u_int8_t is_wds;
3582 			u_int32_t free_wds_count;
3583 
3584 			peer_id =
3585 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
3586 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
3587 			mac_addr =
3588 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3589 						   &mac_addr_deswizzle_buf[0]);
3590 			is_wds =
3591 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
3592 			free_wds_count =
3593 			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));
3594 
3595 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3596 				  QDF_TRACE_LEVEL_INFO,
3597 				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
3598 				  peer_id, vdev_id);
3599 
3600 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3601 						 vdev_id, mac_addr,
3602 						 is_wds, free_wds_count);
3603 			break;
3604 		}
3605 	case HTT_T2H_MSG_TYPE_RX_DELBA:
3606 		{
3607 			uint16_t peer_id;
3608 			uint8_t tid;
3609 			uint8_t win_sz;
3610 			QDF_STATUS status;
3611 
3612 			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
3613 			tid = HTT_RX_DELBA_TID_GET(*msg_word);
3614 			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);
3615 
3616 			status = dp_rx_delba_ind_handler(
3617 				soc->dp_soc,
3618 				peer_id, tid, win_sz);
3619 
3620 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3621 				  QDF_TRACE_LEVEL_INFO,
3622 				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
3623 				  peer_id, win_sz, tid, status);
3624 			break;
3625 		}
3626 	case HTT_T2H_MSG_TYPE_RX_DELBA_EXTN:
3627 		{
3628 			uint16_t peer_id;
3629 			uint8_t tid;
3630 			uint16_t win_sz;
3631 			QDF_STATUS status;
3632 
3633 			peer_id = HTT_RX_DELBA_EXTN_PEER_ID_GET(*msg_word);
3634 			tid = HTT_RX_DELBA_EXTN_TID_GET(*msg_word);
3635 
3636 			msg_word++;
3637 			win_sz = HTT_RX_DELBA_EXTN_WIN_SIZE_GET(*msg_word);
3638 
3639 			status = dp_rx_delba_ind_handler(soc->dp_soc,
3640 							 peer_id, tid,
3641 							 win_sz);
3642 
3643 			dp_info("DELBA PeerID %d BAW %d TID %d stat %d",
3644 				peer_id, win_sz, tid, status);
3645 			break;
3646 		}
3647 	case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
3648 		{
3649 			uint16_t num_entries;
3650 			uint32_t cmem_ba_lo;
3651 			uint32_t cmem_ba_hi;
3652 
3653 			num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
3654 			cmem_ba_lo = *(msg_word + 1);
3655 			cmem_ba_hi = *(msg_word + 2);
3656 
3657 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3658 				  FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
3659 				  num_entries, cmem_ba_lo, cmem_ba_hi);
3660 
3661 			dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
3662 						     cmem_ba_lo, cmem_ba_hi);
3663 			break;
3664 		}
3665 	case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
3666 		{
3667 			dp_offload_ind_handler(soc, msg_word);
3668 			break;
3669 		}
3670 	case HTT_T2H_MSG_TYPE_PEER_MAP_V3:
3671 	{
3672 		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3673 		u_int8_t *peer_mac_addr;
3674 		u_int16_t peer_id;
3675 		u_int16_t hw_peer_id;
3676 		u_int8_t vdev_id;
3677 		uint8_t is_wds;
3678 		u_int16_t ast_hash = 0;
3679 
3680 		peer_id = HTT_RX_PEER_MAP_V3_SW_PEER_ID_GET(*msg_word);
3681 		vdev_id = HTT_RX_PEER_MAP_V3_VDEV_ID_GET(*msg_word);
3682 		peer_mac_addr =
3683 		htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3684 					   &mac_addr_deswizzle_buf[0]);
3685 		hw_peer_id = HTT_RX_PEER_MAP_V3_HW_PEER_ID_GET(*(msg_word + 3));
3686 		ast_hash = HTT_RX_PEER_MAP_V3_CACHE_SET_NUM_GET(*(msg_word + 3));
3687 		is_wds = HTT_RX_PEER_MAP_V3_NEXT_HOP_GET(*(msg_word + 4));
3688 
3689 		dp_htt_info("HTT_T2H_MSG_TYPE_PEER_MAP_V3 msg for peer id %d vdev id %d n",
3690 			    peer_id, vdev_id);
3691 
3692 		dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3693 				       hw_peer_id, vdev_id,
3694 				       peer_mac_addr, ast_hash,
3695 				       is_wds);
3696 
3697 		break;
3698 	}
3699 	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP:
3700 	{
3701 		dp_htt_mlo_peer_map_handler(soc, msg_word);
3702 		break;
3703 	}
3704 	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_UNMAP:
3705 	{
3706 		dp_htt_mlo_peer_unmap_handler(soc, msg_word);
3707 		break;
3708 	}
3709 	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
3710 	{
3711 		dp_rx_mlo_timestamp_ind_handler(soc->dp_soc, msg_word);
3712 		break;
3713 	}
3714 	case HTT_T2H_MSG_TYPE_VDEVS_TXRX_STATS_PERIODIC_IND:
3715 	{
3716 		dp_vdev_txrx_hw_stats_handler(soc, msg_word);
3717 		break;
3718 	}
3719 	case HTT_T2H_SAWF_DEF_QUEUES_MAP_REPORT_CONF:
3720 	{
3721 		dp_sawf_def_queues_update_map_report_conf(soc, msg_word,
3722 							  htt_t2h_msg);
3723 		break;
3724 	}
3725 	case HTT_T2H_SAWF_MSDUQ_INFO_IND:
3726 	{
3727 		dp_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
3728 		break;
3729 	}
3730 	case HTT_T2H_MSG_TYPE_STREAMING_STATS_IND:
3731 	{
3732 		dp_sawf_mpdu_stats_handler(soc, htt_t2h_msg);
3733 		break;
3734 	}
3735 
3736 	default:
3737 		break;
3738 	};
3739 
3740 	/* Free the indication buffer */
3741 	if (free_buf)
3742 		qdf_nbuf_free(htt_t2h_msg);
3743 }
3744 
3745 /*
3746  * dp_htt_h2t_full() - Send full handler (called from HTC)
3747  * @context:	Opaque context (HTT SOC handle)
3748  * @pkt:	HTC packet
3749  *
3750  * Return: enum htc_send_full_action
3751  */
3752 static enum htc_send_full_action
3753 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
3754 {
3755 	return HTC_SEND_FULL_KEEP;
3756 }
3757 
3758 /*
3759  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3760  * @context:	Opaque context (HTT SOC handle)
3761  * @nbuf:	nbuf containing T2H message
3762  * @pipe_id:	HIF pipe ID
3763  *
3764  * Return: QDF_STATUS
3765  *
3766  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3767  * will be used for packet log and other high-priority HTT messages. Proper
3768  * HTC connection to be added later once required FW changes are available
3769  */
3770 static QDF_STATUS
3771 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3772 {
3773 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3774 	HTC_PACKET htc_pkt;
3775 
3776 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3777 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3778 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3779 	htc_pkt.pPktContext = (void *)nbuf;
3780 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3781 
3782 	return rc;
3783 }
3784 
3785 /*
3786  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3787  * @htt_soc:	HTT SOC handle
3788  *
3789  * Return: QDF_STATUS
3790  */
3791 static QDF_STATUS
3792 htt_htc_soc_attach(struct htt_soc *soc)
3793 {
3794 	struct htc_service_connect_req connect;
3795 	struct htc_service_connect_resp response;
3796 	QDF_STATUS status;
3797 	struct dp_soc *dpsoc = soc->dp_soc;
3798 
3799 	qdf_mem_zero(&connect, sizeof(connect));
3800 	qdf_mem_zero(&response, sizeof(response));
3801 
3802 	connect.pMetaData = NULL;
3803 	connect.MetaDataLength = 0;
3804 	connect.EpCallbacks.pContext = soc;
3805 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3806 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3807 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3808 
3809 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3810 	connect.EpCallbacks.EpRecvRefill = NULL;
3811 
3812 	/* N/A, fill is done by HIF */
3813 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3814 
3815 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3816 	/*
3817 	 * Specify how deep to let a queue get before htc_send_pkt will
3818 	 * call the EpSendFull function due to excessive send queue depth.
3819 	 */
3820 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3821 
3822 	/* disable flow control for HTT data message service */
3823 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3824 
3825 	/* connect to control service */
3826 	connect.service_id = HTT_DATA_MSG_SVC;
3827 
3828 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3829 
3830 	if (status != QDF_STATUS_SUCCESS)
3831 		return status;
3832 
3833 	soc->htc_endpoint = response.Endpoint;
3834 
3835 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3836 
3837 	htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc);
3838 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3839 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3840 
3841 	return QDF_STATUS_SUCCESS; /* success */
3842 }
3843 
3844 /*
3845  * htt_soc_initialize() - SOC level HTT initialization
3846  * @htt_soc: Opaque htt SOC handle
3847  * @ctrl_psoc: Opaque ctrl SOC handle
3848  * @htc_soc: SOC level HTC handle
3849  * @hal_soc: Opaque HAL SOC handle
3850  * @osdev: QDF device
3851  *
3852  * Return: HTT handle on success; NULL on failure
3853  */
3854 void *
3855 htt_soc_initialize(struct htt_soc *htt_soc,
3856 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
3857 		   HTC_HANDLE htc_soc,
3858 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
3859 {
3860 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3861 
3862 	soc->osdev = osdev;
3863 	soc->ctrl_psoc = ctrl_psoc;
3864 	soc->htc_soc = htc_soc;
3865 	soc->hal_soc = hal_soc_hdl;
3866 
3867 	if (htt_htc_soc_attach(soc))
3868 		goto fail2;
3869 
3870 	return soc;
3871 
3872 fail2:
3873 	return NULL;
3874 }
3875 
/*
 * htt_soc_htc_dealloc() - Release HTC related HTT SOC resources
 * @htt_handle: HTT SOC handle
 *
 * Deinitializes HTT event logging and frees both the misc HTC packet
 * list and the pre-allocated HTC packet freelist.
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
3882 
3883 /*
3884  * htt_soc_htc_prealloc() - HTC memory prealloc
3885  * @htt_soc: SOC level HTT handle
3886  *
3887  * Return: QDF_STATUS_SUCCESS on Success or
3888  * QDF_STATUS_E_NOMEM on allocation failure
3889  */
3890 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
3891 {
3892 	int i;
3893 
3894 	soc->htt_htc_pkt_freelist = NULL;
3895 	/* pre-allocate some HTC_PACKET objects */
3896 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3897 		struct dp_htt_htc_pkt_union *pkt;
3898 		pkt = qdf_mem_malloc(sizeof(*pkt));
3899 		if (!pkt)
3900 			return QDF_STATUS_E_NOMEM;
3901 
3902 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3903 	}
3904 	return QDF_STATUS_SUCCESS;
3905 }
3906 
3907 /*
3908  * htt_soc_detach() - Free SOC level HTT handle
3909  * @htt_hdl: HTT SOC handle
3910  */
3911 void htt_soc_detach(struct htt_soc *htt_hdl)
3912 {
3913 	int i;
3914 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
3915 
3916 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3917 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_path);
3918 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_path);
3919 	}
3920 
3921 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
3922 	qdf_mem_free(htt_handle);
3923 
3924 }
3925 
3926 /**
3927  * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
3928  * @pdev: DP PDEV handle
3929  * @stats_type_upload_mask: stats type requested by user
3930  * @config_param_0: extra configuration parameters
3931  * @config_param_1: extra configuration parameters
3932  * @config_param_2: extra configuration parameters
3933  * @config_param_3: extra configuration parameters
3934  * @mac_id: mac number
3935  *
3936  * return: QDF STATUS
3937  */
3938 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3939 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3940 		uint32_t config_param_1, uint32_t config_param_2,
3941 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3942 		uint8_t mac_id)
3943 {
3944 	struct htt_soc *soc = pdev->soc->htt_handle;
3945 	struct dp_htt_htc_pkt *pkt;
3946 	qdf_nbuf_t msg;
3947 	uint32_t *msg_word;
3948 	uint8_t pdev_mask = 0;
3949 	uint8_t *htt_logger_bufp;
3950 	int mac_for_pdev;
3951 	int target_pdev_id;
3952 	QDF_STATUS status;
3953 
3954 	msg = qdf_nbuf_alloc(
3955 			soc->osdev,
3956 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3957 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3958 
3959 	if (!msg)
3960 		return QDF_STATUS_E_NOMEM;
3961 
3962 	/*TODO:Add support for SOC stats
3963 	 * Bit 0: SOC Stats
3964 	 * Bit 1: Pdev stats for pdev id 0
3965 	 * Bit 2: Pdev stats for pdev id 1
3966 	 * Bit 3: Pdev stats for pdev id 2
3967 	 */
3968 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3969 	target_pdev_id =
3970 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
3971 
3972 	pdev_mask = 1 << target_pdev_id;
3973 
3974 	/*
3975 	 * Set the length of the message.
3976 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3977 	 * separately during the below call to qdf_nbuf_push_head.
3978 	 * The contribution from the HTC header is added separately inside HTC.
3979 	 */
3980 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3981 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3982 				"Failed to expand head for HTT_EXT_STATS");
3983 		qdf_nbuf_free(msg);
3984 		return QDF_STATUS_E_FAILURE;
3985 	}
3986 
3987 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3988 
3989 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3990 	htt_logger_bufp = (uint8_t *)msg_word;
3991 	*msg_word = 0;
3992 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
3993 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
3994 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
3995 
3996 	/* word 1 */
3997 	msg_word++;
3998 	*msg_word = 0;
3999 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
4000 
4001 	/* word 2 */
4002 	msg_word++;
4003 	*msg_word = 0;
4004 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
4005 
4006 	/* word 3 */
4007 	msg_word++;
4008 	*msg_word = 0;
4009 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
4010 
4011 	/* word 4 */
4012 	msg_word++;
4013 	*msg_word = 0;
4014 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
4015 
4016 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
4017 
4018 	/* word 5 */
4019 	msg_word++;
4020 
4021 	/* word 6 */
4022 	msg_word++;
4023 	*msg_word = 0;
4024 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
4025 
4026 	/* word 7 */
4027 	msg_word++;
4028 	*msg_word = 0;
4029 	/* Currently Using last 2 bits for pdev_id
4030 	 * For future reference, reserving 3 bits in cookie_msb for pdev_id
4031 	 */
4032 	cookie_msb = (cookie_msb | pdev->pdev_id);
4033 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
4034 
4035 	pkt = htt_htc_pkt_alloc(soc);
4036 	if (!pkt) {
4037 		qdf_nbuf_free(msg);
4038 		return QDF_STATUS_E_NOMEM;
4039 	}
4040 
4041 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4042 
4043 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4044 			dp_htt_h2t_send_complete_free_netbuf,
4045 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4046 			soc->htc_endpoint,
4047 			/* tag for FW response msg not guaranteed */
4048 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4049 
4050 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4051 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
4052 				     htt_logger_bufp);
4053 
4054 	if (status != QDF_STATUS_SUCCESS) {
4055 		qdf_nbuf_free(msg);
4056 		htt_htc_pkt_free(soc, pkt);
4057 	}
4058 
4059 	return status;
4060 }
4061 
4062 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
4063 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK 0xFFFFFFFF
4064 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK 0xFFFFFFFF00000000
4065 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT 32
4066 
/**
 * dp_h2t_hw_vdev_stats_config_send() - Send HW vdev txrx stats config to FW
 * @dpsoc: DP SOC handle
 * @pdev_id: host pdev id, or INVALID_PDEV_ID for all pdevs (target id 0)
 * @enable: enable/disable HW vdev stats offload
 * @reset: request FW to reset the stats identified by @reset_bitmask
 * @reset_bitmask: 64-bit mask of stats to reset, split into two 32-bit words
 *
 * Builds an HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG message and hands it
 * to HTC; on send failure the nbuf and HTC packet are reclaimed.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	struct htt_soc *soc = dpsoc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;
	int duration;
	uint32_t bitmask;
	int target_pdev_id;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_vdevs_txrx_stats_cfg)),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate "
		"HTT_H2T_HW_VDEV_TXRX_STATS_CFG_MSG_SZ msg buffer", dpsoc);
		return QDF_STATUS_E_NOMEM;
	}

	if (pdev_id != INVALID_PDEV_ID)
		target_pdev_id = DP_SW2HW_MACID(pdev_id);
	else
		target_pdev_id = 0;

	/* periodic reporting interval from the wlan cfg context */
	duration =
	wlan_cfg_get_vdev_stats_hw_offload_timer(dpsoc->wlan_cfg_ctx);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_vdevs_txrx_stats_cfg))) {
		dp_htt_err("%pK: Failed to expand head for HTT_HW_VDEV_STATS"
			   , dpsoc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;

	/* word 0: msg type, pdev id, enable, interval and reset flag */
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG);
	HTT_RX_VDEVS_TXRX_STATS_PDEV_ID_SET(*msg_word, target_pdev_id);

	HTT_RX_VDEVS_TXRX_STATS_ENABLE_SET(*msg_word, enable);

	/* interval is sent as duration/8; assumes FW units of 8 — confirm */
	HTT_RX_VDEVS_TXRX_STATS_PERIODIC_INTERVAL_SET(*msg_word,
						      (duration >> 3));

	HTT_RX_VDEVS_TXRX_STATS_RESET_STATS_BITS_SET(*msg_word, reset);

	/* word 1: low 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask = (reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK);
	*msg_word = bitmask;

	/* word 2: high 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask =
		((reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK) >>
		 HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT);
	*msg_word = bitmask;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer",
			   dpsoc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG,
				     htt_logger_bufp);

	/* on send failure, reclaim both the nbuf and the HTC packet */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4172 #else
/* Stub: HW vdev stats offload not compiled in; report success as a no-op */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	return QDF_STATUS_SUCCESS;
}
4179 #endif
4180 
4181 /**
4182  * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration
4183  * HTT message to pass to FW
4184  * @pdev: DP PDEV handle
4185  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
4186  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
4187  *
4188  * tuple_mask[1:0]:
4189  *   00 - Do not report 3 tuple hash value
4190  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
4191  *   01 - Report 3 tuple hash value in flow_id_toeplitz
4192  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
4193  *
4194  * return: QDF STATUS
4195  */
4196 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
4197 				     uint32_t tuple_mask, uint8_t mac_id)
4198 {
4199 	struct htt_soc *soc = pdev->soc->htt_handle;
4200 	struct dp_htt_htc_pkt *pkt;
4201 	qdf_nbuf_t msg;
4202 	uint32_t *msg_word;
4203 	uint8_t *htt_logger_bufp;
4204 	int mac_for_pdev;
4205 	int target_pdev_id;
4206 
4207 	msg = qdf_nbuf_alloc(
4208 			soc->osdev,
4209 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
4210 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4211 
4212 	if (!msg)
4213 		return QDF_STATUS_E_NOMEM;
4214 
4215 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4216 	target_pdev_id =
4217 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
4218 
4219 	/*
4220 	 * Set the length of the message.
4221 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4222 	 * separately during the below call to qdf_nbuf_push_head.
4223 	 * The contribution from the HTC header is added separately inside HTC.
4224 	 */
4225 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
4226 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4227 			  "Failed to expand head for HTT_3TUPLE_CONFIG");
4228 		qdf_nbuf_free(msg);
4229 		return QDF_STATUS_E_FAILURE;
4230 	}
4231 
4232 	dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------",
4233 		    pdev->soc, tuple_mask, target_pdev_id);
4234 
4235 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
4236 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4237 	htt_logger_bufp = (uint8_t *)msg_word;
4238 
4239 	*msg_word = 0;
4240 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
4241 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
4242 
4243 	msg_word++;
4244 	*msg_word = 0;
4245 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4246 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4247 
4248 	pkt = htt_htc_pkt_alloc(soc);
4249 	if (!pkt) {
4250 		qdf_nbuf_free(msg);
4251 		return QDF_STATUS_E_NOMEM;
4252 	}
4253 
4254 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4255 
4256 	SET_HTC_PACKET_INFO_TX(
4257 			&pkt->htc_pkt,
4258 			dp_htt_h2t_send_complete_free_netbuf,
4259 			qdf_nbuf_data(msg),
4260 			qdf_nbuf_len(msg),
4261 			soc->htc_endpoint,
4262 			/* tag for no FW response msg */
4263 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4264 
4265 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4266 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
4267 			    htt_logger_bufp);
4268 
4269 	return QDF_STATUS_SUCCESS;
4270 }
4271 
4272 /* This macro will revert once proper HTT header will define for
4273  * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in htt.h file
4274  * */
4275 #if defined(WDI_EVENT_ENABLE)
4276 /**
4277  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
4278  * @pdev: DP PDEV handle
4279  * @stats_type_upload_mask: stats type requested by user
4280  * @mac_id: Mac id number
4281  *
4282  * return: QDF STATUS
4283  */
4284 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
4285 		uint32_t stats_type_upload_mask, uint8_t mac_id)
4286 {
4287 	struct htt_soc *soc = pdev->soc->htt_handle;
4288 	struct dp_htt_htc_pkt *pkt;
4289 	qdf_nbuf_t msg;
4290 	uint32_t *msg_word;
4291 	uint8_t pdev_mask;
4292 	QDF_STATUS status;
4293 
4294 	msg = qdf_nbuf_alloc(
4295 			soc->osdev,
4296 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
4297 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
4298 
4299 	if (!msg) {
4300 		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer"
4301 			   , pdev->soc);
4302 		qdf_assert(0);
4303 		return QDF_STATUS_E_NOMEM;
4304 	}
4305 
4306 	/*TODO:Add support for SOC stats
4307 	 * Bit 0: SOC Stats
4308 	 * Bit 1: Pdev stats for pdev id 0
4309 	 * Bit 2: Pdev stats for pdev id 1
4310 	 * Bit 3: Pdev stats for pdev id 2
4311 	 */
4312 	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
4313 								mac_id);
4314 
4315 	/*
4316 	 * Set the length of the message.
4317 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4318 	 * separately during the below call to qdf_nbuf_push_head.
4319 	 * The contribution from the HTC header is added separately inside HTC.
4320 	 */
4321 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
4322 		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS"
4323 			   , pdev->soc);
4324 		qdf_nbuf_free(msg);
4325 		return QDF_STATUS_E_FAILURE;
4326 	}
4327 
4328 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
4329 
4330 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4331 	*msg_word = 0;
4332 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
4333 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
4334 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
4335 			stats_type_upload_mask);
4336 
4337 	pkt = htt_htc_pkt_alloc(soc);
4338 	if (!pkt) {
4339 		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer", pdev->soc);
4340 		qdf_assert(0);
4341 		qdf_nbuf_free(msg);
4342 		return QDF_STATUS_E_NOMEM;
4343 	}
4344 
4345 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4346 
4347 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4348 			dp_htt_h2t_send_complete_free_netbuf,
4349 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4350 			soc->htc_endpoint,
4351 			/* tag for no FW response msg */
4352 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4353 
4354 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4355 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
4356 				     (uint8_t *)msg_word);
4357 
4358 	if (status != QDF_STATUS_SUCCESS) {
4359 		qdf_nbuf_free(msg);
4360 		htt_htc_pkt_free(soc, pkt);
4361 	}
4362 
4363 	return status;
4364 }
4365 
4366 qdf_export_symbol(dp_h2t_cfg_stats_msg_send);
4367 #endif
4368 
4369 void
4370 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
4371 			     uint32_t *tag_buf)
4372 {
4373 	struct dp_peer *peer = NULL;
4374 	switch (tag_type) {
4375 	case HTT_STATS_PEER_DETAILS_TAG:
4376 	{
4377 		htt_peer_details_tlv *dp_stats_buf =
4378 			(htt_peer_details_tlv *)tag_buf;
4379 
4380 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
4381 	}
4382 	break;
4383 	case HTT_STATS_PEER_STATS_CMN_TAG:
4384 	{
4385 		htt_peer_stats_cmn_tlv *dp_stats_buf =
4386 			(htt_peer_stats_cmn_tlv *)tag_buf;
4387 
4388 		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
4389 					     DP_MOD_ID_HTT);
4390 
4391 		if (peer && !peer->bss_peer) {
4392 			peer->stats.tx.inactive_time =
4393 				dp_stats_buf->inactive_time;
4394 			qdf_event_set(&pdev->fw_peer_stats_event);
4395 		}
4396 		if (peer)
4397 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4398 	}
4399 	break;
4400 	default:
4401 		qdf_err("Invalid tag_type");
4402 	}
4403 }
4404 
4405 /**
4406  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
4407  * @pdev: DP pdev handle
4408  * @fse_setup_info: FST setup parameters
4409  *
4410  * Return: Success when HTT message is sent, error on failure
4411  */
4412 QDF_STATUS
4413 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
4414 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
4415 {
4416 	struct htt_soc *soc = pdev->soc->htt_handle;
4417 	struct dp_htt_htc_pkt *pkt;
4418 	qdf_nbuf_t msg;
4419 	u_int32_t *msg_word;
4420 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4421 	uint8_t *htt_logger_bufp;
4422 	u_int32_t *key;
4423 	QDF_STATUS status;
4424 
4425 	msg = qdf_nbuf_alloc(
4426 		soc->osdev,
4427 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4428 		/* reserve room for the HTC header */
4429 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4430 
4431 	if (!msg)
4432 		return QDF_STATUS_E_NOMEM;
4433 
4434 	/*
4435 	 * Set the length of the message.
4436 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4437 	 * separately during the below call to qdf_nbuf_push_head.
4438 	 * The contribution from the HTC header is added separately inside HTC.
4439 	 */
4440 	if (!qdf_nbuf_put_tail(msg,
4441 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4442 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
4443 		return QDF_STATUS_E_FAILURE;
4444 	}
4445 
4446 	/* fill in the message contents */
4447 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4448 
4449 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4450 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4451 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4452 	htt_logger_bufp = (uint8_t *)msg_word;
4453 
4454 	*msg_word = 0;
4455 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4456 
4457 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4458 
4459 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4460 
4461 	msg_word++;
4462 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4463 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4464 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4465 					     fse_setup_info->ip_da_sa_prefix);
4466 
4467 	msg_word++;
4468 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4469 					  fse_setup_info->base_addr_lo);
4470 	msg_word++;
4471 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4472 					  fse_setup_info->base_addr_hi);
4473 
4474 	key = (u_int32_t *)fse_setup_info->hash_key;
4475 	fse_setup->toeplitz31_0 = *key++;
4476 	fse_setup->toeplitz63_32 = *key++;
4477 	fse_setup->toeplitz95_64 = *key++;
4478 	fse_setup->toeplitz127_96 = *key++;
4479 	fse_setup->toeplitz159_128 = *key++;
4480 	fse_setup->toeplitz191_160 = *key++;
4481 	fse_setup->toeplitz223_192 = *key++;
4482 	fse_setup->toeplitz255_224 = *key++;
4483 	fse_setup->toeplitz287_256 = *key++;
4484 	fse_setup->toeplitz314_288 = *key;
4485 
4486 	msg_word++;
4487 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4488 	msg_word++;
4489 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4490 	msg_word++;
4491 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4492 	msg_word++;
4493 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4494 	msg_word++;
4495 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4496 	msg_word++;
4497 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4498 	msg_word++;
4499 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4500 	msg_word++;
4501 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4502 	msg_word++;
4503 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4504 	msg_word++;
4505 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4506 					  fse_setup->toeplitz314_288);
4507 
4508 	pkt = htt_htc_pkt_alloc(soc);
4509 	if (!pkt) {
4510 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4511 		qdf_assert(0);
4512 		qdf_nbuf_free(msg);
4513 		return QDF_STATUS_E_RESOURCES; /* failure */
4514 	}
4515 
4516 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4517 
4518 	SET_HTC_PACKET_INFO_TX(
4519 		&pkt->htc_pkt,
4520 		dp_htt_h2t_send_complete_free_netbuf,
4521 		qdf_nbuf_data(msg),
4522 		qdf_nbuf_len(msg),
4523 		soc->htc_endpoint,
4524 		/* tag for no FW response msg */
4525 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4526 
4527 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4528 
4529 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4530 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4531 				     htt_logger_bufp);
4532 
4533 	if (status == QDF_STATUS_SUCCESS) {
4534 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4535 			fse_setup_info->pdev_id);
4536 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4537 				   (void *)fse_setup_info->hash_key,
4538 				   fse_setup_info->hash_key_len);
4539 	} else {
4540 		qdf_nbuf_free(msg);
4541 		htt_htc_pkt_free(soc, pkt);
4542 	}
4543 
4544 	return status;
4545 }
4546 
4547 /**
4548  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
4549  * add/del a flow in HW
4550  * @pdev: DP pdev handle
4551  * @fse_op_info: Flow entry parameters
4552  *
4553  * Return: Success when HTT message is sent, error on failure
4554  */
4555 QDF_STATUS
4556 dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
4557 			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
4558 {
4559 	struct htt_soc *soc = pdev->soc->htt_handle;
4560 	struct dp_htt_htc_pkt *pkt;
4561 	qdf_nbuf_t msg;
4562 	u_int32_t *msg_word;
4563 	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
4564 	uint8_t *htt_logger_bufp;
4565 	QDF_STATUS status;
4566 
4567 	msg = qdf_nbuf_alloc(
4568 		soc->osdev,
4569 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
4570 		/* reserve room for the HTC header */
4571 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4572 	if (!msg)
4573 		return QDF_STATUS_E_NOMEM;
4574 
4575 	/*
4576 	 * Set the length of the message.
4577 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4578 	 * separately during the below call to qdf_nbuf_push_head.
4579 	 * The contribution from the HTC header is added separately inside HTC.
4580 	 */
4581 	if (!qdf_nbuf_put_tail(msg,
4582 			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
4583 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4584 		qdf_nbuf_free(msg);
4585 		return QDF_STATUS_E_FAILURE;
4586 	}
4587 
4588 	/* fill in the message contents */
4589 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4590 
4591 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
4592 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4593 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4594 	htt_logger_bufp = (uint8_t *)msg_word;
4595 
4596 	*msg_word = 0;
4597 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
4598 
4599 	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
4600 
4601 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
4602 	msg_word++;
4603 	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
4604 	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
4605 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4606 					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
4607 		msg_word++;
4608 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4609 		*msg_word,
4610 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
4611 		msg_word++;
4612 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4613 		*msg_word,
4614 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
4615 		msg_word++;
4616 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4617 		*msg_word,
4618 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
4619 		msg_word++;
4620 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4621 		*msg_word,
4622 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
4623 		msg_word++;
4624 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4625 		*msg_word,
4626 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
4627 		msg_word++;
4628 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4629 		*msg_word,
4630 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
4631 		msg_word++;
4632 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4633 		*msg_word,
4634 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
4635 		msg_word++;
4636 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4637 		*msg_word,
4638 		qdf_htonl(
4639 		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
4640 		msg_word++;
4641 		HTT_RX_FSE_SOURCEPORT_SET(
4642 			*msg_word,
4643 			fse_op_info->rx_flow->flow_tuple_info.src_port);
4644 		HTT_RX_FSE_DESTPORT_SET(
4645 			*msg_word,
4646 			fse_op_info->rx_flow->flow_tuple_info.dest_port);
4647 		msg_word++;
4648 		HTT_RX_FSE_L4_PROTO_SET(
4649 			*msg_word,
4650 			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
4651 	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
4652 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4653 					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
4654 	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
4655 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
4656 	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
4657 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
4658 	}
4659 
4660 	pkt = htt_htc_pkt_alloc(soc);
4661 	if (!pkt) {
4662 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4663 		qdf_assert(0);
4664 		qdf_nbuf_free(msg);
4665 		return QDF_STATUS_E_RESOURCES; /* failure */
4666 	}
4667 
4668 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4669 
4670 	SET_HTC_PACKET_INFO_TX(
4671 		&pkt->htc_pkt,
4672 		dp_htt_h2t_send_complete_free_netbuf,
4673 		qdf_nbuf_data(msg),
4674 		qdf_nbuf_len(msg),
4675 		soc->htc_endpoint,
4676 		/* tag for no FW response msg */
4677 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4678 
4679 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4680 
4681 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4682 				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
4683 				     htt_logger_bufp);
4684 
4685 	if (status == QDF_STATUS_SUCCESS) {
4686 		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
4687 			fse_op_info->pdev_id);
4688 	} else {
4689 		qdf_nbuf_free(msg);
4690 		htt_htc_pkt_free(soc, pkt);
4691 	}
4692 
4693 	return status;
4694 }
4695 
4696 /**
4697  * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
4698  * @pdev: DP pdev handle
4699  * @fse_op_info: Flow entry parameters
4700  *
4701  * Return: Success when HTT message is sent, error on failure
4702  */
4703 QDF_STATUS
4704 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
4705 		      struct dp_htt_rx_fisa_cfg *fisa_config)
4706 {
4707 	struct htt_soc *soc = pdev->soc->htt_handle;
4708 	struct dp_htt_htc_pkt *pkt;
4709 	qdf_nbuf_t msg;
4710 	u_int32_t *msg_word;
4711 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
4712 	uint8_t *htt_logger_bufp;
4713 	uint32_t len;
4714 	QDF_STATUS status;
4715 
4716 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
4717 
4718 	msg = qdf_nbuf_alloc(soc->osdev,
4719 			     len,
4720 			     /* reserve room for the HTC header */
4721 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
4722 			     4,
4723 			     TRUE);
4724 	if (!msg)
4725 		return QDF_STATUS_E_NOMEM;
4726 
4727 	/*
4728 	 * Set the length of the message.
4729 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4730 	 * separately during the below call to qdf_nbuf_push_head.
4731 	 * The contribution from the HTC header is added separately inside HTC.
4732 	 */
4733 	if (!qdf_nbuf_put_tail(msg,
4734 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
4735 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4736 		qdf_nbuf_free(msg);
4737 		return QDF_STATUS_E_FAILURE;
4738 	}
4739 
4740 	/* fill in the message contents */
4741 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4742 
4743 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
4744 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4745 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4746 	htt_logger_bufp = (uint8_t *)msg_word;
4747 
4748 	*msg_word = 0;
4749 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
4750 
4751 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
4752 
4753 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);
4754 
4755 	msg_word++;
4756 	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
4757 	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);
4758 
4759 	msg_word++;
4760 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
4761 
4762 	pkt = htt_htc_pkt_alloc(soc);
4763 	if (!pkt) {
4764 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4765 		qdf_assert(0);
4766 		qdf_nbuf_free(msg);
4767 		return QDF_STATUS_E_RESOURCES; /* failure */
4768 	}
4769 
4770 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4771 
4772 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4773 			       dp_htt_h2t_send_complete_free_netbuf,
4774 			       qdf_nbuf_data(msg),
4775 			       qdf_nbuf_len(msg),
4776 			       soc->htc_endpoint,
4777 			       /* tag for no FW response msg */
4778 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
4779 
4780 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4781 
4782 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
4783 				     htt_logger_bufp);
4784 
4785 	if (status == QDF_STATUS_SUCCESS) {
4786 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
4787 			fisa_config->pdev_id);
4788 	} else {
4789 		qdf_nbuf_free(msg);
4790 		htt_htc_pkt_free(soc, pkt);
4791 	}
4792 
4793 	return status;
4794 }
4795 
#ifdef WLAN_SUPPORT_PPEDS
/**
 * dp_htt_rxdma_rxole_ppe_cfg_set() - Send RxOLE and RxDMA PPE config
 * @soc: Data path SoC handle
 * @cfg: RxDMA and RxOLE PPE config
 *
 * Builds a single-word HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG message from
 * @cfg and hands it to HTC. On any failure after allocation, the nbuf (and
 * the HTC packet, if already allocated) are freed here.
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rxdma_rxole_ppe_cfg_set(struct dp_soc *soc,
			       struct dp_htt_rxdma_rxole_ppe_config *cfg)
{
	struct htt_soc *htt_handle = soc->htt_handle;
	uint32_t len;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	QDF_STATUS status;
	uint8_t *htt_logger_bufp;
	struct dp_htt_htc_pkt *pkt;

	len = HTT_MSG_BUF_SIZE(
	      sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(
		msg, sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t))) {
		qdf_err("Failed to expand head for HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0,
	       sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));

	/* Rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	/* Word 0: message type followed by every PPE override knob from @cfg */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG);
	HTT_PPE_CFG_OVERRIDE_SET(*msg_word, cfg->override);
	HTT_PPE_CFG_REO_DEST_IND_SET(
			*msg_word, cfg->reo_destination_indication);
	HTT_PPE_CFG_MULTI_BUF_MSDU_OVERRIDE_EN_SET(
			*msg_word, cfg->multi_buffer_msdu_override_en);
	HTT_PPE_CFG_INTRA_BSS_OVERRIDE_EN_SET(
			*msg_word, cfg->intra_bss_override);
	HTT_PPE_CFG_DECAP_RAW_OVERRIDE_EN_SET(
			*msg_word, cfg->decap_raw_override);
	HTT_PPE_CFG_DECAP_NWIFI_OVERRIDE_EN_SET(
			*msg_word, cfg->decap_nwifi_override);
	HTT_PPE_CFG_IP_FRAG_OVERRIDE_EN_SET(
			*msg_word, cfg->ip_frag_override);

	pkt = htt_htc_pkt_alloc(htt_handle);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       htt_handle->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(htt_handle, pkt,
				     HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG,
				     htt_logger_bufp);

	/* On send failure the completion callback will not run; free here */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(htt_handle, pkt);
		return status;
	}

	dp_info("HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG sent");
	return status;
}
#endif /* WLAN_SUPPORT_PPEDS */
4901 
4902 /**
4903  * dp_bk_pressure_stats_handler(): worker function to print back pressure
4904  *				   stats
4905  *
4906  * @context : argument to work function
4907  */
4908 static void dp_bk_pressure_stats_handler(void *context)
4909 {
4910 	struct dp_pdev *pdev = (struct dp_pdev *)context;
4911 	struct dp_soc_srngs_state *soc_srngs_state = NULL;
4912 	const char *ring_name;
4913 	int i;
4914 	struct dp_srng_ring_state *ring_state;
4915 	bool empty_flag;
4916 
4917 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4918 
4919 	/* Extract only first entry for printing in one work event */
4920 	if (pdev->bkp_stats.queue_depth &&
4921 	    !TAILQ_EMPTY(&pdev->bkp_stats.list)) {
4922 		soc_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
4923 		TAILQ_REMOVE(&pdev->bkp_stats.list, soc_srngs_state,
4924 			     list_elem);
4925 		pdev->bkp_stats.queue_depth--;
4926 	}
4927 
4928 	empty_flag = TAILQ_EMPTY(&pdev->bkp_stats.list);
4929 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4930 
4931 	if (soc_srngs_state) {
4932 		DP_PRINT_STATS("### BKP stats for seq_num %u START ###",
4933 			       soc_srngs_state->seq_num);
4934 		for (i = 0; i < soc_srngs_state->max_ring_id; i++) {
4935 			ring_state = &soc_srngs_state->ring_state[i];
4936 			ring_name = dp_srng_get_str_from_hal_ring_type
4937 						(ring_state->ring_type);
4938 			DP_PRINT_STATS("%s: SW:Head pointer = %d Tail Pointer = %d\n",
4939 				       ring_name,
4940 				       ring_state->sw_head,
4941 				       ring_state->sw_tail);
4942 
4943 			DP_PRINT_STATS("%s: HW:Head pointer = %d Tail Pointer = %d\n",
4944 				       ring_name,
4945 				       ring_state->hw_head,
4946 				       ring_state->hw_tail);
4947 		}
4948 
4949 		DP_PRINT_STATS("### BKP stats for seq_num %u COMPLETE ###",
4950 			       soc_srngs_state->seq_num);
4951 		qdf_mem_free(soc_srngs_state);
4952 	}
4953 	dp_print_napi_stats(pdev->soc);
4954 
4955 	/* Schedule work again if queue is not empty */
4956 	if (!empty_flag)
4957 		qdf_queue_work(0, pdev->bkp_stats.work_queue,
4958 			       &pdev->bkp_stats.work);
4959 }
4960 
4961 /*
4962  * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
4963  *				processing
4964  * @pdev: Datapath PDEV handle
4965  *
4966  */
4967 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev)
4968 {
4969 	struct dp_soc_srngs_state *ring_state, *ring_state_next;
4970 
4971 	if (!pdev->bkp_stats.work_queue)
4972 		return;
4973 
4974 	qdf_flush_workqueue(0, pdev->bkp_stats.work_queue);
4975 	qdf_destroy_workqueue(0, pdev->bkp_stats.work_queue);
4976 	qdf_flush_work(&pdev->bkp_stats.work);
4977 	qdf_disable_work(&pdev->bkp_stats.work);
4978 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4979 	TAILQ_FOREACH_SAFE(ring_state, &pdev->bkp_stats.list,
4980 			   list_elem, ring_state_next) {
4981 		TAILQ_REMOVE(&pdev->bkp_stats.list, ring_state,
4982 			     list_elem);
4983 		qdf_mem_free(ring_state);
4984 	}
4985 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4986 	qdf_spinlock_destroy(&pdev->bkp_stats.list_lock);
4987 }
4988 
4989 /*
4990  * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
4991  *				processing
4992  * @pdev: Datapath PDEV handle
4993  *
4994  * Return: QDF_STATUS_SUCCESS: Success
4995  *         QDF_STATUS_E_NOMEM: Error
4996  */
4997 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev)
4998 {
4999 	TAILQ_INIT(&pdev->bkp_stats.list);
5000 	pdev->bkp_stats.seq_num = 0;
5001 	pdev->bkp_stats.queue_depth = 0;
5002 
5003 	qdf_create_work(0, &pdev->bkp_stats.work,
5004 			dp_bk_pressure_stats_handler, pdev);
5005 
5006 	pdev->bkp_stats.work_queue =
5007 		qdf_alloc_unbound_workqueue("dp_bkp_work_queue");
5008 	if (!pdev->bkp_stats.work_queue)
5009 		goto fail;
5010 
5011 	qdf_spinlock_create(&pdev->bkp_stats.list_lock);
5012 	return QDF_STATUS_SUCCESS;
5013 
5014 fail:
5015 	dp_htt_alert("BKP stats attach failed");
5016 	qdf_flush_work(&pdev->bkp_stats.work);
5017 	qdf_disable_work(&pdev->bkp_stats.work);
5018 	return QDF_STATUS_E_FAILURE;
5019 }
5020 
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_htt_umac_reset_send_setup_cmd() - Send the UMAC hang recovery
 *	prerequisite setup HTT command to the firmware
 * @soc: Data path SoC handle
 * @setup_params: MSI data and shared-memory address used for the recovery
 *		  handshake with the firmware
 *
 * Both T2H and H2T notification methods are configured as MSI plus H2T
 * polling. On any failure after allocation, the nbuf (and the HTC packet,
 * if already allocated) are freed here.
 *
 * Return: QDF_STATUS_SUCCESS when the message was handed to HTC,
 *	   error code otherwise
 */
QDF_STATUS dp_htt_umac_reset_send_setup_cmd(
		struct dp_soc *soc,
		const struct dp_htt_umac_reset_setup_cmd_params *setup_params)
{
	struct htt_soc *htt_handle = soc->htt_handle;
	uint32_t len;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	QDF_STATUS status;
	uint8_t *htt_logger_bufp;
	struct dp_htt_htc_pkt *pkt;

	len = HTT_MSG_BUF_SIZE(
		HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES);

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(
		msg, HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES)) {
		dp_htt_err("Failed to expand head");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	/* Rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	qdf_mem_zero(msg_word,
		     HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES);

	/* Word 0: message type and the T2H/H2T notification methods */
	HTT_H2T_MSG_TYPE_SET(
		*msg_word,
		HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP);
	HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_T2H_MSG_METHOD_SET(
		*msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling);
	HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_H2T_MSG_METHOD_SET(
		*msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling);

	/* Word 1: MSI data value the FW uses to signal the host */
	msg_word++;
	*msg_word = setup_params->msi_data;

	/* Word 2: size of the shared-memory handshake structure */
	msg_word++;
	*msg_word = sizeof(htt_umac_hang_recovery_msg_shmem_t);

	/* Words 3-4: shared-memory physical address, low then high */
	msg_word++;
	*msg_word = setup_params->shmem_addr_low;

	msg_word++;
	*msg_word = setup_params->shmem_addr_high;

	pkt = htt_htc_pkt_alloc(htt_handle);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       htt_handle->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(
			htt_handle, pkt,
			HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP,
			htt_logger_bufp);

	/* On send failure the completion callback will not run; free here */
	if (QDF_IS_STATUS_ERROR(status)) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(htt_handle, pkt);
		return status;
	}

	dp_info("HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP sent");
	return status;
}
#endif
5124