xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <htt.h>
21 #include <hal_hw_headers.h>
22 #include <hal_api.h>
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "dp_htt.h"
30 #ifdef WIFI_MONITOR_SUPPORT
31 #include <dp_mon.h>
32 #endif
33 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
34 #include "cdp_txrx_cmn_struct.h"
35 
36 #ifdef FEATURE_PERPKT_INFO
37 #include "dp_ratetable.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef CONFIG_SAWF_DEF_QUEUES
41 #include <dp_sawf_htt.h>
42 #endif
43 
44 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
45 
46 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
47 
48 #define HTT_MSG_BUF_SIZE(msg_bytes) \
49 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
50 
51 #define HTT_PID_BIT_MASK 0x3
52 
53 #define DP_EXT_MSG_LENGTH 2048
54 #define HTT_HEADER_LEN 16
55 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
56 
57 #define HTT_SHIFT_UPPER_TIMESTAMP 32
58 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
59 #define HTT_BKP_STATS_MAX_QUEUE_DEPTH 16
60 
61 struct dp_htt_htc_pkt *
62 htt_htc_pkt_alloc(struct htt_soc *soc)
63 {
64 	struct dp_htt_htc_pkt_union *pkt = NULL;
65 
66 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
67 	if (soc->htt_htc_pkt_freelist) {
68 		pkt = soc->htt_htc_pkt_freelist;
69 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
70 	}
71 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
72 
73 	if (!pkt)
74 		pkt = qdf_mem_malloc(sizeof(*pkt));
75 
76 	if (!pkt)
77 		return NULL;
78 
79 	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
80 
81 	return &pkt->u.pkt; /* not actually a dereference */
82 }
83 
84 qdf_export_symbol(htt_htc_pkt_alloc);
85 
86 void
87 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
88 {
89 	struct dp_htt_htc_pkt_union *u_pkt =
90 		(struct dp_htt_htc_pkt_union *)pkt;
91 
92 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
93 	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
94 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
95 	soc->htt_htc_pkt_freelist = u_pkt;
96 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
97 }
98 
99 qdf_export_symbol(htt_htc_pkt_free);
100 
101 /*
102  * htt_htc_pkt_pool_free() - Free HTC packet pool
103  * @htt_soc:	HTT SOC handle
104  */
105 void
106 htt_htc_pkt_pool_free(struct htt_soc *soc)
107 {
108 	struct dp_htt_htc_pkt_union *pkt, *next;
109 	pkt = soc->htt_htc_pkt_freelist;
110 	while (pkt) {
111 		next = pkt->u.next;
112 		qdf_mem_free(pkt);
113 		pkt = next;
114 	}
115 	soc->htt_htc_pkt_freelist = NULL;
116 }
117 
118 
119 #ifndef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
120 
121 /*
122  * htt_htc_misc_pkt_list_trim() - trim misc list
123  * @htt_soc: HTT SOC handle
124  * @level: max no. of pkts in list
125  */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list: once the walk passes @level
		 * entries, every remaining node is unmapped and freed.
		 */
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			/* sever the list at the first trimmed node; on later
			 * iterations prev is NULL (set from the freed pkt
			 * below), so the severing happens exactly once.
			 */
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
153 
154 /*
155  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
156  * @htt_soc:	HTT SOC handle
157  * @dp_htt_htc_pkt: pkt to be added to list
158  */
159 void
160 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
161 {
162 	struct dp_htt_htc_pkt_union *u_pkt =
163 				(struct dp_htt_htc_pkt_union *)pkt;
164 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
165 							pkt->htc_pkt.Endpoint)
166 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
167 
168 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
169 	if (soc->htt_htc_pkt_misclist) {
170 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
171 		soc->htt_htc_pkt_misclist = u_pkt;
172 	} else {
173 		soc->htt_htc_pkt_misclist = u_pkt;
174 	}
175 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
176 
177 	/* only ce pipe size + tx_queue_depth could possibly be in use
178 	 * free older packets in the misclist
179 	 */
180 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
181 }
182 
183 qdf_export_symbol(htt_htc_misc_pkt_list_add);
184 #endif  /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
185 
186 /*
187  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
188  * @htt_soc:	HTT SOC handle
189  */
static void
htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt, *next;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;

	while (pkt) {
		next = pkt->u.next;
		/* Packets whose magic cookie no longer matches are only
		 * counted and skipped, not freed.
		 * NOTE(review): the skipped wrapper memory is not released
		 * here — presumably it is still owned elsewhere (e.g. by
		 * HTC); confirm before changing this.
		 */
		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
		    HTC_PACKET_MAGIC_COOKIE) {
			pkt = next;
			soc->stats.skip_count++;
			continue;
		}
		/* unmap and free the network buffer carried by the packet */
		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);

		soc->stats.htc_pkt_free++;
		dp_htt_info("%pK: Pkt free count %d",
			    soc->dp_soc, soc->stats.htc_pkt_free);

		qdf_nbuf_free(netbuf);
		qdf_mem_free(pkt);
		pkt = next;
	}
	soc->htt_htc_pkt_misclist = NULL;
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
	dp_info("HTC Packets, fail count = %d, skip count = %d",
		soc->stats.fail_count, soc->stats.skip_count);
}
223 
224 /*
225  * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
226  * @tgt_mac_addr:	Target MAC
227  * @buffer:		Output buffer
228  */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness.
	 * To make u_int32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each u_int32_t element of the
	 * message.
	 * For byte-array message fields like the MAC address, this
	 * upload swizzling puts the bytes in the wrong order, and needs
	 * to be undone.
	 */
	/* first 32-bit word: MAC bytes 0..3 arrive reversed */
	buffer[0] = tgt_mac_addr[3];
	buffer[1] = tgt_mac_addr[2];
	buffer[2] = tgt_mac_addr[1];
	buffer[3] = tgt_mac_addr[0];
	/* second 32-bit word: MAC bytes 4 and 5 land at swizzled
	 * offsets 7 and 6 (the word's upper bytes) — not an off-by-one.
	 */
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
257 
258 /*
259  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
260  * @soc:	SOC handle
261  * @status:	Completion status
262  * @netbuf:	HTT buffer
263  */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	/* send-done callback: message buffer is no longer needed */
	qdf_nbuf_free(netbuf);
}
270 
271 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
272 /*
273  * dp_htt_h2t_send_complete() - H2T completion handler
274  * @context:	Opaque context (HTT SOC handle)
275  * @htc_pkt:	HTC packet
276  */
277 static void
278 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
279 {
280 	struct htt_soc *soc =  (struct htt_soc *) context;
281 	struct dp_htt_htc_pkt *htt_pkt;
282 	qdf_nbuf_t netbuf;
283 
284 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
285 
286 	/* process (free or keep) the netbuf that held the message */
287 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
288 	/*
289 	 * adf sendcomplete is required for windows only
290 	 */
291 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
292 	/* free the htt_htc_pkt / HTC_PACKET object */
293 	qdf_nbuf_free(netbuf);
294 	htt_htc_pkt_free(soc, htt_pkt);
295 }
296 
297 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
298 
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context:	Opaque context (HTT SOC handle)
 * @htc_pkt:	HTC packet
 */
304 static void
305 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
306 {
307 	void (*send_complete_part2)(
308 	     void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
309 	struct htt_soc *soc =  (struct htt_soc *) context;
310 	struct dp_htt_htc_pkt *htt_pkt;
311 	qdf_nbuf_t netbuf;
312 
313 	send_complete_part2 = htc_pkt->pPktContext;
314 
315 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
316 
317 	/* process (free or keep) the netbuf that held the message */
318 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
319 	/*
320 	 * adf sendcomplete is required for windows only
321 	*/
322 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
323 	if (send_complete_part2){
324 		send_complete_part2(
325 		    htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
326 	}
327 	/* free the htt_htc_pkt / HTC_PACKET object */
328 	htt_htc_pkt_free(soc, htt_pkt);
329 }
330 
331 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
332 
333 /*
 * dp_htt_h2t_add_tcl_metadata_ver_v1() - Add tcl_metadata version V1
335  * @htt_soc:	HTT SOC handle
336  * @msg:	Pointer to nbuf
337  *
338  * Return: 0 on success; error code on failure
339  */
340 static int dp_htt_h2t_add_tcl_metadata_ver_v1(struct htt_soc *soc,
341 					      qdf_nbuf_t *msg)
342 {
343 	uint32_t *msg_word;
344 
345 	*msg = qdf_nbuf_alloc(
346 		soc->osdev,
347 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
348 		/* reserve room for the HTC header */
349 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
350 	if (!*msg)
351 		return QDF_STATUS_E_NOMEM;
352 
353 	/*
354 	 * Set the length of the message.
355 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
356 	 * separately during the below call to qdf_nbuf_push_head.
357 	 * The contribution from the HTC header is added separately inside HTC.
358 	 */
359 	if (!qdf_nbuf_put_tail(*msg, HTT_VER_REQ_BYTES)) {
360 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
361 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
362 			  __func__);
363 		return QDF_STATUS_E_FAILURE;
364 	}
365 
366 	/* fill in the message contents */
367 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
368 
369 	/* rewind beyond alignment pad to get to the HTC header reserved area */
370 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
371 
372 	*msg_word = 0;
373 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
374 
375 	return QDF_STATUS_SUCCESS;
376 }
377 
378 #ifdef QCA_DP_TX_FW_METADATA_V2
379 /*
 * dp_htt_h2t_add_tcl_metadata_ver_v2() - Add tcl_metadata version V2
381  * @htt_soc:	HTT SOC handle
382  * @msg:	Pointer to nbuf
383  *
384  * Return: 0 on success; error code on failure
385  */
static int dp_htt_h2t_add_tcl_metadata_ver_v2(struct htt_soc *soc,
					      qdf_nbuf_t *msg)
{
	uint32_t *msg_word;

	/* message carries the version request plus one option TLV */
	*msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!*msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(*msg,
			       HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
			  __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0: message type */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);

	/* word 1: option TLV requesting TCL metadata v2 */
	msg_word++;
	*msg_word = 0;
	HTT_OPTION_TLV_TAG_SET(*msg_word, HTT_OPTION_TLV_TAG_TCL_METADATA_VER);
	HTT_OPTION_TLV_LENGTH_SET(*msg_word, HTT_TCL_METADATA_VER_SZ);
	HTT_OPTION_TLV_TCL_METADATA_VER_SET(*msg_word,
					    HTT_OPTION_TLV_TCL_METADATA_V2);

	return QDF_STATUS_SUCCESS;
}
432 
433 /*
 * dp_htt_h2t_add_tcl_metadata_ver() - Add tcl_metadata version
435  * @htt_soc:	HTT SOC handle
436  * @msg:	Pointer to nbuf
437  *
438  * Return: 0 on success; error code on failure
439  */
440 static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
441 {
442 	/* Use tcl_metadata_v1 when NSS offload is enabled */
443 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->dp_soc->wlan_cfg_ctx) ||
444 	    soc->dp_soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
445 		return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
446 	else
447 		return dp_htt_h2t_add_tcl_metadata_ver_v2(soc, msg);
448 }
449 #else
static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
{
	/* TCL metadata v2 support not compiled in; always use the v1 layout */
	return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
}
454 #endif
455 
456 /*
457  * htt_h2t_ver_req_msg() - Send HTT version request message to target
458  * @htt_soc:	HTT SOC handle
459  *
460  * Return: 0 on success; error code on failure
461  */
462 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
463 {
464 	struct dp_htt_htc_pkt *pkt;
465 	qdf_nbuf_t msg = NULL;
466 	QDF_STATUS status;
467 
468 	status = dp_htt_h2t_add_tcl_metadata_ver(soc, &msg);
469 	if (status != QDF_STATUS_SUCCESS)
470 		return status;
471 
472 	pkt = htt_htc_pkt_alloc(soc);
473 	if (!pkt) {
474 		qdf_nbuf_free(msg);
475 		return QDF_STATUS_E_FAILURE;
476 	}
477 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
478 
479 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
480 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
481 		qdf_nbuf_len(msg), soc->htc_endpoint,
482 		HTC_TX_PACKET_TAG_RTPM_PUT_RC);
483 
484 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
485 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
486 				     NULL);
487 
488 	if (status != QDF_STATUS_SUCCESS) {
489 		qdf_nbuf_free(msg);
490 		htt_htc_pkt_free(soc, pkt);
491 	}
492 
493 	return status;
494 }
495 
496 /*
497  * htt_srng_setup() - Send SRNG setup message to target
498  * @htt_soc:	HTT SOC handle
499  * @mac_id:	MAC Id
500  * @hal_srng:	Opaque HAL SRNG pointer
501  * @hal_ring_type:	SRNG ring type
502  *
503  * Return: 0 on success; error code on failure
504  */
505 int htt_srng_setup(struct htt_soc *soc, int mac_id,
506 		   hal_ring_handle_t hal_ring_hdl,
507 		   int hal_ring_type)
508 {
509 	struct dp_htt_htc_pkt *pkt;
510 	qdf_nbuf_t htt_msg;
511 	uint32_t *msg_word;
512 	struct hal_srng_params srng_params;
513 	qdf_dma_addr_t hp_addr, tp_addr;
514 	uint32_t ring_entry_size =
515 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
516 	int htt_ring_type, htt_ring_id;
517 	uint8_t *htt_logger_bufp;
518 	int target_pdev_id;
519 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
520 	QDF_STATUS status;
521 
522 	/* Sizes should be set in 4-byte words */
523 	ring_entry_size = ring_entry_size >> 2;
524 
525 	htt_msg = qdf_nbuf_alloc(soc->osdev,
526 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
527 		/* reserve room for the HTC header */
528 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
529 	if (!htt_msg) {
530 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
531 		goto fail0;
532 	}
533 
534 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
535 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
536 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
537 
538 	switch (hal_ring_type) {
539 	case RXDMA_BUF:
540 #ifdef QCA_HOST2FW_RXBUF_RING
541 		if (srng_params.ring_id ==
542 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
543 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
544 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
545 			htt_ring_type = HTT_SW_TO_SW_RING;
546 #ifdef IPA_OFFLOAD
547 		} else if (srng_params.ring_id ==
548 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 +
549 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
550 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
551 			htt_ring_type = HTT_SW_TO_SW_RING;
552 #ifdef IPA_WDI3_VLAN_SUPPORT
553 		} else if (srng_params.ring_id ==
554 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF3 +
555 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
556 			htt_ring_id = HTT_HOST3_TO_FW_RXBUF_RING;
557 			htt_ring_type = HTT_SW_TO_SW_RING;
558 #endif
559 #endif
560 #else
561 		if (srng_params.ring_id ==
562 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
563 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
564 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
565 			htt_ring_type = HTT_SW_TO_HW_RING;
566 #endif
567 		} else if (srng_params.ring_id ==
568 #ifdef IPA_OFFLOAD
569 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
570 #else
571 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
572 #endif
573 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
574 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
575 			htt_ring_type = HTT_SW_TO_HW_RING;
576 		} else {
577 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
578 				   "%s: Ring %d currently not supported",
579 				   __func__, srng_params.ring_id);
580 			goto fail1;
581 		}
582 
583 		break;
584 	case RXDMA_MONITOR_BUF:
585 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
586 							 RXDMA_MONITOR_BUF);
587 		htt_ring_type = HTT_SW_TO_HW_RING;
588 		break;
589 	case RXDMA_MONITOR_STATUS:
590 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
591 		htt_ring_type = HTT_SW_TO_HW_RING;
592 		break;
593 	case RXDMA_MONITOR_DST:
594 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
595 							 RXDMA_MONITOR_DST);
596 		htt_ring_type = HTT_HW_TO_SW_RING;
597 		break;
598 	case RXDMA_MONITOR_DESC:
599 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
600 		htt_ring_type = HTT_SW_TO_HW_RING;
601 		break;
602 	case RXDMA_DST:
603 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
604 		htt_ring_type = HTT_HW_TO_SW_RING;
605 		break;
606 	case TX_MONITOR_BUF:
607 		htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
608 		htt_ring_type = HTT_SW_TO_HW_RING;
609 		break;
610 	case TX_MONITOR_DST:
611 		htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
612 		htt_ring_type = HTT_HW_TO_SW_RING;
613 		break;
614 
615 	default:
616 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
617 			"%s: Ring currently not supported", __func__);
618 			goto fail1;
619 	}
620 
621 	dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
622 		hal_ring_type, srng_params.ring_id, htt_ring_id,
623 		(uint64_t)hp_addr,
624 		(uint64_t)tp_addr);
625 	/*
626 	 * Set the length of the message.
627 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
628 	 * separately during the below call to qdf_nbuf_push_head.
629 	 * The contribution from the HTC header is added separately inside HTC.
630 	 */
631 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
632 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
633 			"%s: Failed to expand head for SRING_SETUP msg",
634 			__func__);
635 		return QDF_STATUS_E_FAILURE;
636 	}
637 
638 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
639 
640 	/* rewind beyond alignment pad to get to the HTC header reserved area */
641 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
642 
643 	/* word 0 */
644 	*msg_word = 0;
645 	htt_logger_bufp = (uint8_t *)msg_word;
646 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
647 	target_pdev_id =
648 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
649 
650 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
651 			(htt_ring_type == HTT_HW_TO_SW_RING))
652 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
653 	else
654 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
655 
656 	dp_info("mac_id %d", mac_id);
657 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
658 	/* TODO: Discuss with FW on changing this to unique ID and using
659 	 * htt_ring_type to send the type of ring
660 	 */
661 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
662 
663 	/* word 1 */
664 	msg_word++;
665 	*msg_word = 0;
666 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
667 		srng_params.ring_base_paddr & 0xffffffff);
668 
669 	/* word 2 */
670 	msg_word++;
671 	*msg_word = 0;
672 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
673 		(uint64_t)srng_params.ring_base_paddr >> 32);
674 
675 	/* word 3 */
676 	msg_word++;
677 	*msg_word = 0;
678 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
679 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
680 		(ring_entry_size * srng_params.num_entries));
681 	dp_info("entry_size %d", ring_entry_size);
682 	dp_info("num_entries %d", srng_params.num_entries);
683 	dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
684 	if (htt_ring_type == HTT_SW_TO_HW_RING)
685 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
686 						*msg_word, 1);
687 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
688 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
689 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
690 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
691 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
692 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
693 
694 	/* word 4 */
695 	msg_word++;
696 	*msg_word = 0;
697 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
698 		hp_addr & 0xffffffff);
699 
700 	/* word 5 */
701 	msg_word++;
702 	*msg_word = 0;
703 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
704 		(uint64_t)hp_addr >> 32);
705 
706 	/* word 6 */
707 	msg_word++;
708 	*msg_word = 0;
709 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
710 		tp_addr & 0xffffffff);
711 
712 	/* word 7 */
713 	msg_word++;
714 	*msg_word = 0;
715 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
716 		(uint64_t)tp_addr >> 32);
717 
718 	/* word 8 */
719 	msg_word++;
720 	*msg_word = 0;
721 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
722 		srng_params.msi_addr & 0xffffffff);
723 
724 	/* word 9 */
725 	msg_word++;
726 	*msg_word = 0;
727 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
728 		(uint64_t)(srng_params.msi_addr) >> 32);
729 
730 	/* word 10 */
731 	msg_word++;
732 	*msg_word = 0;
733 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
734 		qdf_cpu_to_le32(srng_params.msi_data));
735 
736 	/* word 11 */
737 	msg_word++;
738 	*msg_word = 0;
739 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
740 		srng_params.intr_batch_cntr_thres_entries *
741 		ring_entry_size);
742 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
743 		srng_params.intr_timer_thres_us >> 3);
744 
745 	/* word 12 */
746 	msg_word++;
747 	*msg_word = 0;
748 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
749 		/* TODO: Setting low threshold to 1/8th of ring size - see
750 		 * if this needs to be configurable
751 		 */
752 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
753 			srng_params.low_threshold);
754 	}
755 	/* "response_required" field should be set if a HTT response message is
756 	 * required after setting up the ring.
757 	 */
758 	pkt = htt_htc_pkt_alloc(soc);
759 	if (!pkt) {
760 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
761 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
762 		goto fail1;
763 	}
764 
765 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
766 
767 	SET_HTC_PACKET_INFO_TX(
768 		&pkt->htc_pkt,
769 		dp_htt_h2t_send_complete_free_netbuf,
770 		qdf_nbuf_data(htt_msg),
771 		qdf_nbuf_len(htt_msg),
772 		soc->htc_endpoint,
773 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
774 
775 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
776 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
777 				     htt_logger_bufp);
778 
779 	if (status != QDF_STATUS_SUCCESS) {
780 		qdf_nbuf_free(htt_msg);
781 		htt_htc_pkt_free(soc, pkt);
782 	}
783 
784 	return status;
785 
786 fail1:
787 	qdf_nbuf_free(htt_msg);
788 fail0:
789 	return QDF_STATUS_E_FAILURE;
790 }
791 
792 qdf_export_symbol(htt_srng_setup);
793 
794 #ifdef QCA_SUPPORT_FULL_MON
795 /**
 * htt_h2t_full_mon_cfg() - Send full monitor configuration msg to FW
797  *
798  * @htt_soc: HTT Soc handle
799  * @pdev_id: Radio id
800  * @dp_full_mon_config: enabled/disable configuration
801  *
802  * Return: Success when HTT message is sent, error on failure
803  */
804 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
805 			 uint8_t pdev_id,
806 			 enum dp_full_mon_config config)
807 {
808 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
809 	struct dp_htt_htc_pkt *pkt;
810 	qdf_nbuf_t htt_msg;
811 	uint32_t *msg_word;
812 	uint8_t *htt_logger_bufp;
813 
814 	htt_msg = qdf_nbuf_alloc(soc->osdev,
815 				 HTT_MSG_BUF_SIZE(
816 				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
817 				 /* reserve room for the HTC header */
818 				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
819 				 4,
820 				 TRUE);
821 	if (!htt_msg)
822 		return QDF_STATUS_E_FAILURE;
823 
824 	/*
825 	 * Set the length of the message.
826 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
827 	 * separately during the below call to qdf_nbuf_push_head.
828 	 * The contribution from the HTC header is added separately inside HTC.
829 	 */
830 	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
831 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
832 			  "%s: Failed to expand head for RX Ring Cfg msg",
833 			  __func__);
834 		goto fail1;
835 	}
836 
837 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
838 
839 	/* rewind beyond alignment pad to get to the HTC header reserved area */
840 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
841 
842 	/* word 0 */
843 	*msg_word = 0;
844 	htt_logger_bufp = (uint8_t *)msg_word;
845 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
846 	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
847 			*msg_word, DP_SW2HW_MACID(pdev_id));
848 
849 	msg_word++;
850 	*msg_word = 0;
851 	/* word 1 */
852 	if (config == DP_FULL_MON_ENABLE) {
853 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
854 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
855 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
856 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
857 	} else if (config == DP_FULL_MON_DISABLE) {
858 		/* As per MAC team's suggestion, While disbaling full monitor
859 		 * mode, Set 'en' bit to true in full monitor mode register.
860 		 */
861 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
862 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
863 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
864 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
865 	}
866 
867 	pkt = htt_htc_pkt_alloc(soc);
868 	if (!pkt) {
869 		qdf_err("HTC packet allocation failed");
870 		goto fail1;
871 	}
872 
873 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
874 
875 	SET_HTC_PACKET_INFO_TX(
876 		&pkt->htc_pkt,
877 		dp_htt_h2t_send_complete_free_netbuf,
878 		qdf_nbuf_data(htt_msg),
879 		qdf_nbuf_len(htt_msg),
880 		soc->htc_endpoint,
881 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
882 
883 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
884 	qdf_debug("config: %d", config);
885 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
886 			    htt_logger_bufp);
887 	return QDF_STATUS_SUCCESS;
888 fail1:
889 	qdf_nbuf_free(htt_msg);
890 	return QDF_STATUS_E_FAILURE;
891 }
892 
893 qdf_export_symbol(htt_h2t_full_mon_cfg);
894 #else
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	/* full monitor mode not compiled in (QCA_SUPPORT_FULL_MON unset);
	 * nothing to configure, report success
	 */
	return 0;
}
901 
902 qdf_export_symbol(htt_h2t_full_mon_cfg);
903 #endif
904 
905 #ifdef QCA_UNDECODED_METADATA_SUPPORT
static inline void
dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
	/* Program PHY-error filter fields into the ring-selection-cfg
	 * message.  Only the local msg_word copy is advanced; the caller's
	 * pointer is unchanged.
	 * NOTE(review): assumes msg_word points at the word carrying the
	 * FP PHY err fields and that two more words follow — confirm
	 * against the caller's message layout.
	 */
	if (htt_tlv_filter->phy_err_filter_valid) {
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_SET
			(*msg_word, htt_tlv_filter->fp_phy_err);
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_SRC_SET
			(*msg_word, htt_tlv_filter->fp_phy_err_buf_src);
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_DEST_SET
			(*msg_word, htt_tlv_filter->fp_phy_err_buf_dest);

		/* word 12*/
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_SET
			(*msg_word, htt_tlv_filter->phy_err_mask);

		/* word 13*/
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_CONT_SET
			(*msg_word, htt_tlv_filter->phy_err_mask_cont);
	}
}
931 #else
static inline void
dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
	/* no-op: QCA_UNDECODED_METADATA_SUPPORT not compiled in */
}
937 #endif
938 
939 /*
940  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
941  * config message to target
942  * @htt_soc:	HTT SOC handle
943  * @pdev_id:	WIN- PDEV Id, MCL- mac id
944  * @hal_srng:	Opaque HAL SRNG pointer
945  * @hal_ring_type:	SRNG ring type
946  * @ring_buf_size:	SRNG buffer size
947  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
948  * Return: 0 on success; error code on failure
949  */
int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
			hal_ring_handle_t hal_ring_hdl,
			int hal_ring_type, int ring_buf_size,
			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t htt_msg;
	uint32_t *msg_word;
	uint32_t *msg_word_data;
	struct hal_srng_params srng_params;
	uint32_t htt_ring_type, htt_ring_id;
	uint32_t tlv_filter;
	uint8_t *htt_logger_bufp;
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
	/* monitor destination ring drop threshold from soc cfg; > 0 enables
	 * the drop-threshold fields in the message below
	 */
	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
	int target_pdev_id;
	QDF_STATUS status;

	htt_msg = qdf_nbuf_alloc(soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
	/* reserve room for the HTC header */
	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!htt_msg) {
		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
		goto fail0;
	}

	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);

	/* Map the HAL ring type to the HTT ring id and transfer direction
	 * the FW expects in the RX_RING_SELECTION_CFG message.
	 */
	switch (hal_ring_type) {
	case RXDMA_BUF:
		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_BUF:
		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
							 RXDMA_MONITOR_BUF);
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_STATUS:
		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_DST:
		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
							 RXDMA_MONITOR_DST);
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case RXDMA_MONITOR_DESC:
		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_DST:
		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Ring currently not supported", __func__);
		goto fail1;
	}

	dp_info("ring_type %d ring_id %d htt_ring_id %d",
		hal_ring_type, srng_params.ring_id, htt_ring_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Failed to expand head for RX Ring Cfg msg",
			__func__);
		goto fail1; /* failure */
	}

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0 */
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);

	/* applicable only for post Li */
	dp_rx_mon_enable(soc->dp_soc, msg_word, htt_tlv_filter);

	/*
	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
	 */
	target_pdev_id =
	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);

	if (htt_ring_type == HTT_SW_TO_SW_RING ||
			htt_ring_type == HTT_SW_TO_HW_RING ||
			htt_ring_type == HTT_HW_TO_SW_RING)
		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
						      target_pdev_id);

	/* TODO: Discuss with FW on changing this to unique ID and using
	 * htt_ring_type to send the type of ring
	 */
	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);

	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));

	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
						htt_tlv_filter->offset_valid);

	if (mon_drop_th > 0)
		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
								   1);
	else
		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
								   0);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
		ring_buf_size);

	dp_mon_rx_packet_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
	dp_mon_rx_hdr_length_set(soc->dp_soc, msg_word, htt_tlv_filter);

	/* word 2: FP/MD/MO enable bits for MGMT subtypes 0000-1001 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0000,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0001,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0010,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0011,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0100,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0101,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0110,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
			MGMT, 0111,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 1000,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 1001,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
			/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0000,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0001,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0010,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0011,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0100,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0101,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0110,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
			MGMT, 0111,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 1000,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 1001,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0000,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0001,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0010,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0011,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0100,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0101,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0110,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
			MGMT, 0111,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 1000,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 1001,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	/* word 3: FP/MD/MO enable bits for MGMT subtypes 1010-1111 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1010,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1011,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1100,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1101,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1110,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* reserved*/
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
			MGMT, 1111,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_RESERVED_15) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
			/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1010,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1011,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1100,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1101,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1110,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1010,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1011,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1100,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1101,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1110,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* reserved*/
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
			MGMT, 1111,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_RESERVED_15) ? 1 : 0);
	}

	/* word 4: FP/MD/MO enable bits for CTRL subtypes 0000-1001 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0000,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0001,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0010,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0011,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0100,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0101,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0110,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0111,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 1000,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 1001,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0000,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0001,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0010,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0011,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0100,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0101,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0110,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0111,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 1000,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 1001,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0000,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0001,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0010,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0011,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0100,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0101,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0110,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0111,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 1000,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 1001,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	/* word 5: FP/MD/MO enable bits for CTRL subtypes 1010-1111 and DATA */
	msg_word++;
	*msg_word = 0;
	if (htt_tlv_filter->enable_fp) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1010,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1011,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1100,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1101,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1110,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1111,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, MCAST,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, UCAST,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, NULL,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1010,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1011,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1100,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1101,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1110,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1111,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, MCAST,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, UCAST,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, NULL,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1010,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1011,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1100,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1101,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1110,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1111,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, MCAST,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, UCAST,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, NULL,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	/* word 6: TLV subscription bitmap */
	msg_word++;
	*msg_word = 0;
	tlv_filter = 0;
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
		htt_tlv_filter->mpdu_start);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
		htt_tlv_filter->msdu_start);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
		htt_tlv_filter->packet);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
		htt_tlv_filter->msdu_end);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
		htt_tlv_filter->mpdu_end);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
		htt_tlv_filter->packet_header);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
		htt_tlv_filter->attention);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
		htt_tlv_filter->ppdu_start);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
		htt_tlv_filter->ppdu_end);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
		htt_tlv_filter->ppdu_end_user_stats);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
		PPDU_END_USER_STATS_EXT,
		htt_tlv_filter->ppdu_end_user_stats_ext);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
		htt_tlv_filter->ppdu_end_status_done);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START_USER_INFO,
		htt_tlv_filter->ppdu_start_user_info);
	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
		 htt_tlv_filter->header_per_msdu);

	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);

	msg_word_data = (uint32_t *)qdf_nbuf_data(htt_msg);
	dp_info("config_data: [0x%x][0x%x][0x%x][0x%x][0x%x][0x%x][0x%x]",
		msg_word_data[0], msg_word_data[1], msg_word_data[2],
		msg_word_data[3], msg_word_data[4], msg_word_data[5],
		msg_word_data[6]);

	/* word 7 */
	msg_word++;
	*msg_word = 0;
	if (htt_tlv_filter->offset_valid) {
		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_packet_offset);
		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_header_offset);

		/* word 8 */
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_mpdu_end_offset);
		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_mpdu_start_offset);

		/* word 9 */
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_msdu_end_offset);
		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_msdu_start_offset);

		/* word 10 */
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_attn_offset);

		/* word 11 */
		msg_word++;
		*msg_word = 0;
	} else {
		/* word 11 */
		/* NOTE(review): words 7-10 are skipped without being zeroed
		 * when offsets are not valid - presumably the FW ignores them
		 * since the offsets-valid flag in word 0 is clear; confirm.
		 */
		msg_word += 4;
		*msg_word = 0;
	}

	if (mon_drop_th > 0)
		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
								mon_drop_th);
	dp_mon_rx_enable_mpdu_logging(soc->dp_soc, msg_word, htt_tlv_filter);

	dp_mon_rx_enable_phy_errors(msg_word, htt_tlv_filter);

	/* word 14*/
	/* The two helpers above advance their own copies of the pointer to
	 * fill words 12-13 when enabled; our msg_word is still at word 11,
	 * so advance by 3 to reach word 14.
	 */
	msg_word += 3;
	*msg_word = 0;

	dp_mon_rx_wmask_subscribe(soc->dp_soc, msg_word, htt_tlv_filter);

#ifdef FW_SUPPORT_NOT_YET
	/* word 17*/
	msg_word += 3;
	*msg_word = 0;

	dp_mon_rx_enable_fpmo(soc->dp_soc, msg_word, htt_tlv_filter);
#endif/* FW_SUPPORT_NOT_YET */

	/* "response_required" field should be set if a HTT response message is
	 * required after setting up the ring.
	 */
	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
		       hal_ring_type, srng_params.ring_id, htt_ring_id);
		goto fail1;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(htt_msg),
		qdf_nbuf_len(htt_msg),
		soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
				     htt_logger_bufp);

	/* on send failure the nbuf and the HTC pkt wrapper are still ours */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(htt_msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;

fail1:
	qdf_nbuf_free(htt_msg);
fail0:
	return QDF_STATUS_E_FAILURE;
}

qdf_export_symbol(htt_h2t_rx_ring_cfg);
1722 
1723 #if defined(HTT_STATS_ENABLE)
1724 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1725 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1726 
1727 {
1728 	uint32_t pdev_id;
1729 	uint32_t *msg_word = NULL;
1730 	uint32_t msg_remain_len = 0;
1731 
1732 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1733 
1734 	/*COOKIE MSB*/
1735 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1736 
1737 	/* stats message length + 16 size of HTT header*/
1738 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1739 				(uint32_t)DP_EXT_MSG_LENGTH);
1740 
1741 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1742 			msg_word,  msg_remain_len,
1743 			WDI_NO_VAL, pdev_id);
1744 
1745 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1746 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1747 	}
1748 	/* Need to be freed here as WDI handler will
1749 	 * make a copy of pkt to send data to application
1750 	 */
1751 	qdf_nbuf_free(htt_msg);
1752 	return QDF_STATUS_SUCCESS;
1753 }
1754 #else
/*
 * Stub when HTT_STATS_ENABLE is not defined: report no-support so the
 * caller (dp_process_htt_stat_msg) does not treat the message as consumed
 * and keeps processing it itself.
 */
static inline QDF_STATUS
dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	return QDF_STATUS_E_NOSUPPORT;
}
1761 #endif
1762 
1763 #ifdef HTT_STATS_DEBUGFS_SUPPORT
/**
 * dp_htt_stats_dbgfs_send_msg() - Function to send htt data to upper layer.
 * @pdev: dp pdev handle
 * @msg_word: HTT msg
 * @msg_len: Length of HTT msg sent
 *
 * Return: none
 */
1771 static inline void
1772 dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
1773 			    uint32_t msg_len)
1774 {
1775 	struct htt_dbgfs_cfg dbgfs_cfg;
1776 	int done = 0;
1777 
1778 	/* send 5th word of HTT msg to upper layer */
1779 	dbgfs_cfg.msg_word = (msg_word + 4);
1780 	dbgfs_cfg.m = pdev->dbgfs_cfg->m;
1781 
1782 	/* stats message length + 16 size of HTT header*/
1783 	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);
1784 
1785 	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
1786 		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
1787 							     (msg_len - HTT_HEADER_LEN));
1788 
1789 	/* Get TLV Done bit from 4th msg word */
1790 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1791 	if (done) {
1792 		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
1793 			dp_htt_err("%pK: Failed to set event for debugfs htt stats"
1794 				   , pdev->soc);
1795 	}
1796 }
1797 #else
/* Stub when HTT_STATS_DEBUGFS_SUPPORT is not enabled: nothing to forward. */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
}
1803 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
1804 
1805 #ifdef WLAN_SYSFS_DP_STATS
/**
 * dp_htt_stats_sysfs_update_config() - Update sysfs config with the
 * requesting process context.
 * @pdev: dp pdev handle
 *
 * This function sets the process id and printing mode within the sysfs config
 * struct, which enables DP_PRINT statements within this process to write to the
 * console buffer provided by the user space.
 *
 * Return: None
 */
1815 static inline void
1816 dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
1817 {
1818 	struct dp_soc *soc = pdev->soc;
1819 
1820 	if (!soc) {
1821 		dp_htt_err("soc is null");
1822 		return;
1823 	}
1824 
1825 	if (!soc->sysfs_config) {
1826 		dp_htt_err("soc->sysfs_config is NULL");
1827 		return;
1828 	}
1829 
1830 	/* set sysfs config parameters */
1831 	soc->sysfs_config->process_id = qdf_get_current_pid();
1832 	soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
1833 }
1834 
1835 /*
1836  * dp_htt_stats_sysfs_set_event() - Set sysfs stats event.
1837  * @soc: soc handle.
1838  * @msg_word: Pointer to htt msg word.
1839  *
1840  * @return: void
1841  */
1842 static inline void
1843 dp_htt_stats_sysfs_set_event(struct dp_soc *soc, uint32_t *msg_word)
1844 {
1845 	int done = 0;
1846 
1847 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1848 	if (done) {
1849 		if (qdf_event_set(&soc->sysfs_config->sysfs_txrx_fw_request_done))
1850 			dp_htt_err("%pK:event compl Fail to set event ",
1851 				   soc);
1852 	}
1853 }
1854 #else /* WLAN_SYSFS_DP_STATS */
/* Stub when WLAN_SYSFS_DP_STATS is not enabled: sysfs config update is a no-op. */
static inline void
dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
{
}

/* Stub when WLAN_SYSFS_DP_STATS is not enabled: no sysfs event to signal. */
static inline void
dp_htt_stats_sysfs_set_event(struct dp_soc *dp_soc, uint32_t *msg_word)
{
}
1864 #endif /* WLAN_SYSFS_DP_STATS */
1865 
1866 /**
1867  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1868  * @htt_stats: htt stats info
1869  *
1870  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1871  * contains sub messages which are identified by a TLV header.
1872  * In this function we will process the stream of T2H messages and read all the
1873  * TLV contained in the message.
1874  *
1875  * THe following cases have been taken care of
1876  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1877  *		In this case the buffer will contain multiple tlvs.
1878  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1879  *		Only one tlv will be contained in the HTT message and this tag
1880  *		will extend onto the next buffer.
1881  * Case 3: When the buffer is the continuation of the previous message
1882  * Case 4: tlv length is 0. which will indicate the end of message
1883  *
1884  * return: void
1885  */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	/* Reassembly buffer for a TLV that spans multiple nbufs (Case 2/3) */
	uint8_t *tlv_buf_head = NULL;
	uint8_t *tlv_buf_tail = NULL;
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val = 0;
	int cookie_msb = 0;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		/* A non-zero cookie LSB requests a direct response path;
		 * on success the nbuf ownership moved, skip local parsing.
		 */
		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		cookie_msb = *(msg_word + 2);
		/* low bits of cookie MSB carry the pdev index */
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		/* debugfs-initiated request: hand raw message off and move on */
		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
						    htt_stats->msg_len);
			qdf_nbuf_free(htt_msg);
			continue;
		}

		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
			dp_htt_stats_sysfs_update_config(pdev);

		/* caller asked for stats to be copied rather than printed */
		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
			copy_stats = true;

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero-length TLV marks end of message */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* fresh TLV: account for its header as well */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3 */
				if (tlv_buf_head) {
					/* tail piece of a spanning TLV:
					 * append and parse the full copy
					 */
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev,
							      tlv_type,
							      tlv_start);
				else
					dp_htt_stats_print_tag(pdev,
							       tlv_type,
							       tlv_start);

				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
					dp_peer_update_inactive_time(pdev,
								     tlv_type,
								     tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				/* copy whatever fits; remainder arrives in
				 * the next nbuf of the stream
				 */
				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		/* indicate event completion in case the event is done */
		/* NOTE(review): msg_word has been advanced past the parsed
		 * TLVs at this point — confirm dp_htt_stats_sysfs_set_event
		 * (which reads *(msg_word + 3)) expects this offset.
		 */
		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
			dp_htt_stats_sysfs_set_event(soc, msg_word);

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* drop the current buffer and flush the rest of the stream */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
2050 
/**
 * htt_t2h_stats_handler() - Deferred-work handler for queued HTT EXT stats
 * @context: opaque dp_soc pointer supplied at work scheduling time
 *
 * Pulls one completed stats stream (a sequence of nbufs terminated by the
 * DONE bit) off soc->htt_stats.msg under the stats lock, reschedules the
 * work if more streams remain, then parses the pulled stream.
 *
 * Return: None
 */
void htt_t2h_stats_handler(void *context)
{
	struct dp_soc *soc = (struct dp_soc *)context;
	struct htt_stats_context htt_stats;
	uint32_t *msg_word;
	qdf_nbuf_t htt_msg = NULL;
	uint8_t done;
	uint32_t rem_stats;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc is NULL");
		return;
	}

	/* bail out if DP common init has not completed yet */
	if (!qdf_atomic_read(&soc->cmn_init_done)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc: 0x%pK, init_done: %d", soc,
			  qdf_atomic_read(&soc->cmn_init_done));
		return;
	}

	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
	qdf_nbuf_queue_init(&htt_stats.msg);

	/* pull one completed stats from soc->htt_stats_msg and process */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	if (!soc->htt_stats.num_stats) {
		qdf_spin_unlock_bh(&soc->htt_stats.lock);
		return;
	}
	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
		/*
		 * Done bit signifies that this is the last T2H buffer in the
		 * stream of HTT EXT STATS message
		 */
		if (done)
			break;
	}
	/* one full stream consumed; count is still lock-protected here */
	rem_stats = --soc->htt_stats.num_stats;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	/* If there are more stats to process, schedule stats work again.
	 * Scheduling prior to processing ht_stats to queue with early
	 * index
	 */
	if (rem_stats)
		qdf_sched_work(0, &soc->htt_stats.work);

	dp_process_htt_stat_msg(&htt_stats, soc);
}
2106 
2107 /**
2108  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2109  * @soc: DP SOC handle
2110  * @htt_t2h_msg: HTT message nbuf
2111  *
2112  * return:void
2113  */
static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
					    qdf_nbuf_t htt_t2h_msg)
{
	uint8_t done;
	qdf_nbuf_t msg_copy;
	uint32_t *msg_word;

	/* DONE bit lives in the 4th word of the T2H message */
	msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
	msg_word = msg_word + 3;
	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);

	/*
	 * HTT EXT stats response comes as stream of TLVs which span over
	 * multiple T2H messages.
	 * The first message will carry length of the response.
	 * For rest of the messages length will be zero.
	 *
	 * Clone the T2H message buffer and store it in a list to process
	 * it later.
	 *
	 * The original T2H message buffers gets freed in the T2H HTT event
	 * handler
	 */
	msg_copy = qdf_nbuf_clone(htt_t2h_msg);

	if (!msg_copy) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "T2H messge clone failed for HTT EXT STATS");
		goto error;
	}

	qdf_spin_lock_bh(&soc->htt_stats.lock);
	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
	/*
	 * Done bit signifies that this is the last T2H buffer in the stream of
	 * HTT EXT STATS message
	 */
	if (done) {
		/* a complete stream is queued; kick the deferred worker */
		soc->htt_stats.num_stats++;
		qdf_sched_work(0, &soc->htt_stats.work);
	}
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	return;

error:
	/* clone failed mid-stream: the partial stream already queued is
	 * useless, flush everything and reset the pending-stream count
	 */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
			!= NULL) {
		qdf_nbuf_free(msg_copy);
	}
	soc->htt_stats.num_stats = 0;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);
	return;
}
2169 
2170 /*
2171  * htt_soc_attach_target() - SOC level HTT setup
2172  * @htt_soc:	HTT SOC handle
2173  *
2174  * Return: 0 on success; error code on failure
2175  */
int htt_soc_attach_target(struct htt_soc *htt_soc)
{
	/* SOC-level HTT setup reduces to the H2T version-request
	 * handshake; its return code is passed straight through.
	 */
	return htt_h2t_ver_req_msg(htt_soc);
}
2182 
/* Cache the HTC handle used for subsequent H2T message transmission */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}
2187 
/* Return the HTC handle previously stored via htt_set_htc_handle() */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
2192 
2193 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
2194 {
2195 	int i;
2196 	int j;
2197 	int umac_alloc_size = HTT_SW_UMAC_RING_IDX_MAX *
2198 			      sizeof(struct bp_handler);
2199 	int lmac_alloc_size = HTT_SW_LMAC_RING_IDX_MAX *
2200 			      sizeof(struct bp_handler);
2201 	struct htt_soc *htt_soc = NULL;
2202 
2203 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
2204 	if (!htt_soc) {
2205 		dp_err("HTT attach failed");
2206 		return NULL;
2207 	}
2208 
2209 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2210 		htt_soc->pdevid_tt[i].umac_path =
2211 			qdf_mem_malloc(umac_alloc_size);
2212 		if (!htt_soc->pdevid_tt[i].umac_path)
2213 			break;
2214 		for (j = 0; j < HTT_SW_UMAC_RING_IDX_MAX; j++)
2215 			htt_soc->pdevid_tt[i].umac_path[j].bp_start_tt = -1;
2216 		htt_soc->pdevid_tt[i].lmac_path =
2217 			qdf_mem_malloc(lmac_alloc_size);
2218 		if (!htt_soc->pdevid_tt[i].lmac_path) {
2219 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_path);
2220 			break;
2221 		}
2222 		for (j = 0; j < HTT_SW_LMAC_RING_IDX_MAX ; j++)
2223 			htt_soc->pdevid_tt[i].lmac_path[j].bp_start_tt = -1;
2224 	}
2225 
2226 	if (i != MAX_PDEV_CNT) {
2227 		for (j = 0; j < i; j++) {
2228 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_path);
2229 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_path);
2230 		}
2231 		qdf_mem_free(htt_soc);
2232 		return NULL;
2233 	}
2234 
2235 	htt_soc->dp_soc = soc;
2236 	htt_soc->htc_soc = htc_handle;
2237 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
2238 
2239 	return htt_soc;
2240 }
2241 
2242 #if defined(WDI_EVENT_ENABLE) && \
2243 	!defined(REMOVE_PKT_LOG)
2244 /*
2245  * dp_pktlog_msg_handler() - Pktlog msg handler
2246  * @htt_soc:	 HTT SOC handle
2247  * @msg_word:    Pointer to payload
2248  *
2249  * Return: None
2250  */
2251 static void
2252 dp_pktlog_msg_handler(struct htt_soc *soc,
2253 		      uint32_t *msg_word)
2254 {
2255 	uint8_t pdev_id;
2256 	uint8_t target_pdev_id;
2257 	uint32_t *pl_hdr;
2258 
2259 	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2260 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2261 							 target_pdev_id);
2262 	pl_hdr = (msg_word + 1);
2263 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2264 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2265 		pdev_id);
2266 }
2267 #else
/* Stub: pktlog support compiled out (WDI_EVENT_ENABLE unset or
 * REMOVE_PKT_LOG set)
 */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
2273 #endif
2274 
2275 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
2276 /*
2277  * dp_vdev_txrx_hw_stats_handler - Handle vdev stats received from FW
2278  * @soc - htt soc handle
2279  * @ msg_word - buffer containing stats
2280  *
2281  * Return: void
2282  */
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{
	struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
	uint8_t pdev_id;
	uint8_t vdev_id;
	uint8_t target_pdev_id;
	uint16_t payload_size;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	uint8_t *tlv_buf;
	uint32_t *tlv_buf_temp;
	uint32_t *tag_buf;
	htt_tlv_tag_t tlv_type;
	uint16_t tlv_length;
	uint64_t pkt_count = 0;
	uint64_t byte_count = 0;
	uint64_t soc_drop_cnt = 0;
	/* aggregated tx completion / failure counters for one vdev TLV */
	struct cdp_pkt_info tx_comp = { 0 };
	struct cdp_pkt_info tx_failed =  { 0 };

	target_pdev_id =
		HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(dpsoc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT)
		return;

	pdev = dpsoc->pdev_list[pdev_id];
	if (!pdev) {
		dp_err("PDEV is NULL for pdev_id:%d", pdev_id);
		return;
	}

	payload_size =
	HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PAYLOAD_SIZE_GET(*msg_word);

	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   (void *)msg_word, payload_size + 16);

	/* Adjust msg_word to point to the first TLV in buffer */
	msg_word = msg_word + 4;

	/* Parse the received buffer till payload size reaches 0 */
	while (payload_size > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_buf_temp = msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);

		/* Add header size to tlv length*/
		tlv_length += 4;

		switch (tlv_type) {
		case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
		{
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(SOC_DROP_CNT);
			soc_drop_cnt = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(dpsoc, tx.tqm_drop_no_peer, soc_drop_cnt);
			break;
		}
		case HTT_STATS_VDEV_TXRX_STATS_HW_STATS_TAG:
		{
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(VDEV_ID);
			vdev_id = (uint8_t)(*tag_buf);
			vdev = dp_vdev_get_ref_by_id(dpsoc, vdev_id,
						     DP_MOD_ID_HTT);

			/* unknown vdev: skip this TLV, keep parsing */
			if (!vdev)
				goto invalid_vdev;

			/* Extract received packet count from buffer */
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(RX_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.num, pkt_count);

			/* Extract received packet byte count from buffer */
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(RX_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.bytes, byte_count);

			/* Extract tx success packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num = pkt_count;

			/* Extract tx success packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes = byte_count;

			/* Extract tx retry packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num = pkt_count;

			/* Extract tx retry packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes = byte_count;

			/* Extract tx drop packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_DROP_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num += pkt_count;

			/* Extract tx drop packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_DROP_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes += byte_count;

			/* Extract tx age-out packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num += pkt_count;

			/* Extract tx age-out packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes += byte_count;

			/* Extract tqm bypass packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;

			/* Extract tx bypass packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;

			DP_STATS_UPD(vdev, tx.comp_pkt.num, tx_comp.num);
			DP_STATS_UPD(vdev, tx.comp_pkt.bytes, tx_comp.bytes);

			DP_STATS_UPD(vdev, tx.tx_failed, tx_failed.num);

			dp_vdev_unref_delete(dpsoc, vdev, DP_MOD_ID_HTT);
			break;
		}
		default:
			qdf_assert(0);
		}
invalid_vdev:
		/* advance to the next TLV */
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		payload_size -= tlv_length;
	}
}
2451 #else
/* Stub: vdev HW stats offload compiled out
 * (QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT unset)
 */
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{
}
2455 #endif
2456 
2457 #ifdef CONFIG_SAWF_DEF_QUEUES
/* Forward a SAWF default-queues map-report configuration message to the
 * SAWF HTT layer.
 */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_def_queues_map_report_conf(soc, msg_word, htt_t2h_msg);
}
#else
/* Stub: CONFIG_SAWF_DEF_QUEUES compiled out */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{
}
2469 #endif
2470 
2471 #ifdef CONFIG_SAWF
2472 /*
2473  * dp_sawf_msduq_map() - Msdu queue creation information received
2474  * from target
2475  * @soc: soc handle.
2476  * @msg_word: Pointer to htt msg word.
2477  * @htt_t2h_msg: HTT message nbuf
2478  *
2479  * @return: void
2480  */
/* Forward the MSDU-queue map indication to the SAWF HTT layer */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
}

/*
 * dp_sawf_mpdu_stats_handler() - HTT message handler for MPDU stats
 * @soc: soc handle.
 * @htt_t2h_msg: HTT message nbuf
 *
 * @return: void
 */
static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
				       qdf_nbuf_t htt_t2h_msg)
{
	dp_sawf_htt_mpdu_stats_handler(soc, htt_t2h_msg);
}
#else
/* Stub: CONFIG_SAWF compiled out */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{
}

/* Stub: CONFIG_SAWF compiled out */
static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
				       qdf_nbuf_t htt_t2h_msg)
{
}
2507 #endif
2508 
2509 /*
2510  * time_allow_print() - time allow print
2511  * @htt_ring_tt:	ringi_id array of timestamps
2512  * @ring_id:		ring_id (index)
2513  *
2514  * Return: 1 for successfully saving timestamp in array
2515  *	and 0 for timestamp falling within 2 seconds after last one
2516  */
2517 static bool time_allow_print(struct bp_handler *htt_bp_handler,
2518 			     u_int8_t ring_id, u_int32_t th_time)
2519 {
2520 	unsigned long tstamp;
2521 	struct bp_handler *path = &htt_bp_handler[ring_id];
2522 
2523 	tstamp = qdf_get_system_timestamp();
2524 
2525 	if (!path)
2526 		return 0; //unable to print backpressure messages
2527 
2528 	if (path->bp_start_tt == -1) {
2529 		path->bp_start_tt = tstamp;
2530 		path->bp_duration = 0;
2531 		path->bp_last_tt = tstamp;
2532 		path->bp_counter = 1;
2533 		return 1;
2534 	}
2535 
2536 	path->bp_duration = tstamp - path->bp_start_tt;
2537 	path->bp_last_tt = tstamp;
2538 	path->bp_counter++;
2539 
2540 	if (path->bp_duration >= th_time) {
2541 		path->bp_start_tt = -1;
2542 		return 1;
2543 	}
2544 
2545 	return 0;
2546 }
2547 
2548 static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
2549 			       struct dp_pdev *pdev, u_int8_t ring_id,
2550 			       u_int16_t hp_idx, u_int16_t tp_idx,
2551 			       u_int32_t bkp_time,
2552 			       struct bp_handler *htt_bp_handler,
2553 			       char *ring_stype)
2554 {
2555 	dp_alert("seq_num %u msg_type: %d pdev_id: %d ring_type: %s ",
2556 		 pdev->bkp_stats.seq_num, msg_type, pdev->pdev_id, ring_stype);
2557 	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
2558 		 ring_id, hp_idx, tp_idx, bkp_time);
2559 	dp_alert("last_bp_event: %ld, total_bp_duration: %ld, bp_counter: %ld",
2560 		 htt_bp_handler[ring_id].bp_last_tt,
2561 		 htt_bp_handler[ring_id].bp_duration,
2562 		 htt_bp_handler[ring_id].bp_counter);
2563 }
2564 
2565 /**
2566  * dp_get_srng_ring_state_from_hal(): Get hal level ring stats
2567  * @soc: DP_SOC handle
2568  * @srng: DP_SRNG handle
2569  * @ring_type: srng src/dst ring
2570  *
2571  * Return: void
2572  */
2573 static QDF_STATUS
2574 dp_get_srng_ring_state_from_hal(struct dp_soc *soc,
2575 				struct dp_pdev *pdev,
2576 				struct dp_srng *srng,
2577 				enum hal_ring_type ring_type,
2578 				struct dp_srng_ring_state *state)
2579 {
2580 	struct hal_soc *hal_soc;
2581 
2582 	if (!soc || !srng || !srng->hal_srng || !state)
2583 		return QDF_STATUS_E_INVAL;
2584 
2585 	hal_soc = (struct hal_soc *)soc->hal_soc;
2586 
2587 	hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &state->sw_tail,
2588 			&state->sw_head);
2589 
2590 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &state->hw_head,
2591 			&state->hw_tail, ring_type);
2592 
2593 	state->ring_type = ring_type;
2594 
2595 	return QDF_STATUS_SUCCESS;
2596 }
2597 
2598 #ifdef QCA_MONITOR_PKT_SUPPORT
2599 static void
2600 dp_queue_mon_ring_stats(struct dp_pdev *pdev,
2601 			int lmac_id, uint32_t *num_srng,
2602 			struct dp_soc_srngs_state *soc_srngs_state)
2603 {
2604 	QDF_STATUS status;
2605 
2606 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
2607 		status = dp_get_srng_ring_state_from_hal
2608 			(pdev->soc, pdev,
2609 			 &pdev->soc->rxdma_mon_buf_ring[lmac_id],
2610 			 RXDMA_MONITOR_BUF,
2611 			 &soc_srngs_state->ring_state[*num_srng]);
2612 
2613 		if (status == QDF_STATUS_SUCCESS)
2614 			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
2615 
2616 		status = dp_get_srng_ring_state_from_hal
2617 			(pdev->soc, pdev,
2618 			 &pdev->soc->rxdma_mon_dst_ring[lmac_id],
2619 			 RXDMA_MONITOR_DST,
2620 			 &soc_srngs_state->ring_state[*num_srng]);
2621 
2622 		if (status == QDF_STATUS_SUCCESS)
2623 			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
2624 
2625 		status = dp_get_srng_ring_state_from_hal
2626 			(pdev->soc, pdev,
2627 			 &pdev->soc->rxdma_mon_desc_ring[lmac_id],
2628 			 RXDMA_MONITOR_DESC,
2629 			 &soc_srngs_state->ring_state[*num_srng]);
2630 
2631 		if (status == QDF_STATUS_SUCCESS)
2632 			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
2633 	}
2634 }
2635 #else
/* Stub: monitor packet support compiled out (QCA_MONITOR_PKT_SUPPORT
 * unset)
 */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
}
2642 #endif
2643 
2644 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
/* Snapshot the TCL command/credit ring state; compiled out to a
 * successful no-op when the ring is disabled.
 */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_cmd_credit_ring,
					       TCL_CMD_CREDIT, ring_state);
}
#else
/* Stub: TCL cmd/credit SRNG disabled (WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG) */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
2660 #endif
2661 
2662 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
/* Snapshot the TCL status ring state; compiled out to a successful
 * no-op when the ring is disabled.
 */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_status_ring,
					       TCL_STATUS, ring_state);
}
#else
/* Stub: TCL status SRNG disabled (WLAN_DP_DISABLE_TCL_STATUS_SRNG) */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
2678 #endif
2679 
2680 /**
2681  * dp_queue_srng_ring_stats(): Print pdev hal level ring stats
2682  * @pdev: DP_pdev handle
2683  *
2684  * Return: void
2685  */
2686 static void dp_queue_ring_stats(struct dp_pdev *pdev)
2687 {
2688 	uint32_t i;
2689 	int mac_id;
2690 	int lmac_id;
2691 	uint32_t j = 0;
2692 	struct dp_soc *soc = pdev->soc;
2693 	struct dp_soc_srngs_state * soc_srngs_state = NULL;
2694 	struct dp_soc_srngs_state *drop_srngs_state = NULL;
2695 	QDF_STATUS status;
2696 
2697 	soc_srngs_state = qdf_mem_malloc(sizeof(struct dp_soc_srngs_state));
2698 	if (!soc_srngs_state) {
2699 		dp_htt_alert("Memory alloc failed for back pressure event");
2700 		return;
2701 	}
2702 
2703 	status = dp_get_srng_ring_state_from_hal
2704 				(pdev->soc, pdev,
2705 				 &pdev->soc->reo_exception_ring,
2706 				 REO_EXCEPTION,
2707 				 &soc_srngs_state->ring_state[j]);
2708 
2709 	if (status == QDF_STATUS_SUCCESS)
2710 		qdf_assert_always(++j < DP_MAX_SRNGS);
2711 
2712 	status = dp_get_srng_ring_state_from_hal
2713 				(pdev->soc, pdev,
2714 				 &pdev->soc->reo_reinject_ring,
2715 				 REO_REINJECT,
2716 				 &soc_srngs_state->ring_state[j]);
2717 
2718 	if (status == QDF_STATUS_SUCCESS)
2719 		qdf_assert_always(++j < DP_MAX_SRNGS);
2720 
2721 	status = dp_get_srng_ring_state_from_hal
2722 				(pdev->soc, pdev,
2723 				 &pdev->soc->reo_cmd_ring,
2724 				 REO_CMD,
2725 				 &soc_srngs_state->ring_state[j]);
2726 
2727 	if (status == QDF_STATUS_SUCCESS)
2728 		qdf_assert_always(++j < DP_MAX_SRNGS);
2729 
2730 	status = dp_get_srng_ring_state_from_hal
2731 				(pdev->soc, pdev,
2732 				 &pdev->soc->reo_status_ring,
2733 				 REO_STATUS,
2734 				 &soc_srngs_state->ring_state[j]);
2735 
2736 	if (status == QDF_STATUS_SUCCESS)
2737 		qdf_assert_always(++j < DP_MAX_SRNGS);
2738 
2739 	status = dp_get_srng_ring_state_from_hal
2740 				(pdev->soc, pdev,
2741 				 &pdev->soc->rx_rel_ring,
2742 				 WBM2SW_RELEASE,
2743 				 &soc_srngs_state->ring_state[j]);
2744 
2745 	if (status == QDF_STATUS_SUCCESS)
2746 		qdf_assert_always(++j < DP_MAX_SRNGS);
2747 
2748 	status = dp_get_tcl_cmd_cred_ring_state_from_hal
2749 				(pdev, &soc_srngs_state->ring_state[j]);
2750 	if (status == QDF_STATUS_SUCCESS)
2751 		qdf_assert_always(++j < DP_MAX_SRNGS);
2752 
2753 	status = dp_get_tcl_status_ring_state_from_hal
2754 				(pdev, &soc_srngs_state->ring_state[j]);
2755 	if (status == QDF_STATUS_SUCCESS)
2756 		qdf_assert_always(++j < DP_MAX_SRNGS);
2757 
2758 	status = dp_get_srng_ring_state_from_hal
2759 				(pdev->soc, pdev,
2760 				 &pdev->soc->wbm_desc_rel_ring,
2761 				 SW2WBM_RELEASE,
2762 				 &soc_srngs_state->ring_state[j]);
2763 
2764 	if (status == QDF_STATUS_SUCCESS)
2765 		qdf_assert_always(++j < DP_MAX_SRNGS);
2766 
2767 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
2768 		status = dp_get_srng_ring_state_from_hal
2769 				(pdev->soc, pdev,
2770 				 &pdev->soc->reo_dest_ring[i],
2771 				 REO_DST,
2772 				 &soc_srngs_state->ring_state[j]);
2773 
2774 		if (status == QDF_STATUS_SUCCESS)
2775 			qdf_assert_always(++j < DP_MAX_SRNGS);
2776 	}
2777 
2778 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
2779 		status = dp_get_srng_ring_state_from_hal
2780 				(pdev->soc, pdev,
2781 				 &pdev->soc->tcl_data_ring[i],
2782 				 TCL_DATA,
2783 				 &soc_srngs_state->ring_state[j]);
2784 
2785 		if (status == QDF_STATUS_SUCCESS)
2786 			qdf_assert_always(++j < DP_MAX_SRNGS);
2787 	}
2788 
2789 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
2790 		status = dp_get_srng_ring_state_from_hal
2791 				(pdev->soc, pdev,
2792 				 &pdev->soc->tx_comp_ring[i],
2793 				 WBM2SW_RELEASE,
2794 				 &soc_srngs_state->ring_state[j]);
2795 
2796 		if (status == QDF_STATUS_SUCCESS)
2797 			qdf_assert_always(++j < DP_MAX_SRNGS);
2798 	}
2799 
2800 	lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id);
2801 	status = dp_get_srng_ring_state_from_hal
2802 				(pdev->soc, pdev,
2803 				 &pdev->soc->rx_refill_buf_ring
2804 				 [lmac_id],
2805 				 RXDMA_BUF,
2806 				 &soc_srngs_state->ring_state[j]);
2807 
2808 	if (status == QDF_STATUS_SUCCESS)
2809 		qdf_assert_always(++j < DP_MAX_SRNGS);
2810 
2811 	status = dp_get_srng_ring_state_from_hal
2812 				(pdev->soc, pdev,
2813 				 &pdev->rx_refill_buf_ring2,
2814 				 RXDMA_BUF,
2815 				 &soc_srngs_state->ring_state[j]);
2816 
2817 	if (status == QDF_STATUS_SUCCESS)
2818 		qdf_assert_always(++j < DP_MAX_SRNGS);
2819 
2820 
2821 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2822 		dp_get_srng_ring_state_from_hal
2823 				(pdev->soc, pdev,
2824 				 &pdev->rx_mac_buf_ring[i],
2825 				 RXDMA_BUF,
2826 				 &soc_srngs_state->ring_state[j]);
2827 
2828 		if (status == QDF_STATUS_SUCCESS)
2829 			qdf_assert_always(++j < DP_MAX_SRNGS);
2830 	}
2831 
2832 	for (mac_id = 0;
2833 	     mac_id  < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
2834 	     mac_id++) {
2835 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2836 						     mac_id, pdev->pdev_id);
2837 
2838 		dp_queue_mon_ring_stats(pdev, lmac_id, &j,
2839 					soc_srngs_state);
2840 
2841 		status = dp_get_srng_ring_state_from_hal
2842 			(pdev->soc, pdev,
2843 			 &pdev->soc->rxdma_mon_status_ring[lmac_id],
2844 			 RXDMA_MONITOR_STATUS,
2845 			 &soc_srngs_state->ring_state[j]);
2846 
2847 		if (status == QDF_STATUS_SUCCESS)
2848 			qdf_assert_always(++j < DP_MAX_SRNGS);
2849 	}
2850 
2851 	for (i = 0; i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
2852 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2853 						     i, pdev->pdev_id);
2854 
2855 		status = dp_get_srng_ring_state_from_hal
2856 				(pdev->soc, pdev,
2857 				 &pdev->soc->rxdma_err_dst_ring
2858 				 [lmac_id],
2859 				 RXDMA_DST,
2860 				 &soc_srngs_state->ring_state[j]);
2861 
2862 		if (status == QDF_STATUS_SUCCESS)
2863 			qdf_assert_always(++j < DP_MAX_SRNGS);
2864 	}
2865 	soc_srngs_state->max_ring_id = j;
2866 
2867 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
2868 
2869 	soc_srngs_state->seq_num = pdev->bkp_stats.seq_num;
2870 
2871 	if (pdev->bkp_stats.queue_depth >= HTT_BKP_STATS_MAX_QUEUE_DEPTH) {
2872 		drop_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
2873 		qdf_assert_always(drop_srngs_state);
2874 		TAILQ_REMOVE(&pdev->bkp_stats.list, drop_srngs_state,
2875 			     list_elem);
2876 		qdf_mem_free(drop_srngs_state);
2877 		pdev->bkp_stats.queue_depth--;
2878 	}
2879 
2880 	pdev->bkp_stats.queue_depth++;
2881 	TAILQ_INSERT_TAIL(&pdev->bkp_stats.list, soc_srngs_state,
2882 			  list_elem);
2883 	pdev->bkp_stats.seq_num++;
2884 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
2885 
2886 	qdf_queue_work(0, pdev->bkp_stats.work_queue,
2887 		       &pdev->bkp_stats.work);
2888 }
2889 
2890 /*
2891  * dp_htt_bkp_event_alert() - htt backpressure event alert
2892  * @msg_word:	htt packet context
2893  * @htt_soc:	HTT SOC handle
2894  *
2895  * Return: after attempting to print stats
2896  */
2897 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
2898 {
2899 	u_int8_t ring_type;
2900 	u_int8_t pdev_id;
2901 	uint8_t target_pdev_id;
2902 	u_int8_t ring_id;
2903 	u_int16_t hp_idx;
2904 	u_int16_t tp_idx;
2905 	u_int32_t bkp_time;
2906 	u_int32_t th_time;
2907 	enum htt_t2h_msg_type msg_type;
2908 	struct dp_soc *dpsoc;
2909 	struct dp_pdev *pdev;
2910 	struct dp_htt_timestamp *radio_tt;
2911 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2912 
2913 
2914 	if (!soc)
2915 		return;
2916 
2917 	dpsoc = (struct dp_soc *)soc->dp_soc;
2918 	soc_cfg_ctx = dpsoc->wlan_cfg_ctx;
2919 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2920 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
2921 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
2922 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2923 							 target_pdev_id);
2924 	if (pdev_id >= MAX_PDEV_CNT) {
2925 		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
2926 		return;
2927 	}
2928 
2929 	th_time = wlan_cfg_time_control_bp(soc_cfg_ctx);
2930 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
2931 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
2932 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
2933 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
2934 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
2935 	radio_tt = &soc->pdevid_tt[pdev_id];
2936 
2937 	switch (ring_type) {
2938 	case HTT_SW_RING_TYPE_UMAC:
2939 		if (!time_allow_print(radio_tt->umac_path, ring_id, th_time))
2940 			return;
2941 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2942 				   bkp_time, radio_tt->umac_path,
2943 				   "HTT_SW_RING_TYPE_UMAC");
2944 	break;
2945 	case HTT_SW_RING_TYPE_LMAC:
2946 		if (!time_allow_print(radio_tt->lmac_path, ring_id, th_time))
2947 			return;
2948 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2949 				   bkp_time, radio_tt->lmac_path,
2950 				   "HTT_SW_RING_TYPE_LMAC");
2951 	break;
2952 	default:
2953 		dp_alert("Invalid ring type: %d", ring_type);
2954 	break;
2955 	}
2956 
2957 	dp_queue_ring_stats(pdev);
2958 }
2959 
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/*
 * dp_offload_ind_handler() - offload msg handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Converts the target pdev id carried in the message to the host pdev
 * id and forwards the payload to WDI subscribers of the packet-capture
 * offload TX data event.
 *
 * Return: None
 */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
	u_int8_t pdev_id;
	u_int8_t target_pdev_id;

	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA, soc->dp_soc,
			     msg_word, HTT_INVALID_VDEV, WDI_NO_VAL,
			     pdev_id);
}
#else
/* Stub: offload indications are silently ignored when packet capture v2
 * support is compiled out.
 */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
}
#endif
2987 
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
/*
 * dp_update_mlo_ts_offset() - push the MLO timestamp offset to cdp mlo_ops
 * @soc: DP SOC handle
 * @ts_lo: lower 32 bits of the MLO timestamp offset
 * @ts_hi: upper 32 bits of the MLO timestamp offset
 *
 * Combines the two halves into a single 64-bit value and hands it to
 * the registered mlo_update_mlo_ts_offset callback.
 *
 * Return: None
 */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{
	uint64_t mlo_offset;

	mlo_offset = ((uint64_t)(ts_hi) << 32 | ts_lo);
	soc->cdp_soc.ops->mlo_ops->mlo_update_mlo_ts_offset
		((struct cdp_soc_t *)soc, mlo_offset);
}
#else
/* Single-chip build: no cross-chip MLO timestamp offset to propagate */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{}
#endif
/*
 * dp_htt_mlo_peer_map_handler() - MLO RX peer map message handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to the HTT_T2H MLO_RX_PEER_MAP payload
 *
 * Extracts the MLO peer id, deswizzled MAC address, per-flow AST
 * override info and per-link (chip id, vdev id) TLVs from the message,
 * then forwards everything to dp_rx_mlo_peer_map_handler().
 *
 * Return: None
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
	uint8_t *mlo_peer_mac_addr;
	uint16_t mlo_peer_id;
	uint8_t num_links;
	struct dp_mlo_flow_override_info mlo_flow_info[DP_MLO_FLOW_INFO_MAX];
	struct dp_mlo_link_info mlo_link_info[DP_MAX_MLO_LINKS];
	MLO_PEER_MAP_TLV_TAG_ID tlv_type = 0xff;
	uint16_t tlv_len = 0;
	int i = 0;

	mlo_peer_id = HTT_RX_MLO_PEER_MAP_MLO_PEER_ID_GET(*msg_word);
	num_links =
		HTT_RX_MLO_PEER_MAP_NUM_LOGICAL_LINKS_GET(*msg_word);
	/* MAC address occupies words 1-2 in target byte order */
	mlo_peer_mac_addr =
	htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
				   &mac_addr_deswizzle_buf[0]);

	/*
	 * NOTE(review): all three flow-info entries below are populated from
	 * the same word (*(msg_word + 3)), so entries [1] and [2] duplicate
	 * entry [0]. Confirm against the HTT message definition whether the
	 * later entries should read subsequent words.
	 */
	mlo_flow_info[0].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[0].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	mlo_flow_info[1].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[1].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	mlo_flow_info[2].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[2].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	/* Per-link TLVs start at word 8; loop is bounded by DP_MAX_MLO_LINKS
	 * and by a zero-length TLV (msg_word itself never becomes NULL).
	 * NOTE(review): there is no bounds check against the actual message
	 * length here — relies on FW sending well-formed TLVs.
	 */
	msg_word = msg_word + 8;
	while (msg_word && (i < DP_MAX_MLO_LINKS)) {
		/* Default to "invalid" so unused entries are recognizable */
		mlo_link_info[i].peer_chip_id = 0xFF;
		mlo_link_info[i].vdev_id = 0xFF;

		tlv_type = HTT_RX_MLO_PEER_MAP_TLV_TAG_GET(*msg_word);
		tlv_len = HTT_RX_MLO_PEER_MAP_TLV_LENGTH_GET(*msg_word);

		if (tlv_len == 0) {
			dp_err("TLV Length is 0");
			break;
		}

		if (tlv_type == MLO_PEER_MAP_TLV_STRUCT_SOC_VDEV_PEER_IDS) {
			mlo_link_info[i].peer_chip_id =
				HTT_RX_MLO_PEER_MAP_CHIP_ID_GET(
							*(msg_word + 1));
			mlo_link_info[i].vdev_id =
				HTT_RX_MLO_PEER_MAP_VDEV_ID_GET(
							*(msg_word + 1));
		}
		/* Add header size to tlv length */
		tlv_len = tlv_len + HTT_TLV_HDR_LEN;
		msg_word = (uint32_t *)(((uint8_t *)msg_word) + tlv_len);
		i++;
	}

	dp_rx_mlo_peer_map_handler(soc->dp_soc, mlo_peer_id,
				   mlo_peer_mac_addr,
				   mlo_flow_info, mlo_link_info);
}
3088 
3089 static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
3090 					  uint32_t *msg_word)
3091 {
3092 	uint16_t mlo_peer_id;
3093 
3094 	mlo_peer_id = HTT_RX_MLO_PEER_UNMAP_MLO_PEER_ID_GET(*msg_word);
3095 	dp_rx_mlo_peer_unmap_handler(soc->dp_soc, mlo_peer_id);
3096 }
3097 
/*
 * dp_rx_mlo_timestamp_ind_handler() - MLO timestamp offset indication handler
 * @soc: DP SOC handle
 * @msg_word: Pointer to the HTT_T2H MLO_TIMESTAMP_OFFSET_IND payload
 *
 * Validates the pdev, publishes the raw message to WDI subscribers,
 * caches the sync/offset/compensation timestamps in pdev->timestamp
 * under htt_stats.lock, and finally pushes the 64-bit MLO offset to
 * the MLO manager via dp_update_mlo_ts_offset().
 *
 * Return: None
 */
static void
dp_rx_mlo_timestamp_ind_handler(struct dp_soc *soc,
				uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint8_t target_pdev_id;
	struct dp_pdev *pdev;

	if (!soc)
		return;

	target_pdev_id = HTT_T2H_MLO_TIMESTAMP_OFFSET_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT) {
		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
		return;
	}

	pdev = (struct dp_pdev *)soc->pdev_list[pdev_id];

	if (!pdev) {
		dp_err("Invalid pdev");
		return;
	}
	/* Notify WDI subscribers before caching the timestamps locally */
	dp_wdi_event_handler(WDI_EVENT_MLO_TSTMP, soc,
			     msg_word, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);

	/* htt_stats.lock guards pdev->timestamp against concurrent access */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	pdev->timestamp.msg_type =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MSG_TYPE_GET(*msg_word);
	pdev->timestamp.pdev_id = pdev_id;
	pdev->timestamp.chip_id =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_CHIP_ID_GET(*msg_word);
	pdev->timestamp.mac_clk_freq =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MAC_CLK_FREQ_MHZ_GET(*msg_word);
	/* Words 1-5 carry full 32-bit values, no bit-field extraction */
	pdev->timestamp.sync_tstmp_lo_us = *(msg_word + 1);
	pdev->timestamp.sync_tstmp_hi_us = *(msg_word + 2);
	pdev->timestamp.mlo_offset_lo_us = *(msg_word + 3);
	pdev->timestamp.mlo_offset_hi_us = *(msg_word + 4);
	pdev->timestamp.mlo_offset_clks  = *(msg_word + 5);
	pdev->timestamp.mlo_comp_us =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_US_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_clks =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_CLKS_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_timer =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_PERIOD_US_GET(
							*(msg_word + 7));

	dp_htt_debug("tsf_lo=%d tsf_hi=%d, mlo_ofst_lo=%d, mlo_ofst_hi=%d\n",
		     pdev->timestamp.sync_tstmp_lo_us,
		     pdev->timestamp.sync_tstmp_hi_us,
		     pdev->timestamp.mlo_offset_lo_us,
		     pdev->timestamp.mlo_offset_hi_us);

	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	dp_update_mlo_ts_offset(soc,
				pdev->timestamp.mlo_offset_lo_us,
				pdev->timestamp.mlo_offset_hi_us);
}
#else
/* MLO support compiled out: receiving any MLO peer map message is a
 * FW/host configuration mismatch, so assert rather than ignore.
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	qdf_assert_always(0);
}

/* MLO support compiled out: unexpected MLO peer unmap — assert */
static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
					 uint32_t *msg_word)
{
	qdf_assert_always(0);
}

/* MLO support compiled out: unexpected MLO timestamp indication — assert */
static void
dp_rx_mlo_timestamp_ind_handler(void *soc_handle,
				uint32_t *msg_word)
{
	qdf_assert_always(0);
}
#endif
3183 
3184 /*
3185  * dp_htt_rx_addba_handler() - RX Addba HTT msg handler
3186  * @soc: DP Soc handler
3187  * @peer_id: ID of peer
3188  * @tid: TID number
3189  * @win_sz: BA window size
3190  *
3191  * Return: None
3192  */
3193 static void
3194 dp_htt_rx_addba_handler(struct dp_soc *soc, uint16_t peer_id,
3195 			uint8_t tid, uint16_t win_sz)
3196 {
3197 	uint16_t status;
3198 	struct dp_peer *peer;
3199 
3200 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3201 
3202 	if (!peer) {
3203 		dp_err("Peer not found peer id %d", peer_id);
3204 		return;
3205 	}
3206 
3207 	status = dp_addba_requestprocess_wifi3((struct cdp_soc_t *)soc,
3208 					       peer->mac_addr.raw,
3209 					       peer->vdev->vdev_id, 0,
3210 					       tid, 0, win_sz, 0xffff);
3211 
3212 	dp_addba_resp_tx_completion_wifi3(
3213 		(struct cdp_soc_t *)soc,
3214 		peer->mac_addr.raw, peer->vdev->vdev_id,
3215 		tid,
3216 		status);
3217 
3218 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3219 
3220 	dp_info("PeerID %d BAW %d TID %d stat %d",
3221 		peer_id, win_sz, tid, status);
3222 }
3223 
3224 /*
3225  * dp_htt_ppdu_id_fmt_handler() - PPDU ID Format handler
3226  * @htt_soc: HTT SOC handle
3227  * @msg_word: Pointer to payload
3228  *
3229  * Return: None
3230  */
3231 static void
3232 dp_htt_ppdu_id_fmt_handler(struct dp_soc *soc, uint32_t *msg_word)
3233 {
3234 	uint8_t msg_type, valid, bits, offset;
3235 
3236 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3237 
3238 	msg_word += HTT_PPDU_ID_FMT_IND_LINK_ID_OFFSET;
3239 	valid = HTT_PPDU_ID_FMT_IND_VALID_GET_BITS31_16(*msg_word);
3240 	bits = HTT_PPDU_ID_FMT_IND_BITS_GET_BITS31_16(*msg_word);
3241 	offset = HTT_PPDU_ID_FMT_IND_OFFSET_GET_BITS31_16(*msg_word);
3242 
3243 	dp_info("link_id: valid %u bits %u offset %u", valid, bits, offset);
3244 
3245 	if (valid) {
3246 		soc->link_id_offset = offset;
3247 		soc->link_id_bits = bits;
3248 	}
3249 }
3250 
3251 /*
3252  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
3253  * @context:	Opaque context (HTT SOC handle)
3254  * @pkt:	HTC packet
3255  */
3256 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
3257 {
3258 	struct htt_soc *soc = (struct htt_soc *) context;
3259 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
3260 	u_int32_t *msg_word;
3261 	enum htt_t2h_msg_type msg_type;
3262 	bool free_buf = true;
3263 
3264 	/* check for successful message reception */
3265 	if (pkt->Status != QDF_STATUS_SUCCESS) {
3266 		if (pkt->Status != QDF_STATUS_E_CANCELED)
3267 			soc->stats.htc_err_cnt++;
3268 
3269 		qdf_nbuf_free(htt_t2h_msg);
3270 		return;
3271 	}
3272 
3273 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
3274 
3275 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
3276 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3277 	htt_event_record(soc->htt_logger_handle,
3278 			 msg_type, (uint8_t *)msg_word);
3279 	switch (msg_type) {
3280 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
3281 	{
3282 		dp_htt_bkp_event_alert(msg_word, soc);
3283 		break;
3284 	}
3285 	case HTT_T2H_MSG_TYPE_PEER_MAP:
3286 		{
3287 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3288 			u_int8_t *peer_mac_addr;
3289 			u_int16_t peer_id;
3290 			u_int16_t hw_peer_id;
3291 			u_int8_t vdev_id;
3292 			u_int8_t is_wds;
3293 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
3294 
3295 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
3296 			hw_peer_id =
3297 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
3298 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
3299 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
3300 				(u_int8_t *) (msg_word+1),
3301 				&mac_addr_deswizzle_buf[0]);
3302 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3303 				QDF_TRACE_LEVEL_DEBUG,
3304 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3305 				peer_id, vdev_id);
3306 
3307 			/*
3308 			 * check if peer already exists for this peer_id, if so
3309 			 * this peer map event is in response for a wds peer add
3310 			 * wmi command sent during wds source port learning.
3311 			 * in this case just add the ast entry to the existing
3312 			 * peer ast_list.
3313 			 */
3314 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
3315 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
3316 					       vdev_id, peer_mac_addr, 0,
3317 					       is_wds);
3318 			break;
3319 		}
3320 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
3321 		{
3322 			u_int16_t peer_id;
3323 			u_int8_t vdev_id;
3324 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3325 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
3326 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
3327 
3328 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3329 						 vdev_id, mac_addr, 0,
3330 						 DP_PEER_WDS_COUNT_INVALID);
3331 			break;
3332 		}
3333 	case HTT_T2H_MSG_TYPE_SEC_IND:
3334 		{
3335 			u_int16_t peer_id;
3336 			enum cdp_sec_type sec_type;
3337 			int is_unicast;
3338 
3339 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
3340 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
3341 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
3342 			/* point to the first part of the Michael key */
3343 			msg_word++;
3344 			dp_rx_sec_ind_handler(
3345 				soc->dp_soc, peer_id, sec_type, is_unicast,
3346 				msg_word, msg_word + 2);
3347 			break;
3348 		}
3349 
3350 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
3351 		{
3352 			free_buf =
3353 				dp_monitor_ppdu_stats_ind_handler(soc,
3354 								  msg_word,
3355 								  htt_t2h_msg);
3356 			break;
3357 		}
3358 
3359 	case HTT_T2H_MSG_TYPE_PKTLOG:
3360 		{
3361 			dp_pktlog_msg_handler(soc, msg_word);
3362 			break;
3363 		}
3364 
3365 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3366 		{
3367 			/*
3368 			 * HTC maintains runtime pm count for H2T messages that
3369 			 * have a response msg from FW. This count ensures that
3370 			 * in the case FW does not sent out the response or host
3371 			 * did not process this indication runtime_put happens
3372 			 * properly in the cleanup path.
3373 			 */
3374 			if (htc_dec_return_runtime_cnt(soc->htc_soc) >= 0)
3375 				htc_pm_runtime_put(soc->htc_soc);
3376 			else
3377 				soc->stats.htt_ver_req_put_skip++;
3378 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3379 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3380 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
3381 				"target uses HTT version %d.%d; host uses %d.%d",
3382 				soc->tgt_ver.major, soc->tgt_ver.minor,
3383 				HTT_CURRENT_VERSION_MAJOR,
3384 				HTT_CURRENT_VERSION_MINOR);
3385 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3386 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3387 					QDF_TRACE_LEVEL_WARN,
3388 					"*** Incompatible host/target HTT versions!");
3389 			}
3390 			/* abort if the target is incompatible with the host */
3391 			qdf_assert(soc->tgt_ver.major ==
3392 				HTT_CURRENT_VERSION_MAJOR);
3393 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3394 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3395 					QDF_TRACE_LEVEL_INFO_LOW,
3396 					"*** Warning: host/target HTT versions"
3397 					" are different, though compatible!");
3398 			}
3399 			break;
3400 		}
3401 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3402 		{
3403 			uint16_t peer_id;
3404 			uint8_t tid;
3405 			uint16_t win_sz;
3406 
3407 			/*
3408 			 * Update REO Queue Desc with new values
3409 			 */
3410 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3411 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3412 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3413 
3414 			/*
3415 			 * Window size needs to be incremented by 1
3416 			 * since fw needs to represent a value of 256
3417 			 * using just 8 bits
3418 			 */
3419 			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
3420 						tid, win_sz + 1);
3421 			break;
3422 		}
3423 	case HTT_T2H_MSG_TYPE_RX_ADDBA_EXTN:
3424 		{
3425 			uint16_t peer_id;
3426 			uint8_t tid;
3427 			uint16_t win_sz;
3428 
3429 			peer_id = HTT_RX_ADDBA_EXTN_PEER_ID_GET(*msg_word);
3430 			tid = HTT_RX_ADDBA_EXTN_TID_GET(*msg_word);
3431 
3432 			msg_word++;
3433 			win_sz = HTT_RX_ADDBA_EXTN_WIN_SIZE_GET(*msg_word);
3434 
3435 			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
3436 						tid, win_sz);
3437 			break;
3438 		}
3439 	case HTT_T2H_PPDU_ID_FMT_IND:
3440 		{
3441 			dp_htt_ppdu_id_fmt_handler(soc->dp_soc, msg_word);
3442 			break;
3443 		}
3444 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3445 		{
3446 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3447 			break;
3448 		}
3449 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
3450 		{
3451 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3452 			u_int8_t *peer_mac_addr;
3453 			u_int16_t peer_id;
3454 			u_int16_t hw_peer_id;
3455 			u_int8_t vdev_id;
3456 			bool is_wds;
3457 			u_int16_t ast_hash;
3458 			struct dp_ast_flow_override_info ast_flow_info;
3459 
3460 			qdf_mem_set(&ast_flow_info, 0,
3461 					    sizeof(struct dp_ast_flow_override_info));
3462 
3463 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
3464 			hw_peer_id =
3465 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
3466 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
3467 			peer_mac_addr =
3468 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3469 						   &mac_addr_deswizzle_buf[0]);
3470 			is_wds =
3471 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
3472 			ast_hash =
3473 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
3474 			/*
3475 			 * Update 4 ast_index per peer, ast valid mask
3476 			 * and TID flow valid mask.
3477 			 * AST valid mask is 3 bit field corresponds to
3478 			 * ast_index[3:1]. ast_index 0 is always valid.
3479 			 */
3480 			ast_flow_info.ast_valid_mask =
3481 			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
3482 			ast_flow_info.ast_idx[0] = hw_peer_id;
3483 			ast_flow_info.ast_flow_mask[0] =
3484 			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
3485 			ast_flow_info.ast_idx[1] =
3486 			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
3487 			ast_flow_info.ast_flow_mask[1] =
3488 			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
3489 			ast_flow_info.ast_idx[2] =
3490 			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
3491 			ast_flow_info.ast_flow_mask[2] =
3492 			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
3493 			ast_flow_info.ast_idx[3] =
3494 			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
3495 			ast_flow_info.ast_flow_mask[3] =
3496 			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
3497 			/*
3498 			 * TID valid mask is applicable only
3499 			 * for HI and LOW priority flows.
3500 			 * tid_valid_mas is 8 bit field corresponds
3501 			 * to TID[7:0]
3502 			 */
3503 			ast_flow_info.tid_valid_low_pri_mask =
3504 			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
3505 			ast_flow_info.tid_valid_hi_pri_mask =
3506 			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));
3507 
3508 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3509 				  QDF_TRACE_LEVEL_DEBUG,
3510 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3511 				  peer_id, vdev_id);
3512 
3513 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3514 				  QDF_TRACE_LEVEL_INFO,
3515 				  "ast_idx[0] %d ast_idx[1] %d ast_idx[2] %d ast_idx[3] %d n",
3516 				  ast_flow_info.ast_idx[0],
3517 				  ast_flow_info.ast_idx[1],
3518 				  ast_flow_info.ast_idx[2],
3519 				  ast_flow_info.ast_idx[3]);
3520 
3521 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3522 					       hw_peer_id, vdev_id,
3523 					       peer_mac_addr, ast_hash,
3524 					       is_wds);
3525 
3526 			/*
3527 			 * Update ast indexes for flow override support
3528 			 * Applicable only for non wds peers
3529 			 */
3530 			if (!soc->dp_soc->ast_offload_support)
3531 				dp_peer_ast_index_flow_queue_map_create(
3532 						soc->dp_soc, is_wds,
3533 						peer_id, peer_mac_addr,
3534 						&ast_flow_info);
3535 
3536 			break;
3537 		}
3538 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
3539 		{
3540 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3541 			u_int8_t *mac_addr;
3542 			u_int16_t peer_id;
3543 			u_int8_t vdev_id;
3544 			u_int8_t is_wds;
3545 			u_int32_t free_wds_count;
3546 
3547 			peer_id =
3548 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
3549 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
3550 			mac_addr =
3551 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3552 						   &mac_addr_deswizzle_buf[0]);
3553 			is_wds =
3554 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
3555 			free_wds_count =
3556 			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));
3557 
3558 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3559 				  QDF_TRACE_LEVEL_INFO,
3560 				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
3561 				  peer_id, vdev_id);
3562 
3563 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3564 						 vdev_id, mac_addr,
3565 						 is_wds, free_wds_count);
3566 			break;
3567 		}
3568 	case HTT_T2H_MSG_TYPE_RX_DELBA:
3569 		{
3570 			uint16_t peer_id;
3571 			uint8_t tid;
3572 			uint8_t win_sz;
3573 			QDF_STATUS status;
3574 
3575 			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
3576 			tid = HTT_RX_DELBA_TID_GET(*msg_word);
3577 			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);
3578 
3579 			status = dp_rx_delba_ind_handler(
3580 				soc->dp_soc,
3581 				peer_id, tid, win_sz);
3582 
3583 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3584 				  QDF_TRACE_LEVEL_INFO,
3585 				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
3586 				  peer_id, win_sz, tid, status);
3587 			break;
3588 		}
3589 	case HTT_T2H_MSG_TYPE_RX_DELBA_EXTN:
3590 		{
3591 			uint16_t peer_id;
3592 			uint8_t tid;
3593 			uint16_t win_sz;
3594 			QDF_STATUS status;
3595 
3596 			peer_id = HTT_RX_DELBA_EXTN_PEER_ID_GET(*msg_word);
3597 			tid = HTT_RX_DELBA_EXTN_TID_GET(*msg_word);
3598 
3599 			msg_word++;
3600 			win_sz = HTT_RX_DELBA_EXTN_WIN_SIZE_GET(*msg_word);
3601 
3602 			status = dp_rx_delba_ind_handler(soc->dp_soc,
3603 							 peer_id, tid,
3604 							 win_sz);
3605 
3606 			dp_info("DELBA PeerID %d BAW %d TID %d stat %d",
3607 				peer_id, win_sz, tid, status);
3608 			break;
3609 		}
3610 	case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
3611 		{
3612 			uint16_t num_entries;
3613 			uint32_t cmem_ba_lo;
3614 			uint32_t cmem_ba_hi;
3615 
3616 			num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
3617 			cmem_ba_lo = *(msg_word + 1);
3618 			cmem_ba_hi = *(msg_word + 2);
3619 
3620 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3621 				  FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
3622 				  num_entries, cmem_ba_lo, cmem_ba_hi);
3623 
3624 			dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
3625 						     cmem_ba_lo, cmem_ba_hi);
3626 			break;
3627 		}
3628 	case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
3629 		{
3630 			dp_offload_ind_handler(soc, msg_word);
3631 			break;
3632 		}
3633 	case HTT_T2H_MSG_TYPE_PEER_MAP_V3:
3634 	{
3635 		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3636 		u_int8_t *peer_mac_addr;
3637 		u_int16_t peer_id;
3638 		u_int16_t hw_peer_id;
3639 		u_int8_t vdev_id;
3640 		uint8_t is_wds;
3641 		u_int16_t ast_hash = 0;
3642 
3643 		peer_id = HTT_RX_PEER_MAP_V3_SW_PEER_ID_GET(*msg_word);
3644 		vdev_id = HTT_RX_PEER_MAP_V3_VDEV_ID_GET(*msg_word);
3645 		peer_mac_addr =
3646 		htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3647 					   &mac_addr_deswizzle_buf[0]);
3648 		hw_peer_id = HTT_RX_PEER_MAP_V3_HW_PEER_ID_GET(*(msg_word + 3));
3649 		ast_hash = HTT_RX_PEER_MAP_V3_CACHE_SET_NUM_GET(*(msg_word + 3));
3650 		is_wds = HTT_RX_PEER_MAP_V3_NEXT_HOP_GET(*(msg_word + 4));
3651 
3652 		dp_htt_info("HTT_T2H_MSG_TYPE_PEER_MAP_V3 msg for peer id %d vdev id %d n",
3653 			    peer_id, vdev_id);
3654 
3655 		dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3656 				       hw_peer_id, vdev_id,
3657 				       peer_mac_addr, ast_hash,
3658 				       is_wds);
3659 
3660 		break;
3661 	}
3662 	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP:
3663 	{
3664 		dp_htt_mlo_peer_map_handler(soc, msg_word);
3665 		break;
3666 	}
3667 	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_UNMAP:
3668 	{
3669 		dp_htt_mlo_peer_unmap_handler(soc, msg_word);
3670 		break;
3671 	}
3672 	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
3673 	{
3674 		dp_rx_mlo_timestamp_ind_handler(soc->dp_soc, msg_word);
3675 		break;
3676 	}
3677 	case HTT_T2H_MSG_TYPE_VDEVS_TXRX_STATS_PERIODIC_IND:
3678 	{
3679 		dp_vdev_txrx_hw_stats_handler(soc, msg_word);
3680 		break;
3681 	}
3682 	case HTT_T2H_SAWF_DEF_QUEUES_MAP_REPORT_CONF:
3683 	{
3684 		dp_sawf_def_queues_update_map_report_conf(soc, msg_word,
3685 							  htt_t2h_msg);
3686 		break;
3687 	}
3688 	case HTT_T2H_SAWF_MSDUQ_INFO_IND:
3689 	{
3690 		dp_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
3691 		break;
3692 	}
3693 	case HTT_T2H_MSG_TYPE_STREAMING_STATS_IND:
3694 	{
3695 		dp_sawf_mpdu_stats_handler(soc, htt_t2h_msg);
3696 		break;
3697 	}
3698 
3699 	default:
3700 		break;
3701 	};
3702 
3703 	/* Free the indication buffer */
3704 	if (free_buf)
3705 		qdf_nbuf_free(htt_t2h_msg);
3706 }
3707 
3708 /*
3709  * dp_htt_h2t_full() - Send full handler (called from HTC)
3710  * @context:	Opaque context (HTT SOC handle)
3711  * @pkt:	HTC packet
3712  *
3713  * Return: enum htc_send_full_action
3714  */
3715 static enum htc_send_full_action
3716 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
3717 {
3718 	return HTC_SEND_FULL_KEEP;
3719 }
3720 
3721 /*
3722  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3723  * @context:	Opaque context (HTT SOC handle)
3724  * @nbuf:	nbuf containing T2H message
3725  * @pipe_id:	HIF pipe ID
3726  *
3727  * Return: QDF_STATUS
3728  *
3729  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3730  * will be used for packet log and other high-priority HTT messages. Proper
3731  * HTC connection to be added later once required FW changes are available
3732  */
3733 static QDF_STATUS
3734 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3735 {
3736 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3737 	HTC_PACKET htc_pkt;
3738 
3739 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3740 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3741 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3742 	htc_pkt.pPktContext = (void *)nbuf;
3743 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3744 
3745 	return rc;
3746 }
3747 
3748 /*
3749  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3750  * @htt_soc:	HTT SOC handle
3751  *
3752  * Return: QDF_STATUS
3753  */
3754 static QDF_STATUS
3755 htt_htc_soc_attach(struct htt_soc *soc)
3756 {
3757 	struct htc_service_connect_req connect;
3758 	struct htc_service_connect_resp response;
3759 	QDF_STATUS status;
3760 	struct dp_soc *dpsoc = soc->dp_soc;
3761 
3762 	qdf_mem_zero(&connect, sizeof(connect));
3763 	qdf_mem_zero(&response, sizeof(response));
3764 
3765 	connect.pMetaData = NULL;
3766 	connect.MetaDataLength = 0;
3767 	connect.EpCallbacks.pContext = soc;
3768 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3769 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3770 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3771 
3772 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3773 	connect.EpCallbacks.EpRecvRefill = NULL;
3774 
3775 	/* N/A, fill is done by HIF */
3776 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3777 
3778 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3779 	/*
3780 	 * Specify how deep to let a queue get before htc_send_pkt will
3781 	 * call the EpSendFull function due to excessive send queue depth.
3782 	 */
3783 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3784 
3785 	/* disable flow control for HTT data message service */
3786 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3787 
3788 	/* connect to control service */
3789 	connect.service_id = HTT_DATA_MSG_SVC;
3790 
3791 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3792 
3793 	if (status != QDF_STATUS_SUCCESS)
3794 		return status;
3795 
3796 	soc->htc_endpoint = response.Endpoint;
3797 
3798 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3799 
3800 	htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc);
3801 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3802 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3803 
3804 	return QDF_STATUS_SUCCESS; /* success */
3805 }
3806 
3807 /*
3808  * htt_soc_initialize() - SOC level HTT initialization
3809  * @htt_soc: Opaque htt SOC handle
3810  * @ctrl_psoc: Opaque ctrl SOC handle
3811  * @htc_soc: SOC level HTC handle
3812  * @hal_soc: Opaque HAL SOC handle
3813  * @osdev: QDF device
3814  *
3815  * Return: HTT handle on success; NULL on failure
3816  */
3817 void *
3818 htt_soc_initialize(struct htt_soc *htt_soc,
3819 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
3820 		   HTC_HANDLE htc_soc,
3821 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
3822 {
3823 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3824 
3825 	soc->osdev = osdev;
3826 	soc->ctrl_psoc = ctrl_psoc;
3827 	soc->htc_soc = htc_soc;
3828 	soc->hal_soc = hal_soc_hdl;
3829 
3830 	if (htt_htc_soc_attach(soc))
3831 		goto fail2;
3832 
3833 	return soc;
3834 
3835 fail2:
3836 	return NULL;
3837 }
3838 
/*
 * htt_soc_htc_dealloc() - Release HTT logging and HTC packet pools
 * @htt_handle: HTT SOC handle
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	/* Tear down HTT event logging before releasing the packet pools */
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
3845 
3846 /*
3847  * htt_soc_htc_prealloc() - HTC memory prealloc
3848  * @htt_soc: SOC level HTT handle
3849  *
3850  * Return: QDF_STATUS_SUCCESS on Success or
3851  * QDF_STATUS_E_NOMEM on allocation failure
3852  */
3853 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
3854 {
3855 	int i;
3856 
3857 	soc->htt_htc_pkt_freelist = NULL;
3858 	/* pre-allocate some HTC_PACKET objects */
3859 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3860 		struct dp_htt_htc_pkt_union *pkt;
3861 		pkt = qdf_mem_malloc(sizeof(*pkt));
3862 		if (!pkt)
3863 			return QDF_STATUS_E_NOMEM;
3864 
3865 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3866 	}
3867 	return QDF_STATUS_SUCCESS;
3868 }
3869 
3870 /*
3871  * htt_soc_detach() - Free SOC level HTT handle
3872  * @htt_hdl: HTT SOC handle
3873  */
3874 void htt_soc_detach(struct htt_soc *htt_hdl)
3875 {
3876 	int i;
3877 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
3878 
3879 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3880 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_path);
3881 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_path);
3882 	}
3883 
3884 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
3885 	qdf_mem_free(htt_handle);
3886 
3887 }
3888 
3889 /**
3890  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
3891  * @pdev: DP PDEV handle
3892  * @stats_type_upload_mask: stats type requested by user
3893  * @config_param_0: extra configuration parameters
3894  * @config_param_1: extra configuration parameters
3895  * @config_param_2: extra configuration parameters
3896  * @config_param_3: extra configuration parameters
3897  * @mac_id: mac number
3898  *
3899  * return: QDF STATUS
3900  */
3901 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3902 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3903 		uint32_t config_param_1, uint32_t config_param_2,
3904 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3905 		uint8_t mac_id)
3906 {
3907 	struct htt_soc *soc = pdev->soc->htt_handle;
3908 	struct dp_htt_htc_pkt *pkt;
3909 	qdf_nbuf_t msg;
3910 	uint32_t *msg_word;
3911 	uint8_t pdev_mask = 0;
3912 	uint8_t *htt_logger_bufp;
3913 	int mac_for_pdev;
3914 	int target_pdev_id;
3915 	QDF_STATUS status;
3916 
3917 	msg = qdf_nbuf_alloc(
3918 			soc->osdev,
3919 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3920 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3921 
3922 	if (!msg)
3923 		return QDF_STATUS_E_NOMEM;
3924 
3925 	/*TODO:Add support for SOC stats
3926 	 * Bit 0: SOC Stats
3927 	 * Bit 1: Pdev stats for pdev id 0
3928 	 * Bit 2: Pdev stats for pdev id 1
3929 	 * Bit 3: Pdev stats for pdev id 2
3930 	 */
3931 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3932 	target_pdev_id =
3933 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
3934 
3935 	pdev_mask = 1 << target_pdev_id;
3936 
3937 	/*
3938 	 * Set the length of the message.
3939 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3940 	 * separately during the below call to qdf_nbuf_push_head.
3941 	 * The contribution from the HTC header is added separately inside HTC.
3942 	 */
3943 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3944 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3945 				"Failed to expand head for HTT_EXT_STATS");
3946 		qdf_nbuf_free(msg);
3947 		return QDF_STATUS_E_FAILURE;
3948 	}
3949 
3950 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3951 
3952 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3953 	htt_logger_bufp = (uint8_t *)msg_word;
3954 	*msg_word = 0;
3955 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
3956 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
3957 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
3958 
3959 	/* word 1 */
3960 	msg_word++;
3961 	*msg_word = 0;
3962 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
3963 
3964 	/* word 2 */
3965 	msg_word++;
3966 	*msg_word = 0;
3967 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
3968 
3969 	/* word 3 */
3970 	msg_word++;
3971 	*msg_word = 0;
3972 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
3973 
3974 	/* word 4 */
3975 	msg_word++;
3976 	*msg_word = 0;
3977 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
3978 
3979 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
3980 
3981 	/* word 5 */
3982 	msg_word++;
3983 
3984 	/* word 6 */
3985 	msg_word++;
3986 	*msg_word = 0;
3987 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
3988 
3989 	/* word 7 */
3990 	msg_word++;
3991 	*msg_word = 0;
3992 	/* Currently Using last 2 bits for pdev_id
3993 	 * For future reference, reserving 3 bits in cookie_msb for pdev_id
3994 	 */
3995 	cookie_msb = (cookie_msb | pdev->pdev_id);
3996 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
3997 
3998 	pkt = htt_htc_pkt_alloc(soc);
3999 	if (!pkt) {
4000 		qdf_nbuf_free(msg);
4001 		return QDF_STATUS_E_NOMEM;
4002 	}
4003 
4004 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4005 
4006 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4007 			dp_htt_h2t_send_complete_free_netbuf,
4008 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4009 			soc->htc_endpoint,
4010 			/* tag for FW response msg not guaranteed */
4011 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4012 
4013 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4014 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
4015 				     htt_logger_bufp);
4016 
4017 	if (status != QDF_STATUS_SUCCESS) {
4018 		qdf_nbuf_free(msg);
4019 		htt_htc_pkt_free(soc, pkt);
4020 	}
4021 
4022 	return status;
4023 }
4024 
4025 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
4026 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK 0xFFFFFFFF
4027 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK 0xFFFFFFFF00000000
4028 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT 32
4029 
/**
 * dp_h2t_hw_vdev_stats_config_send() - Send HW vdev txrx stats config to FW
 * @dpsoc: DP SoC handle
 * @pdev_id: host pdev id, or INVALID_PDEV_ID to address pdev 0
 * @enable: enable/disable periodic HW vdev stats collection
 * @reset: request FW to reset the stats selected by @reset_bitmask
 * @reset_bitmask: 64-bit bitmask of vdev stats to reset, split into the
 *                 two trailing message words (low word first)
 *
 * Return: QDF_STATUS_SUCCESS when the message was handed to HTC,
 *         error status otherwise
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	struct htt_soc *soc = dpsoc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;
	int duration;
	uint32_t bitmask;
	int target_pdev_id;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_vdevs_txrx_stats_cfg)),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate "
		"HTT_H2T_HW_VDEV_TXRX_STATS_CFG_MSG_SZ msg buffer", dpsoc);
		return QDF_STATUS_E_NOMEM;
	}

	/* map host pdev id to target numbering; INVALID_PDEV_ID targets 0 */
	if (pdev_id != INVALID_PDEV_ID)
		target_pdev_id = DP_SW2HW_MACID(pdev_id);
	else
		target_pdev_id = 0;

	duration =
	wlan_cfg_get_vdev_stats_hw_offload_timer(dpsoc->wlan_cfg_ctx);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_vdevs_txrx_stats_cfg))) {
		dp_htt_err("%pK: Failed to expand head for HTT_HW_VDEV_STATS"
			   , dpsoc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;

	/* word 0: msg type, pdev id, enable flag, interval and reset flag */
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG);
	HTT_RX_VDEVS_TXRX_STATS_PDEV_ID_SET(*msg_word, target_pdev_id);

	HTT_RX_VDEVS_TXRX_STATS_ENABLE_SET(*msg_word, enable);

	/* interval field carries the configured timer in units of 8 ms,
	 * hence the divide-by-8 shift
	 */
	HTT_RX_VDEVS_TXRX_STATS_PERIODIC_INTERVAL_SET(*msg_word,
						      (duration >> 3));

	HTT_RX_VDEVS_TXRX_STATS_RESET_STATS_BITS_SET(*msg_word, reset);

	/* word 1: lower 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask = (reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK);
	*msg_word = bitmask;

	/* word 2: upper 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask =
		((reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK) >>
		 HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT);
	*msg_word = bitmask;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer",
			   dpsoc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG,
				     htt_logger_bufp);

	/* on send failure the nbuf and HTC packet are still ours to free */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4135 #else
/* Stub used when QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT is not compiled in:
 * HW vdev stats offload is unavailable, so the request is a no-op that
 * reports success.
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	return QDF_STATUS_SUCCESS;
}
4142 #endif
4143 
4144 /**
4145  * dp_h2t_3tuple_config_send(): function to contruct 3 tuple configuration
4146  * HTT message to pass to FW
4147  * @pdev: DP PDEV handle
4148  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
4149  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
4150  *
4151  * tuple_mask[1:0]:
4152  *   00 - Do not report 3 tuple hash value
4153  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
4154  *   01 - Report 3 tuple hash value in flow_id_toeplitz
4155  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
4156  *
4157  * return: QDF STATUS
4158  */
4159 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
4160 				     uint32_t tuple_mask, uint8_t mac_id)
4161 {
4162 	struct htt_soc *soc = pdev->soc->htt_handle;
4163 	struct dp_htt_htc_pkt *pkt;
4164 	qdf_nbuf_t msg;
4165 	uint32_t *msg_word;
4166 	uint8_t *htt_logger_bufp;
4167 	int mac_for_pdev;
4168 	int target_pdev_id;
4169 
4170 	msg = qdf_nbuf_alloc(
4171 			soc->osdev,
4172 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
4173 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4174 
4175 	if (!msg)
4176 		return QDF_STATUS_E_NOMEM;
4177 
4178 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4179 	target_pdev_id =
4180 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
4181 
4182 	/*
4183 	 * Set the length of the message.
4184 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4185 	 * separately during the below call to qdf_nbuf_push_head.
4186 	 * The contribution from the HTC header is added separately inside HTC.
4187 	 */
4188 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
4189 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4190 			  "Failed to expand head for HTT_3TUPLE_CONFIG");
4191 		qdf_nbuf_free(msg);
4192 		return QDF_STATUS_E_FAILURE;
4193 	}
4194 
4195 	dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------",
4196 		    pdev->soc, tuple_mask, target_pdev_id);
4197 
4198 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
4199 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4200 	htt_logger_bufp = (uint8_t *)msg_word;
4201 
4202 	*msg_word = 0;
4203 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
4204 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
4205 
4206 	msg_word++;
4207 	*msg_word = 0;
4208 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4209 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4210 
4211 	pkt = htt_htc_pkt_alloc(soc);
4212 	if (!pkt) {
4213 		qdf_nbuf_free(msg);
4214 		return QDF_STATUS_E_NOMEM;
4215 	}
4216 
4217 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4218 
4219 	SET_HTC_PACKET_INFO_TX(
4220 			&pkt->htc_pkt,
4221 			dp_htt_h2t_send_complete_free_netbuf,
4222 			qdf_nbuf_data(msg),
4223 			qdf_nbuf_len(msg),
4224 			soc->htc_endpoint,
4225 			/* tag for no FW response msg */
4226 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4227 
4228 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4229 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
4230 			    htt_logger_bufp);
4231 
4232 	return QDF_STATUS_SUCCESS;
4233 }
4234 
/* This macro will be removed once a proper HTT header definition for
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is added in the htt.h file
 */
4238 #if defined(WDI_EVENT_ENABLE)
4239 /**
4240  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
4241  * @pdev: DP PDEV handle
4242  * @stats_type_upload_mask: stats type requested by user
4243  * @mac_id: Mac id number
4244  *
4245  * return: QDF STATUS
4246  */
4247 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
4248 		uint32_t stats_type_upload_mask, uint8_t mac_id)
4249 {
4250 	struct htt_soc *soc = pdev->soc->htt_handle;
4251 	struct dp_htt_htc_pkt *pkt;
4252 	qdf_nbuf_t msg;
4253 	uint32_t *msg_word;
4254 	uint8_t pdev_mask;
4255 	QDF_STATUS status;
4256 
4257 	msg = qdf_nbuf_alloc(
4258 			soc->osdev,
4259 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
4260 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
4261 
4262 	if (!msg) {
4263 		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer"
4264 			   , pdev->soc);
4265 		qdf_assert(0);
4266 		return QDF_STATUS_E_NOMEM;
4267 	}
4268 
4269 	/*TODO:Add support for SOC stats
4270 	 * Bit 0: SOC Stats
4271 	 * Bit 1: Pdev stats for pdev id 0
4272 	 * Bit 2: Pdev stats for pdev id 1
4273 	 * Bit 3: Pdev stats for pdev id 2
4274 	 */
4275 	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
4276 								mac_id);
4277 
4278 	/*
4279 	 * Set the length of the message.
4280 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4281 	 * separately during the below call to qdf_nbuf_push_head.
4282 	 * The contribution from the HTC header is added separately inside HTC.
4283 	 */
4284 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
4285 		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS"
4286 			   , pdev->soc);
4287 		qdf_nbuf_free(msg);
4288 		return QDF_STATUS_E_FAILURE;
4289 	}
4290 
4291 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
4292 
4293 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4294 	*msg_word = 0;
4295 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
4296 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
4297 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
4298 			stats_type_upload_mask);
4299 
4300 	pkt = htt_htc_pkt_alloc(soc);
4301 	if (!pkt) {
4302 		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer", pdev->soc);
4303 		qdf_assert(0);
4304 		qdf_nbuf_free(msg);
4305 		return QDF_STATUS_E_NOMEM;
4306 	}
4307 
4308 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4309 
4310 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4311 			dp_htt_h2t_send_complete_free_netbuf,
4312 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4313 			soc->htc_endpoint,
4314 			/* tag for no FW response msg */
4315 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4316 
4317 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4318 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
4319 				     (uint8_t *)msg_word);
4320 
4321 	if (status != QDF_STATUS_SUCCESS) {
4322 		qdf_nbuf_free(msg);
4323 		htt_htc_pkt_free(soc, pkt);
4324 	}
4325 
4326 	return status;
4327 }
4328 
4329 qdf_export_symbol(dp_h2t_cfg_stats_msg_send);
4330 #endif
4331 
4332 void
4333 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
4334 			     uint32_t *tag_buf)
4335 {
4336 	struct dp_peer *peer = NULL;
4337 	switch (tag_type) {
4338 	case HTT_STATS_PEER_DETAILS_TAG:
4339 	{
4340 		htt_peer_details_tlv *dp_stats_buf =
4341 			(htt_peer_details_tlv *)tag_buf;
4342 
4343 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
4344 	}
4345 	break;
4346 	case HTT_STATS_PEER_STATS_CMN_TAG:
4347 	{
4348 		htt_peer_stats_cmn_tlv *dp_stats_buf =
4349 			(htt_peer_stats_cmn_tlv *)tag_buf;
4350 
4351 		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
4352 					     DP_MOD_ID_HTT);
4353 
4354 		if (peer && !peer->bss_peer) {
4355 			peer->stats.tx.inactive_time =
4356 				dp_stats_buf->inactive_time;
4357 			qdf_event_set(&pdev->fw_peer_stats_event);
4358 		}
4359 		if (peer)
4360 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4361 	}
4362 	break;
4363 	default:
4364 		qdf_err("Invalid tag_type");
4365 	}
4366 }
4367 
4368 /**
4369  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
4370  * @pdev: DP pdev handle
4371  * @fse_setup_info: FST setup parameters
4372  *
4373  * Return: Success when HTT message is sent, error on failure
4374  */
4375 QDF_STATUS
4376 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
4377 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
4378 {
4379 	struct htt_soc *soc = pdev->soc->htt_handle;
4380 	struct dp_htt_htc_pkt *pkt;
4381 	qdf_nbuf_t msg;
4382 	u_int32_t *msg_word;
4383 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4384 	uint8_t *htt_logger_bufp;
4385 	u_int32_t *key;
4386 	QDF_STATUS status;
4387 
4388 	msg = qdf_nbuf_alloc(
4389 		soc->osdev,
4390 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4391 		/* reserve room for the HTC header */
4392 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4393 
4394 	if (!msg)
4395 		return QDF_STATUS_E_NOMEM;
4396 
4397 	/*
4398 	 * Set the length of the message.
4399 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4400 	 * separately during the below call to qdf_nbuf_push_head.
4401 	 * The contribution from the HTC header is added separately inside HTC.
4402 	 */
4403 	if (!qdf_nbuf_put_tail(msg,
4404 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4405 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
4406 		return QDF_STATUS_E_FAILURE;
4407 	}
4408 
4409 	/* fill in the message contents */
4410 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4411 
4412 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4413 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4414 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4415 	htt_logger_bufp = (uint8_t *)msg_word;
4416 
4417 	*msg_word = 0;
4418 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4419 
4420 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4421 
4422 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4423 
4424 	msg_word++;
4425 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4426 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4427 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4428 					     fse_setup_info->ip_da_sa_prefix);
4429 
4430 	msg_word++;
4431 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4432 					  fse_setup_info->base_addr_lo);
4433 	msg_word++;
4434 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4435 					  fse_setup_info->base_addr_hi);
4436 
4437 	key = (u_int32_t *)fse_setup_info->hash_key;
4438 	fse_setup->toeplitz31_0 = *key++;
4439 	fse_setup->toeplitz63_32 = *key++;
4440 	fse_setup->toeplitz95_64 = *key++;
4441 	fse_setup->toeplitz127_96 = *key++;
4442 	fse_setup->toeplitz159_128 = *key++;
4443 	fse_setup->toeplitz191_160 = *key++;
4444 	fse_setup->toeplitz223_192 = *key++;
4445 	fse_setup->toeplitz255_224 = *key++;
4446 	fse_setup->toeplitz287_256 = *key++;
4447 	fse_setup->toeplitz314_288 = *key;
4448 
4449 	msg_word++;
4450 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4451 	msg_word++;
4452 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4453 	msg_word++;
4454 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4455 	msg_word++;
4456 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4457 	msg_word++;
4458 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4459 	msg_word++;
4460 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4461 	msg_word++;
4462 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4463 	msg_word++;
4464 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4465 	msg_word++;
4466 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4467 	msg_word++;
4468 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4469 					  fse_setup->toeplitz314_288);
4470 
4471 	pkt = htt_htc_pkt_alloc(soc);
4472 	if (!pkt) {
4473 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4474 		qdf_assert(0);
4475 		qdf_nbuf_free(msg);
4476 		return QDF_STATUS_E_RESOURCES; /* failure */
4477 	}
4478 
4479 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4480 
4481 	SET_HTC_PACKET_INFO_TX(
4482 		&pkt->htc_pkt,
4483 		dp_htt_h2t_send_complete_free_netbuf,
4484 		qdf_nbuf_data(msg),
4485 		qdf_nbuf_len(msg),
4486 		soc->htc_endpoint,
4487 		/* tag for no FW response msg */
4488 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4489 
4490 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4491 
4492 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4493 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4494 				     htt_logger_bufp);
4495 
4496 	if (status == QDF_STATUS_SUCCESS) {
4497 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4498 			fse_setup_info->pdev_id);
4499 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4500 				   (void *)fse_setup_info->hash_key,
4501 				   fse_setup_info->hash_key_len);
4502 	} else {
4503 		qdf_nbuf_free(msg);
4504 		htt_htc_pkt_free(soc, pkt);
4505 	}
4506 
4507 	return status;
4508 }
4509 
4510 /**
4511  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
4512  * add/del a flow in HW
4513  * @pdev: DP pdev handle
4514  * @fse_op_info: Flow entry parameters
4515  *
4516  * Return: Success when HTT message is sent, error on failure
4517  */
4518 QDF_STATUS
4519 dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
4520 			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
4521 {
4522 	struct htt_soc *soc = pdev->soc->htt_handle;
4523 	struct dp_htt_htc_pkt *pkt;
4524 	qdf_nbuf_t msg;
4525 	u_int32_t *msg_word;
4526 	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
4527 	uint8_t *htt_logger_bufp;
4528 	QDF_STATUS status;
4529 
4530 	msg = qdf_nbuf_alloc(
4531 		soc->osdev,
4532 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
4533 		/* reserve room for the HTC header */
4534 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4535 	if (!msg)
4536 		return QDF_STATUS_E_NOMEM;
4537 
4538 	/*
4539 	 * Set the length of the message.
4540 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4541 	 * separately during the below call to qdf_nbuf_push_head.
4542 	 * The contribution from the HTC header is added separately inside HTC.
4543 	 */
4544 	if (!qdf_nbuf_put_tail(msg,
4545 			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
4546 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4547 		qdf_nbuf_free(msg);
4548 		return QDF_STATUS_E_FAILURE;
4549 	}
4550 
4551 	/* fill in the message contents */
4552 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4553 
4554 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
4555 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4556 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4557 	htt_logger_bufp = (uint8_t *)msg_word;
4558 
4559 	*msg_word = 0;
4560 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
4561 
4562 	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
4563 
4564 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
4565 	msg_word++;
4566 	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
4567 	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
4568 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4569 					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
4570 		msg_word++;
4571 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4572 		*msg_word,
4573 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
4574 		msg_word++;
4575 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4576 		*msg_word,
4577 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
4578 		msg_word++;
4579 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4580 		*msg_word,
4581 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
4582 		msg_word++;
4583 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4584 		*msg_word,
4585 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
4586 		msg_word++;
4587 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4588 		*msg_word,
4589 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
4590 		msg_word++;
4591 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4592 		*msg_word,
4593 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
4594 		msg_word++;
4595 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4596 		*msg_word,
4597 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
4598 		msg_word++;
4599 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4600 		*msg_word,
4601 		qdf_htonl(
4602 		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
4603 		msg_word++;
4604 		HTT_RX_FSE_SOURCEPORT_SET(
4605 			*msg_word,
4606 			fse_op_info->rx_flow->flow_tuple_info.src_port);
4607 		HTT_RX_FSE_DESTPORT_SET(
4608 			*msg_word,
4609 			fse_op_info->rx_flow->flow_tuple_info.dest_port);
4610 		msg_word++;
4611 		HTT_RX_FSE_L4_PROTO_SET(
4612 			*msg_word,
4613 			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
4614 	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
4615 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4616 					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
4617 	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
4618 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
4619 	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
4620 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
4621 	}
4622 
4623 	pkt = htt_htc_pkt_alloc(soc);
4624 	if (!pkt) {
4625 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4626 		qdf_assert(0);
4627 		qdf_nbuf_free(msg);
4628 		return QDF_STATUS_E_RESOURCES; /* failure */
4629 	}
4630 
4631 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4632 
4633 	SET_HTC_PACKET_INFO_TX(
4634 		&pkt->htc_pkt,
4635 		dp_htt_h2t_send_complete_free_netbuf,
4636 		qdf_nbuf_data(msg),
4637 		qdf_nbuf_len(msg),
4638 		soc->htc_endpoint,
4639 		/* tag for no FW response msg */
4640 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4641 
4642 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4643 
4644 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4645 				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
4646 				     htt_logger_bufp);
4647 
4648 	if (status == QDF_STATUS_SUCCESS) {
4649 		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
4650 			fse_op_info->pdev_id);
4651 	} else {
4652 		qdf_nbuf_free(msg);
4653 		htt_htc_pkt_free(soc, pkt);
4654 	}
4655 
4656 	return status;
4657 }
4658 
4659 /**
4660  * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
4661  * @pdev: DP pdev handle
4662  * @fse_op_info: Flow entry parameters
4663  *
4664  * Return: Success when HTT message is sent, error on failure
4665  */
4666 QDF_STATUS
4667 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
4668 		      struct dp_htt_rx_fisa_cfg *fisa_config)
4669 {
4670 	struct htt_soc *soc = pdev->soc->htt_handle;
4671 	struct dp_htt_htc_pkt *pkt;
4672 	qdf_nbuf_t msg;
4673 	u_int32_t *msg_word;
4674 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
4675 	uint8_t *htt_logger_bufp;
4676 	uint32_t len;
4677 	QDF_STATUS status;
4678 
4679 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
4680 
4681 	msg = qdf_nbuf_alloc(soc->osdev,
4682 			     len,
4683 			     /* reserve room for the HTC header */
4684 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
4685 			     4,
4686 			     TRUE);
4687 	if (!msg)
4688 		return QDF_STATUS_E_NOMEM;
4689 
4690 	/*
4691 	 * Set the length of the message.
4692 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4693 	 * separately during the below call to qdf_nbuf_push_head.
4694 	 * The contribution from the HTC header is added separately inside HTC.
4695 	 */
4696 	if (!qdf_nbuf_put_tail(msg,
4697 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
4698 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4699 		qdf_nbuf_free(msg);
4700 		return QDF_STATUS_E_FAILURE;
4701 	}
4702 
4703 	/* fill in the message contents */
4704 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4705 
4706 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
4707 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4708 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4709 	htt_logger_bufp = (uint8_t *)msg_word;
4710 
4711 	*msg_word = 0;
4712 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
4713 
4714 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
4715 
4716 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);
4717 
4718 	msg_word++;
4719 	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
4720 	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);
4721 
4722 	msg_word++;
4723 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
4724 
4725 	pkt = htt_htc_pkt_alloc(soc);
4726 	if (!pkt) {
4727 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4728 		qdf_assert(0);
4729 		qdf_nbuf_free(msg);
4730 		return QDF_STATUS_E_RESOURCES; /* failure */
4731 	}
4732 
4733 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4734 
4735 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4736 			       dp_htt_h2t_send_complete_free_netbuf,
4737 			       qdf_nbuf_data(msg),
4738 			       qdf_nbuf_len(msg),
4739 			       soc->htc_endpoint,
4740 			       /* tag for no FW response msg */
4741 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
4742 
4743 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4744 
4745 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
4746 				     htt_logger_bufp);
4747 
4748 	if (status == QDF_STATUS_SUCCESS) {
4749 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
4750 			fisa_config->pdev_id);
4751 	} else {
4752 		qdf_nbuf_free(msg);
4753 		htt_htc_pkt_free(soc, pkt);
4754 	}
4755 
4756 	return status;
4757 }
4758 
4759 #ifdef WLAN_SUPPORT_PPEDS
4760 /**
4761  * dp_htt_rxdma_rxole_ppe_cfg_set() - Send RxOLE and RxDMA PPE config
4762  * @dp_osc: Data path SoC handle
4763  * @cfg: RxDMA and RxOLE PPE config
4764  *
4765  * Return: Success when HTT message is sent, error on failure
4766  */
4767 QDF_STATUS
4768 dp_htt_rxdma_rxole_ppe_cfg_set(struct dp_soc *soc,
4769 			       struct dp_htt_rxdma_rxole_ppe_config *cfg)
4770 {
4771 	struct htt_soc *htt_handle = soc->htt_handle;
4772 	uint32_t len;
4773 	qdf_nbuf_t msg;
4774 	u_int32_t *msg_word;
4775 	QDF_STATUS status;
4776 	uint8_t *htt_logger_bufp;
4777 	struct dp_htt_htc_pkt *pkt;
4778 
4779 	len = HTT_MSG_BUF_SIZE(
4780 	      sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));
4781 
4782 	msg = qdf_nbuf_alloc(soc->osdev,
4783 			     len,
4784 			     /* reserve room for the HTC header */
4785 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
4786 			     4,
4787 			     TRUE);
4788 	if (!msg)
4789 		return QDF_STATUS_E_NOMEM;
4790 
4791 	/*
4792 	 * Set the length of the message.
4793 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4794 	 * separately during the below call to qdf_nbuf_push_head.
4795 	 * The contribution from the HTC header is added separately inside HTC.
4796 	 */
4797 	if (!qdf_nbuf_put_tail(
4798 		msg, sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t))) {
4799 		qdf_err("Failed to expand head for HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG msg");
4800 		qdf_nbuf_free(msg);
4801 		return QDF_STATUS_E_FAILURE;
4802 	}
4803 
4804 	/* fill in the message contents */
4805 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4806 
4807 	memset(msg_word, 0,
4808 	       sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));
4809 
4810 	/* Rewind beyond alignment pad to get to the HTC header reserved area */
4811 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4812 	htt_logger_bufp = (uint8_t *)msg_word;
4813 
4814 	*msg_word = 0;
4815 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG);
4816 	HTT_PPE_CFG_OVERRIDE_SET(*msg_word, cfg->override);
4817 	HTT_PPE_CFG_REO_DEST_IND_SET(
4818 			*msg_word, cfg->reo_destination_indication);
4819 	HTT_PPE_CFG_MULTI_BUF_MSDU_OVERRIDE_EN_SET(
4820 			*msg_word, cfg->multi_buffer_msdu_override_en);
4821 	HTT_PPE_CFG_INTRA_BSS_OVERRIDE_EN_SET(
4822 			*msg_word, cfg->intra_bss_override);
4823 	HTT_PPE_CFG_DECAP_RAW_OVERRIDE_EN_SET(
4824 			*msg_word, cfg->decap_raw_override);
4825 	HTT_PPE_CFG_DECAP_NWIFI_OVERRIDE_EN_SET(
4826 			*msg_word, cfg->decap_nwifi_override);
4827 	HTT_PPE_CFG_IP_FRAG_OVERRIDE_EN_SET(
4828 			*msg_word, cfg->ip_frag_override);
4829 
4830 	pkt = htt_htc_pkt_alloc(htt_handle);
4831 	if (!pkt) {
4832 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4833 		qdf_assert(0);
4834 		qdf_nbuf_free(msg);
4835 		return QDF_STATUS_E_RESOURCES; /* failure */
4836 	}
4837 
4838 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4839 
4840 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4841 			       dp_htt_h2t_send_complete_free_netbuf,
4842 			       qdf_nbuf_data(msg),
4843 			       qdf_nbuf_len(msg),
4844 			       htt_handle->htc_endpoint,
4845 			       /* tag for no FW response msg */
4846 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
4847 
4848 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4849 
4850 	status = DP_HTT_SEND_HTC_PKT(htt_handle, pkt,
4851 				     HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG,
4852 				     htt_logger_bufp);
4853 
4854 	if (status != QDF_STATUS_SUCCESS) {
4855 		qdf_nbuf_free(msg);
4856 		htt_htc_pkt_free(htt_handle, pkt);
4857 		return status;
4858 	}
4859 
4860 	dp_info("HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG sent");
4861 	return status;
4862 }
4863 #endif /* WLAN_SUPPORT_PPEDS */
4864 
4865 /**
4866  * dp_bk_pressure_stats_handler(): worker function to print back pressure
4867  *				   stats
4868  *
4869  * @context : argument to work function
4870  */
4871 static void dp_bk_pressure_stats_handler(void *context)
4872 {
4873 	struct dp_pdev *pdev = (struct dp_pdev *)context;
4874 	struct dp_soc_srngs_state *soc_srngs_state = NULL;
4875 	const char *ring_name;
4876 	int i;
4877 	struct dp_srng_ring_state *ring_state;
4878 	bool empty_flag;
4879 
4880 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4881 
4882 	/* Extract only first entry for printing in one work event */
4883 	if (pdev->bkp_stats.queue_depth &&
4884 	    !TAILQ_EMPTY(&pdev->bkp_stats.list)) {
4885 		soc_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
4886 		TAILQ_REMOVE(&pdev->bkp_stats.list, soc_srngs_state,
4887 			     list_elem);
4888 		pdev->bkp_stats.queue_depth--;
4889 	}
4890 
4891 	empty_flag = TAILQ_EMPTY(&pdev->bkp_stats.list);
4892 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4893 
4894 	if (soc_srngs_state) {
4895 		DP_PRINT_STATS("### BKP stats for seq_num %u START ###",
4896 			       soc_srngs_state->seq_num);
4897 		for (i = 0; i < soc_srngs_state->max_ring_id; i++) {
4898 			ring_state = &soc_srngs_state->ring_state[i];
4899 			ring_name = dp_srng_get_str_from_hal_ring_type
4900 						(ring_state->ring_type);
4901 			DP_PRINT_STATS("%s: SW:Head pointer = %d Tail Pointer = %d\n",
4902 				       ring_name,
4903 				       ring_state->sw_head,
4904 				       ring_state->sw_tail);
4905 
4906 			DP_PRINT_STATS("%s: HW:Head pointer = %d Tail Pointer = %d\n",
4907 				       ring_name,
4908 				       ring_state->hw_head,
4909 				       ring_state->hw_tail);
4910 		}
4911 
4912 		DP_PRINT_STATS("### BKP stats for seq_num %u COMPLETE ###",
4913 			       soc_srngs_state->seq_num);
4914 		qdf_mem_free(soc_srngs_state);
4915 	}
4916 	dp_print_napi_stats(pdev->soc);
4917 
4918 	/* Schedule work again if queue is not empty */
4919 	if (!empty_flag)
4920 		qdf_queue_work(0, pdev->bkp_stats.work_queue,
4921 			       &pdev->bkp_stats.work);
4922 }
4923 
4924 /*
4925  * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
4926  *				processing
4927  * @pdev: Datapath PDEV handle
4928  *
4929  */
4930 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev)
4931 {
4932 	struct dp_soc_srngs_state *ring_state, *ring_state_next;
4933 
4934 	if (!pdev->bkp_stats.work_queue)
4935 		return;
4936 
4937 	qdf_flush_workqueue(0, pdev->bkp_stats.work_queue);
4938 	qdf_destroy_workqueue(0, pdev->bkp_stats.work_queue);
4939 	qdf_flush_work(&pdev->bkp_stats.work);
4940 	qdf_disable_work(&pdev->bkp_stats.work);
4941 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4942 	TAILQ_FOREACH_SAFE(ring_state, &pdev->bkp_stats.list,
4943 			   list_elem, ring_state_next) {
4944 		TAILQ_REMOVE(&pdev->bkp_stats.list, ring_state,
4945 			     list_elem);
4946 		qdf_mem_free(ring_state);
4947 	}
4948 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4949 	qdf_spinlock_destroy(&pdev->bkp_stats.list_lock);
4950 }
4951 
4952 /*
4953  * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
4954  *				processing
4955  * @pdev: Datapath PDEV handle
4956  *
4957  * Return: QDF_STATUS_SUCCESS: Success
4958  *         QDF_STATUS_E_NOMEM: Error
4959  */
4960 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev)
4961 {
4962 	TAILQ_INIT(&pdev->bkp_stats.list);
4963 	pdev->bkp_stats.seq_num = 0;
4964 	pdev->bkp_stats.queue_depth = 0;
4965 
4966 	qdf_create_work(0, &pdev->bkp_stats.work,
4967 			dp_bk_pressure_stats_handler, pdev);
4968 
4969 	pdev->bkp_stats.work_queue =
4970 		qdf_alloc_unbound_workqueue("dp_bkp_work_queue");
4971 	if (!pdev->bkp_stats.work_queue)
4972 		goto fail;
4973 
4974 	qdf_spinlock_create(&pdev->bkp_stats.list_lock);
4975 	return QDF_STATUS_SUCCESS;
4976 
4977 fail:
4978 	dp_htt_alert("BKP stats attach failed");
4979 	qdf_flush_work(&pdev->bkp_stats.work);
4980 	qdf_disable_work(&pdev->bkp_stats.work);
4981 	return QDF_STATUS_E_FAILURE;
4982 }
4983 
4984 #ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_htt_umac_reset_send_setup_cmd() - Send the UMAC hang recovery
 *	prerequisite setup HTT message to the firmware
 * @soc: DP SoC handle
 * @setup_params: MSI data and shared-memory address used for the
 *	recovery handshake
 *
 * Payload layout (one uint32_t per word, written sequentially below):
 * word0 - msg type + T2H/H2T method (MSI + H2T polling)
 * word1 - MSI data
 * word2 - size of the shared-memory handshake structure
 * word3 - shared-memory address, low 32 bits
 * word4 - shared-memory address, high 32 bits
 *
 * Return: QDF_STATUS_SUCCESS when the HTT message was handed to HTC,
 *	QDF_STATUS_E_NOMEM / QDF_STATUS_E_FAILURE otherwise
 */
QDF_STATUS dp_htt_umac_reset_send_setup_cmd(
		struct dp_soc *soc,
		const struct dp_htt_umac_reset_setup_cmd_params *setup_params)
{
	struct htt_soc *htt_handle = soc->htt_handle;
	uint32_t len;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	QDF_STATUS status;
	uint8_t *htt_logger_bufp;
	struct dp_htt_htc_pkt *pkt;

	len = HTT_MSG_BUF_SIZE(
		HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES);

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(
		msg, HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES)) {
		dp_htt_err("Failed to expand head");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	/* Rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	/* Zero the payload before setting the individual bit-fields */
	qdf_mem_zero(msg_word,
		     HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES);

	/* word0: msg type and the T2H/H2T notification methods */
	HTT_H2T_MSG_TYPE_SET(
		*msg_word,
		HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP);
	HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_T2H_MSG_METHOD_SET(
		*msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling);
	HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_H2T_MSG_METHOD_SET(
		*msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling);

	/* word1: MSI data for the T2H notification */
	msg_word++;
	*msg_word = setup_params->msi_data;

	/* word2: size of the shared-memory handshake structure */
	msg_word++;
	*msg_word = sizeof(htt_umac_hang_recovery_msg_shmem_t);

	/* word3/word4: shared-memory address, low then high 32 bits */
	msg_word++;
	*msg_word = setup_params->shmem_addr_low;

	msg_word++;
	*msg_word = setup_params->shmem_addr_high;

	pkt = htt_htc_pkt_alloc(htt_handle);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       htt_handle->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(
			htt_handle, pkt,
			HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP,
			htt_logger_bufp);

	/* On send failure both the nbuf and the HTC pkt wrapper are ours */
	if (QDF_IS_STATUS_ERROR(status)) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(htt_handle, pkt);
		return status;
	}

	dp_info("HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP sent");
	return status;
}
5086 #endif
5087