xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 45c28558a520fd0e975b20c0ad534a0aa7f08021)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <htt.h>
21 #include <hal_hw_headers.h>
22 #include <hal_api.h>
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "dp_htt.h"
30 #ifdef WIFI_MONITOR_SUPPORT
31 #include <dp_mon.h>
32 #endif
33 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
34 #include "cdp_txrx_cmn_struct.h"
35 
36 #ifdef FEATURE_PERPKT_INFO
37 #include "dp_ratetable.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef CONFIG_SAWF_DEF_QUEUES
41 #include <dp_sawf_htt.h>
42 #endif
43 
44 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
45 
46 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
47 
48 #define HTT_MSG_BUF_SIZE(msg_bytes) \
49 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
50 
51 #define HTT_PID_BIT_MASK 0x3
52 
53 #define DP_EXT_MSG_LENGTH 2048
54 #define HTT_HEADER_LEN 16
55 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
56 
57 #define HTT_SHIFT_UPPER_TIMESTAMP 32
58 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
59 #define HTT_BKP_STATS_MAX_QUEUE_DEPTH 16
60 
61 struct dp_htt_htc_pkt *
62 htt_htc_pkt_alloc(struct htt_soc *soc)
63 {
64 	struct dp_htt_htc_pkt_union *pkt = NULL;
65 
66 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
67 	if (soc->htt_htc_pkt_freelist) {
68 		pkt = soc->htt_htc_pkt_freelist;
69 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
70 	}
71 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
72 
73 	if (!pkt)
74 		pkt = qdf_mem_malloc(sizeof(*pkt));
75 
76 	if (!pkt)
77 		return NULL;
78 
79 	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
80 
81 	return &pkt->u.pkt; /* not actually a dereference */
82 }
83 
84 qdf_export_symbol(htt_htc_pkt_alloc);
85 
86 void
87 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
88 {
89 	struct dp_htt_htc_pkt_union *u_pkt =
90 		(struct dp_htt_htc_pkt_union *)pkt;
91 
92 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
93 	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
94 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
95 	soc->htt_htc_pkt_freelist = u_pkt;
96 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
97 }
98 
99 qdf_export_symbol(htt_htc_pkt_free);
100 
101 /*
102  * htt_htc_pkt_pool_free() - Free HTC packet pool
103  * @htt_soc:	HTT SOC handle
104  */
105 void
106 htt_htc_pkt_pool_free(struct htt_soc *soc)
107 {
108 	struct dp_htt_htc_pkt_union *pkt, *next;
109 	pkt = soc->htt_htc_pkt_freelist;
110 	while (pkt) {
111 		next = pkt->u.next;
112 		qdf_mem_free(pkt);
113 		pkt = next;
114 	}
115 	soc->htt_htc_pkt_freelist = NULL;
116 }
117 
118 
119 #ifndef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
120 
121 /*
122  * htt_htc_misc_pkt_list_trim() - trim misc list
123  * @htt_soc: HTT SOC handle
124  * @level: max no. of pkts in list
125  */
126 static void
127 htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
128 {
129 	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
130 	int i = 0;
131 	qdf_nbuf_t netbuf;
132 
133 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
134 	pkt = soc->htt_htc_pkt_misclist;
135 	while (pkt) {
136 		next = pkt->u.next;
137 		/* trim the out grown list*/
138 		if (++i > level) {
139 			netbuf =
140 				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
141 			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
142 			qdf_nbuf_free(netbuf);
143 			qdf_mem_free(pkt);
144 			pkt = NULL;
145 			if (prev)
146 				prev->u.next = NULL;
147 		}
148 		prev = pkt;
149 		pkt = next;
150 	}
151 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
152 }
153 
154 /*
155  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
156  * @htt_soc:	HTT SOC handle
157  * @dp_htt_htc_pkt: pkt to be added to list
158  */
159 void
160 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
161 {
162 	struct dp_htt_htc_pkt_union *u_pkt =
163 				(struct dp_htt_htc_pkt_union *)pkt;
164 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
165 							pkt->htc_pkt.Endpoint)
166 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
167 
168 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
169 	if (soc->htt_htc_pkt_misclist) {
170 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
171 		soc->htt_htc_pkt_misclist = u_pkt;
172 	} else {
173 		soc->htt_htc_pkt_misclist = u_pkt;
174 	}
175 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
176 
177 	/* only ce pipe size + tx_queue_depth could possibly be in use
178 	 * free older packets in the misclist
179 	 */
180 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
181 }
182 
183 qdf_export_symbol(htt_htc_misc_pkt_list_add);
184 #endif  /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
185 
186 /*
187  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
188  * @htt_soc:	HTT SOC handle
189  */
190 static void
191 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
192 {
193 	struct dp_htt_htc_pkt_union *pkt, *next;
194 	qdf_nbuf_t netbuf;
195 
196 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
197 	pkt = soc->htt_htc_pkt_misclist;
198 
199 	while (pkt) {
200 		next = pkt->u.next;
201 		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
202 		    HTC_PACKET_MAGIC_COOKIE) {
203 			pkt = next;
204 			soc->stats.skip_count++;
205 			continue;
206 		}
207 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
208 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
209 
210 		soc->stats.htc_pkt_free++;
211 		dp_htt_info("%pK: Pkt free count %d",
212 			    soc->dp_soc, soc->stats.htc_pkt_free);
213 
214 		qdf_nbuf_free(netbuf);
215 		qdf_mem_free(pkt);
216 		pkt = next;
217 	}
218 	soc->htt_htc_pkt_misclist = NULL;
219 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
220 	dp_info("HTC Packets, fail count = %d, skip count = %d",
221 		soc->stats.fail_count, soc->stats.skip_count);
222 }
223 
/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
 * @tgt_mac_addr:	Target MAC
 * @buffer:		Output buffer
 *
 * Return: pointer to the deswizzled address (@buffer on big-endian
 * hosts, @tgt_mac_addr itself otherwise).
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The target->host upload byte-swaps each u_int32_t element of the
	 * message to compensate for the endianness mismatch. For byte-array
	 * fields such as the MAC address that swap scrambles the ordering,
	 * so undo it here word by word: byte i comes from position
	 * (3 - i) within its own 4-byte word (yielding 3,2,1,0,7,6).
	 */
	unsigned int i;

	for (i = 0; i < 6; i++)
		buffer[i] = tgt_mac_addr[(i & ~3u) | (3u - (i & 3u))];
	return buffer;
#else
	/* Host and target endianness match: use the address in place. */
	return tgt_mac_addr;
#endif
}
257 
/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc:	SOC handle (unused)
 * @status:	Completion status (unused; the buffer is freed regardless)
 * @netbuf:	HTT buffer to release
 *
 * Minimal H2T send-done callback: unconditionally frees the netbuf that
 * carried the message. Used for messages needing no further processing.
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
270 
271 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
272 /*
273  * dp_htt_h2t_send_complete() - H2T completion handler
274  * @context:	Opaque context (HTT SOC handle)
275  * @htc_pkt:	HTC packet
276  */
277 static void
278 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
279 {
280 	struct htt_soc *soc =  (struct htt_soc *) context;
281 	struct dp_htt_htc_pkt *htt_pkt;
282 	qdf_nbuf_t netbuf;
283 
284 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
285 
286 	/* process (free or keep) the netbuf that held the message */
287 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
288 	/*
289 	 * adf sendcomplete is required for windows only
290 	 */
291 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
292 	/* free the htt_htc_pkt / HTC_PACKET object */
293 	qdf_nbuf_free(netbuf);
294 	htt_htc_pkt_free(soc, htt_pkt);
295 }
296 
297 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
298 
299 /*
300  *  * dp_htt_h2t_send_complete() - H2T completion handler
301  *   * @context:    Opaque context (HTT SOC handle)
302  *    * @htc_pkt:    HTC packet
303  *     */
304 static void
305 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
306 {
307 	void (*send_complete_part2)(
308 	     void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
309 	struct htt_soc *soc =  (struct htt_soc *) context;
310 	struct dp_htt_htc_pkt *htt_pkt;
311 	qdf_nbuf_t netbuf;
312 
313 	send_complete_part2 = htc_pkt->pPktContext;
314 
315 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
316 
317 	/* process (free or keep) the netbuf that held the message */
318 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
319 	/*
320 	 * adf sendcomplete is required for windows only
321 	*/
322 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
323 	if (send_complete_part2){
324 		send_complete_part2(
325 		    htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
326 	}
327 	/* free the htt_htc_pkt / HTC_PACKET object */
328 	htt_htc_pkt_free(soc, htt_pkt);
329 }
330 
331 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
332 
333 /*
334  * dp_htt_h2t_add_tcl_metadata_ver_v1() - Add tcl_metadata version V1
335  * @htt_soc:	HTT SOC handle
336  * @msg:	Pointer to nbuf
337  *
338  * Return: 0 on success; error code on failure
339  */
340 static int dp_htt_h2t_add_tcl_metadata_ver_v1(struct htt_soc *soc,
341 					      qdf_nbuf_t *msg)
342 {
343 	uint32_t *msg_word;
344 
345 	*msg = qdf_nbuf_alloc(
346 		soc->osdev,
347 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
348 		/* reserve room for the HTC header */
349 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
350 	if (!*msg)
351 		return QDF_STATUS_E_NOMEM;
352 
353 	/*
354 	 * Set the length of the message.
355 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
356 	 * separately during the below call to qdf_nbuf_push_head.
357 	 * The contribution from the HTC header is added separately inside HTC.
358 	 */
359 	if (!qdf_nbuf_put_tail(*msg, HTT_VER_REQ_BYTES)) {
360 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
361 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
362 			  __func__);
363 		return QDF_STATUS_E_FAILURE;
364 	}
365 
366 	/* fill in the message contents */
367 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
368 
369 	/* rewind beyond alignment pad to get to the HTC header reserved area */
370 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
371 
372 	*msg_word = 0;
373 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
374 
375 	return QDF_STATUS_SUCCESS;
376 }
377 
378 #ifdef QCA_DP_TX_FW_METADATA_V2
379 /*
380  * dp_htt_h2t_add_tcl_metadata_ver_v2() - Add tcl_metadata version V2
381  * @htt_soc:	HTT SOC handle
382  * @msg:	Pointer to nbuf
383  *
384  * Return: 0 on success; error code on failure
385  */
386 static int dp_htt_h2t_add_tcl_metadata_ver_v2(struct htt_soc *soc,
387 					      qdf_nbuf_t *msg)
388 {
389 	uint32_t *msg_word;
390 
391 	*msg = qdf_nbuf_alloc(
392 		soc->osdev,
393 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ),
394 		/* reserve room for the HTC header */
395 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
396 	if (!*msg)
397 		return QDF_STATUS_E_NOMEM;
398 
399 	/*
400 	 * Set the length of the message.
401 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
402 	 * separately during the below call to qdf_nbuf_push_head.
403 	 * The contribution from the HTC header is added separately inside HTC.
404 	 */
405 	if (!qdf_nbuf_put_tail(*msg,
406 			       HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ)) {
407 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
408 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
409 			  __func__);
410 		return QDF_STATUS_E_FAILURE;
411 	}
412 
413 	/* fill in the message contents */
414 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
415 
416 	/* rewind beyond alignment pad to get to the HTC header reserved area */
417 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
418 
419 	*msg_word = 0;
420 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
421 
422 	/* word 1 */
423 	msg_word++;
424 	*msg_word = 0;
425 	HTT_OPTION_TLV_TAG_SET(*msg_word, HTT_OPTION_TLV_TAG_TCL_METADATA_VER);
426 	HTT_OPTION_TLV_LENGTH_SET(*msg_word, HTT_TCL_METADATA_VER_SZ);
427 	HTT_OPTION_TLV_TCL_METADATA_VER_SET(*msg_word,
428 					    HTT_OPTION_TLV_TCL_METADATA_V2);
429 
430 	return QDF_STATUS_SUCCESS;
431 }
432 
433 /*
434  * dp_htt_h2t_add_tcl_metadata_ver() - Add tcl_metadata version
435  * @htt_soc:	HTT SOC handle
436  * @msg:	Pointer to nbuf
437  *
438  * Return: 0 on success; error code on failure
439  */
440 static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
441 {
442 	/* Use tcl_metadata_v1 when NSS offload is enabled */
443 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->dp_soc->wlan_cfg_ctx) ||
444 	    soc->dp_soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
445 		return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
446 	else
447 		return dp_htt_h2t_add_tcl_metadata_ver_v2(soc, msg);
448 }
449 #else
/*
 * dp_htt_h2t_add_tcl_metadata_ver() - Add tcl_metadata version
 * @soc:	HTT SOC handle
 * @msg:	Pointer to nbuf
 *
 * TCL metadata V2 support is compiled out (QCA_DP_TX_FW_METADATA_V2
 * undefined), so always build the V1 version-request message.
 *
 * Return: 0 on success; error code on failure
 */
static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
{
	return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
}
454 #endif
455 
456 /*
457  * htt_h2t_ver_req_msg() - Send HTT version request message to target
458  * @htt_soc:	HTT SOC handle
459  *
460  * Return: 0 on success; error code on failure
461  */
462 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
463 {
464 	struct dp_htt_htc_pkt *pkt;
465 	qdf_nbuf_t msg = NULL;
466 	QDF_STATUS status;
467 
468 	status = dp_htt_h2t_add_tcl_metadata_ver(soc, &msg);
469 	if (status != QDF_STATUS_SUCCESS)
470 		return status;
471 
472 	pkt = htt_htc_pkt_alloc(soc);
473 	if (!pkt) {
474 		qdf_nbuf_free(msg);
475 		return QDF_STATUS_E_FAILURE;
476 	}
477 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
478 
479 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
480 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
481 		qdf_nbuf_len(msg), soc->htc_endpoint,
482 		HTC_TX_PACKET_TAG_RTPM_PUT_RC);
483 
484 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
485 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
486 				     NULL);
487 
488 	if (status != QDF_STATUS_SUCCESS) {
489 		qdf_nbuf_free(msg);
490 		htt_htc_pkt_free(soc, pkt);
491 	}
492 
493 	return status;
494 }
495 
496 /*
497  * htt_srng_setup() - Send SRNG setup message to target
498  * @htt_soc:	HTT SOC handle
499  * @mac_id:	MAC Id
500  * @hal_srng:	Opaque HAL SRNG pointer
501  * @hal_ring_type:	SRNG ring type
502  *
503  * Return: 0 on success; error code on failure
504  */
505 int htt_srng_setup(struct htt_soc *soc, int mac_id,
506 		   hal_ring_handle_t hal_ring_hdl,
507 		   int hal_ring_type)
508 {
509 	struct dp_htt_htc_pkt *pkt;
510 	qdf_nbuf_t htt_msg;
511 	uint32_t *msg_word;
512 	struct hal_srng_params srng_params;
513 	qdf_dma_addr_t hp_addr, tp_addr;
514 	uint32_t ring_entry_size =
515 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
516 	int htt_ring_type, htt_ring_id;
517 	uint8_t *htt_logger_bufp;
518 	int target_pdev_id;
519 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
520 	QDF_STATUS status;
521 
522 	/* Sizes should be set in 4-byte words */
523 	ring_entry_size = ring_entry_size >> 2;
524 
525 	htt_msg = qdf_nbuf_alloc(soc->osdev,
526 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
527 		/* reserve room for the HTC header */
528 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
529 	if (!htt_msg) {
530 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
531 		goto fail0;
532 	}
533 
534 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
535 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
536 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
537 
538 	switch (hal_ring_type) {
539 	case RXDMA_BUF:
540 #ifdef QCA_HOST2FW_RXBUF_RING
541 		if (srng_params.ring_id ==
542 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
543 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
544 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
545 			htt_ring_type = HTT_SW_TO_SW_RING;
546 #ifdef IPA_OFFLOAD
547 		} else if (srng_params.ring_id ==
548 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 +
549 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
550 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
551 			htt_ring_type = HTT_SW_TO_SW_RING;
552 #ifdef IPA_WDI3_VLAN_SUPPORT
553 		} else if (srng_params.ring_id ==
554 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF3 +
555 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
556 			htt_ring_id = HTT_HOST3_TO_FW_RXBUF_RING;
557 			htt_ring_type = HTT_SW_TO_SW_RING;
558 #endif
559 #endif
560 #else
561 		if (srng_params.ring_id ==
562 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
563 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
564 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
565 			htt_ring_type = HTT_SW_TO_HW_RING;
566 #endif
567 		} else if (srng_params.ring_id ==
568 #ifdef IPA_OFFLOAD
569 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
570 #else
571 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
572 #endif
573 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
574 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
575 			htt_ring_type = HTT_SW_TO_HW_RING;
576 		} else {
577 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
578 				   "%s: Ring %d currently not supported",
579 				   __func__, srng_params.ring_id);
580 			goto fail1;
581 		}
582 
583 		break;
584 	case RXDMA_MONITOR_BUF:
585 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
586 							 RXDMA_MONITOR_BUF);
587 		htt_ring_type = HTT_SW_TO_HW_RING;
588 		break;
589 	case RXDMA_MONITOR_STATUS:
590 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
591 		htt_ring_type = HTT_SW_TO_HW_RING;
592 		break;
593 	case RXDMA_MONITOR_DST:
594 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
595 							 RXDMA_MONITOR_DST);
596 		htt_ring_type = HTT_HW_TO_SW_RING;
597 		break;
598 	case RXDMA_MONITOR_DESC:
599 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
600 		htt_ring_type = HTT_SW_TO_HW_RING;
601 		break;
602 	case RXDMA_DST:
603 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
604 		htt_ring_type = HTT_HW_TO_SW_RING;
605 		break;
606 	case TX_MONITOR_BUF:
607 		htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
608 		htt_ring_type = HTT_SW_TO_HW_RING;
609 		break;
610 	case TX_MONITOR_DST:
611 		htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
612 		htt_ring_type = HTT_HW_TO_SW_RING;
613 		break;
614 
615 	default:
616 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
617 			"%s: Ring currently not supported", __func__);
618 			goto fail1;
619 	}
620 
621 	dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
622 		hal_ring_type, srng_params.ring_id, htt_ring_id,
623 		(uint64_t)hp_addr,
624 		(uint64_t)tp_addr);
625 	/*
626 	 * Set the length of the message.
627 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
628 	 * separately during the below call to qdf_nbuf_push_head.
629 	 * The contribution from the HTC header is added separately inside HTC.
630 	 */
631 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
632 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
633 			"%s: Failed to expand head for SRING_SETUP msg",
634 			__func__);
635 		return QDF_STATUS_E_FAILURE;
636 	}
637 
638 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
639 
640 	/* rewind beyond alignment pad to get to the HTC header reserved area */
641 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
642 
643 	/* word 0 */
644 	*msg_word = 0;
645 	htt_logger_bufp = (uint8_t *)msg_word;
646 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
647 	target_pdev_id =
648 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
649 
650 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
651 			(htt_ring_type == HTT_HW_TO_SW_RING))
652 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
653 	else
654 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
655 
656 	dp_info("mac_id %d", mac_id);
657 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
658 	/* TODO: Discuss with FW on changing this to unique ID and using
659 	 * htt_ring_type to send the type of ring
660 	 */
661 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
662 
663 	/* word 1 */
664 	msg_word++;
665 	*msg_word = 0;
666 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
667 		srng_params.ring_base_paddr & 0xffffffff);
668 
669 	/* word 2 */
670 	msg_word++;
671 	*msg_word = 0;
672 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
673 		(uint64_t)srng_params.ring_base_paddr >> 32);
674 
675 	/* word 3 */
676 	msg_word++;
677 	*msg_word = 0;
678 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
679 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
680 		(ring_entry_size * srng_params.num_entries));
681 	dp_info("entry_size %d", ring_entry_size);
682 	dp_info("num_entries %d", srng_params.num_entries);
683 	dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
684 	if (htt_ring_type == HTT_SW_TO_HW_RING)
685 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
686 						*msg_word, 1);
687 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
688 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
689 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
690 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
691 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
692 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
693 
694 	/* word 4 */
695 	msg_word++;
696 	*msg_word = 0;
697 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
698 		hp_addr & 0xffffffff);
699 
700 	/* word 5 */
701 	msg_word++;
702 	*msg_word = 0;
703 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
704 		(uint64_t)hp_addr >> 32);
705 
706 	/* word 6 */
707 	msg_word++;
708 	*msg_word = 0;
709 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
710 		tp_addr & 0xffffffff);
711 
712 	/* word 7 */
713 	msg_word++;
714 	*msg_word = 0;
715 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
716 		(uint64_t)tp_addr >> 32);
717 
718 	/* word 8 */
719 	msg_word++;
720 	*msg_word = 0;
721 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
722 		srng_params.msi_addr & 0xffffffff);
723 
724 	/* word 9 */
725 	msg_word++;
726 	*msg_word = 0;
727 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
728 		(uint64_t)(srng_params.msi_addr) >> 32);
729 
730 	/* word 10 */
731 	msg_word++;
732 	*msg_word = 0;
733 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
734 		qdf_cpu_to_le32(srng_params.msi_data));
735 
736 	/* word 11 */
737 	msg_word++;
738 	*msg_word = 0;
739 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
740 		srng_params.intr_batch_cntr_thres_entries *
741 		ring_entry_size);
742 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
743 		srng_params.intr_timer_thres_us >> 3);
744 
745 	/* word 12 */
746 	msg_word++;
747 	*msg_word = 0;
748 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
749 		/* TODO: Setting low threshold to 1/8th of ring size - see
750 		 * if this needs to be configurable
751 		 */
752 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
753 			srng_params.low_threshold);
754 	}
755 	/* "response_required" field should be set if a HTT response message is
756 	 * required after setting up the ring.
757 	 */
758 	pkt = htt_htc_pkt_alloc(soc);
759 	if (!pkt) {
760 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
761 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
762 		goto fail1;
763 	}
764 
765 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
766 
767 	SET_HTC_PACKET_INFO_TX(
768 		&pkt->htc_pkt,
769 		dp_htt_h2t_send_complete_free_netbuf,
770 		qdf_nbuf_data(htt_msg),
771 		qdf_nbuf_len(htt_msg),
772 		soc->htc_endpoint,
773 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
774 
775 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
776 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
777 				     htt_logger_bufp);
778 
779 	if (status != QDF_STATUS_SUCCESS) {
780 		qdf_nbuf_free(htt_msg);
781 		htt_htc_pkt_free(soc, pkt);
782 	}
783 
784 	return status;
785 
786 fail1:
787 	qdf_nbuf_free(htt_msg);
788 fail0:
789 	return QDF_STATUS_E_FAILURE;
790 }
791 
792 qdf_export_symbol(htt_srng_setup);
793 
794 #ifdef QCA_SUPPORT_FULL_MON
795 /**
796  * htt_h2t_full_mon_cfg() - Send full monitor configuration msg to FW
797  *
798  * @htt_soc: HTT Soc handle
799  * @pdev_id: Radio id
800  * @dp_full_mon_config: enabled/disable configuration
801  *
802  * Return: Success when HTT message is sent, error on failure
803  */
804 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
805 			 uint8_t pdev_id,
806 			 enum dp_full_mon_config config)
807 {
808 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
809 	struct dp_htt_htc_pkt *pkt;
810 	qdf_nbuf_t htt_msg;
811 	uint32_t *msg_word;
812 	uint8_t *htt_logger_bufp;
813 
814 	htt_msg = qdf_nbuf_alloc(soc->osdev,
815 				 HTT_MSG_BUF_SIZE(
816 				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
817 				 /* reserve room for the HTC header */
818 				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
819 				 4,
820 				 TRUE);
821 	if (!htt_msg)
822 		return QDF_STATUS_E_FAILURE;
823 
824 	/*
825 	 * Set the length of the message.
826 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
827 	 * separately during the below call to qdf_nbuf_push_head.
828 	 * The contribution from the HTC header is added separately inside HTC.
829 	 */
830 	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
831 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
832 			  "%s: Failed to expand head for RX Ring Cfg msg",
833 			  __func__);
834 		goto fail1;
835 	}
836 
837 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
838 
839 	/* rewind beyond alignment pad to get to the HTC header reserved area */
840 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
841 
842 	/* word 0 */
843 	*msg_word = 0;
844 	htt_logger_bufp = (uint8_t *)msg_word;
845 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
846 	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
847 			*msg_word, DP_SW2HW_MACID(pdev_id));
848 
849 	msg_word++;
850 	*msg_word = 0;
851 	/* word 1 */
852 	if (config == DP_FULL_MON_ENABLE) {
853 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
854 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
855 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
856 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
857 	} else if (config == DP_FULL_MON_DISABLE) {
858 		/* As per MAC team's suggestion, While disabling full monitor
859 		 * mode, Set 'en' bit to true in full monitor mode register.
860 		 */
861 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
862 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
863 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
864 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
865 	}
866 
867 	pkt = htt_htc_pkt_alloc(soc);
868 	if (!pkt) {
869 		qdf_err("HTC packet allocation failed");
870 		goto fail1;
871 	}
872 
873 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
874 
875 	SET_HTC_PACKET_INFO_TX(
876 		&pkt->htc_pkt,
877 		dp_htt_h2t_send_complete_free_netbuf,
878 		qdf_nbuf_data(htt_msg),
879 		qdf_nbuf_len(htt_msg),
880 		soc->htc_endpoint,
881 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
882 
883 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
884 	qdf_debug("config: %d", config);
885 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
886 			    htt_logger_bufp);
887 	return QDF_STATUS_SUCCESS;
888 fail1:
889 	qdf_nbuf_free(htt_msg);
890 	return QDF_STATUS_E_FAILURE;
891 }
892 
893 qdf_export_symbol(htt_h2t_full_mon_cfg);
894 #else
/*
 * htt_h2t_full_mon_cfg() - stub when QCA_SUPPORT_FULL_MON is disabled
 * @htt_soc:	HTT SOC handle (unused)
 * @pdev_id:	Radio id (unused)
 * @config:	enable/disable configuration (unused)
 *
 * Return: always 0; no message is sent to the target.
 */
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	return 0;
}
901 
902 qdf_export_symbol(htt_h2t_full_mon_cfg);
903 #endif
904 
905 #ifdef QCA_UNDECODED_METADATA_SUPPORT
906 static inline void
907 dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
908 			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
909 {
910 	if (htt_tlv_filter->phy_err_filter_valid) {
911 		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_SET
912 			(*msg_word, htt_tlv_filter->fp_phy_err);
913 		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_SRC_SET
914 			(*msg_word, htt_tlv_filter->fp_phy_err_buf_src);
915 		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_DEST_SET
916 			(*msg_word, htt_tlv_filter->fp_phy_err_buf_dest);
917 
918 		/* word 12*/
919 		msg_word++;
920 		*msg_word = 0;
921 		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_SET
922 			(*msg_word, htt_tlv_filter->phy_err_mask);
923 
924 		/* word 13*/
925 		msg_word++;
926 		*msg_word = 0;
927 		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_CONT_SET
928 			(*msg_word, htt_tlv_filter->phy_err_mask_cont);
929 	}
930 }
931 #else
/*
 * dp_mon_rx_enable_phy_errors() - stub when QCA_UNDECODED_METADATA_SUPPORT
 * is disabled; PHY error filter words are left untouched.
 */
static inline void
dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
}
937 #endif
938 
939 /*
940  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
941  * config message to target
942  * @htt_soc:	HTT SOC handle
943  * @pdev_id:	WIN- PDEV Id, MCL- mac id
944  * @hal_srng:	Opaque HAL SRNG pointer
945  * @hal_ring_type:	SRNG ring type
946  * @ring_buf_size:	SRNG buffer size
947  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
948  * Return: 0 on success; error code on failure
949  */
950 int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
951 			hal_ring_handle_t hal_ring_hdl,
952 			int hal_ring_type, int ring_buf_size,
953 			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
954 {
955 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
956 	struct dp_htt_htc_pkt *pkt;
957 	qdf_nbuf_t htt_msg;
958 	uint32_t *msg_word;
959 	uint32_t *msg_word_data;
960 	struct hal_srng_params srng_params;
961 	uint32_t htt_ring_type, htt_ring_id;
962 	uint32_t tlv_filter;
963 	uint8_t *htt_logger_bufp;
964 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
965 	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
966 	int target_pdev_id;
967 	QDF_STATUS status;
968 
969 	htt_msg = qdf_nbuf_alloc(soc->osdev,
970 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
971 	/* reserve room for the HTC header */
972 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
973 	if (!htt_msg) {
974 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
975 		goto fail0;
976 	}
977 
978 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
979 
980 	switch (hal_ring_type) {
981 	case RXDMA_BUF:
982 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
983 		htt_ring_type = HTT_SW_TO_HW_RING;
984 		break;
985 	case RXDMA_MONITOR_BUF:
986 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
987 							 RXDMA_MONITOR_BUF);
988 		htt_ring_type = HTT_SW_TO_HW_RING;
989 		break;
990 	case RXDMA_MONITOR_STATUS:
991 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
992 		htt_ring_type = HTT_SW_TO_HW_RING;
993 		break;
994 	case RXDMA_MONITOR_DST:
995 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
996 							 RXDMA_MONITOR_DST);
997 		htt_ring_type = HTT_HW_TO_SW_RING;
998 		break;
999 	case RXDMA_MONITOR_DESC:
1000 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
1001 		htt_ring_type = HTT_SW_TO_HW_RING;
1002 		break;
1003 	case RXDMA_DST:
1004 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
1005 		htt_ring_type = HTT_HW_TO_SW_RING;
1006 		break;
1007 
1008 	default:
1009 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1010 			"%s: Ring currently not supported", __func__);
1011 		goto fail1;
1012 	}
1013 
1014 	dp_info("ring_type %d ring_id %d htt_ring_id %d",
1015 		hal_ring_type, srng_params.ring_id, htt_ring_id);
1016 
1017 	/*
1018 	 * Set the length of the message.
1019 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1020 	 * separately during the below call to qdf_nbuf_push_head.
1021 	 * The contribution from the HTC header is added separately inside HTC.
1022 	 */
1023 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
1024 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1025 			"%s: Failed to expand head for RX Ring Cfg msg",
1026 			__func__);
1027 		goto fail1; /* failure */
1028 	}
1029 
1030 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1031 
1032 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1033 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1034 
1035 	/* word 0 */
1036 	htt_logger_bufp = (uint8_t *)msg_word;
1037 	*msg_word = 0;
1038 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
1039 
1040 	/* applicable only for post Li */
1041 	dp_rx_mon_enable(soc->dp_soc, msg_word, htt_tlv_filter);
1042 
1043 	/*
1044 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
1045 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
1046 	 */
1047 	target_pdev_id =
1048 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);
1049 
1050 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
1051 			htt_ring_type == HTT_SW_TO_HW_RING ||
1052 			htt_ring_type == HTT_HW_TO_SW_RING)
1053 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
1054 						      target_pdev_id);
1055 
1056 	/* TODO: Discuss with FW on changing this to unique ID and using
1057 	 * htt_ring_type to send the type of ring
1058 	 */
1059 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
1060 
1061 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
1062 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
1063 
1064 	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
1065 						htt_tlv_filter->offset_valid);
1066 
1067 	if (mon_drop_th > 0)
1068 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1069 								   1);
1070 	else
1071 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1072 								   0);
1073 
1074 	/* word 1 */
1075 	msg_word++;
1076 	*msg_word = 0;
1077 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
1078 		ring_buf_size);
1079 
1080 	dp_mon_rx_packet_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
1081 	dp_mon_rx_hdr_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
1082 
1083 	/* word 2 */
1084 	msg_word++;
1085 	*msg_word = 0;
1086 
1087 	if (htt_tlv_filter->enable_fp) {
1088 		/* TYPE: MGMT */
1089 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1090 			FP, MGMT, 0000,
1091 			(htt_tlv_filter->fp_mgmt_filter &
1092 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1093 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1094 			FP, MGMT, 0001,
1095 			(htt_tlv_filter->fp_mgmt_filter &
1096 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1097 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1098 			FP, MGMT, 0010,
1099 			(htt_tlv_filter->fp_mgmt_filter &
1100 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1101 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1102 			FP, MGMT, 0011,
1103 			(htt_tlv_filter->fp_mgmt_filter &
1104 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1105 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1106 			FP, MGMT, 0100,
1107 			(htt_tlv_filter->fp_mgmt_filter &
1108 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1109 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1110 			FP, MGMT, 0101,
1111 			(htt_tlv_filter->fp_mgmt_filter &
1112 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1113 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1114 			FP, MGMT, 0110,
1115 			(htt_tlv_filter->fp_mgmt_filter &
1116 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1117 		/* reserved */
1118 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
1119 			MGMT, 0111,
1120 			(htt_tlv_filter->fp_mgmt_filter &
1121 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1122 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1123 			FP, MGMT, 1000,
1124 			(htt_tlv_filter->fp_mgmt_filter &
1125 			FILTER_MGMT_BEACON) ? 1 : 0);
1126 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1127 			FP, MGMT, 1001,
1128 			(htt_tlv_filter->fp_mgmt_filter &
1129 			FILTER_MGMT_ATIM) ? 1 : 0);
1130 	}
1131 
1132 	if (htt_tlv_filter->enable_md) {
1133 			/* TYPE: MGMT */
1134 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1135 			MD, MGMT, 0000,
1136 			(htt_tlv_filter->md_mgmt_filter &
1137 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1138 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1139 			MD, MGMT, 0001,
1140 			(htt_tlv_filter->md_mgmt_filter &
1141 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1142 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1143 			MD, MGMT, 0010,
1144 			(htt_tlv_filter->md_mgmt_filter &
1145 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1146 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1147 			MD, MGMT, 0011,
1148 			(htt_tlv_filter->md_mgmt_filter &
1149 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1150 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1151 			MD, MGMT, 0100,
1152 			(htt_tlv_filter->md_mgmt_filter &
1153 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1154 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1155 			MD, MGMT, 0101,
1156 			(htt_tlv_filter->md_mgmt_filter &
1157 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1158 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1159 			MD, MGMT, 0110,
1160 			(htt_tlv_filter->md_mgmt_filter &
1161 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1162 		/* reserved */
1163 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1164 			MGMT, 0111,
1165 			(htt_tlv_filter->md_mgmt_filter &
1166 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1167 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1168 			MD, MGMT, 1000,
1169 			(htt_tlv_filter->md_mgmt_filter &
1170 			FILTER_MGMT_BEACON) ? 1 : 0);
1171 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1172 			MD, MGMT, 1001,
1173 			(htt_tlv_filter->md_mgmt_filter &
1174 			FILTER_MGMT_ATIM) ? 1 : 0);
1175 	}
1176 
1177 	if (htt_tlv_filter->enable_mo) {
1178 		/* TYPE: MGMT */
1179 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1180 			MO, MGMT, 0000,
1181 			(htt_tlv_filter->mo_mgmt_filter &
1182 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1183 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1184 			MO, MGMT, 0001,
1185 			(htt_tlv_filter->mo_mgmt_filter &
1186 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1187 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1188 			MO, MGMT, 0010,
1189 			(htt_tlv_filter->mo_mgmt_filter &
1190 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1191 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1192 			MO, MGMT, 0011,
1193 			(htt_tlv_filter->mo_mgmt_filter &
1194 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1195 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1196 			MO, MGMT, 0100,
1197 			(htt_tlv_filter->mo_mgmt_filter &
1198 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1199 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1200 			MO, MGMT, 0101,
1201 			(htt_tlv_filter->mo_mgmt_filter &
1202 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1203 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1204 			MO, MGMT, 0110,
1205 			(htt_tlv_filter->mo_mgmt_filter &
1206 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1207 		/* reserved */
1208 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1209 			MGMT, 0111,
1210 			(htt_tlv_filter->mo_mgmt_filter &
1211 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1212 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1213 			MO, MGMT, 1000,
1214 			(htt_tlv_filter->mo_mgmt_filter &
1215 			FILTER_MGMT_BEACON) ? 1 : 0);
1216 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1217 			MO, MGMT, 1001,
1218 			(htt_tlv_filter->mo_mgmt_filter &
1219 			FILTER_MGMT_ATIM) ? 1 : 0);
1220 	}
1221 
1222 	/* word 3 */
1223 	msg_word++;
1224 	*msg_word = 0;
1225 
1226 	if (htt_tlv_filter->enable_fp) {
1227 		/* TYPE: MGMT */
1228 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1229 			FP, MGMT, 1010,
1230 			(htt_tlv_filter->fp_mgmt_filter &
1231 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1232 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1233 			FP, MGMT, 1011,
1234 			(htt_tlv_filter->fp_mgmt_filter &
1235 			FILTER_MGMT_AUTH) ? 1 : 0);
1236 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1237 			FP, MGMT, 1100,
1238 			(htt_tlv_filter->fp_mgmt_filter &
1239 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1240 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1241 			FP, MGMT, 1101,
1242 			(htt_tlv_filter->fp_mgmt_filter &
1243 			FILTER_MGMT_ACTION) ? 1 : 0);
1244 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1245 			FP, MGMT, 1110,
1246 			(htt_tlv_filter->fp_mgmt_filter &
1247 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1248 		/* reserved*/
1249 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1250 			MGMT, 1111,
1251 			(htt_tlv_filter->fp_mgmt_filter &
1252 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1253 	}
1254 
1255 	if (htt_tlv_filter->enable_md) {
1256 			/* TYPE: MGMT */
1257 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1258 			MD, MGMT, 1010,
1259 			(htt_tlv_filter->md_mgmt_filter &
1260 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1261 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1262 			MD, MGMT, 1011,
1263 			(htt_tlv_filter->md_mgmt_filter &
1264 			FILTER_MGMT_AUTH) ? 1 : 0);
1265 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1266 			MD, MGMT, 1100,
1267 			(htt_tlv_filter->md_mgmt_filter &
1268 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1269 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1270 			MD, MGMT, 1101,
1271 			(htt_tlv_filter->md_mgmt_filter &
1272 			FILTER_MGMT_ACTION) ? 1 : 0);
1273 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1274 			MD, MGMT, 1110,
1275 			(htt_tlv_filter->md_mgmt_filter &
1276 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1277 	}
1278 
1279 	if (htt_tlv_filter->enable_mo) {
1280 		/* TYPE: MGMT */
1281 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1282 			MO, MGMT, 1010,
1283 			(htt_tlv_filter->mo_mgmt_filter &
1284 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1285 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1286 			MO, MGMT, 1011,
1287 			(htt_tlv_filter->mo_mgmt_filter &
1288 			FILTER_MGMT_AUTH) ? 1 : 0);
1289 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1290 			MO, MGMT, 1100,
1291 			(htt_tlv_filter->mo_mgmt_filter &
1292 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1293 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1294 			MO, MGMT, 1101,
1295 			(htt_tlv_filter->mo_mgmt_filter &
1296 			FILTER_MGMT_ACTION) ? 1 : 0);
1297 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1298 			MO, MGMT, 1110,
1299 			(htt_tlv_filter->mo_mgmt_filter &
1300 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1301 		/* reserved*/
1302 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1303 			MGMT, 1111,
1304 			(htt_tlv_filter->mo_mgmt_filter &
1305 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1306 	}
1307 
1308 	/* word 4 */
1309 	msg_word++;
1310 	*msg_word = 0;
1311 
1312 	if (htt_tlv_filter->enable_fp) {
1313 		/* TYPE: CTRL */
1314 		/* reserved */
1315 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1316 			CTRL, 0000,
1317 			(htt_tlv_filter->fp_ctrl_filter &
1318 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1319 		/* reserved */
1320 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1321 			CTRL, 0001,
1322 			(htt_tlv_filter->fp_ctrl_filter &
1323 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1324 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1325 			CTRL, 0010,
1326 			(htt_tlv_filter->fp_ctrl_filter &
1327 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1328 		/* reserved */
1329 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1330 			CTRL, 0011,
1331 			(htt_tlv_filter->fp_ctrl_filter &
1332 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1333 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1334 			CTRL, 0100,
1335 			(htt_tlv_filter->fp_ctrl_filter &
1336 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1337 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1338 			CTRL, 0101,
1339 			(htt_tlv_filter->fp_ctrl_filter &
1340 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1341 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1342 			CTRL, 0110,
1343 			(htt_tlv_filter->fp_ctrl_filter &
1344 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1345 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1346 			CTRL, 0111,
1347 			(htt_tlv_filter->fp_ctrl_filter &
1348 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1349 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1350 			CTRL, 1000,
1351 			(htt_tlv_filter->fp_ctrl_filter &
1352 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1353 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1354 			CTRL, 1001,
1355 			(htt_tlv_filter->fp_ctrl_filter &
1356 			FILTER_CTRL_BA) ? 1 : 0);
1357 	}
1358 
1359 	if (htt_tlv_filter->enable_md) {
1360 		/* TYPE: CTRL */
1361 		/* reserved */
1362 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1363 			CTRL, 0000,
1364 			(htt_tlv_filter->md_ctrl_filter &
1365 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1366 		/* reserved */
1367 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1368 			CTRL, 0001,
1369 			(htt_tlv_filter->md_ctrl_filter &
1370 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1371 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1372 			CTRL, 0010,
1373 			(htt_tlv_filter->md_ctrl_filter &
1374 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1375 		/* reserved */
1376 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1377 			CTRL, 0011,
1378 			(htt_tlv_filter->md_ctrl_filter &
1379 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1380 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1381 			CTRL, 0100,
1382 			(htt_tlv_filter->md_ctrl_filter &
1383 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1384 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1385 			CTRL, 0101,
1386 			(htt_tlv_filter->md_ctrl_filter &
1387 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1388 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1389 			CTRL, 0110,
1390 			(htt_tlv_filter->md_ctrl_filter &
1391 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1392 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1393 			CTRL, 0111,
1394 			(htt_tlv_filter->md_ctrl_filter &
1395 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1396 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1397 			CTRL, 1000,
1398 			(htt_tlv_filter->md_ctrl_filter &
1399 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1400 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1401 			CTRL, 1001,
1402 			(htt_tlv_filter->md_ctrl_filter &
1403 			FILTER_CTRL_BA) ? 1 : 0);
1404 	}
1405 
1406 	if (htt_tlv_filter->enable_mo) {
1407 		/* TYPE: CTRL */
1408 		/* reserved */
1409 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1410 			CTRL, 0000,
1411 			(htt_tlv_filter->mo_ctrl_filter &
1412 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1413 		/* reserved */
1414 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1415 			CTRL, 0001,
1416 			(htt_tlv_filter->mo_ctrl_filter &
1417 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1418 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1419 			CTRL, 0010,
1420 			(htt_tlv_filter->mo_ctrl_filter &
1421 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1422 		/* reserved */
1423 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1424 			CTRL, 0011,
1425 			(htt_tlv_filter->mo_ctrl_filter &
1426 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1427 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1428 			CTRL, 0100,
1429 			(htt_tlv_filter->mo_ctrl_filter &
1430 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1431 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1432 			CTRL, 0101,
1433 			(htt_tlv_filter->mo_ctrl_filter &
1434 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1435 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1436 			CTRL, 0110,
1437 			(htt_tlv_filter->mo_ctrl_filter &
1438 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1439 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1440 			CTRL, 0111,
1441 			(htt_tlv_filter->mo_ctrl_filter &
1442 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1443 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1444 			CTRL, 1000,
1445 			(htt_tlv_filter->mo_ctrl_filter &
1446 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1447 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1448 			CTRL, 1001,
1449 			(htt_tlv_filter->mo_ctrl_filter &
1450 			FILTER_CTRL_BA) ? 1 : 0);
1451 	}
1452 
1453 	/* word 5 */
1454 	msg_word++;
1455 	*msg_word = 0;
1456 	if (htt_tlv_filter->enable_fp) {
1457 		/* TYPE: CTRL */
1458 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1459 			CTRL, 1010,
1460 			(htt_tlv_filter->fp_ctrl_filter &
1461 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1462 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1463 			CTRL, 1011,
1464 			(htt_tlv_filter->fp_ctrl_filter &
1465 			FILTER_CTRL_RTS) ? 1 : 0);
1466 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1467 			CTRL, 1100,
1468 			(htt_tlv_filter->fp_ctrl_filter &
1469 			FILTER_CTRL_CTS) ? 1 : 0);
1470 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1471 			CTRL, 1101,
1472 			(htt_tlv_filter->fp_ctrl_filter &
1473 			FILTER_CTRL_ACK) ? 1 : 0);
1474 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1475 			CTRL, 1110,
1476 			(htt_tlv_filter->fp_ctrl_filter &
1477 			FILTER_CTRL_CFEND) ? 1 : 0);
1478 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1479 			CTRL, 1111,
1480 			(htt_tlv_filter->fp_ctrl_filter &
1481 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1482 		/* TYPE: DATA */
1483 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1484 			DATA, MCAST,
1485 			(htt_tlv_filter->fp_data_filter &
1486 			FILTER_DATA_MCAST) ? 1 : 0);
1487 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1488 			DATA, UCAST,
1489 			(htt_tlv_filter->fp_data_filter &
1490 			FILTER_DATA_UCAST) ? 1 : 0);
1491 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1492 			DATA, NULL,
1493 			(htt_tlv_filter->fp_data_filter &
1494 			FILTER_DATA_NULL) ? 1 : 0);
1495 	}
1496 
1497 	if (htt_tlv_filter->enable_md) {
1498 		/* TYPE: CTRL */
1499 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1500 			CTRL, 1010,
1501 			(htt_tlv_filter->md_ctrl_filter &
1502 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1503 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1504 			CTRL, 1011,
1505 			(htt_tlv_filter->md_ctrl_filter &
1506 			FILTER_CTRL_RTS) ? 1 : 0);
1507 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1508 			CTRL, 1100,
1509 			(htt_tlv_filter->md_ctrl_filter &
1510 			FILTER_CTRL_CTS) ? 1 : 0);
1511 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1512 			CTRL, 1101,
1513 			(htt_tlv_filter->md_ctrl_filter &
1514 			FILTER_CTRL_ACK) ? 1 : 0);
1515 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1516 			CTRL, 1110,
1517 			(htt_tlv_filter->md_ctrl_filter &
1518 			FILTER_CTRL_CFEND) ? 1 : 0);
1519 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1520 			CTRL, 1111,
1521 			(htt_tlv_filter->md_ctrl_filter &
1522 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1523 		/* TYPE: DATA */
1524 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1525 			DATA, MCAST,
1526 			(htt_tlv_filter->md_data_filter &
1527 			FILTER_DATA_MCAST) ? 1 : 0);
1528 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1529 			DATA, UCAST,
1530 			(htt_tlv_filter->md_data_filter &
1531 			FILTER_DATA_UCAST) ? 1 : 0);
1532 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1533 			DATA, NULL,
1534 			(htt_tlv_filter->md_data_filter &
1535 			FILTER_DATA_NULL) ? 1 : 0);
1536 	}
1537 
1538 	if (htt_tlv_filter->enable_mo) {
1539 		/* TYPE: CTRL */
1540 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1541 			CTRL, 1010,
1542 			(htt_tlv_filter->mo_ctrl_filter &
1543 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1544 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1545 			CTRL, 1011,
1546 			(htt_tlv_filter->mo_ctrl_filter &
1547 			FILTER_CTRL_RTS) ? 1 : 0);
1548 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1549 			CTRL, 1100,
1550 			(htt_tlv_filter->mo_ctrl_filter &
1551 			FILTER_CTRL_CTS) ? 1 : 0);
1552 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1553 			CTRL, 1101,
1554 			(htt_tlv_filter->mo_ctrl_filter &
1555 			FILTER_CTRL_ACK) ? 1 : 0);
1556 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1557 			CTRL, 1110,
1558 			(htt_tlv_filter->mo_ctrl_filter &
1559 			FILTER_CTRL_CFEND) ? 1 : 0);
1560 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1561 			CTRL, 1111,
1562 			(htt_tlv_filter->mo_ctrl_filter &
1563 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1564 		/* TYPE: DATA */
1565 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1566 			DATA, MCAST,
1567 			(htt_tlv_filter->mo_data_filter &
1568 			FILTER_DATA_MCAST) ? 1 : 0);
1569 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1570 			DATA, UCAST,
1571 			(htt_tlv_filter->mo_data_filter &
1572 			FILTER_DATA_UCAST) ? 1 : 0);
1573 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1574 			DATA, NULL,
1575 			(htt_tlv_filter->mo_data_filter &
1576 			FILTER_DATA_NULL) ? 1 : 0);
1577 	}
1578 
1579 	/* word 6 */
1580 	msg_word++;
1581 	*msg_word = 0;
1582 	tlv_filter = 0;
1583 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1584 		htt_tlv_filter->mpdu_start);
1585 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1586 		htt_tlv_filter->msdu_start);
1587 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1588 		htt_tlv_filter->packet);
1589 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1590 		htt_tlv_filter->msdu_end);
1591 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1592 		htt_tlv_filter->mpdu_end);
1593 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1594 		htt_tlv_filter->packet_header);
1595 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1596 		htt_tlv_filter->attention);
1597 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1598 		htt_tlv_filter->ppdu_start);
1599 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1600 		htt_tlv_filter->ppdu_end);
1601 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1602 		htt_tlv_filter->ppdu_end_user_stats);
1603 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1604 		PPDU_END_USER_STATS_EXT,
1605 		htt_tlv_filter->ppdu_end_user_stats_ext);
1606 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1607 		htt_tlv_filter->ppdu_end_status_done);
1608 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START_USER_INFO,
1609 		htt_tlv_filter->ppdu_start_user_info);
1610 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1611 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1612 		 htt_tlv_filter->header_per_msdu);
1613 
1614 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1615 
1616 	msg_word_data = (uint32_t *)qdf_nbuf_data(htt_msg);
1617 	dp_info("config_data: [0x%x][0x%x][0x%x][0x%x][0x%x][0x%x][0x%x]",
1618 		msg_word_data[0], msg_word_data[1], msg_word_data[2],
1619 		msg_word_data[3], msg_word_data[4], msg_word_data[5],
1620 		msg_word_data[6]);
1621 
1622 	/* word 7 */
1623 	msg_word++;
1624 	*msg_word = 0;
1625 	if (htt_tlv_filter->offset_valid) {
1626 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1627 					htt_tlv_filter->rx_packet_offset);
1628 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1629 					htt_tlv_filter->rx_header_offset);
1630 
1631 		/* word 8 */
1632 		msg_word++;
1633 		*msg_word = 0;
1634 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1635 					htt_tlv_filter->rx_mpdu_end_offset);
1636 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1637 					htt_tlv_filter->rx_mpdu_start_offset);
1638 
1639 		/* word 9 */
1640 		msg_word++;
1641 		*msg_word = 0;
1642 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1643 					htt_tlv_filter->rx_msdu_end_offset);
1644 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1645 					htt_tlv_filter->rx_msdu_start_offset);
1646 
1647 		/* word 10 */
1648 		msg_word++;
1649 		*msg_word = 0;
1650 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1651 					htt_tlv_filter->rx_attn_offset);
1652 
1653 		/* word 11 */
1654 		msg_word++;
1655 		*msg_word = 0;
1656 	} else {
1657 		/* word 11 */
1658 		msg_word += 4;
1659 		*msg_word = 0;
1660 	}
1661 
1662 	soc->dp_soc->arch_ops.dp_rx_word_mask_subscribe(
1663 						soc->dp_soc,
1664 						msg_word,
1665 						(void *)htt_tlv_filter);
1666 
1667 	if (mon_drop_th > 0)
1668 		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1669 								mon_drop_th);
1670 	dp_mon_rx_enable_mpdu_logging(soc->dp_soc, msg_word, htt_tlv_filter);
1671 
1672 	dp_mon_rx_enable_phy_errors(msg_word, htt_tlv_filter);
1673 
1674 	/* word 14*/
1675 	msg_word += 3;
1676 	/* word 15*/
1677 	msg_word++;
1678 
1679 #ifdef FW_SUPPORT_NOT_YET
1680 	/* word 17*/
1681 	msg_word += 3;
1682 	*msg_word = 0;
1683 
1684 	dp_mon_rx_enable_fpmo(soc->dp_soc, msg_word, htt_tlv_filter);
1685 #endif/* FW_SUPPORT_NOT_YET */
1686 
1687 	/* "response_required" field should be set if a HTT response message is
1688 	 * required after setting up the ring.
1689 	 */
1690 	pkt = htt_htc_pkt_alloc(soc);
1691 	if (!pkt) {
1692 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
1693 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
1694 		goto fail1;
1695 	}
1696 
1697 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1698 
1699 	SET_HTC_PACKET_INFO_TX(
1700 		&pkt->htc_pkt,
1701 		dp_htt_h2t_send_complete_free_netbuf,
1702 		qdf_nbuf_data(htt_msg),
1703 		qdf_nbuf_len(htt_msg),
1704 		soc->htc_endpoint,
1705 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1706 
1707 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1708 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
1709 				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1710 				     htt_logger_bufp);
1711 
1712 	if (status != QDF_STATUS_SUCCESS) {
1713 		qdf_nbuf_free(htt_msg);
1714 		htt_htc_pkt_free(soc, pkt);
1715 	}
1716 
1717 	return status;
1718 
1719 fail1:
1720 	qdf_nbuf_free(htt_msg);
1721 fail0:
1722 	return QDF_STATUS_E_FAILURE;
1723 }
1724 
1725 qdf_export_symbol(htt_h2t_rx_ring_cfg);
1726 
1727 #if defined(HTT_STATS_ENABLE)
1728 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1729 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1730 
1731 {
1732 	uint32_t pdev_id;
1733 	uint32_t *msg_word = NULL;
1734 	uint32_t msg_remain_len = 0;
1735 
1736 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1737 
1738 	/*COOKIE MSB*/
1739 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1740 
1741 	/* stats message length + 16 size of HTT header*/
1742 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1743 				(uint32_t)DP_EXT_MSG_LENGTH);
1744 
1745 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1746 			msg_word,  msg_remain_len,
1747 			WDI_NO_VAL, pdev_id);
1748 
1749 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1750 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1751 	}
1752 	/* Need to be freed here as WDI handler will
1753 	 * make a copy of pkt to send data to application
1754 	 */
1755 	qdf_nbuf_free(htt_msg);
1756 	return QDF_STATUS_SUCCESS;
1757 }
1758 #else
/* Stub when HTT_STATS_ENABLE is not defined: nothing is forwarded to WDI
 * and @htt_msg is not freed here (caller retains ownership).
 */
static inline QDF_STATUS
dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	return QDF_STATUS_E_NOSUPPORT;
}
1765 #endif
1766 
1767 #ifdef HTT_STATS_DEBUGFS_SUPPORT
/**
 * dp_htt_stats_dbgfs_send_msg() - Function to send htt data to upper layer.
 * @pdev: dp pdev handle
 * @msg_word: HTT msg
 * @msg_len: Length of HTT msg sent
 *
 * Return: none
 */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
	struct htt_dbgfs_cfg dbgfs_cfg;
	int done = 0;

	/* send 5th word of HTT msg to upper layer */
	dbgfs_cfg.msg_word = (msg_word + 4);
	dbgfs_cfg.m = pdev->dbgfs_cfg->m;

	/* stats message length + HTT header size, capped at one ext msg */
	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);

	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
							     (msg_len - HTT_HEADER_LEN));

	/* Get TLV Done bit from 4th msg word */
	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
	if (done) {
		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
			dp_htt_err("%pK: Failed to set event for debugfs htt stats"
				   , pdev->soc);
	}
}
1801 #else
/* Stub when HTT_STATS_DEBUGFS_SUPPORT is not defined: debugfs delivery of
 * HTT stats is a no-op.
 */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
}
1807 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
1808 
1809 #ifdef WLAN_SYSFS_DP_STATS
1810 /* dp_htt_stats_sysfs_update_config() - Function to send htt data to upper layer.
1811  * @pdev: dp pdev handle
1812  *
1813  * This function sets the process id and printing mode within the sysfs config
1814  * struct. which enables DP_PRINT statements within this process to write to the
1815  * console buffer provided by the user space.
1816  *
1817  * Return: None
1818  */
1819 static inline void
1820 dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
1821 {
1822 	struct dp_soc *soc = pdev->soc;
1823 
1824 	if (!soc) {
1825 		dp_htt_err("soc is null");
1826 		return;
1827 	}
1828 
1829 	if (!soc->sysfs_config) {
1830 		dp_htt_err("soc->sysfs_config is NULL");
1831 		return;
1832 	}
1833 
1834 	/* set sysfs config parameters */
1835 	soc->sysfs_config->process_id = qdf_get_current_pid();
1836 	soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
1837 }
1838 
1839 /*
1840  * dp_htt_stats_sysfs_set_event() - Set sysfs stats event.
1841  * @soc: soc handle.
1842  * @msg_word: Pointer to htt msg word.
1843  *
1844  * @return: void
1845  */
1846 static inline void
1847 dp_htt_stats_sysfs_set_event(struct dp_soc *soc, uint32_t *msg_word)
1848 {
1849 	int done = 0;
1850 
1851 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1852 	if (done) {
1853 		if (qdf_event_set(&soc->sysfs_config->sysfs_txrx_fw_request_done))
1854 			dp_htt_err("%pK:event compl Fail to set event ",
1855 				   soc);
1856 	}
1857 }
1858 #else /* WLAN_SYSFS_DP_STATS */
/* Stub when WLAN_SYSFS_DP_STATS is not defined: no sysfs config to update */
static inline void
dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
{
}
1863 
/* Stub when WLAN_SYSFS_DP_STATS is not defined: no sysfs event to signal */
static inline void
dp_htt_stats_sysfs_set_event(struct dp_soc *dp_soc, uint32_t *msg_word)
{
}
1868 #endif /* WLAN_SYSFS_DP_STATS */
1869 
/* dp_htt_set_pdev_obss_stats() - Function to set pdev obss stats.
 * @pdev: dp pdev handle
 * @tag_type: HTT TLV tag type
 * @tag_buf: TLV buffer pointer
 *
 * Copies the OBSS-PD stats TLV payload into the pdev stats area and signals
 * the fw_obss_stats_event so the waiter requesting these stats can proceed.
 * On a tag mismatch the event is NOT set and the waiter is expected to
 * time out.
 *
 * Return: None
 */
static inline void
dp_htt_set_pdev_obss_stats(struct dp_pdev *pdev, uint32_t tag_type,
			   uint32_t *tag_buf)
{
	if (tag_type != HTT_STATS_PDEV_OBSS_PD_TAG) {
		dp_err("Tag mismatch");
		return;
	}
	/* Copy the fixed-size OBSS-PD TLV payload into pdev stats */
	qdf_mem_copy(&pdev->stats.htt_tx_pdev_stats.obss_pd_stats_tlv,
		     tag_buf, sizeof(struct cdp_pdev_obss_pd_stats_tlv));
	qdf_event_set(&pdev->fw_obss_stats_event);
}
1889 
1890 /**
1891  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1892  * @htt_stats: htt stats info
1893  *
1894  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1895  * contains sub messages which are identified by a TLV header.
1896  * In this function we will process the stream of T2H messages and read all the
1897  * TLV contained in the message.
1898  *
1899  * THe following cases have been taken care of
1900  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1901  *		In this case the buffer will contain multiple tlvs.
1902  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1903  *		Only one tlv will be contained in the HTT message and this tag
1904  *		will extend onto the next buffer.
1905  * Case 3: When the buffer is the continuation of the previous message
1906  * Case 4: tlv length is 0. which will indicate the end of message
1907  *
1908  * return: void
1909  */
1910 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
1911 					struct dp_soc *soc)
1912 {
1913 	htt_tlv_tag_t tlv_type = 0xff;
1914 	qdf_nbuf_t htt_msg = NULL;
1915 	uint32_t *msg_word;
1916 	uint8_t *tlv_buf_head = NULL;
1917 	uint8_t *tlv_buf_tail = NULL;
1918 	uint32_t msg_remain_len = 0;
1919 	uint32_t tlv_remain_len = 0;
1920 	uint32_t *tlv_start;
1921 	int cookie_val = 0;
1922 	int cookie_msb = 0;
1923 	int pdev_id;
1924 	bool copy_stats = false;
1925 	struct dp_pdev *pdev;
1926 
1927 	/* Process node in the HTT message queue */
1928 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1929 		!= NULL) {
1930 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1931 		cookie_val = *(msg_word + 1);
1932 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
1933 					*(msg_word +
1934 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
1935 
1936 		if (cookie_val) {
1937 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
1938 					== QDF_STATUS_SUCCESS) {
1939 				continue;
1940 			}
1941 		}
1942 
1943 		cookie_msb = *(msg_word + 2);
1944 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1945 		pdev = soc->pdev_list[pdev_id];
1946 
1947 		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
1948 			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
1949 						    htt_stats->msg_len);
1950 			qdf_nbuf_free(htt_msg);
1951 			continue;
1952 		}
1953 
1954 		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
1955 			dp_htt_stats_sysfs_update_config(pdev);
1956 
1957 		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
1958 			copy_stats = true;
1959 
1960 		/* read 5th word */
1961 		msg_word = msg_word + 4;
1962 		msg_remain_len = qdf_min(htt_stats->msg_len,
1963 				(uint32_t) DP_EXT_MSG_LENGTH);
1964 		/* Keep processing the node till node length is 0 */
1965 		while (msg_remain_len) {
1966 			/*
1967 			 * if message is not a continuation of previous message
1968 			 * read the tlv type and tlv length
1969 			 */
1970 			if (!tlv_buf_head) {
1971 				tlv_type = HTT_STATS_TLV_TAG_GET(
1972 						*msg_word);
1973 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
1974 						*msg_word);
1975 			}
1976 
1977 			if (tlv_remain_len == 0) {
1978 				msg_remain_len = 0;
1979 
1980 				if (tlv_buf_head) {
1981 					qdf_mem_free(tlv_buf_head);
1982 					tlv_buf_head = NULL;
1983 					tlv_buf_tail = NULL;
1984 				}
1985 
1986 				goto error;
1987 			}
1988 
1989 			if (!tlv_buf_head)
1990 				tlv_remain_len += HTT_TLV_HDR_LEN;
1991 
1992 			if ((tlv_remain_len <= msg_remain_len)) {
1993 				/* Case 3 */
1994 				if (tlv_buf_head) {
1995 					qdf_mem_copy(tlv_buf_tail,
1996 							(uint8_t *)msg_word,
1997 							tlv_remain_len);
1998 					tlv_start = (uint32_t *)tlv_buf_head;
1999 				} else {
2000 					/* Case 1 */
2001 					tlv_start = msg_word;
2002 				}
2003 
2004 				if (copy_stats)
2005 					dp_htt_stats_copy_tag(pdev,
2006 							      tlv_type,
2007 							      tlv_start);
2008 				else
2009 					dp_htt_stats_print_tag(pdev,
2010 							       tlv_type,
2011 							       tlv_start);
2012 
2013 				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
2014 				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
2015 					dp_peer_update_inactive_time(pdev,
2016 								     tlv_type,
2017 								     tlv_start);
2018 
2019 				if (cookie_msb & DBG_STATS_COOKIE_HTT_OBSS)
2020 					dp_htt_set_pdev_obss_stats(pdev,
2021 								   tlv_type,
2022 								   tlv_start);
2023 
2024 				msg_remain_len -= tlv_remain_len;
2025 
2026 				msg_word = (uint32_t *)
2027 					(((uint8_t *)msg_word) +
2028 					tlv_remain_len);
2029 
2030 				tlv_remain_len = 0;
2031 
2032 				if (tlv_buf_head) {
2033 					qdf_mem_free(tlv_buf_head);
2034 					tlv_buf_head = NULL;
2035 					tlv_buf_tail = NULL;
2036 				}
2037 
2038 			} else { /* tlv_remain_len > msg_remain_len */
2039 				/* Case 2 & 3 */
2040 				if (!tlv_buf_head) {
2041 					tlv_buf_head = qdf_mem_malloc(
2042 							tlv_remain_len);
2043 
2044 					if (!tlv_buf_head) {
2045 						QDF_TRACE(QDF_MODULE_ID_TXRX,
2046 								QDF_TRACE_LEVEL_ERROR,
2047 								"Alloc failed");
2048 						goto error;
2049 					}
2050 
2051 					tlv_buf_tail = tlv_buf_head;
2052 				}
2053 
2054 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
2055 						msg_remain_len);
2056 				tlv_remain_len -= msg_remain_len;
2057 				tlv_buf_tail += msg_remain_len;
2058 			}
2059 		}
2060 
2061 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
2062 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
2063 		}
2064 
2065 		/* indicate event completion in case the event is done */
2066 		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
2067 			dp_htt_stats_sysfs_set_event(soc, msg_word);
2068 
2069 		qdf_nbuf_free(htt_msg);
2070 	}
2071 	return;
2072 
2073 error:
2074 	qdf_nbuf_free(htt_msg);
2075 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
2076 			!= NULL)
2077 		qdf_nbuf_free(htt_msg);
2078 }
2079 
2080 void htt_t2h_stats_handler(void *context)
2081 {
2082 	struct dp_soc *soc = (struct dp_soc *)context;
2083 	struct htt_stats_context htt_stats;
2084 	uint32_t *msg_word;
2085 	qdf_nbuf_t htt_msg = NULL;
2086 	uint8_t done;
2087 	uint32_t rem_stats;
2088 
2089 	if (!soc) {
2090 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2091 			  "soc is NULL");
2092 		return;
2093 	}
2094 
2095 	if (!qdf_atomic_read(&soc->cmn_init_done)) {
2096 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2097 			  "soc: 0x%pK, init_done: %d", soc,
2098 			  qdf_atomic_read(&soc->cmn_init_done));
2099 		return;
2100 	}
2101 
2102 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
2103 	qdf_nbuf_queue_init(&htt_stats.msg);
2104 
2105 	/* pull one completed stats from soc->htt_stats_msg and process */
2106 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2107 	if (!soc->htt_stats.num_stats) {
2108 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
2109 		return;
2110 	}
2111 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
2112 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
2113 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
2114 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2115 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
2116 		/*
2117 		 * Done bit signifies that this is the last T2H buffer in the
2118 		 * stream of HTT EXT STATS message
2119 		 */
2120 		if (done)
2121 			break;
2122 	}
2123 	rem_stats = --soc->htt_stats.num_stats;
2124 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2125 
2126 	/* If there are more stats to process, schedule stats work again.
2127 	 * Scheduling prior to processing ht_stats to queue with early
2128 	 * index
2129 	 */
2130 	if (rem_stats)
2131 		qdf_sched_work(0, &soc->htt_stats.work);
2132 
2133 	dp_process_htt_stat_msg(&htt_stats, soc);
2134 }
2135 
2136 /**
2137  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2138  * @soc: DP SOC handle
2139  * @htt_t2h_msg: HTT message nbuf
2140  *
2141  * return:void
2142  */
2143 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2144 					    qdf_nbuf_t htt_t2h_msg)
2145 {
2146 	uint8_t done;
2147 	qdf_nbuf_t msg_copy;
2148 	uint32_t *msg_word;
2149 
2150 	msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
2151 	msg_word = msg_word + 3;
2152 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2153 
2154 	/*
2155 	 * HTT EXT stats response comes as stream of TLVs which span over
2156 	 * multiple T2H messages.
2157 	 * The first message will carry length of the response.
2158 	 * For rest of the messages length will be zero.
2159 	 *
2160 	 * Clone the T2H message buffer and store it in a list to process
2161 	 * it later.
2162 	 *
2163 	 * The original T2H message buffers gets freed in the T2H HTT event
2164 	 * handler
2165 	 */
2166 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2167 
2168 	if (!msg_copy) {
2169 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2170 			  "T2H message clone failed for HTT EXT STATS");
2171 		goto error;
2172 	}
2173 
2174 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2175 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2176 	/*
2177 	 * Done bit signifies that this is the last T2H buffer in the stream of
2178 	 * HTT EXT STATS message
2179 	 */
2180 	if (done) {
2181 		soc->htt_stats.num_stats++;
2182 		qdf_sched_work(0, &soc->htt_stats.work);
2183 	}
2184 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2185 
2186 	return;
2187 
2188 error:
2189 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2190 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2191 			!= NULL) {
2192 		qdf_nbuf_free(msg_copy);
2193 	}
2194 	soc->htt_stats.num_stats = 0;
2195 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2196 	return;
2197 }
2198 
2199 /*
2200  * htt_soc_attach_target() - SOC level HTT setup
2201  * @htt_soc:	HTT SOC handle
2202  *
2203  * Return: 0 on success; error code on failure
2204  */
2205 int htt_soc_attach_target(struct htt_soc *htt_soc)
2206 {
2207 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
2208 
2209 	return htt_h2t_ver_req_msg(soc);
2210 }
2211 
/* htt_set_htc_handle() - store the HTC handle in the HTT SOC context */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}
2216 
/* htt_get_htc_handle() - return the HTC handle held by the HTT SOC context */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
2221 
2222 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
2223 {
2224 	int i;
2225 	int j;
2226 	int umac_alloc_size = HTT_SW_UMAC_RING_IDX_MAX *
2227 			      sizeof(struct bp_handler);
2228 	int lmac_alloc_size = HTT_SW_LMAC_RING_IDX_MAX *
2229 			      sizeof(struct bp_handler);
2230 	struct htt_soc *htt_soc = NULL;
2231 
2232 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
2233 	if (!htt_soc) {
2234 		dp_err("HTT attach failed");
2235 		return NULL;
2236 	}
2237 
2238 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2239 		htt_soc->pdevid_tt[i].umac_path =
2240 			qdf_mem_malloc(umac_alloc_size);
2241 		if (!htt_soc->pdevid_tt[i].umac_path)
2242 			break;
2243 		for (j = 0; j < HTT_SW_UMAC_RING_IDX_MAX; j++)
2244 			htt_soc->pdevid_tt[i].umac_path[j].bp_start_tt = -1;
2245 		htt_soc->pdevid_tt[i].lmac_path =
2246 			qdf_mem_malloc(lmac_alloc_size);
2247 		if (!htt_soc->pdevid_tt[i].lmac_path) {
2248 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_path);
2249 			break;
2250 		}
2251 		for (j = 0; j < HTT_SW_LMAC_RING_IDX_MAX ; j++)
2252 			htt_soc->pdevid_tt[i].lmac_path[j].bp_start_tt = -1;
2253 	}
2254 
2255 	if (i != MAX_PDEV_CNT) {
2256 		for (j = 0; j < i; j++) {
2257 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_path);
2258 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_path);
2259 		}
2260 		qdf_mem_free(htt_soc);
2261 		return NULL;
2262 	}
2263 
2264 	htt_soc->dp_soc = soc;
2265 	htt_soc->htc_soc = htc_handle;
2266 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
2267 
2268 	return htt_soc;
2269 }
2270 
2271 #if defined(WDI_EVENT_ENABLE) && \
2272 	!defined(REMOVE_PKT_LOG)
2273 /*
2274  * dp_pktlog_msg_handler() - Pktlog msg handler
2275  * @htt_soc:	 HTT SOC handle
2276  * @msg_word:    Pointer to payload
2277  *
2278  * Return: None
2279  */
2280 static void
2281 dp_pktlog_msg_handler(struct htt_soc *soc,
2282 		      uint32_t *msg_word)
2283 {
2284 	uint8_t pdev_id;
2285 	uint8_t target_pdev_id;
2286 	uint32_t *pl_hdr;
2287 
2288 	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2289 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2290 							 target_pdev_id);
2291 	pl_hdr = (msg_word + 1);
2292 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2293 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2294 		pdev_id);
2295 }
2296 #else
/* Stub: pktlog disabled (WDI_EVENT_ENABLE unset or REMOVE_PKT_LOG set) */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
2302 #endif
2303 
2304 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
2305 /*
2306  * dp_vdev_txrx_hw_stats_handler - Handle vdev stats received from FW
2307  * @soc - htt soc handle
2308  * @ msg_word - buffer containing stats
2309  *
2310  * Return: void
2311  */
2312 static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
2313 					  uint32_t *msg_word)
2314 {
2315 	struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
2316 	uint8_t pdev_id;
2317 	uint8_t vdev_id;
2318 	uint8_t target_pdev_id;
2319 	uint16_t payload_size;
2320 	struct dp_pdev *pdev;
2321 	struct dp_vdev *vdev;
2322 	uint8_t *tlv_buf;
2323 	uint32_t *tlv_buf_temp;
2324 	uint32_t *tag_buf;
2325 	htt_tlv_tag_t tlv_type;
2326 	uint16_t tlv_length;
2327 	uint64_t pkt_count = 0;
2328 	uint64_t byte_count = 0;
2329 	uint64_t soc_drop_cnt = 0;
2330 	struct cdp_pkt_info tx_comp = { 0 };
2331 	struct cdp_pkt_info tx_failed =  { 0 };
2332 
2333 	target_pdev_id =
2334 		HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PDEV_ID_GET(*msg_word);
2335 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(dpsoc,
2336 							 target_pdev_id);
2337 
2338 	if (pdev_id >= MAX_PDEV_CNT)
2339 		return;
2340 
2341 	pdev = dpsoc->pdev_list[pdev_id];
2342 	if (!pdev) {
2343 		dp_err("PDEV is NULL for pdev_id:%d", pdev_id);
2344 		return;
2345 	}
2346 
2347 	payload_size =
2348 	HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PAYLOAD_SIZE_GET(*msg_word);
2349 
2350 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2351 			   (void *)msg_word, payload_size + 16);
2352 
2353 	/* Adjust msg_word to point to the first TLV in buffer */
2354 	msg_word = msg_word + 4;
2355 
2356 	/* Parse the received buffer till payload size reaches 0 */
2357 	while (payload_size > 0) {
2358 		tlv_buf = (uint8_t *)msg_word;
2359 		tlv_buf_temp = msg_word;
2360 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
2361 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
2362 
2363 		/* Add header size to tlv length*/
2364 		tlv_length += 4;
2365 
2366 		switch (tlv_type) {
2367 		case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
2368 		{
2369 			tag_buf = tlv_buf_temp +
2370 					HTT_VDEV_STATS_GET_INDEX(SOC_DROP_CNT);
2371 			soc_drop_cnt = HTT_VDEV_GET_STATS_U64(tag_buf);
2372 			DP_STATS_UPD(dpsoc, tx.tqm_drop_no_peer, soc_drop_cnt);
2373 			break;
2374 		}
2375 		case HTT_STATS_VDEV_TXRX_STATS_HW_STATS_TAG:
2376 		{
2377 			tag_buf = tlv_buf_temp +
2378 					HTT_VDEV_STATS_GET_INDEX(VDEV_ID);
2379 			vdev_id = (uint8_t)(*tag_buf);
2380 			vdev = dp_vdev_get_ref_by_id(dpsoc, vdev_id,
2381 						     DP_MOD_ID_HTT);
2382 
2383 			if (!vdev)
2384 				goto invalid_vdev;
2385 
2386 			/* Extract received packet count from buffer */
2387 			tag_buf = tlv_buf_temp +
2388 					HTT_VDEV_STATS_GET_INDEX(RX_PKT_CNT);
2389 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2390 			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.num, pkt_count);
2391 
2392 			/* Extract received packet byte count from buffer */
2393 			tag_buf = tlv_buf_temp +
2394 					HTT_VDEV_STATS_GET_INDEX(RX_BYTE_CNT);
2395 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2396 			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.bytes, byte_count);
2397 
2398 			/* Extract tx success packet count from buffer */
2399 			tag_buf = tlv_buf_temp +
2400 				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_PKT_CNT);
2401 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2402 			tx_comp.num = pkt_count;
2403 
2404 			/* Extract tx success packet byte count from buffer */
2405 			tag_buf = tlv_buf_temp +
2406 				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_BYTE_CNT);
2407 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2408 			tx_comp.bytes = byte_count;
2409 
2410 			/* Extract tx retry packet count from buffer */
2411 			tag_buf = tlv_buf_temp +
2412 				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_PKT_CNT);
2413 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2414 			tx_comp.num += pkt_count;
2415 			tx_failed.num = pkt_count;
2416 
2417 			/* Extract tx retry packet byte count from buffer */
2418 			tag_buf = tlv_buf_temp +
2419 				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_BYTE_CNT);
2420 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2421 			tx_comp.bytes += byte_count;
2422 			tx_failed.bytes = byte_count;
2423 
2424 			/* Extract tx drop packet count from buffer */
2425 			tag_buf = tlv_buf_temp +
2426 				HTT_VDEV_STATS_GET_INDEX(TX_DROP_PKT_CNT);
2427 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2428 			tx_comp.num += pkt_count;
2429 			tx_failed.num += pkt_count;
2430 
2431 			/* Extract tx drop packet byte count from buffer */
2432 			tag_buf = tlv_buf_temp +
2433 				HTT_VDEV_STATS_GET_INDEX(TX_DROP_BYTE_CNT);
2434 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2435 			tx_comp.bytes += byte_count;
2436 			tx_failed.bytes += byte_count;
2437 
2438 			/* Extract tx age-out packet count from buffer */
2439 			tag_buf = tlv_buf_temp +
2440 				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_PKT_CNT);
2441 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2442 			tx_comp.num += pkt_count;
2443 			tx_failed.num += pkt_count;
2444 
2445 			/* Extract tx age-out packet byte count from buffer */
2446 			tag_buf = tlv_buf_temp +
2447 				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_BYTE_CNT);
2448 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2449 			tx_comp.bytes += byte_count;
2450 			tx_failed.bytes += byte_count;
2451 
2452 			/* Extract tqm bypass packet count from buffer */
2453 			tag_buf = tlv_buf_temp +
2454 				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_PKT_CNT);
2455 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2456 			tx_comp.num += pkt_count;
2457 
2458 			/* Extract tx bypass packet byte count from buffer */
2459 			tag_buf = tlv_buf_temp +
2460 				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_BYTE_CNT);
2461 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2462 			tx_comp.bytes += byte_count;
2463 
2464 			DP_STATS_UPD(vdev, tx.comp_pkt.num, tx_comp.num);
2465 			DP_STATS_UPD(vdev, tx.comp_pkt.bytes, tx_comp.bytes);
2466 
2467 			DP_STATS_UPD(vdev, tx.tx_failed, tx_failed.num);
2468 
2469 			dp_vdev_unref_delete(dpsoc, vdev, DP_MOD_ID_HTT);
2470 			break;
2471 		}
2472 		default:
2473 			qdf_assert(0);
2474 		}
2475 invalid_vdev:
2476 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2477 		payload_size -= tlv_length;
2478 	}
2479 }
2480 #else
/* Stub: QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT not compiled in — no-op */
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{}
2484 #endif
2485 
2486 #ifdef CONFIG_SAWF_DEF_QUEUES
/* Thin wrapper forwarding the SAWF default-queues map report configuration
 * message to the SAWF HTT handler (CONFIG_SAWF_DEF_QUEUES build).
 */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_def_queues_map_report_conf(soc, msg_word, htt_t2h_msg);
}
2493 #else
/* Stub: CONFIG_SAWF_DEF_QUEUES not compiled in — no-op */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{}
2498 #endif
2499 
2500 #ifdef CONFIG_SAWF
2501 /*
2502  * dp_sawf_msduq_map() - Msdu queue creation information received
2503  * from target
2504  * @soc: soc handle.
2505  * @msg_word: Pointer to htt msg word.
2506  * @htt_t2h_msg: HTT message nbuf
2507  *
2508  * @return: void
2509  */
2510 static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
2511 			      qdf_nbuf_t htt_t2h_msg)
2512 {
2513 	dp_htt_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
2514 }
2515 
2516 /*
2517  * dp_sawf_mpdu_stats_handler() - HTT message handler for MPDU stats
2518  * @soc: soc handle.
2519  * @htt_t2h_msg: HTT message nbuf
2520  *
2521  * @return: void
2522  */
2523 static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
2524 				       qdf_nbuf_t htt_t2h_msg)
2525 {
2526 	dp_sawf_htt_mpdu_stats_handler(soc, htt_t2h_msg);
2527 }
2528 #else
/* Stub: CONFIG_SAWF not compiled in — no-op */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{}
2532 
/* Stub: CONFIG_SAWF not compiled in — no-op */
static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
				       qdf_nbuf_t htt_t2h_msg)
{}
2536 #endif
2537 
2538 /*
2539  * time_allow_print() - time allow print
2540  * @htt_ring_tt:	ringi_id array of timestamps
2541  * @ring_id:		ring_id (index)
2542  *
2543  * Return: 1 for successfully saving timestamp in array
2544  *	and 0 for timestamp falling within 2 seconds after last one
2545  */
2546 static bool time_allow_print(struct bp_handler *htt_bp_handler,
2547 			     u_int8_t ring_id, u_int32_t th_time)
2548 {
2549 	unsigned long tstamp;
2550 	struct bp_handler *path = &htt_bp_handler[ring_id];
2551 
2552 	tstamp = qdf_get_system_timestamp();
2553 
2554 	if (!path)
2555 		return 0; //unable to print backpressure messages
2556 
2557 	if (path->bp_start_tt == -1) {
2558 		path->bp_start_tt = tstamp;
2559 		path->bp_duration = 0;
2560 		path->bp_last_tt = tstamp;
2561 		path->bp_counter = 1;
2562 		return 1;
2563 	}
2564 
2565 	path->bp_duration = tstamp - path->bp_start_tt;
2566 	path->bp_last_tt = tstamp;
2567 	path->bp_counter++;
2568 
2569 	if (path->bp_duration >= th_time) {
2570 		path->bp_start_tt = -1;
2571 		return 1;
2572 	}
2573 
2574 	return 0;
2575 }
2576 
/* dp_htt_alert_print() - emit a formatted backpressure alert for one ring:
 * sequence number, pdev/ring identity, head/tail indices, and the
 * accumulated backpressure timing stats tracked in htt_bp_handler.
 */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       struct dp_pdev *pdev, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time,
			       struct bp_handler *htt_bp_handler,
			       char *ring_stype)
{
	dp_alert("seq_num %u msg_type: %d pdev_id: %d ring_type: %s ",
		 pdev->bkp_stats.seq_num, msg_type, pdev->pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
	dp_alert("last_bp_event: %ld, total_bp_duration: %ld, bp_counter: %ld",
		 htt_bp_handler[ring_id].bp_last_tt,
		 htt_bp_handler[ring_id].bp_duration,
		 htt_bp_handler[ring_id].bp_counter);
}
2593 
2594 /**
2595  * dp_get_srng_ring_state_from_hal(): Get hal level ring stats
2596  * @soc: DP_SOC handle
2597  * @srng: DP_SRNG handle
2598  * @ring_type: srng src/dst ring
2599  *
2600  * Return: void
2601  */
2602 static QDF_STATUS
2603 dp_get_srng_ring_state_from_hal(struct dp_soc *soc,
2604 				struct dp_pdev *pdev,
2605 				struct dp_srng *srng,
2606 				enum hal_ring_type ring_type,
2607 				struct dp_srng_ring_state *state)
2608 {
2609 	struct hal_soc *hal_soc;
2610 
2611 	if (!soc || !srng || !srng->hal_srng || !state)
2612 		return QDF_STATUS_E_INVAL;
2613 
2614 	hal_soc = (struct hal_soc *)soc->hal_soc;
2615 
2616 	hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &state->sw_tail,
2617 			&state->sw_head);
2618 
2619 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &state->hw_head,
2620 			&state->hw_tail, ring_type);
2621 
2622 	state->ring_type = ring_type;
2623 
2624 	return QDF_STATUS_SUCCESS;
2625 }
2626 
2627 #ifdef QCA_MONITOR_PKT_SUPPORT
2628 static void
2629 dp_queue_mon_ring_stats(struct dp_pdev *pdev,
2630 			int lmac_id, uint32_t *num_srng,
2631 			struct dp_soc_srngs_state *soc_srngs_state)
2632 {
2633 	QDF_STATUS status;
2634 
2635 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
2636 		status = dp_get_srng_ring_state_from_hal
2637 			(pdev->soc, pdev,
2638 			 &pdev->soc->rxdma_mon_buf_ring[lmac_id],
2639 			 RXDMA_MONITOR_BUF,
2640 			 &soc_srngs_state->ring_state[*num_srng]);
2641 
2642 		if (status == QDF_STATUS_SUCCESS)
2643 			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
2644 
2645 		status = dp_get_srng_ring_state_from_hal
2646 			(pdev->soc, pdev,
2647 			 &pdev->soc->rxdma_mon_dst_ring[lmac_id],
2648 			 RXDMA_MONITOR_DST,
2649 			 &soc_srngs_state->ring_state[*num_srng]);
2650 
2651 		if (status == QDF_STATUS_SUCCESS)
2652 			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
2653 
2654 		status = dp_get_srng_ring_state_from_hal
2655 			(pdev->soc, pdev,
2656 			 &pdev->soc->rxdma_mon_desc_ring[lmac_id],
2657 			 RXDMA_MONITOR_DESC,
2658 			 &soc_srngs_state->ring_state[*num_srng]);
2659 
2660 		if (status == QDF_STATUS_SUCCESS)
2661 			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
2662 	}
2663 }
2664 #else
/* Stub: QCA_MONITOR_PKT_SUPPORT not compiled in — no-op */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
}
2671 #endif
2672 
2673 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
/* Capture the TCL command/credit ring state (ring present in this build) */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_cmd_credit_ring,
					       TCL_CMD_CREDIT, ring_state);
}
2682 #else
/* Stub: TCL command/credit ring disabled in this build — report success so
 * the caller does not advance its ring-state index.
 */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
2689 #endif
2690 
2691 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
/* Capture the TCL status ring state (ring present in this build) */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_status_ring,
					       TCL_STATUS, ring_state);
}
2700 #else
/* Stub: TCL status ring disabled in this build — report success so the
 * caller does not advance its ring-state index.
 */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
2707 #endif
2708 
2709 /**
2710  * dp_queue_srng_ring_stats(): Print pdev hal level ring stats
2711  * @pdev: DP_pdev handle
2712  *
2713  * Return: void
2714  */
2715 static void dp_queue_ring_stats(struct dp_pdev *pdev)
2716 {
2717 	uint32_t i;
2718 	int mac_id;
2719 	int lmac_id;
2720 	uint32_t j = 0;
2721 	struct dp_soc *soc = pdev->soc;
2722 	struct dp_soc_srngs_state * soc_srngs_state = NULL;
2723 	struct dp_soc_srngs_state *drop_srngs_state = NULL;
2724 	QDF_STATUS status;
2725 
2726 	soc_srngs_state = qdf_mem_malloc(sizeof(struct dp_soc_srngs_state));
2727 	if (!soc_srngs_state) {
2728 		dp_htt_alert("Memory alloc failed for back pressure event");
2729 		return;
2730 	}
2731 
2732 	status = dp_get_srng_ring_state_from_hal
2733 				(pdev->soc, pdev,
2734 				 &pdev->soc->reo_exception_ring,
2735 				 REO_EXCEPTION,
2736 				 &soc_srngs_state->ring_state[j]);
2737 
2738 	if (status == QDF_STATUS_SUCCESS)
2739 		qdf_assert_always(++j < DP_MAX_SRNGS);
2740 
2741 	status = dp_get_srng_ring_state_from_hal
2742 				(pdev->soc, pdev,
2743 				 &pdev->soc->reo_reinject_ring,
2744 				 REO_REINJECT,
2745 				 &soc_srngs_state->ring_state[j]);
2746 
2747 	if (status == QDF_STATUS_SUCCESS)
2748 		qdf_assert_always(++j < DP_MAX_SRNGS);
2749 
2750 	status = dp_get_srng_ring_state_from_hal
2751 				(pdev->soc, pdev,
2752 				 &pdev->soc->reo_cmd_ring,
2753 				 REO_CMD,
2754 				 &soc_srngs_state->ring_state[j]);
2755 
2756 	if (status == QDF_STATUS_SUCCESS)
2757 		qdf_assert_always(++j < DP_MAX_SRNGS);
2758 
2759 	status = dp_get_srng_ring_state_from_hal
2760 				(pdev->soc, pdev,
2761 				 &pdev->soc->reo_status_ring,
2762 				 REO_STATUS,
2763 				 &soc_srngs_state->ring_state[j]);
2764 
2765 	if (status == QDF_STATUS_SUCCESS)
2766 		qdf_assert_always(++j < DP_MAX_SRNGS);
2767 
2768 	status = dp_get_srng_ring_state_from_hal
2769 				(pdev->soc, pdev,
2770 				 &pdev->soc->rx_rel_ring,
2771 				 WBM2SW_RELEASE,
2772 				 &soc_srngs_state->ring_state[j]);
2773 
2774 	if (status == QDF_STATUS_SUCCESS)
2775 		qdf_assert_always(++j < DP_MAX_SRNGS);
2776 
2777 	status = dp_get_tcl_cmd_cred_ring_state_from_hal
2778 				(pdev, &soc_srngs_state->ring_state[j]);
2779 	if (status == QDF_STATUS_SUCCESS)
2780 		qdf_assert_always(++j < DP_MAX_SRNGS);
2781 
2782 	status = dp_get_tcl_status_ring_state_from_hal
2783 				(pdev, &soc_srngs_state->ring_state[j]);
2784 	if (status == QDF_STATUS_SUCCESS)
2785 		qdf_assert_always(++j < DP_MAX_SRNGS);
2786 
2787 	status = dp_get_srng_ring_state_from_hal
2788 				(pdev->soc, pdev,
2789 				 &pdev->soc->wbm_desc_rel_ring,
2790 				 SW2WBM_RELEASE,
2791 				 &soc_srngs_state->ring_state[j]);
2792 
2793 	if (status == QDF_STATUS_SUCCESS)
2794 		qdf_assert_always(++j < DP_MAX_SRNGS);
2795 
2796 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
2797 		status = dp_get_srng_ring_state_from_hal
2798 				(pdev->soc, pdev,
2799 				 &pdev->soc->reo_dest_ring[i],
2800 				 REO_DST,
2801 				 &soc_srngs_state->ring_state[j]);
2802 
2803 		if (status == QDF_STATUS_SUCCESS)
2804 			qdf_assert_always(++j < DP_MAX_SRNGS);
2805 	}
2806 
2807 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
2808 		status = dp_get_srng_ring_state_from_hal
2809 				(pdev->soc, pdev,
2810 				 &pdev->soc->tcl_data_ring[i],
2811 				 TCL_DATA,
2812 				 &soc_srngs_state->ring_state[j]);
2813 
2814 		if (status == QDF_STATUS_SUCCESS)
2815 			qdf_assert_always(++j < DP_MAX_SRNGS);
2816 	}
2817 
2818 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
2819 		status = dp_get_srng_ring_state_from_hal
2820 				(pdev->soc, pdev,
2821 				 &pdev->soc->tx_comp_ring[i],
2822 				 WBM2SW_RELEASE,
2823 				 &soc_srngs_state->ring_state[j]);
2824 
2825 		if (status == QDF_STATUS_SUCCESS)
2826 			qdf_assert_always(++j < DP_MAX_SRNGS);
2827 	}
2828 
2829 	lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id);
2830 	status = dp_get_srng_ring_state_from_hal
2831 				(pdev->soc, pdev,
2832 				 &pdev->soc->rx_refill_buf_ring
2833 				 [lmac_id],
2834 				 RXDMA_BUF,
2835 				 &soc_srngs_state->ring_state[j]);
2836 
2837 	if (status == QDF_STATUS_SUCCESS)
2838 		qdf_assert_always(++j < DP_MAX_SRNGS);
2839 
2840 	status = dp_get_srng_ring_state_from_hal
2841 				(pdev->soc, pdev,
2842 				 &pdev->rx_refill_buf_ring2,
2843 				 RXDMA_BUF,
2844 				 &soc_srngs_state->ring_state[j]);
2845 
2846 	if (status == QDF_STATUS_SUCCESS)
2847 		qdf_assert_always(++j < DP_MAX_SRNGS);
2848 
2849 
2850 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2851 		dp_get_srng_ring_state_from_hal
2852 				(pdev->soc, pdev,
2853 				 &pdev->rx_mac_buf_ring[i],
2854 				 RXDMA_BUF,
2855 				 &soc_srngs_state->ring_state[j]);
2856 
2857 		if (status == QDF_STATUS_SUCCESS)
2858 			qdf_assert_always(++j < DP_MAX_SRNGS);
2859 	}
2860 
2861 	for (mac_id = 0;
2862 	     mac_id  < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
2863 	     mac_id++) {
2864 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2865 						     mac_id, pdev->pdev_id);
2866 
2867 		dp_queue_mon_ring_stats(pdev, lmac_id, &j,
2868 					soc_srngs_state);
2869 
2870 		status = dp_get_srng_ring_state_from_hal
2871 			(pdev->soc, pdev,
2872 			 &pdev->soc->rxdma_mon_status_ring[lmac_id],
2873 			 RXDMA_MONITOR_STATUS,
2874 			 &soc_srngs_state->ring_state[j]);
2875 
2876 		if (status == QDF_STATUS_SUCCESS)
2877 			qdf_assert_always(++j < DP_MAX_SRNGS);
2878 	}
2879 
2880 	for (i = 0; i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
2881 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2882 						     i, pdev->pdev_id);
2883 
2884 		status = dp_get_srng_ring_state_from_hal
2885 				(pdev->soc, pdev,
2886 				 &pdev->soc->rxdma_err_dst_ring
2887 				 [lmac_id],
2888 				 RXDMA_DST,
2889 				 &soc_srngs_state->ring_state[j]);
2890 
2891 		if (status == QDF_STATUS_SUCCESS)
2892 			qdf_assert_always(++j < DP_MAX_SRNGS);
2893 	}
2894 	soc_srngs_state->max_ring_id = j;
2895 
2896 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
2897 
2898 	soc_srngs_state->seq_num = pdev->bkp_stats.seq_num;
2899 
2900 	if (pdev->bkp_stats.queue_depth >= HTT_BKP_STATS_MAX_QUEUE_DEPTH) {
2901 		drop_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
2902 		qdf_assert_always(drop_srngs_state);
2903 		TAILQ_REMOVE(&pdev->bkp_stats.list, drop_srngs_state,
2904 			     list_elem);
2905 		qdf_mem_free(drop_srngs_state);
2906 		pdev->bkp_stats.queue_depth--;
2907 	}
2908 
2909 	pdev->bkp_stats.queue_depth++;
2910 	TAILQ_INSERT_TAIL(&pdev->bkp_stats.list, soc_srngs_state,
2911 			  list_elem);
2912 	pdev->bkp_stats.seq_num++;
2913 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
2914 
2915 	qdf_queue_work(0, pdev->bkp_stats.work_queue,
2916 		       &pdev->bkp_stats.work);
2917 }
2918 
2919 /*
2920  * dp_htt_bkp_event_alert() - htt backpressure event alert
2921  * @msg_word:	htt packet context
2922  * @htt_soc:	HTT SOC handle
2923  *
2924  * Return: after attempting to print stats
2925  */
2926 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
2927 {
2928 	u_int8_t ring_type;
2929 	u_int8_t pdev_id;
2930 	uint8_t target_pdev_id;
2931 	u_int8_t ring_id;
2932 	u_int16_t hp_idx;
2933 	u_int16_t tp_idx;
2934 	u_int32_t bkp_time;
2935 	u_int32_t th_time;
2936 	enum htt_t2h_msg_type msg_type;
2937 	struct dp_soc *dpsoc;
2938 	struct dp_pdev *pdev;
2939 	struct dp_htt_timestamp *radio_tt;
2940 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2941 
2942 
2943 	if (!soc)
2944 		return;
2945 
2946 	dpsoc = (struct dp_soc *)soc->dp_soc;
2947 	soc_cfg_ctx = dpsoc->wlan_cfg_ctx;
2948 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2949 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
2950 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
2951 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2952 							 target_pdev_id);
2953 	if (pdev_id >= MAX_PDEV_CNT) {
2954 		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
2955 		return;
2956 	}
2957 
2958 	th_time = wlan_cfg_time_control_bp(soc_cfg_ctx);
2959 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
2960 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
2961 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
2962 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
2963 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
2964 	radio_tt = &soc->pdevid_tt[pdev_id];
2965 
2966 	switch (ring_type) {
2967 	case HTT_SW_RING_TYPE_UMAC:
2968 		if (!time_allow_print(radio_tt->umac_path, ring_id, th_time))
2969 			return;
2970 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2971 				   bkp_time, radio_tt->umac_path,
2972 				   "HTT_SW_RING_TYPE_UMAC");
2973 	break;
2974 	case HTT_SW_RING_TYPE_LMAC:
2975 		if (!time_allow_print(radio_tt->lmac_path, ring_id, th_time))
2976 			return;
2977 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2978 				   bkp_time, radio_tt->lmac_path,
2979 				   "HTT_SW_RING_TYPE_LMAC");
2980 	break;
2981 	default:
2982 		dp_alert("Invalid ring type: %d", ring_type);
2983 	break;
2984 	}
2985 
2986 	dp_queue_ring_stats(pdev);
2987 }
2988 
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/*
 * dp_offload_ind_handler() - offload msg handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Translates the target pdev id to the host pdev id and forwards the
 * offloaded TX data indication to the packet-capture WDI subscriber.
 *
 * Return: None
 */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
	u_int8_t pdev_id;
	u_int8_t target_pdev_id;

	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA, soc->dp_soc,
			     msg_word, HTT_INVALID_VDEV, WDI_NO_VAL,
			     pdev_id);
}
#else
/* No-op stub when packet capture v2 is not compiled in */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
}
#endif
3016 
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
/*
 * dp_update_mlo_ts_offset() - push the MLO timestamp offset to the MLO ops
 * @soc: DP SOC handle
 * @ts_lo: low 32 bits of the offset
 * @ts_hi: high 32 bits of the offset
 *
 * Return: None
 */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{
	uint64_t mlo_offset;

	/* Recombine the two 32-bit halves into a single 64-bit offset */
	mlo_offset = ((uint64_t)(ts_hi) << 32 | ts_lo);
	/* NOTE(review): assumes mlo_ops and its callback are non-NULL on
	 * multi-chip builds — confirm registration ordering with callers.
	 */
	soc->cdp_soc.ops->mlo_ops->mlo_update_mlo_ts_offset
		((struct cdp_soc_t *)soc, mlo_offset);
}
#else
/* Single-chip builds: nothing to propagate */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{}
#endif
/*
 * dp_htt_mlo_peer_map_handler() - Handle MLO RX peer map indication from FW
 * @soc: HTT SOC handle
 * @msg_word: Pointer to the T2H message payload
 *
 * Parses the MLO peer id, deswizzled MAC address, per-flow AST override
 * info, and the trailing per-link TLVs, then forwards everything to
 * dp_rx_mlo_peer_map_handler().
 *
 * Return: None
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
	uint8_t *mlo_peer_mac_addr;
	uint16_t mlo_peer_id;
	uint8_t num_links;
	struct dp_mlo_flow_override_info mlo_flow_info[DP_MLO_FLOW_INFO_MAX];
	struct dp_mlo_link_info mlo_link_info[DP_MAX_MLO_LINKS];
	MLO_PEER_MAP_TLV_TAG_ID tlv_type = 0xff;
	uint16_t tlv_len = 0;
	int i = 0;

	mlo_peer_id = HTT_RX_MLO_PEER_MAP_MLO_PEER_ID_GET(*msg_word);
	num_links =
		HTT_RX_MLO_PEER_MAP_NUM_LOGICAL_LINKS_GET(*msg_word);
	/* MAC address occupies words 1-2 in target byte order */
	mlo_peer_mac_addr =
	htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
				   &mac_addr_deswizzle_buf[0]);

	mlo_flow_info[0].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[0].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	/*
	 * NOTE(review): entries [1] and [2] below are decoded from the same
	 * message word (+3) as entry [0]. Verify against the HTT MLO peer
	 * map message layout whether they should instead be parsed from
	 * subsequent words — this looks like a possible copy-paste.
	 */
	mlo_flow_info[1].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[1].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	mlo_flow_info[2].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[2].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	/* Per-link TLVs start at word 8; walk at most DP_MAX_MLO_LINKS */
	msg_word = msg_word + 8;
	while (msg_word && (i < DP_MAX_MLO_LINKS)) {
		/* Default to "invalid" in case the TLV is not the expected
		 * SOC/VDEV/PEER ids structure.
		 */
		mlo_link_info[i].peer_chip_id = 0xFF;
		mlo_link_info[i].vdev_id = 0xFF;

		tlv_type = HTT_RX_MLO_PEER_MAP_TLV_TAG_GET(*msg_word);
		tlv_len = HTT_RX_MLO_PEER_MAP_TLV_LENGTH_GET(*msg_word);

		if (tlv_len == 0) {
			dp_err("TLV Length is 0");
			break;
		}

		if (tlv_type == MLO_PEER_MAP_TLV_STRUCT_SOC_VDEV_PEER_IDS) {
			mlo_link_info[i].peer_chip_id =
				HTT_RX_MLO_PEER_MAP_CHIP_ID_GET(
							*(msg_word + 1));
			mlo_link_info[i].vdev_id =
				HTT_RX_MLO_PEER_MAP_VDEV_ID_GET(
							*(msg_word + 1));
		}
		/* Add header size to tlv length */
		tlv_len = tlv_len + HTT_TLV_HDR_LEN;
		msg_word = (uint32_t *)(((uint8_t *)msg_word) + tlv_len);
		i++;
	}

	dp_rx_mlo_peer_map_handler(soc->dp_soc, mlo_peer_id,
				   mlo_peer_mac_addr,
				   mlo_flow_info, mlo_link_info);
}
3117 
3118 static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
3119 					  uint32_t *msg_word)
3120 {
3121 	uint16_t mlo_peer_id;
3122 
3123 	mlo_peer_id = HTT_RX_MLO_PEER_UNMAP_MLO_PEER_ID_GET(*msg_word);
3124 	dp_rx_mlo_peer_unmap_handler(soc->dp_soc, mlo_peer_id);
3125 }
3126 
/*
 * dp_rx_mlo_timestamp_ind_handler() - MLO timestamp offset indication handler
 * @soc: DP SOC handle
 * @msg_word: Pointer to the T2H message payload
 *
 * Caches the FW-provided MLO timestamp/offset fields in pdev->timestamp
 * under the htt_stats lock and propagates the offset via
 * dp_update_mlo_ts_offset().
 *
 * Return: None
 */
static void
dp_rx_mlo_timestamp_ind_handler(struct dp_soc *soc,
				uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint8_t target_pdev_id;
	struct dp_pdev *pdev;

	if (!soc)
		return;

	target_pdev_id = HTT_T2H_MLO_TIMESTAMP_OFFSET_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT) {
		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
		return;
	}

	pdev = (struct dp_pdev *)soc->pdev_list[pdev_id];

	if (!pdev) {
		dp_err("Invalid pdev");
		return;
	}
	/* Deliver the raw indication to WDI subscribers first */
	dp_wdi_event_handler(WDI_EVENT_MLO_TSTMP, soc,
			     msg_word, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);

	/* htt_stats.lock serializes access to pdev->timestamp */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	pdev->timestamp.msg_type =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MSG_TYPE_GET(*msg_word);
	pdev->timestamp.pdev_id = pdev_id;
	pdev->timestamp.chip_id =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_CHIP_ID_GET(*msg_word);
	pdev->timestamp.mac_clk_freq =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MAC_CLK_FREQ_MHZ_GET(*msg_word);
	pdev->timestamp.sync_tstmp_lo_us = *(msg_word + 1);
	pdev->timestamp.sync_tstmp_hi_us = *(msg_word + 2);
	pdev->timestamp.mlo_offset_lo_us = *(msg_word + 3);
	pdev->timestamp.mlo_offset_hi_us = *(msg_word + 4);
	pdev->timestamp.mlo_offset_clks  = *(msg_word + 5);
	pdev->timestamp.mlo_comp_us =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_US_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_clks =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_CLKS_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_timer =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_PERIOD_US_GET(
							*(msg_word + 7));

	dp_htt_debug("tsf_lo=%d tsf_hi=%d, mlo_ofst_lo=%d, mlo_ofst_hi=%d\n",
		     pdev->timestamp.sync_tstmp_lo_us,
		     pdev->timestamp.sync_tstmp_hi_us,
		     pdev->timestamp.mlo_offset_lo_us,
		     pdev->timestamp.mlo_offset_hi_us);

	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	dp_update_mlo_ts_offset(soc,
				pdev->timestamp.mlo_offset_lo_us,
				pdev->timestamp.mlo_offset_hi_us);
}
#else
/* Stubs for builds without WLAN_FEATURE_11BE_MLO: the firmware should never
 * send these MLO T2H messages, so receiving one indicates a host/FW feature
 * mismatch and triggers an assert.
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
					 uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void
dp_rx_mlo_timestamp_ind_handler(void *soc_handle,
				uint32_t *msg_word)
{
	qdf_assert_always(0);
}
#endif
3212 
3213 /*
3214  * dp_htt_rx_addba_handler() - RX Addba HTT msg handler
3215  * @soc: DP Soc handler
3216  * @peer_id: ID of peer
3217  * @tid: TID number
3218  * @win_sz: BA window size
3219  *
3220  * Return: None
3221  */
3222 static void
3223 dp_htt_rx_addba_handler(struct dp_soc *soc, uint16_t peer_id,
3224 			uint8_t tid, uint16_t win_sz)
3225 {
3226 	uint16_t status;
3227 	struct dp_peer *peer;
3228 
3229 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3230 
3231 	if (!peer) {
3232 		dp_err("Peer not found peer id %d", peer_id);
3233 		return;
3234 	}
3235 
3236 	status = dp_addba_requestprocess_wifi3((struct cdp_soc_t *)soc,
3237 					       peer->mac_addr.raw,
3238 					       peer->vdev->vdev_id, 0,
3239 					       tid, 0, win_sz, 0xffff);
3240 
3241 	dp_addba_resp_tx_completion_wifi3(
3242 		(struct cdp_soc_t *)soc,
3243 		peer->mac_addr.raw, peer->vdev->vdev_id,
3244 		tid,
3245 		status);
3246 
3247 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3248 
3249 	dp_info("PeerID %d BAW %d TID %d stat %d",
3250 		peer_id, win_sz, tid, status);
3251 }
3252 
3253 /*
3254  * dp_htt_ppdu_id_fmt_handler() - PPDU ID Format handler
3255  * @htt_soc: HTT SOC handle
3256  * @msg_word: Pointer to payload
3257  *
3258  * Return: None
3259  */
3260 static void
3261 dp_htt_ppdu_id_fmt_handler(struct dp_soc *soc, uint32_t *msg_word)
3262 {
3263 	uint8_t msg_type, valid, bits, offset;
3264 
3265 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3266 
3267 	msg_word += HTT_PPDU_ID_FMT_IND_LINK_ID_OFFSET;
3268 	valid = HTT_PPDU_ID_FMT_IND_VALID_GET_BITS31_16(*msg_word);
3269 	bits = HTT_PPDU_ID_FMT_IND_BITS_GET_BITS31_16(*msg_word);
3270 	offset = HTT_PPDU_ID_FMT_IND_OFFSET_GET_BITS31_16(*msg_word);
3271 
3272 	dp_info("link_id: valid %u bits %u offset %u", valid, bits, offset);
3273 
3274 	if (valid) {
3275 		soc->link_id_offset = offset;
3276 		soc->link_id_bits = bits;
3277 	}
3278 }
3279 
/*
 * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
 * @context:	Opaque context (HTT SOC handle)
 * @pkt:	HTC packet
 *
 * Central dispatcher for all HTT T2H indications. Most handlers consume
 * the message in place; a handler that keeps the nbuf (e.g. PPDU stats)
 * clears free_buf so the buffer is not freed here.
 *
 * Return: None
 */
static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct htt_soc *soc = (struct htt_soc *) context;
	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	u_int32_t *msg_word;
	enum htt_t2h_msg_type msg_type;
	/* set false by handlers that take ownership of the nbuf */
	bool free_buf = true;

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			soc->stats.htc_err_cnt++;

		qdf_nbuf_free(htt_t2h_msg);
		return;
	}

	/* TODO: Check if we should pop the HTC/HTT header alignment padding */

	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
	htt_event_record(soc->htt_logger_handle,
			 msg_type, (uint8_t *)msg_word);
	switch (msg_type) {
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
	{
		dp_htt_bkp_event_alert(msg_word, soc);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *peer_mac_addr;
			u_int16_t peer_id;
			u_int16_t hw_peer_id;
			u_int8_t vdev_id;
			u_int8_t is_wds;
			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;

			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
			hw_peer_id =
				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
				(u_int8_t *) (msg_word+1),
				&mac_addr_deswizzle_buf[0]);
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_DEBUG,
				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
				peer_id, vdev_id);

			/*
			 * check if peer already exists for this peer_id, if so
			 * this peer map event is in response for a wds peer add
			 * wmi command sent during wds source port learning.
			 * in this case just add the ast entry to the existing
			 * peer ast_list.
			 */
			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
					       vdev_id, peer_mac_addr, 0,
					       is_wds);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
		{
			u_int16_t peer_id;
			u_int8_t vdev_id;
			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);

			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
						 vdev_id, mac_addr, 0,
						 DP_PEER_WDS_COUNT_INVALID);
			break;
		}
	case HTT_T2H_MSG_TYPE_SEC_IND:
		{
			u_int16_t peer_id;
			enum cdp_sec_type sec_type;
			int is_unicast;

			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
			/* point to the first part of the Michael key */
			msg_word++;
			dp_rx_sec_ind_handler(
				soc->dp_soc, peer_id, sec_type, is_unicast,
				msg_word, msg_word + 2);
			break;
		}

	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		{
			/* Handler may retain the nbuf; honor its decision */
			free_buf =
				dp_monitor_ppdu_stats_ind_handler(soc,
								  msg_word,
								  htt_t2h_msg);
			break;
		}

	case HTT_T2H_MSG_TYPE_PKTLOG:
		{
			dp_pktlog_msg_handler(soc, msg_word);
			break;
		}

	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		{
			/*
			 * HTC maintains runtime pm count for H2T messages that
			 * have a response msg from FW. This count ensures that
			 * in the case FW does not sent out the response or host
			 * did not process this indication runtime_put happens
			 * properly in the cleanup path.
			 */
			if (htc_dec_return_runtime_cnt(soc->htc_soc) >= 0)
				htc_pm_runtime_put(soc->htc_soc);
			else
				soc->stats.htt_ver_req_put_skip++;
			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				"target uses HTT version %d.%d; host uses %d.%d",
				soc->tgt_ver.major, soc->tgt_ver.minor,
				HTT_CURRENT_VERSION_MAJOR,
				HTT_CURRENT_VERSION_MINOR);
			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_WARN,
					"*** Incompatible host/target HTT versions!");
			}
			/* abort if the target is incompatible with the host */
			qdf_assert(soc->tgt_ver.major ==
				HTT_CURRENT_VERSION_MAJOR);
			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_INFO_LOW,
					"*** Warning: host/target HTT versions"
					" are different, though compatible!");
			}
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint16_t win_sz;

			/*
			 * Update REO Queue Desc with new values
			 */
			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);

			/*
			 * Window size needs to be incremented by 1
			 * since fw needs to represent a value of 256
			 * using just 8 bits
			 */
			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
						tid, win_sz + 1);
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_ADDBA_EXTN:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint16_t win_sz;

			peer_id = HTT_RX_ADDBA_EXTN_PEER_ID_GET(*msg_word);
			tid = HTT_RX_ADDBA_EXTN_TID_GET(*msg_word);

			/* Extended form carries the window size in word 1 */
			msg_word++;
			win_sz = HTT_RX_ADDBA_EXTN_WIN_SIZE_GET(*msg_word);

			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
						tid, win_sz);
			break;
		}
	case HTT_T2H_PPDU_ID_FMT_IND:
		{
			dp_htt_ppdu_id_fmt_handler(soc->dp_soc, msg_word);
			break;
		}
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		{
			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *peer_mac_addr;
			u_int16_t peer_id;
			u_int16_t hw_peer_id;
			u_int8_t vdev_id;
			bool is_wds;
			u_int16_t ast_hash;
			struct dp_ast_flow_override_info ast_flow_info;

			qdf_mem_set(&ast_flow_info, 0,
					    sizeof(struct dp_ast_flow_override_info));

			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
			hw_peer_id =
			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
			peer_mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
			is_wds =
			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
			ast_hash =
			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
			/*
			 * Update 4 ast_index per peer, ast valid mask
			 * and TID flow valid mask.
			 * AST valid mask is 3 bit field corresponds to
			 * ast_index[3:1]. ast_index 0 is always valid.
			 */
			ast_flow_info.ast_valid_mask =
			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
			ast_flow_info.ast_idx[0] = hw_peer_id;
			ast_flow_info.ast_flow_mask[0] =
			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
			ast_flow_info.ast_idx[1] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
			ast_flow_info.ast_flow_mask[1] =
			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
			ast_flow_info.ast_idx[2] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
			ast_flow_info.ast_flow_mask[2] =
			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
			ast_flow_info.ast_idx[3] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
			ast_flow_info.ast_flow_mask[3] =
			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
			/*
			 * TID valid mask is applicable only
			 * for HI and LOW priority flows.
			 * tid_valid_mas is 8 bit field corresponds
			 * to TID[7:0]
			 */
			ast_flow_info.tid_valid_low_pri_mask =
			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
			ast_flow_info.tid_valid_hi_pri_mask =
			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_DEBUG,
				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
				  peer_id, vdev_id);

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  "ast_idx[0] %d ast_idx[1] %d ast_idx[2] %d ast_idx[3] %d n",
				  ast_flow_info.ast_idx[0],
				  ast_flow_info.ast_idx[1],
				  ast_flow_info.ast_idx[2],
				  ast_flow_info.ast_idx[3]);

			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
					       hw_peer_id, vdev_id,
					       peer_mac_addr, ast_hash,
					       is_wds);

			/*
			 * Update ast indexes for flow override support
			 * Applicable only for non wds peers
			 */
			if (!soc->dp_soc->ast_offload_support)
				dp_peer_ast_index_flow_queue_map_create(
						soc->dp_soc, is_wds,
						peer_id, peer_mac_addr,
						&ast_flow_info);

			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *mac_addr;
			u_int16_t peer_id;
			u_int8_t vdev_id;
			u_int8_t is_wds;
			u_int32_t free_wds_count;

			peer_id =
			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
			mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
			is_wds =
			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
			free_wds_count =
			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
				  peer_id, vdev_id);

			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
						 vdev_id, mac_addr,
						 is_wds, free_wds_count);
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint8_t win_sz;
			QDF_STATUS status;

			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_DELBA_TID_GET(*msg_word);
			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);

			status = dp_rx_delba_ind_handler(
				soc->dp_soc,
				peer_id, tid, win_sz);

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
				  peer_id, win_sz, tid, status);
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_DELBA_EXTN:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint16_t win_sz;
			QDF_STATUS status;

			peer_id = HTT_RX_DELBA_EXTN_PEER_ID_GET(*msg_word);
			tid = HTT_RX_DELBA_EXTN_TID_GET(*msg_word);

			/* Extended form carries the window size in word 1 */
			msg_word++;
			win_sz = HTT_RX_DELBA_EXTN_WIN_SIZE_GET(*msg_word);

			status = dp_rx_delba_ind_handler(soc->dp_soc,
							 peer_id, tid,
							 win_sz);

			dp_info("DELBA PeerID %d BAW %d TID %d stat %d",
				peer_id, win_sz, tid, status);
			break;
		}
	case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
		{
			uint16_t num_entries;
			uint32_t cmem_ba_lo;
			uint32_t cmem_ba_hi;

			num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
			cmem_ba_lo = *(msg_word + 1);
			cmem_ba_hi = *(msg_word + 2);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
				  num_entries, cmem_ba_lo, cmem_ba_hi);

			dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
						     cmem_ba_lo, cmem_ba_hi);
			break;
		}
	case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
		{
			dp_offload_ind_handler(soc, msg_word);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_MAP_V3:
	{
		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
		u_int8_t *peer_mac_addr;
		u_int16_t peer_id;
		u_int16_t hw_peer_id;
		u_int8_t vdev_id;
		uint8_t is_wds;
		u_int16_t ast_hash = 0;

		peer_id = HTT_RX_PEER_MAP_V3_SW_PEER_ID_GET(*msg_word);
		vdev_id = HTT_RX_PEER_MAP_V3_VDEV_ID_GET(*msg_word);
		peer_mac_addr =
		htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
					   &mac_addr_deswizzle_buf[0]);
		hw_peer_id = HTT_RX_PEER_MAP_V3_HW_PEER_ID_GET(*(msg_word + 3));
		ast_hash = HTT_RX_PEER_MAP_V3_CACHE_SET_NUM_GET(*(msg_word + 3));
		is_wds = HTT_RX_PEER_MAP_V3_NEXT_HOP_GET(*(msg_word + 4));

		dp_htt_info("HTT_T2H_MSG_TYPE_PEER_MAP_V3 msg for peer id %d vdev id %d n",
			    peer_id, vdev_id);

		dp_rx_peer_map_handler(soc->dp_soc, peer_id,
				       hw_peer_id, vdev_id,
				       peer_mac_addr, ast_hash,
				       is_wds);

		break;
	}
	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP:
	{
		dp_htt_mlo_peer_map_handler(soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_UNMAP:
	{
		dp_htt_mlo_peer_unmap_handler(soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
	{
		dp_rx_mlo_timestamp_ind_handler(soc->dp_soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_VDEVS_TXRX_STATS_PERIODIC_IND:
	{
		dp_vdev_txrx_hw_stats_handler(soc, msg_word);
		break;
	}
	case HTT_T2H_SAWF_DEF_QUEUES_MAP_REPORT_CONF:
	{
		dp_sawf_def_queues_update_map_report_conf(soc, msg_word,
							  htt_t2h_msg);
		break;
	}
	case HTT_T2H_SAWF_MSDUQ_INFO_IND:
	{
		dp_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
		break;
	}
	case HTT_T2H_MSG_TYPE_STREAMING_STATS_IND:
	{
		dp_sawf_mpdu_stats_handler(soc, htt_t2h_msg);
		break;
	}

	/* Unknown or unhandled T2H message types are silently dropped */
	default:
		break;
	};

	/* Free the indication buffer */
	if (free_buf)
		qdf_nbuf_free(htt_t2h_msg);
}
3736 
/*
 * dp_htt_h2t_full() - Send full handler (called from HTC)
 * @context:	Opaque context (HTT SOC handle)
 * @pkt:	HTC packet
 *
 * Invoked by HTC when the send queue exceeds MaxSendQueueDepth; HTT opts
 * to keep queueing H2T messages rather than dropping them.
 *
 * Return: enum htc_send_full_action
 */
static enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}
3749 
3750 /*
3751  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3752  * @context:	Opaque context (HTT SOC handle)
3753  * @nbuf:	nbuf containing T2H message
3754  * @pipe_id:	HIF pipe ID
3755  *
3756  * Return: QDF_STATUS
3757  *
3758  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3759  * will be used for packet log and other high-priority HTT messages. Proper
3760  * HTC connection to be added later once required FW changes are available
3761  */
3762 static QDF_STATUS
3763 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3764 {
3765 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3766 	HTC_PACKET htc_pkt;
3767 
3768 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3769 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3770 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3771 	htc_pkt.pPktContext = (void *)nbuf;
3772 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3773 
3774 	return rc;
3775 }
3776 
3777 /*
3778  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3779  * @htt_soc:	HTT SOC handle
3780  *
3781  * Return: QDF_STATUS
3782  */
3783 static QDF_STATUS
3784 htt_htc_soc_attach(struct htt_soc *soc)
3785 {
3786 	struct htc_service_connect_req connect;
3787 	struct htc_service_connect_resp response;
3788 	QDF_STATUS status;
3789 	struct dp_soc *dpsoc = soc->dp_soc;
3790 
3791 	qdf_mem_zero(&connect, sizeof(connect));
3792 	qdf_mem_zero(&response, sizeof(response));
3793 
3794 	connect.pMetaData = NULL;
3795 	connect.MetaDataLength = 0;
3796 	connect.EpCallbacks.pContext = soc;
3797 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3798 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3799 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3800 
3801 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3802 	connect.EpCallbacks.EpRecvRefill = NULL;
3803 
3804 	/* N/A, fill is done by HIF */
3805 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3806 
3807 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3808 	/*
3809 	 * Specify how deep to let a queue get before htc_send_pkt will
3810 	 * call the EpSendFull function due to excessive send queue depth.
3811 	 */
3812 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3813 
3814 	/* disable flow control for HTT data message service */
3815 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3816 
3817 	/* connect to control service */
3818 	connect.service_id = HTT_DATA_MSG_SVC;
3819 
3820 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3821 
3822 	if (status != QDF_STATUS_SUCCESS)
3823 		return status;
3824 
3825 	soc->htc_endpoint = response.Endpoint;
3826 
3827 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3828 
3829 	htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc);
3830 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3831 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3832 
3833 	return QDF_STATUS_SUCCESS; /* success */
3834 }
3835 
3836 /*
3837  * htt_soc_initialize() - SOC level HTT initialization
3838  * @htt_soc: Opaque htt SOC handle
3839  * @ctrl_psoc: Opaque ctrl SOC handle
3840  * @htc_soc: SOC level HTC handle
3841  * @hal_soc: Opaque HAL SOC handle
3842  * @osdev: QDF device
3843  *
3844  * Return: HTT handle on success; NULL on failure
3845  */
3846 void *
3847 htt_soc_initialize(struct htt_soc *htt_soc,
3848 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
3849 		   HTC_HANDLE htc_soc,
3850 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
3851 {
3852 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3853 
3854 	soc->osdev = osdev;
3855 	soc->ctrl_psoc = ctrl_psoc;
3856 	soc->htc_soc = htc_soc;
3857 	soc->hal_soc = hal_soc_hdl;
3858 
3859 	if (htt_htc_soc_attach(soc))
3860 		goto fail2;
3861 
3862 	return soc;
3863 
3864 fail2:
3865 	return NULL;
3866 }
3867 
/* htt_soc_htc_dealloc() - release HTT<->HTC resources held by @htt_handle:
 * the HTT logging context, the misc pkt pool, and the preallocated
 * HTC packet freelist (see htt_soc_htc_prealloc()).
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
3874 
3875 /*
3876  * htt_soc_htc_prealloc() - HTC memory prealloc
3877  * @htt_soc: SOC level HTT handle
3878  *
3879  * Return: QDF_STATUS_SUCCESS on Success or
3880  * QDF_STATUS_E_NOMEM on allocation failure
3881  */
3882 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
3883 {
3884 	int i;
3885 
3886 	soc->htt_htc_pkt_freelist = NULL;
3887 	/* pre-allocate some HTC_PACKET objects */
3888 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3889 		struct dp_htt_htc_pkt_union *pkt;
3890 		pkt = qdf_mem_malloc(sizeof(*pkt));
3891 		if (!pkt)
3892 			return QDF_STATUS_E_NOMEM;
3893 
3894 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3895 	}
3896 	return QDF_STATUS_SUCCESS;
3897 }
3898 
3899 /*
3900  * htt_soc_detach() - Free SOC level HTT handle
3901  * @htt_hdl: HTT SOC handle
3902  */
3903 void htt_soc_detach(struct htt_soc *htt_hdl)
3904 {
3905 	int i;
3906 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
3907 
3908 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3909 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_path);
3910 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_path);
3911 	}
3912 
3913 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
3914 	qdf_mem_free(htt_handle);
3915 
3916 }
3917 
3918 /**
3919  * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
3920  * @pdev: DP PDEV handle
3921  * @stats_type_upload_mask: stats type requested by user
3922  * @config_param_0: extra configuration parameters
3923  * @config_param_1: extra configuration parameters
3924  * @config_param_2: extra configuration parameters
3925  * @config_param_3: extra configuration parameters
3926  * @mac_id: mac number
3927  *
3928  * return: QDF STATUS
3929  */
QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint32_t config_param_0,
		uint32_t config_param_1, uint32_t config_param_2,
		uint32_t config_param_3, int cookie_val, int cookie_msb,
		uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask = 0;
	uint8_t *htt_logger_bufp;
	int mac_for_pdev;
	int target_pdev_id;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);

	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	/* Translate the host (mac_id, pdev_id) pair into the target's pdev
	 * numbering so the request is routed to the right pdev in FW.
	 */
	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
	target_pdev_id =
	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);

	pdev_mask = 1 << target_pdev_id;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand head for HTT_EXT_STATS");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* Capture the HTT message start BEFORE pushing the HTC alignment
	 * pad, so the fields below land after the reserved HTC area.
	 */
	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	/* word 0: message type, target pdev mask, requested stats type */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);

	/* word 2 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);

	/* word 3 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);

	/* word 4 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);

	/* NOTE(review): this second CONFIG_PARAM_SET with 0 targets the
	 * same word as config_param_3 above and looks redundant - confirm
	 * intent against the HTT message definition.
	 */
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);

	/* word 5 */
	msg_word++;
	/* NOTE(review): word 5 is advanced past without being zeroed, so
	 * it carries whatever the allocator left in the buffer -
	 * presumably ignored by FW for this request; confirm.
	 */

	/* word 6: cookie LSBs, echoed back by FW in the stats response */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);

	/* word 7: cookie MSBs, with pdev_id folded into the low bits */
	msg_word++;
	*msg_word = 0;
	/* Currently Using last 2 bits for pdev_id
	 * For future reference, reserving 3 bits in cookie_msb for pdev_id
	 */
	cookie_msb = (cookie_msb | pdev->pdev_id);
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for FW response msg not guaranteed */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
				     htt_logger_bufp);

	/* On send failure the completion callback never runs, so release
	 * both the nbuf and the HTC packet wrapper here.
	 */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4053 
4054 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
4055 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK 0xFFFFFFFF
4056 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK 0xFFFFFFFF00000000
4057 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT 32
4058 
/**
 * dp_h2t_hw_vdev_stats_config_send() - Send HTT VDEVS_TXRX_STATS_CFG
 * command to FW to enable/disable and optionally reset the HW-offloaded
 * per-vdev txrx stats
 * @dpsoc: Datapath soc handle
 * @pdev_id: host pdev id (INVALID_PDEV_ID maps to target pdev 0)
 * @enable: enable/disable the periodic stats upload
 * @reset: request FW to reset the stats selected by @reset_bitmask
 * @reset_bitmask: 64-bit bitmap of vdev stats to reset, split into two
 * 32-bit message words below
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	struct htt_soc *soc = dpsoc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;
	int duration;
	uint32_t bitmask;
	int target_pdev_id;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_vdevs_txrx_stats_cfg)),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate "
		"HTT_H2T_HW_VDEV_TXRX_STATS_CFG_MSG_SZ msg buffer", dpsoc);
		return QDF_STATUS_E_NOMEM;
	}

	/* Map the host pdev id to the target's numbering; an invalid id
	 * falls back to target pdev 0.
	 */
	if (pdev_id != INVALID_PDEV_ID)
		target_pdev_id = DP_SW2HW_MACID(pdev_id);
	else
		target_pdev_id = 0;

	/* Periodic upload interval comes from the wlan cfg context */
	duration =
	wlan_cfg_get_vdev_stats_hw_offload_timer(dpsoc->wlan_cfg_ctx);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_vdevs_txrx_stats_cfg))) {
		dp_htt_err("%pK: Failed to expand head for HTT_HW_VDEV_STATS"
			   , dpsoc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* Capture the HTT message start BEFORE pushing the HTC pad */
	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;

	/* word 0: msg type, pdev id, enable flag, interval, reset flag */
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG);
	HTT_RX_VDEVS_TXRX_STATS_PDEV_ID_SET(*msg_word, target_pdev_id);

	HTT_RX_VDEVS_TXRX_STATS_ENABLE_SET(*msg_word, enable);

	/* Interval field carries the configured duration scaled down by 8;
	 * units per the HTT message definition - TODO confirm.
	 */
	HTT_RX_VDEVS_TXRX_STATS_PERIODIC_INTERVAL_SET(*msg_word,
						      (duration >> 3));

	HTT_RX_VDEVS_TXRX_STATS_RESET_STATS_BITS_SET(*msg_word, reset);

	/* word 1: low 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask = (reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK);
	*msg_word = bitmask;

	/* word 2: high 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask =
		((reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK) >>
		 HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT);
	*msg_word = bitmask;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer",
			   dpsoc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG,
				     htt_logger_bufp);

	/* On send failure the completion callback never runs; clean up */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4164 #else
/* Stub used when QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT is not compiled in:
 * no HTT message is needed, so report success unconditionally.
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	return QDF_STATUS_SUCCESS;
}
4171 #endif
4172 
4173 /**
4174  * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration
4175  * HTT message to pass to FW
4176  * @pdev: DP PDEV handle
4177  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
4178  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
4179  *
4180  * tuple_mask[1:0]:
4181  *   00 - Do not report 3 tuple hash value
4182  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
4183  *   01 - Report 3 tuple hash value in flow_id_toeplitz
4184  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
4185  *
4186  * return: QDF STATUS
4187  */
4188 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
4189 				     uint32_t tuple_mask, uint8_t mac_id)
4190 {
4191 	struct htt_soc *soc = pdev->soc->htt_handle;
4192 	struct dp_htt_htc_pkt *pkt;
4193 	qdf_nbuf_t msg;
4194 	uint32_t *msg_word;
4195 	uint8_t *htt_logger_bufp;
4196 	int mac_for_pdev;
4197 	int target_pdev_id;
4198 
4199 	msg = qdf_nbuf_alloc(
4200 			soc->osdev,
4201 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
4202 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4203 
4204 	if (!msg)
4205 		return QDF_STATUS_E_NOMEM;
4206 
4207 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4208 	target_pdev_id =
4209 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
4210 
4211 	/*
4212 	 * Set the length of the message.
4213 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4214 	 * separately during the below call to qdf_nbuf_push_head.
4215 	 * The contribution from the HTC header is added separately inside HTC.
4216 	 */
4217 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
4218 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4219 			  "Failed to expand head for HTT_3TUPLE_CONFIG");
4220 		qdf_nbuf_free(msg);
4221 		return QDF_STATUS_E_FAILURE;
4222 	}
4223 
4224 	dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------",
4225 		    pdev->soc, tuple_mask, target_pdev_id);
4226 
4227 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
4228 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4229 	htt_logger_bufp = (uint8_t *)msg_word;
4230 
4231 	*msg_word = 0;
4232 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
4233 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
4234 
4235 	msg_word++;
4236 	*msg_word = 0;
4237 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4238 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4239 
4240 	pkt = htt_htc_pkt_alloc(soc);
4241 	if (!pkt) {
4242 		qdf_nbuf_free(msg);
4243 		return QDF_STATUS_E_NOMEM;
4244 	}
4245 
4246 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4247 
4248 	SET_HTC_PACKET_INFO_TX(
4249 			&pkt->htc_pkt,
4250 			dp_htt_h2t_send_complete_free_netbuf,
4251 			qdf_nbuf_data(msg),
4252 			qdf_nbuf_len(msg),
4253 			soc->htc_endpoint,
4254 			/* tag for no FW response msg */
4255 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4256 
4257 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4258 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
4259 			    htt_logger_bufp);
4260 
4261 	return QDF_STATUS_SUCCESS;
4262 }
4263 
/* This macro will be reverted once a proper HTT header defines
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in htt.h.
 */
4267 #if defined(WDI_EVENT_ENABLE)
4268 /**
4269  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
4270  * @pdev: DP PDEV handle
4271  * @stats_type_upload_mask: stats type requested by user
4272  * @mac_id: Mac id number
4273  *
4274  * return: QDF STATUS
4275  */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer"
			   , pdev->soc);
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	/* One-hot mask selecting the target pdev that should upload
	 * PPDU stats; mac_id is translated to the target's numbering.
	 */
	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
								mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS"
			   , pdev->soc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* Capture the HTT message start BEFORE pushing the HTC pad */
	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* word 0: msg type, pdev mask, requested TLV bitmask */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
			stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer", pdev->soc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for no FW response msg */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	/* msg_word still points at the HTT message start; reuse it as the
	 * logger buffer (other senders keep a separate htt_logger_bufp).
	 */
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
				     (uint8_t *)msg_word);

	/* On send failure the completion callback never runs; clean up */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4357 
4358 qdf_export_symbol(dp_h2t_cfg_stats_msg_send);
4359 #endif
4360 
4361 void
4362 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
4363 			     uint32_t *tag_buf)
4364 {
4365 	struct dp_peer *peer = NULL;
4366 	switch (tag_type) {
4367 	case HTT_STATS_PEER_DETAILS_TAG:
4368 	{
4369 		htt_peer_details_tlv *dp_stats_buf =
4370 			(htt_peer_details_tlv *)tag_buf;
4371 
4372 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
4373 	}
4374 	break;
4375 	case HTT_STATS_PEER_STATS_CMN_TAG:
4376 	{
4377 		htt_peer_stats_cmn_tlv *dp_stats_buf =
4378 			(htt_peer_stats_cmn_tlv *)tag_buf;
4379 
4380 		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
4381 					     DP_MOD_ID_HTT);
4382 
4383 		if (peer && !peer->bss_peer) {
4384 			peer->stats.tx.inactive_time =
4385 				dp_stats_buf->inactive_time;
4386 			qdf_event_set(&pdev->fw_peer_stats_event);
4387 		}
4388 		if (peer)
4389 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4390 	}
4391 	break;
4392 	default:
4393 		qdf_err("Invalid tag_type");
4394 	}
4395 }
4396 
4397 /**
4398  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
4399  * @pdev: DP pdev handle
4400  * @fse_setup_info: FST setup parameters
4401  *
4402  * Return: Success when HTT message is sent, error on failure
4403  */
4404 QDF_STATUS
4405 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
4406 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
4407 {
4408 	struct htt_soc *soc = pdev->soc->htt_handle;
4409 	struct dp_htt_htc_pkt *pkt;
4410 	qdf_nbuf_t msg;
4411 	u_int32_t *msg_word;
4412 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4413 	uint8_t *htt_logger_bufp;
4414 	u_int32_t *key;
4415 	QDF_STATUS status;
4416 
4417 	msg = qdf_nbuf_alloc(
4418 		soc->osdev,
4419 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4420 		/* reserve room for the HTC header */
4421 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4422 
4423 	if (!msg)
4424 		return QDF_STATUS_E_NOMEM;
4425 
4426 	/*
4427 	 * Set the length of the message.
4428 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4429 	 * separately during the below call to qdf_nbuf_push_head.
4430 	 * The contribution from the HTC header is added separately inside HTC.
4431 	 */
4432 	if (!qdf_nbuf_put_tail(msg,
4433 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4434 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
4435 		return QDF_STATUS_E_FAILURE;
4436 	}
4437 
4438 	/* fill in the message contents */
4439 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4440 
4441 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4442 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4443 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4444 	htt_logger_bufp = (uint8_t *)msg_word;
4445 
4446 	*msg_word = 0;
4447 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4448 
4449 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4450 
4451 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4452 
4453 	msg_word++;
4454 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4455 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4456 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4457 					     fse_setup_info->ip_da_sa_prefix);
4458 
4459 	msg_word++;
4460 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4461 					  fse_setup_info->base_addr_lo);
4462 	msg_word++;
4463 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4464 					  fse_setup_info->base_addr_hi);
4465 
4466 	key = (u_int32_t *)fse_setup_info->hash_key;
4467 	fse_setup->toeplitz31_0 = *key++;
4468 	fse_setup->toeplitz63_32 = *key++;
4469 	fse_setup->toeplitz95_64 = *key++;
4470 	fse_setup->toeplitz127_96 = *key++;
4471 	fse_setup->toeplitz159_128 = *key++;
4472 	fse_setup->toeplitz191_160 = *key++;
4473 	fse_setup->toeplitz223_192 = *key++;
4474 	fse_setup->toeplitz255_224 = *key++;
4475 	fse_setup->toeplitz287_256 = *key++;
4476 	fse_setup->toeplitz314_288 = *key;
4477 
4478 	msg_word++;
4479 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4480 	msg_word++;
4481 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4482 	msg_word++;
4483 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4484 	msg_word++;
4485 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4486 	msg_word++;
4487 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4488 	msg_word++;
4489 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4490 	msg_word++;
4491 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4492 	msg_word++;
4493 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4494 	msg_word++;
4495 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4496 	msg_word++;
4497 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4498 					  fse_setup->toeplitz314_288);
4499 
4500 	pkt = htt_htc_pkt_alloc(soc);
4501 	if (!pkt) {
4502 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4503 		qdf_assert(0);
4504 		qdf_nbuf_free(msg);
4505 		return QDF_STATUS_E_RESOURCES; /* failure */
4506 	}
4507 
4508 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4509 
4510 	SET_HTC_PACKET_INFO_TX(
4511 		&pkt->htc_pkt,
4512 		dp_htt_h2t_send_complete_free_netbuf,
4513 		qdf_nbuf_data(msg),
4514 		qdf_nbuf_len(msg),
4515 		soc->htc_endpoint,
4516 		/* tag for no FW response msg */
4517 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4518 
4519 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4520 
4521 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4522 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4523 				     htt_logger_bufp);
4524 
4525 	if (status == QDF_STATUS_SUCCESS) {
4526 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4527 			fse_setup_info->pdev_id);
4528 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4529 				   (void *)fse_setup_info->hash_key,
4530 				   fse_setup_info->hash_key_len);
4531 	} else {
4532 		qdf_nbuf_free(msg);
4533 		htt_htc_pkt_free(soc, pkt);
4534 	}
4535 
4536 	return status;
4537 }
4538 
4539 /**
4540  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
4541  * add/del a flow in HW
4542  * @pdev: DP pdev handle
4543  * @fse_op_info: Flow entry parameters
4544  *
4545  * Return: Success when HTT message is sent, error on failure
4546  */
QDF_STATUS
dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	/* Zero the whole message before pushing the HTC pad; msg_word keeps
	 * pointing at the HTT message start.
	 */
	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);

	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;

	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
	msg_word++;
	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
	/* Single-entry invalidation additionally serializes the 5-tuple
	 * (src/dst IPv6-sized addresses, ports, L4 protocol) that
	 * identifies the flow to invalidate; the other op codes only
	 * need the operation field.
	 */
	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
		/* Source address, 4 x 32-bit words, network byte order */
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
		/* Destination address, 4 x 32-bit words */
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(
		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
		/* Ports and L4 protocol (host byte order per the macros) */
		msg_word++;
		HTT_RX_FSE_SOURCEPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.src_port);
		HTT_RX_FSE_DESTPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.dest_port);
		msg_word++;
		HTT_RX_FSE_L4_PROTO_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
	}

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(msg),
		qdf_nbuf_len(msg),
		soc->htc_endpoint,
		/* tag for no FW response msg */
		HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
				     htt_logger_bufp);

	/* On send failure the completion callback never runs; clean up */
	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
			fse_op_info->pdev_id);
	} else {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4687 
4688 /**
4689  * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
4690  * @pdev: DP pdev handle
4691  * @fse_op_info: Flow entry parameters
4692  *
4693  * Return: Success when HTT message is sent, error on failure
4694  */
4695 QDF_STATUS
4696 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
4697 		      struct dp_htt_rx_fisa_cfg *fisa_config)
4698 {
4699 	struct htt_soc *soc = pdev->soc->htt_handle;
4700 	struct dp_htt_htc_pkt *pkt;
4701 	qdf_nbuf_t msg;
4702 	u_int32_t *msg_word;
4703 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
4704 	uint8_t *htt_logger_bufp;
4705 	uint32_t len;
4706 	QDF_STATUS status;
4707 
4708 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
4709 
4710 	msg = qdf_nbuf_alloc(soc->osdev,
4711 			     len,
4712 			     /* reserve room for the HTC header */
4713 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
4714 			     4,
4715 			     TRUE);
4716 	if (!msg)
4717 		return QDF_STATUS_E_NOMEM;
4718 
4719 	/*
4720 	 * Set the length of the message.
4721 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4722 	 * separately during the below call to qdf_nbuf_push_head.
4723 	 * The contribution from the HTC header is added separately inside HTC.
4724 	 */
4725 	if (!qdf_nbuf_put_tail(msg,
4726 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
4727 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4728 		qdf_nbuf_free(msg);
4729 		return QDF_STATUS_E_FAILURE;
4730 	}
4731 
4732 	/* fill in the message contents */
4733 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4734 
4735 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
4736 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4737 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4738 	htt_logger_bufp = (uint8_t *)msg_word;
4739 
4740 	*msg_word = 0;
4741 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
4742 
4743 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
4744 
4745 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);
4746 
4747 	msg_word++;
4748 	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
4749 	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);
4750 
4751 	msg_word++;
4752 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
4753 
4754 	pkt = htt_htc_pkt_alloc(soc);
4755 	if (!pkt) {
4756 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4757 		qdf_assert(0);
4758 		qdf_nbuf_free(msg);
4759 		return QDF_STATUS_E_RESOURCES; /* failure */
4760 	}
4761 
4762 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4763 
4764 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4765 			       dp_htt_h2t_send_complete_free_netbuf,
4766 			       qdf_nbuf_data(msg),
4767 			       qdf_nbuf_len(msg),
4768 			       soc->htc_endpoint,
4769 			       /* tag for no FW response msg */
4770 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
4771 
4772 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4773 
4774 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
4775 				     htt_logger_bufp);
4776 
4777 	if (status == QDF_STATUS_SUCCESS) {
4778 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
4779 			fisa_config->pdev_id);
4780 	} else {
4781 		qdf_nbuf_free(msg);
4782 		htt_htc_pkt_free(soc, pkt);
4783 	}
4784 
4785 	return status;
4786 }
4787 
#ifdef WLAN_SUPPORT_PPEDS
/**
 * dp_htt_rxdma_rxole_ppe_cfg_set() - Send RxOLE and RxDMA PPE config
 * @soc: Data path SoC handle
 * @cfg: RxDMA and RxOLE PPE config
 *
 * Builds an HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG message from @cfg and
 * hands it to HTC. On send failure both the network buffer and the HTC
 * packet wrapper are released here; on success they are freed by the
 * send-done callback.
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rxdma_rxole_ppe_cfg_set(struct dp_soc *soc,
			       struct dp_htt_rxdma_rxole_ppe_config *cfg)
{
	struct htt_soc *htt_handle = soc->htt_handle;
	uint32_t len;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	QDF_STATUS status;
	uint8_t *htt_logger_bufp;
	struct dp_htt_htc_pkt *pkt;

	len = HTT_MSG_BUF_SIZE(
	      sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(
		msg, sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t))) {
		qdf_err("Failed to expand head for HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0,
	       sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));

	/* Rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	/* All PPE config fields below are packed into message word 0 */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG);
	HTT_PPE_CFG_OVERRIDE_SET(*msg_word, cfg->override);
	HTT_PPE_CFG_REO_DEST_IND_SET(
			*msg_word, cfg->reo_destination_indication);
	HTT_PPE_CFG_MULTI_BUF_MSDU_OVERRIDE_EN_SET(
			*msg_word, cfg->multi_buffer_msdu_override_en);
	HTT_PPE_CFG_INTRA_BSS_OVERRIDE_EN_SET(
			*msg_word, cfg->intra_bss_override);
	HTT_PPE_CFG_DECAP_RAW_OVERRIDE_EN_SET(
			*msg_word, cfg->decap_raw_override);
	HTT_PPE_CFG_DECAP_NWIFI_OVERRIDE_EN_SET(
			*msg_word, cfg->decap_nwifi_override);
	HTT_PPE_CFG_IP_FRAG_OVERRIDE_EN_SET(
			*msg_word, cfg->ip_frag_override);

	pkt = htt_htc_pkt_alloc(htt_handle);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       htt_handle->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(htt_handle, pkt,
				     HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		/* send failed: callback will not run, free here */
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(htt_handle, pkt);
		return status;
	}

	dp_info("HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG sent");
	return status;
}
#endif /* WLAN_SUPPORT_PPEDS */
4893 
4894 /**
4895  * dp_bk_pressure_stats_handler(): worker function to print back pressure
4896  *				   stats
4897  *
4898  * @context : argument to work function
4899  */
4900 static void dp_bk_pressure_stats_handler(void *context)
4901 {
4902 	struct dp_pdev *pdev = (struct dp_pdev *)context;
4903 	struct dp_soc_srngs_state *soc_srngs_state = NULL;
4904 	const char *ring_name;
4905 	int i;
4906 	struct dp_srng_ring_state *ring_state;
4907 	bool empty_flag;
4908 
4909 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4910 
4911 	/* Extract only first entry for printing in one work event */
4912 	if (pdev->bkp_stats.queue_depth &&
4913 	    !TAILQ_EMPTY(&pdev->bkp_stats.list)) {
4914 		soc_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
4915 		TAILQ_REMOVE(&pdev->bkp_stats.list, soc_srngs_state,
4916 			     list_elem);
4917 		pdev->bkp_stats.queue_depth--;
4918 	}
4919 
4920 	empty_flag = TAILQ_EMPTY(&pdev->bkp_stats.list);
4921 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4922 
4923 	if (soc_srngs_state) {
4924 		DP_PRINT_STATS("### BKP stats for seq_num %u START ###",
4925 			       soc_srngs_state->seq_num);
4926 		for (i = 0; i < soc_srngs_state->max_ring_id; i++) {
4927 			ring_state = &soc_srngs_state->ring_state[i];
4928 			ring_name = dp_srng_get_str_from_hal_ring_type
4929 						(ring_state->ring_type);
4930 			DP_PRINT_STATS("%s: SW:Head pointer = %d Tail Pointer = %d\n",
4931 				       ring_name,
4932 				       ring_state->sw_head,
4933 				       ring_state->sw_tail);
4934 
4935 			DP_PRINT_STATS("%s: HW:Head pointer = %d Tail Pointer = %d\n",
4936 				       ring_name,
4937 				       ring_state->hw_head,
4938 				       ring_state->hw_tail);
4939 		}
4940 
4941 		DP_PRINT_STATS("### BKP stats for seq_num %u COMPLETE ###",
4942 			       soc_srngs_state->seq_num);
4943 		qdf_mem_free(soc_srngs_state);
4944 	}
4945 	dp_print_napi_stats(pdev->soc);
4946 
4947 	/* Schedule work again if queue is not empty */
4948 	if (!empty_flag)
4949 		qdf_queue_work(0, pdev->bkp_stats.work_queue,
4950 			       &pdev->bkp_stats.work);
4951 }
4952 
4953 /*
4954  * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
4955  *				processing
4956  * @pdev: Datapath PDEV handle
4957  *
4958  */
4959 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev)
4960 {
4961 	struct dp_soc_srngs_state *ring_state, *ring_state_next;
4962 
4963 	if (!pdev->bkp_stats.work_queue)
4964 		return;
4965 
4966 	qdf_flush_workqueue(0, pdev->bkp_stats.work_queue);
4967 	qdf_destroy_workqueue(0, pdev->bkp_stats.work_queue);
4968 	qdf_flush_work(&pdev->bkp_stats.work);
4969 	qdf_disable_work(&pdev->bkp_stats.work);
4970 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4971 	TAILQ_FOREACH_SAFE(ring_state, &pdev->bkp_stats.list,
4972 			   list_elem, ring_state_next) {
4973 		TAILQ_REMOVE(&pdev->bkp_stats.list, ring_state,
4974 			     list_elem);
4975 		qdf_mem_free(ring_state);
4976 	}
4977 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4978 	qdf_spinlock_destroy(&pdev->bkp_stats.list_lock);
4979 }
4980 
4981 /*
4982  * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
4983  *				processing
4984  * @pdev: Datapath PDEV handle
4985  *
4986  * Return: QDF_STATUS_SUCCESS: Success
4987  *         QDF_STATUS_E_NOMEM: Error
4988  */
4989 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev)
4990 {
4991 	TAILQ_INIT(&pdev->bkp_stats.list);
4992 	pdev->bkp_stats.seq_num = 0;
4993 	pdev->bkp_stats.queue_depth = 0;
4994 
4995 	qdf_create_work(0, &pdev->bkp_stats.work,
4996 			dp_bk_pressure_stats_handler, pdev);
4997 
4998 	pdev->bkp_stats.work_queue =
4999 		qdf_alloc_unbound_workqueue("dp_bkp_work_queue");
5000 	if (!pdev->bkp_stats.work_queue)
5001 		goto fail;
5002 
5003 	qdf_spinlock_create(&pdev->bkp_stats.list_lock);
5004 	return QDF_STATUS_SUCCESS;
5005 
5006 fail:
5007 	dp_htt_alert("BKP stats attach failed");
5008 	qdf_flush_work(&pdev->bkp_stats.work);
5009 	qdf_disable_work(&pdev->bkp_stats.work);
5010 	return QDF_STATUS_E_FAILURE;
5011 }
5012 
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_htt_umac_reset_send_setup_cmd() - Send the UMAC hang recovery
 *	prerequisite-setup HTT message to the target
 * @soc: DP SoC handle
 * @setup_params: MSI data and shared-memory address (low/high words)
 *		  used for the UMAC hang recovery handshake
 *
 * Builds a fixed-layout HTT message (type word followed by MSI data,
 * shmem size and shmem address low/high) and sends it via HTC. On send
 * failure the message buffer and HTC packet are freed here; on success
 * they are released by the send-done callback.
 *
 * Return: QDF_STATUS_SUCCESS when the message was handed to HTC,
 *	   error code otherwise
 */
QDF_STATUS dp_htt_umac_reset_send_setup_cmd(
		struct dp_soc *soc,
		const struct dp_htt_umac_reset_setup_cmd_params *setup_params)
{
	struct htt_soc *htt_handle = soc->htt_handle;
	uint32_t len;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	QDF_STATUS status;
	uint8_t *htt_logger_bufp;
	struct dp_htt_htc_pkt *pkt;

	len = HTT_MSG_BUF_SIZE(
		HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES);

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(
		msg, HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES)) {
		dp_htt_err("Failed to expand head");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	/* Rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	qdf_mem_zero(msg_word,
		     HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES);

	/* Word 0: message type plus T2H/H2T signaling methods */
	HTT_H2T_MSG_TYPE_SET(
		*msg_word,
		HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP);
	HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_T2H_MSG_METHOD_SET(
		*msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling);
	HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_H2T_MSG_METHOD_SET(
		*msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling);

	/* Word 1: MSI data the FW uses to interrupt the host */
	msg_word++;
	*msg_word = setup_params->msi_data;

	/* Word 2: size of the shared-memory handshake region */
	msg_word++;
	*msg_word = sizeof(htt_umac_hang_recovery_msg_shmem_t);

	/* Words 3-4: shared-memory region physical address, low then high */
	msg_word++;
	*msg_word = setup_params->shmem_addr_low;

	msg_word++;
	*msg_word = setup_params->shmem_addr_high;

	pkt = htt_htc_pkt_alloc(htt_handle);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       htt_handle->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(
			htt_handle, pkt,
			HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP,
			htt_logger_bufp);

	if (QDF_IS_STATUS_ERROR(status)) {
		/* send failed: callback will not run, free here */
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(htt_handle, pkt);
		return status;
	}

	dp_info("HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP sent");
	return status;
}
#endif
5116