xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <htt.h>
21 #include <hal_hw_headers.h>
22 #include <hal_api.h>
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "dp_htt.h"
30 #ifdef WIFI_MONITOR_SUPPORT
31 #include <dp_mon.h>
32 #endif
33 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
34 #include "cdp_txrx_cmn_struct.h"
35 
36 #ifdef FEATURE_PERPKT_INFO
37 #include "dp_ratetable.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef CONFIG_SAWF_DEF_QUEUES
41 #include <dp_sawf_htt.h>
42 #endif
43 
44 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
45 
46 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
47 
48 #define HTT_MSG_BUF_SIZE(msg_bytes) \
49 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
50 
51 #define HTT_PID_BIT_MASK 0x3
52 
53 #define DP_EXT_MSG_LENGTH 2048
54 #define HTT_HEADER_LEN 16
55 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
56 
57 #define HTT_SHIFT_UPPER_TIMESTAMP 32
58 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
59 #define HTT_BKP_STATS_MAX_QUEUE_DEPTH 16
60 
61 struct dp_htt_htc_pkt *
62 htt_htc_pkt_alloc(struct htt_soc *soc)
63 {
64 	struct dp_htt_htc_pkt_union *pkt = NULL;
65 
66 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
67 	if (soc->htt_htc_pkt_freelist) {
68 		pkt = soc->htt_htc_pkt_freelist;
69 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
70 	}
71 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
72 
73 	if (!pkt)
74 		pkt = qdf_mem_malloc(sizeof(*pkt));
75 
76 	if (!pkt)
77 		return NULL;
78 
79 	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
80 
81 	return &pkt->u.pkt; /* not actually a dereference */
82 }
83 
84 qdf_export_symbol(htt_htc_pkt_alloc);
85 
86 void
87 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
88 {
89 	struct dp_htt_htc_pkt_union *u_pkt =
90 		(struct dp_htt_htc_pkt_union *)pkt;
91 
92 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
93 	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
94 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
95 	soc->htt_htc_pkt_freelist = u_pkt;
96 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
97 }
98 
99 qdf_export_symbol(htt_htc_pkt_free);
100 
101 /*
102  * htt_htc_pkt_pool_free() - Free HTC packet pool
103  * @htt_soc:	HTT SOC handle
104  */
105 void
106 htt_htc_pkt_pool_free(struct htt_soc *soc)
107 {
108 	struct dp_htt_htc_pkt_union *pkt, *next;
109 	pkt = soc->htt_htc_pkt_freelist;
110 	while (pkt) {
111 		next = pkt->u.next;
112 		qdf_mem_free(pkt);
113 		pkt = next;
114 	}
115 	soc->htt_htc_pkt_freelist = NULL;
116 }
117 
118 
119 #ifndef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
120 
/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no. of pkts in list
 *
 * Frees every packet beyond the first @level entries of the misclist.
 * When the first excess node is freed, its predecessor's next pointer is
 * cleared, which detaches the entire tail in one step; the remaining loop
 * iterations then free the already-detached tail nodes one by one.
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			/* unmap and free the nbuf the HTC packet referenced */
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			/* detach the tail from the surviving head portion;
			 * only non-NULL on the first trimmed node
			 */
			if (prev)
				prev->u.next = NULL;
		}
		/* once trimming starts, prev becomes NULL (pkt was cleared),
		 * so the detach above happens exactly once
		 */
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
153 
154 /*
155  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
156  * @htt_soc:	HTT SOC handle
157  * @dp_htt_htc_pkt: pkt to be added to list
158  */
159 void
160 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
161 {
162 	struct dp_htt_htc_pkt_union *u_pkt =
163 				(struct dp_htt_htc_pkt_union *)pkt;
164 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
165 							pkt->htc_pkt.Endpoint)
166 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
167 
168 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
169 	if (soc->htt_htc_pkt_misclist) {
170 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
171 		soc->htt_htc_pkt_misclist = u_pkt;
172 	} else {
173 		soc->htt_htc_pkt_misclist = u_pkt;
174 	}
175 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
176 
177 	/* only ce pipe size + tx_queue_depth could possibly be in use
178 	 * free older packets in the misclist
179 	 */
180 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
181 }
182 
183 qdf_export_symbol(htt_htc_misc_pkt_list_add);
184 #endif  /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
185 
186 /*
187  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
188  * @htt_soc:	HTT SOC handle
189  */
190 static void
191 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
192 {
193 	struct dp_htt_htc_pkt_union *pkt, *next;
194 	qdf_nbuf_t netbuf;
195 
196 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
197 	pkt = soc->htt_htc_pkt_misclist;
198 
199 	while (pkt) {
200 		next = pkt->u.next;
201 		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
202 		    HTC_PACKET_MAGIC_COOKIE) {
203 			pkt = next;
204 			soc->stats.skip_count++;
205 			continue;
206 		}
207 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
208 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
209 
210 		soc->stats.htc_pkt_free++;
211 		dp_htt_info("%pK: Pkt free count %d",
212 			    soc->dp_soc, soc->stats.htc_pkt_free);
213 
214 		qdf_nbuf_free(netbuf);
215 		qdf_mem_free(pkt);
216 		pkt = next;
217 	}
218 	soc->htt_htc_pkt_misclist = NULL;
219 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
220 	dp_info("HTC Packets, fail count = %d, skip count = %d",
221 		soc->stats.fail_count, soc->stats.skip_count);
222 }
223 
/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
 * @tgt_mac_addr:	Target MAC
 * @buffer:		Output buffer
 *
 * Return: pointer to the usable MAC address — @buffer on big-endian hosts
 * (deswizzled copy), @tgt_mac_addr itself otherwise.
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness.
	 * To make u_int32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each u_int32_t element of the
	 * message.
	 * For byte-array message fields like the MAC address, this
	 * upload swizzling puts the bytes in the wrong order, and needs
	 * to be undone.
	 */
	/* MAC bytes 0-3 sit in the first 32-bit word (reversed);
	 * bytes 4-5 sit in positions 7 and 6 of the second word.
	 */
	buffer[0] = tgt_mac_addr[3];
	buffer[1] = tgt_mac_addr[2];
	buffer[2] = tgt_mac_addr[1];
	buffer[3] = tgt_mac_addr[0];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
257 
/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc:	SOC handle (unused)
 * @status:	Completion status (unused; buffer is freed unconditionally)
 * @netbuf:	HTT buffer
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
270 
271 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
272 /*
273  * dp_htt_h2t_send_complete() - H2T completion handler
274  * @context:	Opaque context (HTT SOC handle)
275  * @htc_pkt:	HTC packet
276  */
277 static void
278 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
279 {
280 	struct htt_soc *soc =  (struct htt_soc *) context;
281 	struct dp_htt_htc_pkt *htt_pkt;
282 	qdf_nbuf_t netbuf;
283 
284 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
285 
286 	/* process (free or keep) the netbuf that held the message */
287 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
288 	/*
289 	 * adf sendcomplete is required for windows only
290 	 */
291 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
292 	/* free the htt_htc_pkt / HTC_PACKET object */
293 	qdf_nbuf_free(netbuf);
294 	htt_htc_pkt_free(soc, htt_pkt);
295 }
296 
297 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
298 
299 /*
300  *  * dp_htt_h2t_send_complete() - H2T completion handler
301  *   * @context:    Opaque context (HTT SOC handle)
302  *    * @htc_pkt:    HTC packet
303  *     */
304 static void
305 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
306 {
307 	void (*send_complete_part2)(
308 	     void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
309 	struct htt_soc *soc =  (struct htt_soc *) context;
310 	struct dp_htt_htc_pkt *htt_pkt;
311 	qdf_nbuf_t netbuf;
312 
313 	send_complete_part2 = htc_pkt->pPktContext;
314 
315 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
316 
317 	/* process (free or keep) the netbuf that held the message */
318 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
319 	/*
320 	 * adf sendcomplete is required for windows only
321 	*/
322 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
323 	if (send_complete_part2){
324 		send_complete_part2(
325 		    htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
326 	}
327 	/* free the htt_htc_pkt / HTC_PACKET object */
328 	htt_htc_pkt_free(soc, htt_pkt);
329 }
330 
331 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
332 
333 /*
334  * dp_htt_h2t_add_tcl_metadata_ver_v1() - Add tcl_metadata version V1
335  * @htt_soc:	HTT SOC handle
336  * @msg:	Pointer to nbuf
337  *
338  * Return: 0 on success; error code on failure
339  */
340 static int dp_htt_h2t_add_tcl_metadata_ver_v1(struct htt_soc *soc,
341 					      qdf_nbuf_t *msg)
342 {
343 	uint32_t *msg_word;
344 
345 	*msg = qdf_nbuf_alloc(
346 		soc->osdev,
347 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
348 		/* reserve room for the HTC header */
349 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
350 	if (!*msg)
351 		return QDF_STATUS_E_NOMEM;
352 
353 	/*
354 	 * Set the length of the message.
355 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
356 	 * separately during the below call to qdf_nbuf_push_head.
357 	 * The contribution from the HTC header is added separately inside HTC.
358 	 */
359 	if (!qdf_nbuf_put_tail(*msg, HTT_VER_REQ_BYTES)) {
360 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
361 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
362 			  __func__);
363 		return QDF_STATUS_E_FAILURE;
364 	}
365 
366 	/* fill in the message contents */
367 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
368 
369 	/* rewind beyond alignment pad to get to the HTC header reserved area */
370 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
371 
372 	*msg_word = 0;
373 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
374 
375 	return QDF_STATUS_SUCCESS;
376 }
377 
378 #ifdef QCA_DP_TX_FW_METADATA_V2
379 /*
380  * dp_htt_h2t_add_tcl_metadata_ver_v2() - Add tcl_metadata version V2
381  * @htt_soc:	HTT SOC handle
382  * @msg:	Pointer to nbuf
383  *
384  * Return: 0 on success; error code on failure
385  */
386 static int dp_htt_h2t_add_tcl_metadata_ver_v2(struct htt_soc *soc,
387 					      qdf_nbuf_t *msg)
388 {
389 	uint32_t *msg_word;
390 
391 	*msg = qdf_nbuf_alloc(
392 		soc->osdev,
393 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ),
394 		/* reserve room for the HTC header */
395 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
396 	if (!*msg)
397 		return QDF_STATUS_E_NOMEM;
398 
399 	/*
400 	 * Set the length of the message.
401 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
402 	 * separately during the below call to qdf_nbuf_push_head.
403 	 * The contribution from the HTC header is added separately inside HTC.
404 	 */
405 	if (!qdf_nbuf_put_tail(*msg,
406 			       HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ)) {
407 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
408 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
409 			  __func__);
410 		return QDF_STATUS_E_FAILURE;
411 	}
412 
413 	/* fill in the message contents */
414 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
415 
416 	/* rewind beyond alignment pad to get to the HTC header reserved area */
417 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
418 
419 	*msg_word = 0;
420 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
421 
422 	/* word 1 */
423 	msg_word++;
424 	*msg_word = 0;
425 	HTT_OPTION_TLV_TAG_SET(*msg_word, HTT_OPTION_TLV_TAG_TCL_METADATA_VER);
426 	HTT_OPTION_TLV_LENGTH_SET(*msg_word, HTT_TCL_METADATA_VER_SZ);
427 	HTT_OPTION_TLV_TCL_METADATA_VER_SET(*msg_word,
428 					    HTT_OPTION_TLV_TCL_METADATA_V2);
429 
430 	return QDF_STATUS_SUCCESS;
431 }
432 
433 /*
434  * dp_htt_h2t_add_tcl_metadata_ver() - Add tcl_metadata version
435  * @htt_soc:	HTT SOC handle
436  * @msg:	Pointer to nbuf
437  *
438  * Return: 0 on success; error code on failure
439  */
440 static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
441 {
442 	/* Use tcl_metadata_v1 when NSS offload is enabled */
443 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->dp_soc->wlan_cfg_ctx) ||
444 	    soc->dp_soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
445 		return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
446 	else
447 		return dp_htt_h2t_add_tcl_metadata_ver_v2(soc, msg);
448 }
449 #else
/* TCL metadata v2 support not compiled in: always request v1 */
static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
{
	return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
}
454 #endif
455 
/*
 * htt_h2t_ver_req_msg() - Send HTT version request message to target
 * @soc:	HTT SOC handle
 *
 * Builds a VERSION_REQ message (with v1 or v2 TCL metadata option),
 * wraps it in an HTC packet, and submits it.  On submit failure both the
 * message nbuf and the HTC packet wrapper are reclaimed here.
 *
 * Return: 0 on success; error code on failure
 */
static int htt_h2t_ver_req_msg(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg = NULL;
	QDF_STATUS status;

	/* build the VERSION_REQ message body into msg */
	status = dp_htt_h2t_add_tcl_metadata_ver(soc, &msg);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}
	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
		qdf_nbuf_len(msg), soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RTPM_PUT_RC);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
				     NULL);

	/* submit failed: the completion callback will not run, free here */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
495 
496 /*
497  * htt_srng_setup() - Send SRNG setup message to target
498  * @htt_soc:	HTT SOC handle
499  * @mac_id:	MAC Id
500  * @hal_srng:	Opaque HAL SRNG pointer
501  * @hal_ring_type:	SRNG ring type
502  *
503  * Return: 0 on success; error code on failure
504  */
505 int htt_srng_setup(struct htt_soc *soc, int mac_id,
506 		   hal_ring_handle_t hal_ring_hdl,
507 		   int hal_ring_type)
508 {
509 	struct dp_htt_htc_pkt *pkt;
510 	qdf_nbuf_t htt_msg;
511 	uint32_t *msg_word;
512 	struct hal_srng_params srng_params;
513 	qdf_dma_addr_t hp_addr, tp_addr;
514 	uint32_t ring_entry_size =
515 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
516 	int htt_ring_type, htt_ring_id;
517 	uint8_t *htt_logger_bufp;
518 	int target_pdev_id;
519 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
520 	QDF_STATUS status;
521 
522 	/* Sizes should be set in 4-byte words */
523 	ring_entry_size = ring_entry_size >> 2;
524 
525 	htt_msg = qdf_nbuf_alloc(soc->osdev,
526 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
527 		/* reserve room for the HTC header */
528 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
529 	if (!htt_msg) {
530 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
531 		goto fail0;
532 	}
533 
534 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
535 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
536 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
537 
538 	switch (hal_ring_type) {
539 	case RXDMA_BUF:
540 #ifdef QCA_HOST2FW_RXBUF_RING
541 		if (srng_params.ring_id ==
542 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
543 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
544 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
545 			htt_ring_type = HTT_SW_TO_SW_RING;
546 #ifdef IPA_OFFLOAD
547 		} else if (srng_params.ring_id ==
548 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 +
549 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
550 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
551 			htt_ring_type = HTT_SW_TO_SW_RING;
552 #ifdef IPA_WDI3_VLAN_SUPPORT
553 		} else if (srng_params.ring_id ==
554 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF3 +
555 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
556 			htt_ring_id = HTT_HOST3_TO_FW_RXBUF_RING;
557 			htt_ring_type = HTT_SW_TO_SW_RING;
558 #endif
559 #endif
560 #else
561 		if (srng_params.ring_id ==
562 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
563 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
564 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
565 			htt_ring_type = HTT_SW_TO_HW_RING;
566 #endif
567 		} else if (srng_params.ring_id ==
568 #ifdef IPA_OFFLOAD
569 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
570 #else
571 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
572 #endif
573 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
574 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
575 			htt_ring_type = HTT_SW_TO_HW_RING;
576 #ifdef FEATURE_DIRECT_LINK
577 		} else if (srng_params.ring_id ==
578 			   (HAL_SRNG_WMAC1_RX_DIRECT_LINK_SW_REFILL_RING +
579 			    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
580 			htt_ring_id = HTT_LPASS_TO_FW_RXBUF_RING;
581 			htt_ring_type = HTT_SW_TO_SW_RING;
582 #endif
583 		} else {
584 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
585 				   "%s: Ring %d currently not supported",
586 				   __func__, srng_params.ring_id);
587 			goto fail1;
588 		}
589 
590 		break;
591 	case RXDMA_MONITOR_BUF:
592 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
593 							 RXDMA_MONITOR_BUF);
594 		htt_ring_type = HTT_SW_TO_HW_RING;
595 		break;
596 	case RXDMA_MONITOR_STATUS:
597 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
598 		htt_ring_type = HTT_SW_TO_HW_RING;
599 		break;
600 	case RXDMA_MONITOR_DST:
601 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
602 							 RXDMA_MONITOR_DST);
603 		htt_ring_type = HTT_HW_TO_SW_RING;
604 		break;
605 	case RXDMA_MONITOR_DESC:
606 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
607 		htt_ring_type = HTT_SW_TO_HW_RING;
608 		break;
609 	case RXDMA_DST:
610 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
611 		htt_ring_type = HTT_HW_TO_SW_RING;
612 		break;
613 	case TX_MONITOR_BUF:
614 		htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
615 		htt_ring_type = HTT_SW_TO_HW_RING;
616 		break;
617 	case TX_MONITOR_DST:
618 		htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
619 		htt_ring_type = HTT_HW_TO_SW_RING;
620 		break;
621 
622 	default:
623 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
624 			"%s: Ring currently not supported", __func__);
625 			goto fail1;
626 	}
627 
628 	dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
629 		hal_ring_type, srng_params.ring_id, htt_ring_id,
630 		(uint64_t)hp_addr,
631 		(uint64_t)tp_addr);
632 	/*
633 	 * Set the length of the message.
634 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
635 	 * separately during the below call to qdf_nbuf_push_head.
636 	 * The contribution from the HTC header is added separately inside HTC.
637 	 */
638 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
639 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
640 			"%s: Failed to expand head for SRING_SETUP msg",
641 			__func__);
642 		return QDF_STATUS_E_FAILURE;
643 	}
644 
645 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
646 
647 	/* rewind beyond alignment pad to get to the HTC header reserved area */
648 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
649 
650 	/* word 0 */
651 	*msg_word = 0;
652 	htt_logger_bufp = (uint8_t *)msg_word;
653 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
654 	target_pdev_id =
655 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
656 
657 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
658 			(htt_ring_type == HTT_HW_TO_SW_RING))
659 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
660 	else
661 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
662 
663 	dp_info("mac_id %d", mac_id);
664 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
665 	/* TODO: Discuss with FW on changing this to unique ID and using
666 	 * htt_ring_type to send the type of ring
667 	 */
668 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
669 
670 	/* word 1 */
671 	msg_word++;
672 	*msg_word = 0;
673 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
674 		srng_params.ring_base_paddr & 0xffffffff);
675 
676 	/* word 2 */
677 	msg_word++;
678 	*msg_word = 0;
679 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
680 		(uint64_t)srng_params.ring_base_paddr >> 32);
681 
682 	/* word 3 */
683 	msg_word++;
684 	*msg_word = 0;
685 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
686 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
687 		(ring_entry_size * srng_params.num_entries));
688 	dp_info("entry_size %d", ring_entry_size);
689 	dp_info("num_entries %d", srng_params.num_entries);
690 	dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
691 	if (htt_ring_type == HTT_SW_TO_HW_RING)
692 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
693 						*msg_word, 1);
694 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
695 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
696 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
697 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
698 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
699 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
700 
701 	/* word 4 */
702 	msg_word++;
703 	*msg_word = 0;
704 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
705 		hp_addr & 0xffffffff);
706 
707 	/* word 5 */
708 	msg_word++;
709 	*msg_word = 0;
710 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
711 		(uint64_t)hp_addr >> 32);
712 
713 	/* word 6 */
714 	msg_word++;
715 	*msg_word = 0;
716 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
717 		tp_addr & 0xffffffff);
718 
719 	/* word 7 */
720 	msg_word++;
721 	*msg_word = 0;
722 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
723 		(uint64_t)tp_addr >> 32);
724 
725 	/* word 8 */
726 	msg_word++;
727 	*msg_word = 0;
728 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
729 		srng_params.msi_addr & 0xffffffff);
730 
731 	/* word 9 */
732 	msg_word++;
733 	*msg_word = 0;
734 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
735 		(uint64_t)(srng_params.msi_addr) >> 32);
736 
737 	/* word 10 */
738 	msg_word++;
739 	*msg_word = 0;
740 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
741 		qdf_cpu_to_le32(srng_params.msi_data));
742 
743 	/* word 11 */
744 	msg_word++;
745 	*msg_word = 0;
746 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
747 		srng_params.intr_batch_cntr_thres_entries *
748 		ring_entry_size);
749 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
750 		srng_params.intr_timer_thres_us >> 3);
751 
752 	/* word 12 */
753 	msg_word++;
754 	*msg_word = 0;
755 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
756 		/* TODO: Setting low threshold to 1/8th of ring size - see
757 		 * if this needs to be configurable
758 		 */
759 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
760 			srng_params.low_threshold);
761 	}
762 	/* "response_required" field should be set if a HTT response message is
763 	 * required after setting up the ring.
764 	 */
765 	pkt = htt_htc_pkt_alloc(soc);
766 	if (!pkt) {
767 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
768 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
769 		goto fail1;
770 	}
771 
772 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
773 
774 	SET_HTC_PACKET_INFO_TX(
775 		&pkt->htc_pkt,
776 		dp_htt_h2t_send_complete_free_netbuf,
777 		qdf_nbuf_data(htt_msg),
778 		qdf_nbuf_len(htt_msg),
779 		soc->htc_endpoint,
780 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
781 
782 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
783 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
784 				     htt_logger_bufp);
785 
786 	if (status != QDF_STATUS_SUCCESS) {
787 		qdf_nbuf_free(htt_msg);
788 		htt_htc_pkt_free(soc, pkt);
789 	}
790 
791 	return status;
792 
793 fail1:
794 	qdf_nbuf_free(htt_msg);
795 fail0:
796 	return QDF_STATUS_E_FAILURE;
797 }
798 
799 qdf_export_symbol(htt_srng_setup);
800 
801 #ifdef QCA_SUPPORT_FULL_MON
802 /**
803  * htt_h2t_full_mon_cfg() - Send full monitor configuration msg to FW
804  *
805  * @htt_soc: HTT Soc handle
806  * @pdev_id: Radio id
807  * @dp_full_mon_config: enabled/disable configuration
808  *
809  * Return: Success when HTT message is sent, error on failure
810  */
811 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
812 			 uint8_t pdev_id,
813 			 enum dp_full_mon_config config)
814 {
815 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
816 	struct dp_htt_htc_pkt *pkt;
817 	qdf_nbuf_t htt_msg;
818 	uint32_t *msg_word;
819 	uint8_t *htt_logger_bufp;
820 
821 	htt_msg = qdf_nbuf_alloc(soc->osdev,
822 				 HTT_MSG_BUF_SIZE(
823 				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
824 				 /* reserve room for the HTC header */
825 				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
826 				 4,
827 				 TRUE);
828 	if (!htt_msg)
829 		return QDF_STATUS_E_FAILURE;
830 
831 	/*
832 	 * Set the length of the message.
833 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
834 	 * separately during the below call to qdf_nbuf_push_head.
835 	 * The contribution from the HTC header is added separately inside HTC.
836 	 */
837 	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
838 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
839 			  "%s: Failed to expand head for RX Ring Cfg msg",
840 			  __func__);
841 		goto fail1;
842 	}
843 
844 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
845 
846 	/* rewind beyond alignment pad to get to the HTC header reserved area */
847 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
848 
849 	/* word 0 */
850 	*msg_word = 0;
851 	htt_logger_bufp = (uint8_t *)msg_word;
852 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
853 	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
854 			*msg_word, DP_SW2HW_MACID(pdev_id));
855 
856 	msg_word++;
857 	*msg_word = 0;
858 	/* word 1 */
859 	if (config == DP_FULL_MON_ENABLE) {
860 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
861 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
862 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
863 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
864 	} else if (config == DP_FULL_MON_DISABLE) {
865 		/* As per MAC team's suggestion, While disabling full monitor
866 		 * mode, Set 'en' bit to true in full monitor mode register.
867 		 */
868 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
869 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
870 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
871 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
872 	}
873 
874 	pkt = htt_htc_pkt_alloc(soc);
875 	if (!pkt) {
876 		qdf_err("HTC packet allocation failed");
877 		goto fail1;
878 	}
879 
880 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
881 
882 	SET_HTC_PACKET_INFO_TX(
883 		&pkt->htc_pkt,
884 		dp_htt_h2t_send_complete_free_netbuf,
885 		qdf_nbuf_data(htt_msg),
886 		qdf_nbuf_len(htt_msg),
887 		soc->htc_endpoint,
888 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
889 
890 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
891 	qdf_debug("config: %d", config);
892 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
893 			    htt_logger_bufp);
894 	return QDF_STATUS_SUCCESS;
895 fail1:
896 	qdf_nbuf_free(htt_msg);
897 	return QDF_STATUS_E_FAILURE;
898 }
899 
900 qdf_export_symbol(htt_h2t_full_mon_cfg);
901 #else
/* Stub used when QCA_SUPPORT_FULL_MON is not compiled in: reports
 * success without sending any HTT message.
 */
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	return 0;
}
909 qdf_export_symbol(htt_h2t_full_mon_cfg);
910 #endif
911 
912 #ifdef QCA_UNDECODED_METADATA_SUPPORT
/*
 * dp_mon_rx_enable_phy_errors() - program FP PHY error filters into the
 * rx ring selection cfg message
 * @msg_word: cursor into the message buffer; assumed to point at the word
 *	holding the FP_PHY_ERR fields, with the two PHY error mask words
 *	following it — TODO confirm against the caller's layout
 * @htt_tlv_filter: filter settings; only applied when
 *	phy_err_filter_valid is set
 *
 * Note: the pointer increments below advance only this function's local
 * copy; the caller's cursor is unaffected.
 */
static inline void
dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
	if (htt_tlv_filter->phy_err_filter_valid) {
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_SET
			(*msg_word, htt_tlv_filter->fp_phy_err);
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_SRC_SET
			(*msg_word, htt_tlv_filter->fp_phy_err_buf_src);
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_DEST_SET
			(*msg_word, htt_tlv_filter->fp_phy_err_buf_dest);

		/* word 12*/
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_SET
			(*msg_word, htt_tlv_filter->phy_err_mask);

		/* word 13*/
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_CONT_SET
			(*msg_word, htt_tlv_filter->phy_err_mask_cont);
	}
}
938 #else
/* Stub: QCA_UNDECODED_METADATA_SUPPORT not compiled in; PHY error
 * filter programming is a no-op.
 */
static inline void
dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
}
944 #endif
945 
946 /*
947  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
948  * config message to target
949  * @htt_soc:	HTT SOC handle
950  * @pdev_id:	WIN- PDEV Id, MCL- mac id
951  * @hal_srng:	Opaque HAL SRNG pointer
952  * @hal_ring_type:	SRNG ring type
953  * @ring_buf_size:	SRNG buffer size
954  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
955  * Return: 0 on success; error code on failure
956  */
957 int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
958 			hal_ring_handle_t hal_ring_hdl,
959 			int hal_ring_type, int ring_buf_size,
960 			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
961 {
962 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
963 	struct dp_htt_htc_pkt *pkt;
964 	qdf_nbuf_t htt_msg;
965 	uint32_t *msg_word;
966 	uint32_t *msg_word_data;
967 	struct hal_srng_params srng_params;
968 	uint32_t htt_ring_type, htt_ring_id;
969 	uint32_t tlv_filter;
970 	uint8_t *htt_logger_bufp;
971 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
972 	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
973 	int target_pdev_id;
974 	QDF_STATUS status;
975 
976 	htt_msg = qdf_nbuf_alloc(soc->osdev,
977 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
978 	/* reserve room for the HTC header */
979 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
980 	if (!htt_msg) {
981 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
982 		goto fail0;
983 	}
984 
985 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
986 
987 	switch (hal_ring_type) {
988 	case RXDMA_BUF:
989 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
990 		htt_ring_type = HTT_SW_TO_HW_RING;
991 		break;
992 	case RXDMA_MONITOR_BUF:
993 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
994 							 RXDMA_MONITOR_BUF);
995 		htt_ring_type = HTT_SW_TO_HW_RING;
996 		break;
997 	case RXDMA_MONITOR_STATUS:
998 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
999 		htt_ring_type = HTT_SW_TO_HW_RING;
1000 		break;
1001 	case RXDMA_MONITOR_DST:
1002 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
1003 							 RXDMA_MONITOR_DST);
1004 		htt_ring_type = HTT_HW_TO_SW_RING;
1005 		break;
1006 	case RXDMA_MONITOR_DESC:
1007 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
1008 		htt_ring_type = HTT_SW_TO_HW_RING;
1009 		break;
1010 	case RXDMA_DST:
1011 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
1012 		htt_ring_type = HTT_HW_TO_SW_RING;
1013 		break;
1014 
1015 	default:
1016 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1017 			"%s: Ring currently not supported", __func__);
1018 		goto fail1;
1019 	}
1020 
1021 	dp_info("ring_type %d ring_id %d htt_ring_id %d",
1022 		hal_ring_type, srng_params.ring_id, htt_ring_id);
1023 
1024 	/*
1025 	 * Set the length of the message.
1026 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1027 	 * separately during the below call to qdf_nbuf_push_head.
1028 	 * The contribution from the HTC header is added separately inside HTC.
1029 	 */
1030 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
1031 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1032 			"%s: Failed to expand head for RX Ring Cfg msg",
1033 			__func__);
1034 		goto fail1; /* failure */
1035 	}
1036 
1037 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1038 
1039 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1040 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1041 
1042 	/* word 0 */
1043 	htt_logger_bufp = (uint8_t *)msg_word;
1044 	*msg_word = 0;
1045 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
1046 
1047 	/* applicable only for post Li */
1048 	dp_rx_mon_enable(soc->dp_soc, msg_word, htt_tlv_filter);
1049 
1050 	/*
1051 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
1052 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
1053 	 */
1054 	target_pdev_id =
1055 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);
1056 
1057 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
1058 			htt_ring_type == HTT_SW_TO_HW_RING ||
1059 			htt_ring_type == HTT_HW_TO_SW_RING)
1060 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
1061 						      target_pdev_id);
1062 
1063 	/* TODO: Discuss with FW on changing this to unique ID and using
1064 	 * htt_ring_type to send the type of ring
1065 	 */
1066 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
1067 
1068 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
1069 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
1070 
1071 	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
1072 						htt_tlv_filter->offset_valid);
1073 
1074 	if (mon_drop_th > 0)
1075 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1076 								   1);
1077 	else
1078 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1079 								   0);
1080 
1081 	/* word 1 */
1082 	msg_word++;
1083 	*msg_word = 0;
1084 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
1085 		ring_buf_size);
1086 
1087 	dp_mon_rx_packet_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
1088 	dp_mon_rx_hdr_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
1089 	dp_mon_rx_mac_filter_set(soc->dp_soc, msg_word, htt_tlv_filter);
1090 
1091 	/* word 2 */
1092 	msg_word++;
1093 	*msg_word = 0;
1094 
1095 	if (htt_tlv_filter->enable_fp) {
1096 		/* TYPE: MGMT */
1097 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1098 			FP, MGMT, 0000,
1099 			(htt_tlv_filter->fp_mgmt_filter &
1100 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1101 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1102 			FP, MGMT, 0001,
1103 			(htt_tlv_filter->fp_mgmt_filter &
1104 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1105 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1106 			FP, MGMT, 0010,
1107 			(htt_tlv_filter->fp_mgmt_filter &
1108 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1109 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1110 			FP, MGMT, 0011,
1111 			(htt_tlv_filter->fp_mgmt_filter &
1112 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1113 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1114 			FP, MGMT, 0100,
1115 			(htt_tlv_filter->fp_mgmt_filter &
1116 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1117 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1118 			FP, MGMT, 0101,
1119 			(htt_tlv_filter->fp_mgmt_filter &
1120 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1121 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1122 			FP, MGMT, 0110,
1123 			(htt_tlv_filter->fp_mgmt_filter &
1124 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1125 		/* reserved */
1126 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
1127 			MGMT, 0111,
1128 			(htt_tlv_filter->fp_mgmt_filter &
1129 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1130 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1131 			FP, MGMT, 1000,
1132 			(htt_tlv_filter->fp_mgmt_filter &
1133 			FILTER_MGMT_BEACON) ? 1 : 0);
1134 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1135 			FP, MGMT, 1001,
1136 			(htt_tlv_filter->fp_mgmt_filter &
1137 			FILTER_MGMT_ATIM) ? 1 : 0);
1138 	}
1139 
1140 	if (htt_tlv_filter->enable_md) {
1141 			/* TYPE: MGMT */
1142 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1143 			MD, MGMT, 0000,
1144 			(htt_tlv_filter->md_mgmt_filter &
1145 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1146 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1147 			MD, MGMT, 0001,
1148 			(htt_tlv_filter->md_mgmt_filter &
1149 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1150 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1151 			MD, MGMT, 0010,
1152 			(htt_tlv_filter->md_mgmt_filter &
1153 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1154 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1155 			MD, MGMT, 0011,
1156 			(htt_tlv_filter->md_mgmt_filter &
1157 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1158 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1159 			MD, MGMT, 0100,
1160 			(htt_tlv_filter->md_mgmt_filter &
1161 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1162 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1163 			MD, MGMT, 0101,
1164 			(htt_tlv_filter->md_mgmt_filter &
1165 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1166 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1167 			MD, MGMT, 0110,
1168 			(htt_tlv_filter->md_mgmt_filter &
1169 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1170 		/* reserved */
1171 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1172 			MGMT, 0111,
1173 			(htt_tlv_filter->md_mgmt_filter &
1174 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1175 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1176 			MD, MGMT, 1000,
1177 			(htt_tlv_filter->md_mgmt_filter &
1178 			FILTER_MGMT_BEACON) ? 1 : 0);
1179 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1180 			MD, MGMT, 1001,
1181 			(htt_tlv_filter->md_mgmt_filter &
1182 			FILTER_MGMT_ATIM) ? 1 : 0);
1183 	}
1184 
1185 	if (htt_tlv_filter->enable_mo) {
1186 		/* TYPE: MGMT */
1187 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1188 			MO, MGMT, 0000,
1189 			(htt_tlv_filter->mo_mgmt_filter &
1190 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1191 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1192 			MO, MGMT, 0001,
1193 			(htt_tlv_filter->mo_mgmt_filter &
1194 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1195 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1196 			MO, MGMT, 0010,
1197 			(htt_tlv_filter->mo_mgmt_filter &
1198 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1199 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1200 			MO, MGMT, 0011,
1201 			(htt_tlv_filter->mo_mgmt_filter &
1202 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1203 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1204 			MO, MGMT, 0100,
1205 			(htt_tlv_filter->mo_mgmt_filter &
1206 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1207 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1208 			MO, MGMT, 0101,
1209 			(htt_tlv_filter->mo_mgmt_filter &
1210 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1211 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1212 			MO, MGMT, 0110,
1213 			(htt_tlv_filter->mo_mgmt_filter &
1214 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1215 		/* reserved */
1216 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1217 			MGMT, 0111,
1218 			(htt_tlv_filter->mo_mgmt_filter &
1219 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1220 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1221 			MO, MGMT, 1000,
1222 			(htt_tlv_filter->mo_mgmt_filter &
1223 			FILTER_MGMT_BEACON) ? 1 : 0);
1224 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1225 			MO, MGMT, 1001,
1226 			(htt_tlv_filter->mo_mgmt_filter &
1227 			FILTER_MGMT_ATIM) ? 1 : 0);
1228 	}
1229 
1230 	/* word 3 */
1231 	msg_word++;
1232 	*msg_word = 0;
1233 
1234 	if (htt_tlv_filter->enable_fp) {
1235 		/* TYPE: MGMT */
1236 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1237 			FP, MGMT, 1010,
1238 			(htt_tlv_filter->fp_mgmt_filter &
1239 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1240 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1241 			FP, MGMT, 1011,
1242 			(htt_tlv_filter->fp_mgmt_filter &
1243 			FILTER_MGMT_AUTH) ? 1 : 0);
1244 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1245 			FP, MGMT, 1100,
1246 			(htt_tlv_filter->fp_mgmt_filter &
1247 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1248 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1249 			FP, MGMT, 1101,
1250 			(htt_tlv_filter->fp_mgmt_filter &
1251 			FILTER_MGMT_ACTION) ? 1 : 0);
1252 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1253 			FP, MGMT, 1110,
1254 			(htt_tlv_filter->fp_mgmt_filter &
1255 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1256 		/* reserved*/
1257 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1258 			MGMT, 1111,
1259 			(htt_tlv_filter->fp_mgmt_filter &
1260 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1261 	}
1262 
1263 	if (htt_tlv_filter->enable_md) {
1264 			/* TYPE: MGMT */
1265 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1266 			MD, MGMT, 1010,
1267 			(htt_tlv_filter->md_mgmt_filter &
1268 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1269 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1270 			MD, MGMT, 1011,
1271 			(htt_tlv_filter->md_mgmt_filter &
1272 			FILTER_MGMT_AUTH) ? 1 : 0);
1273 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1274 			MD, MGMT, 1100,
1275 			(htt_tlv_filter->md_mgmt_filter &
1276 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1277 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1278 			MD, MGMT, 1101,
1279 			(htt_tlv_filter->md_mgmt_filter &
1280 			FILTER_MGMT_ACTION) ? 1 : 0);
1281 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1282 			MD, MGMT, 1110,
1283 			(htt_tlv_filter->md_mgmt_filter &
1284 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1285 	}
1286 
1287 	if (htt_tlv_filter->enable_mo) {
1288 		/* TYPE: MGMT */
1289 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1290 			MO, MGMT, 1010,
1291 			(htt_tlv_filter->mo_mgmt_filter &
1292 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1293 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1294 			MO, MGMT, 1011,
1295 			(htt_tlv_filter->mo_mgmt_filter &
1296 			FILTER_MGMT_AUTH) ? 1 : 0);
1297 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1298 			MO, MGMT, 1100,
1299 			(htt_tlv_filter->mo_mgmt_filter &
1300 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1301 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1302 			MO, MGMT, 1101,
1303 			(htt_tlv_filter->mo_mgmt_filter &
1304 			FILTER_MGMT_ACTION) ? 1 : 0);
1305 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1306 			MO, MGMT, 1110,
1307 			(htt_tlv_filter->mo_mgmt_filter &
1308 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1309 		/* reserved*/
1310 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1311 			MGMT, 1111,
1312 			(htt_tlv_filter->mo_mgmt_filter &
1313 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1314 	}
1315 
1316 	/* word 4 */
1317 	msg_word++;
1318 	*msg_word = 0;
1319 
1320 	if (htt_tlv_filter->enable_fp) {
1321 		/* TYPE: CTRL */
1322 		/* reserved */
1323 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1324 			CTRL, 0000,
1325 			(htt_tlv_filter->fp_ctrl_filter &
1326 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1327 		/* reserved */
1328 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1329 			CTRL, 0001,
1330 			(htt_tlv_filter->fp_ctrl_filter &
1331 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1332 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1333 			CTRL, 0010,
1334 			(htt_tlv_filter->fp_ctrl_filter &
1335 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1336 		/* reserved */
1337 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1338 			CTRL, 0011,
1339 			(htt_tlv_filter->fp_ctrl_filter &
1340 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1341 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1342 			CTRL, 0100,
1343 			(htt_tlv_filter->fp_ctrl_filter &
1344 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1345 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1346 			CTRL, 0101,
1347 			(htt_tlv_filter->fp_ctrl_filter &
1348 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1349 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1350 			CTRL, 0110,
1351 			(htt_tlv_filter->fp_ctrl_filter &
1352 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1353 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1354 			CTRL, 0111,
1355 			(htt_tlv_filter->fp_ctrl_filter &
1356 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1357 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1358 			CTRL, 1000,
1359 			(htt_tlv_filter->fp_ctrl_filter &
1360 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1361 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1362 			CTRL, 1001,
1363 			(htt_tlv_filter->fp_ctrl_filter &
1364 			FILTER_CTRL_BA) ? 1 : 0);
1365 	}
1366 
1367 	if (htt_tlv_filter->enable_md) {
1368 		/* TYPE: CTRL */
1369 		/* reserved */
1370 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1371 			CTRL, 0000,
1372 			(htt_tlv_filter->md_ctrl_filter &
1373 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1374 		/* reserved */
1375 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1376 			CTRL, 0001,
1377 			(htt_tlv_filter->md_ctrl_filter &
1378 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1379 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1380 			CTRL, 0010,
1381 			(htt_tlv_filter->md_ctrl_filter &
1382 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1383 		/* reserved */
1384 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1385 			CTRL, 0011,
1386 			(htt_tlv_filter->md_ctrl_filter &
1387 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1388 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1389 			CTRL, 0100,
1390 			(htt_tlv_filter->md_ctrl_filter &
1391 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1392 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1393 			CTRL, 0101,
1394 			(htt_tlv_filter->md_ctrl_filter &
1395 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1396 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1397 			CTRL, 0110,
1398 			(htt_tlv_filter->md_ctrl_filter &
1399 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1400 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1401 			CTRL, 0111,
1402 			(htt_tlv_filter->md_ctrl_filter &
1403 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1404 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1405 			CTRL, 1000,
1406 			(htt_tlv_filter->md_ctrl_filter &
1407 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1408 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1409 			CTRL, 1001,
1410 			(htt_tlv_filter->md_ctrl_filter &
1411 			FILTER_CTRL_BA) ? 1 : 0);
1412 	}
1413 
1414 	if (htt_tlv_filter->enable_mo) {
1415 		/* TYPE: CTRL */
1416 		/* reserved */
1417 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1418 			CTRL, 0000,
1419 			(htt_tlv_filter->mo_ctrl_filter &
1420 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1421 		/* reserved */
1422 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1423 			CTRL, 0001,
1424 			(htt_tlv_filter->mo_ctrl_filter &
1425 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1426 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1427 			CTRL, 0010,
1428 			(htt_tlv_filter->mo_ctrl_filter &
1429 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1430 		/* reserved */
1431 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1432 			CTRL, 0011,
1433 			(htt_tlv_filter->mo_ctrl_filter &
1434 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1435 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1436 			CTRL, 0100,
1437 			(htt_tlv_filter->mo_ctrl_filter &
1438 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1439 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1440 			CTRL, 0101,
1441 			(htt_tlv_filter->mo_ctrl_filter &
1442 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1443 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1444 			CTRL, 0110,
1445 			(htt_tlv_filter->mo_ctrl_filter &
1446 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1447 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1448 			CTRL, 0111,
1449 			(htt_tlv_filter->mo_ctrl_filter &
1450 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1451 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1452 			CTRL, 1000,
1453 			(htt_tlv_filter->mo_ctrl_filter &
1454 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1455 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1456 			CTRL, 1001,
1457 			(htt_tlv_filter->mo_ctrl_filter &
1458 			FILTER_CTRL_BA) ? 1 : 0);
1459 	}
1460 
1461 	/* word 5 */
1462 	msg_word++;
1463 	*msg_word = 0;
1464 	if (htt_tlv_filter->enable_fp) {
1465 		/* TYPE: CTRL */
1466 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1467 			CTRL, 1010,
1468 			(htt_tlv_filter->fp_ctrl_filter &
1469 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1470 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1471 			CTRL, 1011,
1472 			(htt_tlv_filter->fp_ctrl_filter &
1473 			FILTER_CTRL_RTS) ? 1 : 0);
1474 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1475 			CTRL, 1100,
1476 			(htt_tlv_filter->fp_ctrl_filter &
1477 			FILTER_CTRL_CTS) ? 1 : 0);
1478 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1479 			CTRL, 1101,
1480 			(htt_tlv_filter->fp_ctrl_filter &
1481 			FILTER_CTRL_ACK) ? 1 : 0);
1482 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1483 			CTRL, 1110,
1484 			(htt_tlv_filter->fp_ctrl_filter &
1485 			FILTER_CTRL_CFEND) ? 1 : 0);
1486 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1487 			CTRL, 1111,
1488 			(htt_tlv_filter->fp_ctrl_filter &
1489 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1490 		/* TYPE: DATA */
1491 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1492 			DATA, MCAST,
1493 			(htt_tlv_filter->fp_data_filter &
1494 			FILTER_DATA_MCAST) ? 1 : 0);
1495 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1496 			DATA, UCAST,
1497 			(htt_tlv_filter->fp_data_filter &
1498 			FILTER_DATA_UCAST) ? 1 : 0);
1499 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1500 			DATA, NULL,
1501 			(htt_tlv_filter->fp_data_filter &
1502 			FILTER_DATA_NULL) ? 1 : 0);
1503 	}
1504 
1505 	if (htt_tlv_filter->enable_md) {
1506 		/* TYPE: CTRL */
1507 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1508 			CTRL, 1010,
1509 			(htt_tlv_filter->md_ctrl_filter &
1510 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1511 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1512 			CTRL, 1011,
1513 			(htt_tlv_filter->md_ctrl_filter &
1514 			FILTER_CTRL_RTS) ? 1 : 0);
1515 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1516 			CTRL, 1100,
1517 			(htt_tlv_filter->md_ctrl_filter &
1518 			FILTER_CTRL_CTS) ? 1 : 0);
1519 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1520 			CTRL, 1101,
1521 			(htt_tlv_filter->md_ctrl_filter &
1522 			FILTER_CTRL_ACK) ? 1 : 0);
1523 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1524 			CTRL, 1110,
1525 			(htt_tlv_filter->md_ctrl_filter &
1526 			FILTER_CTRL_CFEND) ? 1 : 0);
1527 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1528 			CTRL, 1111,
1529 			(htt_tlv_filter->md_ctrl_filter &
1530 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1531 		/* TYPE: DATA */
1532 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1533 			DATA, MCAST,
1534 			(htt_tlv_filter->md_data_filter &
1535 			FILTER_DATA_MCAST) ? 1 : 0);
1536 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1537 			DATA, UCAST,
1538 			(htt_tlv_filter->md_data_filter &
1539 			FILTER_DATA_UCAST) ? 1 : 0);
1540 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1541 			DATA, NULL,
1542 			(htt_tlv_filter->md_data_filter &
1543 			FILTER_DATA_NULL) ? 1 : 0);
1544 	}
1545 
1546 	if (htt_tlv_filter->enable_mo) {
1547 		/* TYPE: CTRL */
1548 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1549 			CTRL, 1010,
1550 			(htt_tlv_filter->mo_ctrl_filter &
1551 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1552 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1553 			CTRL, 1011,
1554 			(htt_tlv_filter->mo_ctrl_filter &
1555 			FILTER_CTRL_RTS) ? 1 : 0);
1556 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1557 			CTRL, 1100,
1558 			(htt_tlv_filter->mo_ctrl_filter &
1559 			FILTER_CTRL_CTS) ? 1 : 0);
1560 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1561 			CTRL, 1101,
1562 			(htt_tlv_filter->mo_ctrl_filter &
1563 			FILTER_CTRL_ACK) ? 1 : 0);
1564 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1565 			CTRL, 1110,
1566 			(htt_tlv_filter->mo_ctrl_filter &
1567 			FILTER_CTRL_CFEND) ? 1 : 0);
1568 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1569 			CTRL, 1111,
1570 			(htt_tlv_filter->mo_ctrl_filter &
1571 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1572 		/* TYPE: DATA */
1573 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1574 			DATA, MCAST,
1575 			(htt_tlv_filter->mo_data_filter &
1576 			FILTER_DATA_MCAST) ? 1 : 0);
1577 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1578 			DATA, UCAST,
1579 			(htt_tlv_filter->mo_data_filter &
1580 			FILTER_DATA_UCAST) ? 1 : 0);
1581 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1582 			DATA, NULL,
1583 			(htt_tlv_filter->mo_data_filter &
1584 			FILTER_DATA_NULL) ? 1 : 0);
1585 	}
1586 
1587 	/* word 6 */
1588 	msg_word++;
1589 	*msg_word = 0;
1590 	tlv_filter = 0;
1591 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1592 		htt_tlv_filter->mpdu_start);
1593 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1594 		htt_tlv_filter->msdu_start);
1595 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1596 		htt_tlv_filter->packet);
1597 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1598 		htt_tlv_filter->msdu_end);
1599 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1600 		htt_tlv_filter->mpdu_end);
1601 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1602 		htt_tlv_filter->packet_header);
1603 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1604 		htt_tlv_filter->attention);
1605 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1606 		htt_tlv_filter->ppdu_start);
1607 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1608 		htt_tlv_filter->ppdu_end);
1609 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1610 		htt_tlv_filter->ppdu_end_user_stats);
1611 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1612 		PPDU_END_USER_STATS_EXT,
1613 		htt_tlv_filter->ppdu_end_user_stats_ext);
1614 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1615 		htt_tlv_filter->ppdu_end_status_done);
1616 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START_USER_INFO,
1617 		htt_tlv_filter->ppdu_start_user_info);
1618 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1619 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1620 		 htt_tlv_filter->header_per_msdu);
1621 
1622 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1623 
1624 	msg_word_data = (uint32_t *)qdf_nbuf_data(htt_msg);
1625 	dp_info("config_data: [0x%x][0x%x][0x%x][0x%x][0x%x][0x%x][0x%x]",
1626 		msg_word_data[0], msg_word_data[1], msg_word_data[2],
1627 		msg_word_data[3], msg_word_data[4], msg_word_data[5],
1628 		msg_word_data[6]);
1629 
1630 	/* word 7 */
1631 	msg_word++;
1632 	*msg_word = 0;
1633 	if (htt_tlv_filter->offset_valid) {
1634 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1635 					htt_tlv_filter->rx_packet_offset);
1636 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1637 					htt_tlv_filter->rx_header_offset);
1638 
1639 		/* word 8 */
1640 		msg_word++;
1641 		*msg_word = 0;
1642 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1643 					htt_tlv_filter->rx_mpdu_end_offset);
1644 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1645 					htt_tlv_filter->rx_mpdu_start_offset);
1646 
1647 		/* word 9 */
1648 		msg_word++;
1649 		*msg_word = 0;
1650 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1651 					htt_tlv_filter->rx_msdu_end_offset);
1652 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1653 					htt_tlv_filter->rx_msdu_start_offset);
1654 
1655 		/* word 10 */
1656 		msg_word++;
1657 		*msg_word = 0;
1658 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1659 					htt_tlv_filter->rx_attn_offset);
1660 
1661 		/* word 11 */
1662 		msg_word++;
1663 		*msg_word = 0;
1664 	} else {
1665 		/* word 11 */
1666 		msg_word += 4;
1667 		*msg_word = 0;
1668 	}
1669 
1670 	soc->dp_soc->arch_ops.dp_rx_word_mask_subscribe(
1671 						soc->dp_soc,
1672 						msg_word,
1673 						(void *)htt_tlv_filter);
1674 
1675 	dp_mon_rx_wmask_subscribe(soc->dp_soc, msg_word, htt_tlv_filter);
1676 
1677 	if (mon_drop_th > 0)
1678 		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1679 				mon_drop_th);
1680 
1681 	dp_mon_rx_enable_mpdu_logging(soc->dp_soc, msg_word, htt_tlv_filter);
1682 
1683 	dp_mon_rx_enable_phy_errors(msg_word, htt_tlv_filter);
1684 
1685 	/* word 14*/
1686 	msg_word += 3;
1687 
1688 	/* word 15*/
1689 	msg_word++;
1690 
1691 	/* word 16*/
1692 	msg_word++;
1693 	*msg_word = 0;
1694 
1695 	dp_mon_rx_enable_pkt_tlv_offset(soc->dp_soc, msg_word, htt_tlv_filter);
1696 
1697 #ifdef FW_SUPPORT_NOT_YET
1698 	/* word 20 and 21*/
1699 	msg_word += 4;
1700 	*msg_word = 0;
1701 
1702 	dp_mon_rx_enable_fpmo(soc->dp_soc, msg_word, htt_tlv_filter);
1703 #endif/* FW_SUPPORT_NOT_YET */
1704 
1705 	/* "response_required" field should be set if a HTT response message is
1706 	 * required after setting up the ring.
1707 	 */
1708 	pkt = htt_htc_pkt_alloc(soc);
1709 	if (!pkt) {
1710 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
1711 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
1712 		goto fail1;
1713 	}
1714 
1715 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1716 
1717 	SET_HTC_PACKET_INFO_TX(
1718 		&pkt->htc_pkt,
1719 		dp_htt_h2t_send_complete_free_netbuf,
1720 		qdf_nbuf_data(htt_msg),
1721 		qdf_nbuf_len(htt_msg),
1722 		soc->htc_endpoint,
1723 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1724 
1725 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1726 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
1727 				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1728 				     htt_logger_bufp);
1729 
1730 	if (status != QDF_STATUS_SUCCESS) {
1731 		qdf_nbuf_free(htt_msg);
1732 		htt_htc_pkt_free(soc, pkt);
1733 	}
1734 
1735 	return status;
1736 
1737 fail1:
1738 	qdf_nbuf_free(htt_msg);
1739 fail0:
1740 	return QDF_STATUS_E_FAILURE;
1741 }
1742 
1743 qdf_export_symbol(htt_h2t_rx_ring_cfg);
1744 
1745 #if defined(HTT_STATS_ENABLE)
1746 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1747 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1748 
1749 {
1750 	uint32_t pdev_id;
1751 	uint32_t *msg_word = NULL;
1752 	uint32_t msg_remain_len = 0;
1753 
1754 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1755 
1756 	/*COOKIE MSB*/
1757 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1758 
1759 	/* stats message length + 16 size of HTT header*/
1760 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1761 				(uint32_t)DP_EXT_MSG_LENGTH);
1762 
1763 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1764 			msg_word,  msg_remain_len,
1765 			WDI_NO_VAL, pdev_id);
1766 
1767 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1768 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1769 	}
1770 	/* Need to be freed here as WDI handler will
1771 	 * make a copy of pkt to send data to application
1772 	 */
1773 	qdf_nbuf_free(htt_msg);
1774 	return QDF_STATUS_SUCCESS;
1775 }
1776 #else
1777 static inline QDF_STATUS
1778 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1779 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
1780 {
1781 	return QDF_STATUS_E_NOSUPPORT;
1782 }
1783 #endif
1784 
1785 #ifdef HTT_STATS_DEBUGFS_SUPPORT
1786 /* dp_send_htt_stats_dbgfs_msg() - Function to send htt data to upper layer.
1787  * @pdev: dp pdev handle
1788  * @msg_word: HTT msg
1789  * @msg_len: Length of HTT msg sent
1790  *
1791  * Return: none
1792  */
1793 static inline void
1794 dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
1795 			    uint32_t msg_len)
1796 {
1797 	struct htt_dbgfs_cfg dbgfs_cfg;
1798 	int done = 0;
1799 
1800 	/* send 5th word of HTT msg to upper layer */
1801 	dbgfs_cfg.msg_word = (msg_word + 4);
1802 	dbgfs_cfg.m = pdev->dbgfs_cfg->m;
1803 
1804 	/* stats message length + 16 size of HTT header*/
1805 	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);
1806 
1807 	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
1808 		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
1809 							     (msg_len - HTT_HEADER_LEN));
1810 
1811 	/* Get TLV Done bit from 4th msg word */
1812 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1813 	if (done) {
1814 		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
1815 			dp_htt_err("%pK: Failed to set event for debugfs htt stats"
1816 				   , pdev->soc);
1817 	}
1818 }
1819 #else
1820 static inline void
1821 dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
1822 			    uint32_t msg_len)
1823 {
1824 }
1825 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
1826 
1827 #ifdef WLAN_SYSFS_DP_STATS
1828 /* dp_htt_stats_sysfs_update_config() - Function to send htt data to upper layer.
1829  * @pdev: dp pdev handle
1830  *
1831  * This function sets the process id and printing mode within the sysfs config
1832  * struct. which enables DP_PRINT statements within this process to write to the
1833  * console buffer provided by the user space.
1834  *
1835  * Return: None
1836  */
1837 static inline void
1838 dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
1839 {
1840 	struct dp_soc *soc = pdev->soc;
1841 
1842 	if (!soc) {
1843 		dp_htt_err("soc is null");
1844 		return;
1845 	}
1846 
1847 	if (!soc->sysfs_config) {
1848 		dp_htt_err("soc->sysfs_config is NULL");
1849 		return;
1850 	}
1851 
1852 	/* set sysfs config parameters */
1853 	soc->sysfs_config->process_id = qdf_get_current_pid();
1854 	soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
1855 }
1856 
1857 /*
1858  * dp_htt_stats_sysfs_set_event() - Set sysfs stats event.
1859  * @soc: soc handle.
1860  * @msg_word: Pointer to htt msg word.
1861  *
1862  * @return: void
1863  */
1864 static inline void
1865 dp_htt_stats_sysfs_set_event(struct dp_soc *soc, uint32_t *msg_word)
1866 {
1867 	int done = 0;
1868 
1869 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1870 	if (done) {
1871 		if (qdf_event_set(&soc->sysfs_config->sysfs_txrx_fw_request_done))
1872 			dp_htt_err("%pK:event compl Fail to set event ",
1873 				   soc);
1874 	}
1875 }
1876 #else /* WLAN_SYSFS_DP_STATS */
/* Stub used when WLAN_SYSFS_DP_STATS is compiled out */
static inline void
dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
{
}
1881 
/* Stub used when WLAN_SYSFS_DP_STATS is compiled out */
static inline void
dp_htt_stats_sysfs_set_event(struct dp_soc *dp_soc, uint32_t *msg_word)
{
}
1886 #endif /* WLAN_SYSFS_DP_STATS */
1887 
1888 /* dp_htt_set_pdev_obss_stats() - Function to set pdev obss stats.
1889  * @pdev: dp pdev handle
1890  * @tag_type: HTT TLV tag type
1891  * @tag_buf: TLV buffer pointer
1892  *
1893  * Return: None
1894  */
1895 static inline void
1896 dp_htt_set_pdev_obss_stats(struct dp_pdev *pdev, uint32_t tag_type,
1897 			   uint32_t *tag_buf)
1898 {
1899 	if (tag_type != HTT_STATS_PDEV_OBSS_PD_TAG) {
1900 		dp_err("Tag mismatch");
1901 		return;
1902 	}
1903 	qdf_mem_copy(&pdev->stats.htt_tx_pdev_stats.obss_pd_stats_tlv,
1904 		     tag_buf, sizeof(struct cdp_pdev_obss_pd_stats_tlv));
1905 	qdf_event_set(&pdev->fw_obss_stats_event);
1906 }
1907 
1908 /**
1909  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1910  * @htt_stats: htt stats info
1911  *
1912  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1913  * contains sub messages which are identified by a TLV header.
1914  * In this function we will process the stream of T2H messages and read all the
1915  * TLV contained in the message.
1916  *
1917  * THe following cases have been taken care of
1918  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1919  *		In this case the buffer will contain multiple tlvs.
1920  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1921  *		Only one tlv will be contained in the HTT message and this tag
1922  *		will extend onto the next buffer.
1923  * Case 3: When the buffer is the continuation of the previous message
1924  * Case 4: tlv length is 0. which will indicate the end of message
1925  *
1926  * return: void
1927  */
1928 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
1929 					struct dp_soc *soc)
1930 {
1931 	htt_tlv_tag_t tlv_type = 0xff;
1932 	qdf_nbuf_t htt_msg = NULL;
1933 	uint32_t *msg_word;
1934 	uint8_t *tlv_buf_head = NULL;
1935 	uint8_t *tlv_buf_tail = NULL;
1936 	uint32_t msg_remain_len = 0;
1937 	uint32_t tlv_remain_len = 0;
1938 	uint32_t *tlv_start;
1939 	int cookie_val = 0;
1940 	int cookie_msb = 0;
1941 	int pdev_id;
1942 	bool copy_stats = false;
1943 	struct dp_pdev *pdev;
1944 
1945 	/* Process node in the HTT message queue */
1946 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1947 		!= NULL) {
1948 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1949 		cookie_val = *(msg_word + 1);
1950 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
1951 					*(msg_word +
1952 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
1953 
1954 		if (cookie_val) {
1955 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
1956 					== QDF_STATUS_SUCCESS) {
1957 				continue;
1958 			}
1959 		}
1960 
1961 		cookie_msb = *(msg_word + 2);
1962 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1963 		pdev = soc->pdev_list[pdev_id];
1964 
1965 		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
1966 			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
1967 						    htt_stats->msg_len);
1968 			qdf_nbuf_free(htt_msg);
1969 			continue;
1970 		}
1971 
1972 		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
1973 			dp_htt_stats_sysfs_update_config(pdev);
1974 
1975 		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
1976 			copy_stats = true;
1977 
1978 		/* read 5th word */
1979 		msg_word = msg_word + 4;
1980 		msg_remain_len = qdf_min(htt_stats->msg_len,
1981 				(uint32_t) DP_EXT_MSG_LENGTH);
1982 		/* Keep processing the node till node length is 0 */
1983 		while (msg_remain_len) {
1984 			/*
1985 			 * if message is not a continuation of previous message
1986 			 * read the tlv type and tlv length
1987 			 */
1988 			if (!tlv_buf_head) {
1989 				tlv_type = HTT_STATS_TLV_TAG_GET(
1990 						*msg_word);
1991 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
1992 						*msg_word);
1993 			}
1994 
1995 			if (tlv_remain_len == 0) {
1996 				msg_remain_len = 0;
1997 
1998 				if (tlv_buf_head) {
1999 					qdf_mem_free(tlv_buf_head);
2000 					tlv_buf_head = NULL;
2001 					tlv_buf_tail = NULL;
2002 				}
2003 
2004 				goto error;
2005 			}
2006 
2007 			if (!tlv_buf_head)
2008 				tlv_remain_len += HTT_TLV_HDR_LEN;
2009 
2010 			if ((tlv_remain_len <= msg_remain_len)) {
2011 				/* Case 3 */
2012 				if (tlv_buf_head) {
2013 					qdf_mem_copy(tlv_buf_tail,
2014 							(uint8_t *)msg_word,
2015 							tlv_remain_len);
2016 					tlv_start = (uint32_t *)tlv_buf_head;
2017 				} else {
2018 					/* Case 1 */
2019 					tlv_start = msg_word;
2020 				}
2021 
2022 				if (copy_stats)
2023 					dp_htt_stats_copy_tag(pdev,
2024 							      tlv_type,
2025 							      tlv_start);
2026 				else
2027 					dp_htt_stats_print_tag(pdev,
2028 							       tlv_type,
2029 							       tlv_start);
2030 
2031 				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
2032 				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
2033 					dp_peer_update_inactive_time(pdev,
2034 								     tlv_type,
2035 								     tlv_start);
2036 
2037 				if (cookie_msb & DBG_STATS_COOKIE_HTT_OBSS)
2038 					dp_htt_set_pdev_obss_stats(pdev,
2039 								   tlv_type,
2040 								   tlv_start);
2041 
2042 				msg_remain_len -= tlv_remain_len;
2043 
2044 				msg_word = (uint32_t *)
2045 					(((uint8_t *)msg_word) +
2046 					tlv_remain_len);
2047 
2048 				tlv_remain_len = 0;
2049 
2050 				if (tlv_buf_head) {
2051 					qdf_mem_free(tlv_buf_head);
2052 					tlv_buf_head = NULL;
2053 					tlv_buf_tail = NULL;
2054 				}
2055 
2056 			} else { /* tlv_remain_len > msg_remain_len */
2057 				/* Case 2 & 3 */
2058 				if (!tlv_buf_head) {
2059 					tlv_buf_head = qdf_mem_malloc(
2060 							tlv_remain_len);
2061 
2062 					if (!tlv_buf_head) {
2063 						QDF_TRACE(QDF_MODULE_ID_TXRX,
2064 								QDF_TRACE_LEVEL_ERROR,
2065 								"Alloc failed");
2066 						goto error;
2067 					}
2068 
2069 					tlv_buf_tail = tlv_buf_head;
2070 				}
2071 
2072 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
2073 						msg_remain_len);
2074 				tlv_remain_len -= msg_remain_len;
2075 				tlv_buf_tail += msg_remain_len;
2076 			}
2077 		}
2078 
2079 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
2080 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
2081 		}
2082 
2083 		/* indicate event completion in case the event is done */
2084 		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
2085 			dp_htt_stats_sysfs_set_event(soc, msg_word);
2086 
2087 		qdf_nbuf_free(htt_msg);
2088 	}
2089 	return;
2090 
2091 error:
2092 	qdf_nbuf_free(htt_msg);
2093 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
2094 			!= NULL)
2095 		qdf_nbuf_free(htt_msg);
2096 }
2097 
2098 void htt_t2h_stats_handler(void *context)
2099 {
2100 	struct dp_soc *soc = (struct dp_soc *)context;
2101 	struct htt_stats_context htt_stats;
2102 	uint32_t *msg_word;
2103 	qdf_nbuf_t htt_msg = NULL;
2104 	uint8_t done;
2105 	uint32_t rem_stats;
2106 
2107 	if (!soc) {
2108 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2109 			  "soc is NULL");
2110 		return;
2111 	}
2112 
2113 	if (!qdf_atomic_read(&soc->cmn_init_done)) {
2114 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2115 			  "soc: 0x%pK, init_done: %d", soc,
2116 			  qdf_atomic_read(&soc->cmn_init_done));
2117 		return;
2118 	}
2119 
2120 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
2121 	qdf_nbuf_queue_init(&htt_stats.msg);
2122 
2123 	/* pull one completed stats from soc->htt_stats_msg and process */
2124 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2125 	if (!soc->htt_stats.num_stats) {
2126 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
2127 		return;
2128 	}
2129 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
2130 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
2131 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
2132 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2133 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
2134 		/*
2135 		 * Done bit signifies that this is the last T2H buffer in the
2136 		 * stream of HTT EXT STATS message
2137 		 */
2138 		if (done)
2139 			break;
2140 	}
2141 	rem_stats = --soc->htt_stats.num_stats;
2142 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2143 
2144 	/* If there are more stats to process, schedule stats work again.
2145 	 * Scheduling prior to processing ht_stats to queue with early
2146 	 * index
2147 	 */
2148 	if (rem_stats)
2149 		qdf_sched_work(0, &soc->htt_stats.work);
2150 
2151 	dp_process_htt_stat_msg(&htt_stats, soc);
2152 }
2153 
2154 /**
2155  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2156  * @soc: DP SOC handle
2157  * @htt_t2h_msg: HTT message nbuf
2158  *
2159  * return:void
2160  */
2161 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2162 					    qdf_nbuf_t htt_t2h_msg)
2163 {
2164 	uint8_t done;
2165 	qdf_nbuf_t msg_copy;
2166 	uint32_t *msg_word;
2167 
2168 	msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
2169 	msg_word = msg_word + 3;
2170 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2171 
2172 	/*
2173 	 * HTT EXT stats response comes as stream of TLVs which span over
2174 	 * multiple T2H messages.
2175 	 * The first message will carry length of the response.
2176 	 * For rest of the messages length will be zero.
2177 	 *
2178 	 * Clone the T2H message buffer and store it in a list to process
2179 	 * it later.
2180 	 *
2181 	 * The original T2H message buffers gets freed in the T2H HTT event
2182 	 * handler
2183 	 */
2184 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2185 
2186 	if (!msg_copy) {
2187 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2188 			  "T2H message clone failed for HTT EXT STATS");
2189 		goto error;
2190 	}
2191 
2192 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2193 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2194 	/*
2195 	 * Done bit signifies that this is the last T2H buffer in the stream of
2196 	 * HTT EXT STATS message
2197 	 */
2198 	if (done) {
2199 		soc->htt_stats.num_stats++;
2200 		qdf_sched_work(0, &soc->htt_stats.work);
2201 	}
2202 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2203 
2204 	return;
2205 
2206 error:
2207 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2208 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2209 			!= NULL) {
2210 		qdf_nbuf_free(msg_copy);
2211 	}
2212 	soc->htt_stats.num_stats = 0;
2213 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2214 	return;
2215 }
2216 
2217 /*
2218  * htt_soc_attach_target() - SOC level HTT setup
2219  * @htt_soc:	HTT SOC handle
2220  *
2221  * Return: 0 on success; error code on failure
2222  */
2223 int htt_soc_attach_target(struct htt_soc *htt_soc)
2224 {
2225 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
2226 
2227 	return htt_h2t_ver_req_msg(soc);
2228 }
2229 
/* htt_set_htc_handle() - store the HTC handle inside the HTT soc */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}
2234 
/* htt_get_htc_handle() - return the HTC handle stored in the HTT soc */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
2239 
2240 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
2241 {
2242 	int i;
2243 	int j;
2244 	int umac_alloc_size = HTT_SW_UMAC_RING_IDX_MAX *
2245 			      sizeof(struct bp_handler);
2246 	int lmac_alloc_size = HTT_SW_LMAC_RING_IDX_MAX *
2247 			      sizeof(struct bp_handler);
2248 	struct htt_soc *htt_soc = NULL;
2249 
2250 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
2251 	if (!htt_soc) {
2252 		dp_err("HTT attach failed");
2253 		return NULL;
2254 	}
2255 
2256 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2257 		htt_soc->pdevid_tt[i].umac_path =
2258 			qdf_mem_malloc(umac_alloc_size);
2259 		if (!htt_soc->pdevid_tt[i].umac_path)
2260 			break;
2261 		for (j = 0; j < HTT_SW_UMAC_RING_IDX_MAX; j++)
2262 			htt_soc->pdevid_tt[i].umac_path[j].bp_start_tt = -1;
2263 		htt_soc->pdevid_tt[i].lmac_path =
2264 			qdf_mem_malloc(lmac_alloc_size);
2265 		if (!htt_soc->pdevid_tt[i].lmac_path) {
2266 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_path);
2267 			break;
2268 		}
2269 		for (j = 0; j < HTT_SW_LMAC_RING_IDX_MAX ; j++)
2270 			htt_soc->pdevid_tt[i].lmac_path[j].bp_start_tt = -1;
2271 	}
2272 
2273 	if (i != MAX_PDEV_CNT) {
2274 		for (j = 0; j < i; j++) {
2275 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_path);
2276 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_path);
2277 		}
2278 		qdf_mem_free(htt_soc);
2279 		return NULL;
2280 	}
2281 
2282 	htt_soc->dp_soc = soc;
2283 	htt_soc->htc_soc = htc_handle;
2284 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
2285 
2286 	return htt_soc;
2287 }
2288 
2289 #if defined(WDI_EVENT_ENABLE) && \
2290 	!defined(REMOVE_PKT_LOG)
2291 /*
2292  * dp_pktlog_msg_handler() - Pktlog msg handler
2293  * @htt_soc:	 HTT SOC handle
2294  * @msg_word:    Pointer to payload
2295  *
2296  * Return: None
2297  */
2298 static void
2299 dp_pktlog_msg_handler(struct htt_soc *soc,
2300 		      uint32_t *msg_word)
2301 {
2302 	uint8_t pdev_id;
2303 	uint8_t target_pdev_id;
2304 	uint32_t *pl_hdr;
2305 
2306 	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2307 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2308 							 target_pdev_id);
2309 	pl_hdr = (msg_word + 1);
2310 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2311 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2312 		pdev_id);
2313 }
2314 #else
/* Stub used when pktlog/WDI event support is compiled out */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
2320 #endif
2321 
2322 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
2323 /*
2324  * dp_vdev_txrx_hw_stats_handler - Handle vdev stats received from FW
2325  * @soc - htt soc handle
2326  * @ msg_word - buffer containing stats
2327  *
2328  * Return: void
2329  */
2330 static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
2331 					  uint32_t *msg_word)
2332 {
2333 	struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
2334 	uint8_t pdev_id;
2335 	uint8_t vdev_id;
2336 	uint8_t target_pdev_id;
2337 	uint16_t payload_size;
2338 	struct dp_pdev *pdev;
2339 	struct dp_vdev *vdev;
2340 	uint8_t *tlv_buf;
2341 	uint32_t *tlv_buf_temp;
2342 	uint32_t *tag_buf;
2343 	htt_tlv_tag_t tlv_type;
2344 	uint16_t tlv_length;
2345 	uint64_t pkt_count = 0;
2346 	uint64_t byte_count = 0;
2347 	uint64_t soc_drop_cnt = 0;
2348 	struct cdp_pkt_info tx_comp = { 0 };
2349 	struct cdp_pkt_info tx_failed =  { 0 };
2350 
2351 	target_pdev_id =
2352 		HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PDEV_ID_GET(*msg_word);
2353 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(dpsoc,
2354 							 target_pdev_id);
2355 
2356 	if (pdev_id >= MAX_PDEV_CNT)
2357 		return;
2358 
2359 	pdev = dpsoc->pdev_list[pdev_id];
2360 	if (!pdev) {
2361 		dp_err("PDEV is NULL for pdev_id:%d", pdev_id);
2362 		return;
2363 	}
2364 
2365 	payload_size =
2366 	HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PAYLOAD_SIZE_GET(*msg_word);
2367 
2368 	qdf_trace_hex_dump(QDF_MODULE_ID_DP_HTT, QDF_TRACE_LEVEL_INFO,
2369 			   (void *)msg_word, payload_size + 16);
2370 
2371 	/* Adjust msg_word to point to the first TLV in buffer */
2372 	msg_word = msg_word + 4;
2373 
2374 	/* Parse the received buffer till payload size reaches 0 */
2375 	while (payload_size > 0) {
2376 		tlv_buf = (uint8_t *)msg_word;
2377 		tlv_buf_temp = msg_word;
2378 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
2379 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
2380 
2381 		/* Add header size to tlv length*/
2382 		tlv_length += 4;
2383 
2384 		switch (tlv_type) {
2385 		case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
2386 		{
2387 			tag_buf = tlv_buf_temp +
2388 					HTT_VDEV_STATS_GET_INDEX(SOC_DROP_CNT);
2389 			soc_drop_cnt = HTT_VDEV_GET_STATS_U64(tag_buf);
2390 			DP_STATS_UPD(dpsoc, tx.tqm_drop_no_peer, soc_drop_cnt);
2391 			break;
2392 		}
2393 		case HTT_STATS_VDEV_TXRX_STATS_HW_STATS_TAG:
2394 		{
2395 			tag_buf = tlv_buf_temp +
2396 					HTT_VDEV_STATS_GET_INDEX(VDEV_ID);
2397 			vdev_id = (uint8_t)(*tag_buf);
2398 			vdev = dp_vdev_get_ref_by_id(dpsoc, vdev_id,
2399 						     DP_MOD_ID_HTT);
2400 
2401 			if (!vdev)
2402 				goto invalid_vdev;
2403 
2404 			/* Extract received packet count from buffer */
2405 			tag_buf = tlv_buf_temp +
2406 					HTT_VDEV_STATS_GET_INDEX(RX_PKT_CNT);
2407 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2408 			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.num, pkt_count);
2409 
2410 			/* Extract received packet byte count from buffer */
2411 			tag_buf = tlv_buf_temp +
2412 					HTT_VDEV_STATS_GET_INDEX(RX_BYTE_CNT);
2413 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2414 			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.bytes, byte_count);
2415 
2416 			/* Extract tx success packet count from buffer */
2417 			tag_buf = tlv_buf_temp +
2418 				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_PKT_CNT);
2419 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2420 			tx_comp.num = pkt_count;
2421 
2422 			/* Extract tx success packet byte count from buffer */
2423 			tag_buf = tlv_buf_temp +
2424 				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_BYTE_CNT);
2425 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2426 			tx_comp.bytes = byte_count;
2427 
2428 			/* Extract tx retry packet count from buffer */
2429 			tag_buf = tlv_buf_temp +
2430 				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_PKT_CNT);
2431 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2432 			tx_comp.num += pkt_count;
2433 			tx_failed.num = pkt_count;
2434 
2435 			/* Extract tx retry packet byte count from buffer */
2436 			tag_buf = tlv_buf_temp +
2437 				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_BYTE_CNT);
2438 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2439 			tx_comp.bytes += byte_count;
2440 			tx_failed.bytes = byte_count;
2441 
2442 			/* Extract tx drop packet count from buffer */
2443 			tag_buf = tlv_buf_temp +
2444 				HTT_VDEV_STATS_GET_INDEX(TX_DROP_PKT_CNT);
2445 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2446 			tx_comp.num += pkt_count;
2447 			tx_failed.num += pkt_count;
2448 
2449 			/* Extract tx drop packet byte count from buffer */
2450 			tag_buf = tlv_buf_temp +
2451 				HTT_VDEV_STATS_GET_INDEX(TX_DROP_BYTE_CNT);
2452 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2453 			tx_comp.bytes += byte_count;
2454 			tx_failed.bytes += byte_count;
2455 
2456 			/* Extract tx age-out packet count from buffer */
2457 			tag_buf = tlv_buf_temp +
2458 				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_PKT_CNT);
2459 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2460 			tx_comp.num += pkt_count;
2461 			tx_failed.num += pkt_count;
2462 
2463 			/* Extract tx age-out packet byte count from buffer */
2464 			tag_buf = tlv_buf_temp +
2465 				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_BYTE_CNT);
2466 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2467 			tx_comp.bytes += byte_count;
2468 			tx_failed.bytes += byte_count;
2469 
2470 			/* Extract tqm bypass packet count from buffer */
2471 			tag_buf = tlv_buf_temp +
2472 				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_PKT_CNT);
2473 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2474 			tx_comp.num += pkt_count;
2475 
2476 			/* Extract tx bypass packet byte count from buffer */
2477 			tag_buf = tlv_buf_temp +
2478 				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_BYTE_CNT);
2479 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2480 			tx_comp.bytes += byte_count;
2481 
2482 			DP_STATS_UPD(vdev, tx.comp_pkt.num, tx_comp.num);
2483 			DP_STATS_UPD(vdev, tx.comp_pkt.bytes, tx_comp.bytes);
2484 
2485 			DP_STATS_UPD(vdev, tx.tx_failed, tx_failed.num);
2486 
2487 			dp_vdev_unref_delete(dpsoc, vdev, DP_MOD_ID_HTT);
2488 			break;
2489 		}
2490 		default:
2491 			qdf_assert(0);
2492 		}
2493 invalid_vdev:
2494 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2495 		payload_size -= tlv_length;
2496 	}
2497 }
2498 #else
2499 static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
2500 					  uint32_t *msg_word)
2501 {}
2502 #endif
2503 
2504 #ifdef CONFIG_SAWF_DEF_QUEUES
/* Forward the SAWF default-queues map report conf msg to the SAWF layer */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_def_queues_map_report_conf(soc, msg_word, htt_t2h_msg);
}
2511 #else
/* Stub used when CONFIG_SAWF_DEF_QUEUES is compiled out */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{}
2516 #endif
2517 
2518 #ifdef CONFIG_SAWF
2519 /*
2520  * dp_sawf_msduq_map() - Msdu queue creation information received
2521  * from target
2522  * @soc: soc handle.
2523  * @msg_word: Pointer to htt msg word.
2524  * @htt_t2h_msg: HTT message nbuf
2525  *
2526  * @return: void
2527  */
2528 static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
2529 			      qdf_nbuf_t htt_t2h_msg)
2530 {
2531 	dp_htt_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
2532 }
2533 
2534 /*
2535  * dp_sawf_mpdu_stats_handler() - HTT message handler for MPDU stats
2536  * @soc: soc handle.
2537  * @htt_t2h_msg: HTT message nbuf
2538  *
2539  * @return: void
2540  */
2541 static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
2542 				       qdf_nbuf_t htt_t2h_msg)
2543 {
2544 	dp_sawf_htt_mpdu_stats_handler(soc, htt_t2h_msg);
2545 }
2546 #else
/* Stub used when CONFIG_SAWF is compiled out */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{}
2550 
/* Stub used when CONFIG_SAWF is compiled out */
static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
				       qdf_nbuf_t htt_t2h_msg)
{}
2554 #endif
2555 
2556 /*
2557  * time_allow_print() - time allow print
2558  * @htt_ring_tt:	ringi_id array of timestamps
2559  * @ring_id:		ring_id (index)
2560  *
2561  * Return: 1 for successfully saving timestamp in array
2562  *	and 0 for timestamp falling within 2 seconds after last one
2563  */
2564 static bool time_allow_print(struct bp_handler *htt_bp_handler,
2565 			     u_int8_t ring_id, u_int32_t th_time)
2566 {
2567 	unsigned long tstamp;
2568 	struct bp_handler *path = &htt_bp_handler[ring_id];
2569 
2570 	tstamp = qdf_get_system_timestamp();
2571 
2572 	if (!path)
2573 		return 0; //unable to print backpressure messages
2574 
2575 	if (path->bp_start_tt == -1) {
2576 		path->bp_start_tt = tstamp;
2577 		path->bp_duration = 0;
2578 		path->bp_last_tt = tstamp;
2579 		path->bp_counter = 1;
2580 		return 1;
2581 	}
2582 
2583 	path->bp_duration = tstamp - path->bp_start_tt;
2584 	path->bp_last_tt = tstamp;
2585 	path->bp_counter++;
2586 
2587 	if (path->bp_duration >= th_time) {
2588 		path->bp_start_tt = -1;
2589 		return 1;
2590 	}
2591 
2592 	return 0;
2593 }
2594 
/* dp_htt_alert_print() - emit the backpressure alert lines for one ring,
 * including the cumulative tracking values maintained by
 * time_allow_print().
 */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       struct dp_pdev *pdev, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time,
			       struct bp_handler *htt_bp_handler,
			       char *ring_stype)
{
	dp_alert("seq_num %u msg_type: %d pdev_id: %d ring_type: %s ",
		 pdev->bkp_stats.seq_num, msg_type, pdev->pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
	dp_alert("last_bp_event: %ld, total_bp_duration: %ld, bp_counter: %ld",
		 htt_bp_handler[ring_id].bp_last_tt,
		 htt_bp_handler[ring_id].bp_duration,
		 htt_bp_handler[ring_id].bp_counter);
}
2611 
2612 /**
2613  * dp_get_srng_ring_state_from_hal(): Get hal level ring stats
2614  * @soc: DP_SOC handle
2615  * @srng: DP_SRNG handle
2616  * @ring_type: srng src/dst ring
2617  *
2618  * Return: void
2619  */
2620 static QDF_STATUS
2621 dp_get_srng_ring_state_from_hal(struct dp_soc *soc,
2622 				struct dp_pdev *pdev,
2623 				struct dp_srng *srng,
2624 				enum hal_ring_type ring_type,
2625 				struct dp_srng_ring_state *state)
2626 {
2627 	struct hal_soc *hal_soc;
2628 
2629 	if (!soc || !srng || !srng->hal_srng || !state)
2630 		return QDF_STATUS_E_INVAL;
2631 
2632 	hal_soc = (struct hal_soc *)soc->hal_soc;
2633 
2634 	hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &state->sw_tail,
2635 			&state->sw_head);
2636 
2637 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &state->hw_head,
2638 			&state->hw_tail, ring_type);
2639 
2640 	state->ring_type = ring_type;
2641 
2642 	return QDF_STATUS_SUCCESS;
2643 }
2644 
2645 #ifdef QCA_MONITOR_PKT_SUPPORT
2646 static void
2647 dp_queue_mon_ring_stats(struct dp_pdev *pdev,
2648 			int lmac_id, uint32_t *num_srng,
2649 			struct dp_soc_srngs_state *soc_srngs_state)
2650 {
2651 	QDF_STATUS status;
2652 
2653 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
2654 		status = dp_get_srng_ring_state_from_hal
2655 			(pdev->soc, pdev,
2656 			 &pdev->soc->rxdma_mon_buf_ring[lmac_id],
2657 			 RXDMA_MONITOR_BUF,
2658 			 &soc_srngs_state->ring_state[*num_srng]);
2659 
2660 		if (status == QDF_STATUS_SUCCESS)
2661 			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
2662 
2663 		status = dp_get_srng_ring_state_from_hal
2664 			(pdev->soc, pdev,
2665 			 &pdev->soc->rxdma_mon_dst_ring[lmac_id],
2666 			 RXDMA_MONITOR_DST,
2667 			 &soc_srngs_state->ring_state[*num_srng]);
2668 
2669 		if (status == QDF_STATUS_SUCCESS)
2670 			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
2671 
2672 		status = dp_get_srng_ring_state_from_hal
2673 			(pdev->soc, pdev,
2674 			 &pdev->soc->rxdma_mon_desc_ring[lmac_id],
2675 			 RXDMA_MONITOR_DESC,
2676 			 &soc_srngs_state->ring_state[*num_srng]);
2677 
2678 		if (status == QDF_STATUS_SUCCESS)
2679 			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
2680 	}
2681 }
2682 #else
/* Stub used when QCA_MONITOR_PKT_SUPPORT is compiled out */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
}
2689 #endif
2690 
2691 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
/* Capture the TCL command/credit ring state into @ring_state */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_cmd_credit_ring,
					       TCL_CMD_CREDIT, ring_state);
}
2700 #else
/* Stub: TCL cmd/credit SRNG is compiled out; report success with no data */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
2707 #endif
2708 
2709 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
/* Capture the TCL status ring state into @ring_state */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_status_ring,
					       TCL_STATUS, ring_state);
}
2718 #else
/* Stub: TCL status SRNG is compiled out; report success with no data */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
2725 #endif
2726 
2727 /**
2728  * dp_queue_srng_ring_stats(): Print pdev hal level ring stats
2729  * @pdev: DP_pdev handle
2730  *
2731  * Return: void
2732  */
2733 static void dp_queue_ring_stats(struct dp_pdev *pdev)
2734 {
2735 	uint32_t i;
2736 	int mac_id;
2737 	int lmac_id;
2738 	uint32_t j = 0;
2739 	struct dp_soc *soc = pdev->soc;
2740 	struct dp_soc_srngs_state * soc_srngs_state = NULL;
2741 	struct dp_soc_srngs_state *drop_srngs_state = NULL;
2742 	QDF_STATUS status;
2743 
2744 	soc_srngs_state = qdf_mem_malloc(sizeof(struct dp_soc_srngs_state));
2745 	if (!soc_srngs_state) {
2746 		dp_htt_alert("Memory alloc failed for back pressure event");
2747 		return;
2748 	}
2749 
2750 	status = dp_get_srng_ring_state_from_hal
2751 				(pdev->soc, pdev,
2752 				 &pdev->soc->reo_exception_ring,
2753 				 REO_EXCEPTION,
2754 				 &soc_srngs_state->ring_state[j]);
2755 
2756 	if (status == QDF_STATUS_SUCCESS)
2757 		qdf_assert_always(++j < DP_MAX_SRNGS);
2758 
2759 	status = dp_get_srng_ring_state_from_hal
2760 				(pdev->soc, pdev,
2761 				 &pdev->soc->reo_reinject_ring,
2762 				 REO_REINJECT,
2763 				 &soc_srngs_state->ring_state[j]);
2764 
2765 	if (status == QDF_STATUS_SUCCESS)
2766 		qdf_assert_always(++j < DP_MAX_SRNGS);
2767 
2768 	status = dp_get_srng_ring_state_from_hal
2769 				(pdev->soc, pdev,
2770 				 &pdev->soc->reo_cmd_ring,
2771 				 REO_CMD,
2772 				 &soc_srngs_state->ring_state[j]);
2773 
2774 	if (status == QDF_STATUS_SUCCESS)
2775 		qdf_assert_always(++j < DP_MAX_SRNGS);
2776 
2777 	status = dp_get_srng_ring_state_from_hal
2778 				(pdev->soc, pdev,
2779 				 &pdev->soc->reo_status_ring,
2780 				 REO_STATUS,
2781 				 &soc_srngs_state->ring_state[j]);
2782 
2783 	if (status == QDF_STATUS_SUCCESS)
2784 		qdf_assert_always(++j < DP_MAX_SRNGS);
2785 
2786 	status = dp_get_srng_ring_state_from_hal
2787 				(pdev->soc, pdev,
2788 				 &pdev->soc->rx_rel_ring,
2789 				 WBM2SW_RELEASE,
2790 				 &soc_srngs_state->ring_state[j]);
2791 
2792 	if (status == QDF_STATUS_SUCCESS)
2793 		qdf_assert_always(++j < DP_MAX_SRNGS);
2794 
2795 	status = dp_get_tcl_cmd_cred_ring_state_from_hal
2796 				(pdev, &soc_srngs_state->ring_state[j]);
2797 	if (status == QDF_STATUS_SUCCESS)
2798 		qdf_assert_always(++j < DP_MAX_SRNGS);
2799 
2800 	status = dp_get_tcl_status_ring_state_from_hal
2801 				(pdev, &soc_srngs_state->ring_state[j]);
2802 	if (status == QDF_STATUS_SUCCESS)
2803 		qdf_assert_always(++j < DP_MAX_SRNGS);
2804 
2805 	status = dp_get_srng_ring_state_from_hal
2806 				(pdev->soc, pdev,
2807 				 &pdev->soc->wbm_desc_rel_ring,
2808 				 SW2WBM_RELEASE,
2809 				 &soc_srngs_state->ring_state[j]);
2810 
2811 	if (status == QDF_STATUS_SUCCESS)
2812 		qdf_assert_always(++j < DP_MAX_SRNGS);
2813 
2814 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
2815 		status = dp_get_srng_ring_state_from_hal
2816 				(pdev->soc, pdev,
2817 				 &pdev->soc->reo_dest_ring[i],
2818 				 REO_DST,
2819 				 &soc_srngs_state->ring_state[j]);
2820 
2821 		if (status == QDF_STATUS_SUCCESS)
2822 			qdf_assert_always(++j < DP_MAX_SRNGS);
2823 	}
2824 
2825 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
2826 		status = dp_get_srng_ring_state_from_hal
2827 				(pdev->soc, pdev,
2828 				 &pdev->soc->tcl_data_ring[i],
2829 				 TCL_DATA,
2830 				 &soc_srngs_state->ring_state[j]);
2831 
2832 		if (status == QDF_STATUS_SUCCESS)
2833 			qdf_assert_always(++j < DP_MAX_SRNGS);
2834 	}
2835 
2836 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
2837 		status = dp_get_srng_ring_state_from_hal
2838 				(pdev->soc, pdev,
2839 				 &pdev->soc->tx_comp_ring[i],
2840 				 WBM2SW_RELEASE,
2841 				 &soc_srngs_state->ring_state[j]);
2842 
2843 		if (status == QDF_STATUS_SUCCESS)
2844 			qdf_assert_always(++j < DP_MAX_SRNGS);
2845 	}
2846 
2847 	lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id);
2848 	status = dp_get_srng_ring_state_from_hal
2849 				(pdev->soc, pdev,
2850 				 &pdev->soc->rx_refill_buf_ring
2851 				 [lmac_id],
2852 				 RXDMA_BUF,
2853 				 &soc_srngs_state->ring_state[j]);
2854 
2855 	if (status == QDF_STATUS_SUCCESS)
2856 		qdf_assert_always(++j < DP_MAX_SRNGS);
2857 
2858 	status = dp_get_srng_ring_state_from_hal
2859 				(pdev->soc, pdev,
2860 				 &pdev->rx_refill_buf_ring2,
2861 				 RXDMA_BUF,
2862 				 &soc_srngs_state->ring_state[j]);
2863 
2864 	if (status == QDF_STATUS_SUCCESS)
2865 		qdf_assert_always(++j < DP_MAX_SRNGS);
2866 
2867 
2868 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2869 		dp_get_srng_ring_state_from_hal
2870 				(pdev->soc, pdev,
2871 				 &pdev->rx_mac_buf_ring[i],
2872 				 RXDMA_BUF,
2873 				 &soc_srngs_state->ring_state[j]);
2874 
2875 		if (status == QDF_STATUS_SUCCESS)
2876 			qdf_assert_always(++j < DP_MAX_SRNGS);
2877 	}
2878 
2879 	for (mac_id = 0;
2880 	     mac_id  < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
2881 	     mac_id++) {
2882 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2883 						     mac_id, pdev->pdev_id);
2884 
2885 		dp_queue_mon_ring_stats(pdev, lmac_id, &j,
2886 					soc_srngs_state);
2887 
2888 		status = dp_get_srng_ring_state_from_hal
2889 			(pdev->soc, pdev,
2890 			 &pdev->soc->rxdma_mon_status_ring[lmac_id],
2891 			 RXDMA_MONITOR_STATUS,
2892 			 &soc_srngs_state->ring_state[j]);
2893 
2894 		if (status == QDF_STATUS_SUCCESS)
2895 			qdf_assert_always(++j < DP_MAX_SRNGS);
2896 	}
2897 
2898 	for (i = 0; i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
2899 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2900 						     i, pdev->pdev_id);
2901 
2902 		status = dp_get_srng_ring_state_from_hal
2903 				(pdev->soc, pdev,
2904 				 &pdev->soc->rxdma_err_dst_ring
2905 				 [lmac_id],
2906 				 RXDMA_DST,
2907 				 &soc_srngs_state->ring_state[j]);
2908 
2909 		if (status == QDF_STATUS_SUCCESS)
2910 			qdf_assert_always(++j < DP_MAX_SRNGS);
2911 	}
2912 	soc_srngs_state->max_ring_id = j;
2913 
2914 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
2915 
2916 	soc_srngs_state->seq_num = pdev->bkp_stats.seq_num;
2917 
2918 	if (pdev->bkp_stats.queue_depth >= HTT_BKP_STATS_MAX_QUEUE_DEPTH) {
2919 		drop_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
2920 		qdf_assert_always(drop_srngs_state);
2921 		TAILQ_REMOVE(&pdev->bkp_stats.list, drop_srngs_state,
2922 			     list_elem);
2923 		qdf_mem_free(drop_srngs_state);
2924 		pdev->bkp_stats.queue_depth--;
2925 	}
2926 
2927 	pdev->bkp_stats.queue_depth++;
2928 	TAILQ_INSERT_TAIL(&pdev->bkp_stats.list, soc_srngs_state,
2929 			  list_elem);
2930 	pdev->bkp_stats.seq_num++;
2931 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
2932 
2933 	qdf_queue_work(0, pdev->bkp_stats.work_queue,
2934 		       &pdev->bkp_stats.work);
2935 }
2936 
2937 /*
2938  * dp_htt_bkp_event_alert() - htt backpressure event alert
2939  * @msg_word:	htt packet context
2940  * @htt_soc:	HTT SOC handle
2941  *
2942  * Return: after attempting to print stats
2943  */
2944 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
2945 {
2946 	u_int8_t ring_type;
2947 	u_int8_t pdev_id;
2948 	uint8_t target_pdev_id;
2949 	u_int8_t ring_id;
2950 	u_int16_t hp_idx;
2951 	u_int16_t tp_idx;
2952 	u_int32_t bkp_time;
2953 	u_int32_t th_time;
2954 	enum htt_t2h_msg_type msg_type;
2955 	struct dp_soc *dpsoc;
2956 	struct dp_pdev *pdev;
2957 	struct dp_htt_timestamp *radio_tt;
2958 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2959 
2960 
2961 	if (!soc)
2962 		return;
2963 
2964 	dpsoc = (struct dp_soc *)soc->dp_soc;
2965 	soc_cfg_ctx = dpsoc->wlan_cfg_ctx;
2966 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2967 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
2968 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
2969 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2970 							 target_pdev_id);
2971 	if (pdev_id >= MAX_PDEV_CNT) {
2972 		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
2973 		return;
2974 	}
2975 
2976 	th_time = wlan_cfg_time_control_bp(soc_cfg_ctx);
2977 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
2978 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
2979 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
2980 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
2981 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
2982 	radio_tt = &soc->pdevid_tt[pdev_id];
2983 
2984 	switch (ring_type) {
2985 	case HTT_SW_RING_TYPE_UMAC:
2986 		if (!time_allow_print(radio_tt->umac_path, ring_id, th_time))
2987 			return;
2988 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2989 				   bkp_time, radio_tt->umac_path,
2990 				   "HTT_SW_RING_TYPE_UMAC");
2991 	break;
2992 	case HTT_SW_RING_TYPE_LMAC:
2993 		if (!time_allow_print(radio_tt->lmac_path, ring_id, th_time))
2994 			return;
2995 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2996 				   bkp_time, radio_tt->lmac_path,
2997 				   "HTT_SW_RING_TYPE_LMAC");
2998 	break;
2999 	default:
3000 		dp_alert("Invalid ring type: %d", ring_type);
3001 	break;
3002 	}
3003 
3004 	dp_queue_ring_stats(pdev);
3005 }
3006 
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/*
 * dp_offload_ind_handler() - offload msg handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Converts the target pdev id carried in the message to the host pdev id
 * and forwards the payload to WDI subscribers of the pkt-capture offload
 * TX data event.
 *
 * Return: None
 */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
	uint8_t tgt_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	uint8_t host_pdev_id =
		dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
						       tgt_pdev_id);

	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA,
			     soc->dp_soc, msg_word, HTT_INVALID_VDEV,
			     WDI_NO_VAL, host_pdev_id);
}
#else
/* Pkt-capture v2 not compiled in: indication is ignored */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
}
#endif
3034 
3035 #ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
/*
 * dp_update_mlo_ts_offset() - propagate the MLO timestamp offset via CDP
 * @soc: DP SOC handle
 * @ts_lo: lower 32 bits of the offset
 * @ts_hi: upper 32 bits of the offset
 *
 * Return: None
 */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{
	/* Reassemble the 64-bit offset from its two 32-bit halves */
	uint64_t offset = (uint64_t)ts_hi;

	offset = (offset << 32) | ts_lo;
	soc->cdp_soc.ops->mlo_ops->mlo_update_mlo_ts_offset
		((struct cdp_soc_t *)soc, offset);
}
#else
/* Single-chip build: no MLO timestamp offset to propagate */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{}
#endif
/*
 * dp_htt_mlo_peer_map_handler() - handle MLO RX peer map indication
 * @soc: HTT SOC handle
 * @msg_word: Pointer to T2H message payload
 *
 * Decodes the MLO peer id, number of logical links and the (deswizzled)
 * MLO peer MAC address, fills per-chip flow override info, then walks the
 * trailing TLV list to collect per-link chip id / vdev id before handing
 * everything to dp_rx_mlo_peer_map_handler().
 *
 * NOTE(review): all three mlo_flow_info[] entries are decoded from the
 * same payload word (*(msg_word + 3)); entries [1] and [2] look like they
 * were meant to read subsequent words — confirm against the HTT
 * MLO_RX_PEER_MAP message definition before relying on flow info for
 * chips 1 and 2.
 *
 * Return: None
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
	uint8_t *mlo_peer_mac_addr;
	uint16_t mlo_peer_id;
	uint8_t num_links;
	struct dp_mlo_flow_override_info mlo_flow_info[DP_MLO_FLOW_INFO_MAX];
	struct dp_mlo_link_info mlo_link_info[DP_MAX_MLO_LINKS];
	MLO_PEER_MAP_TLV_TAG_ID tlv_type = 0xff;
	uint16_t tlv_len = 0;
	int i = 0;

	mlo_peer_id = HTT_RX_MLO_PEER_MAP_MLO_PEER_ID_GET(*msg_word);
	num_links =
		HTT_RX_MLO_PEER_MAP_NUM_LOGICAL_LINKS_GET(*msg_word);
	/* MAC address spans words 1-2; deswizzle into the local buffer */
	mlo_peer_mac_addr =
	htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
				   &mac_addr_deswizzle_buf[0]);

	mlo_flow_info[0].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[0].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	/* See NOTE(review) above: same source word as entry [0] */
	mlo_flow_info[1].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[1].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	/* See NOTE(review) above: same source word as entry [0] */
	mlo_flow_info[2].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[2].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	/* TLV list starts at word 8; msg_word stays non-NULL, so the loop
	 * is bounded by DP_MAX_MLO_LINKS and the zero-length-TLV break.
	 */
	msg_word = msg_word + 8;
	while (msg_word && (i < DP_MAX_MLO_LINKS)) {
		/* Defaults for links absent from the TLV list */
		mlo_link_info[i].peer_chip_id = 0xFF;
		mlo_link_info[i].vdev_id = 0xFF;

		tlv_type = HTT_RX_MLO_PEER_MAP_TLV_TAG_GET(*msg_word);
		tlv_len = HTT_RX_MLO_PEER_MAP_TLV_LENGTH_GET(*msg_word);

		if (tlv_len == 0) {
			dp_err("TLV Length is 0");
			break;
		}

		if (tlv_type == MLO_PEER_MAP_TLV_STRUCT_SOC_VDEV_PEER_IDS) {
			mlo_link_info[i].peer_chip_id =
				HTT_RX_MLO_PEER_MAP_CHIP_ID_GET(
							*(msg_word + 1));
			mlo_link_info[i].vdev_id =
				HTT_RX_MLO_PEER_MAP_VDEV_ID_GET(
							*(msg_word + 1));
		}
		/* Add header size to tlv length */
		tlv_len = tlv_len + HTT_TLV_HDR_LEN;
		msg_word = (uint32_t *)(((uint8_t *)msg_word) + tlv_len);
		i++;
	}

	dp_rx_mlo_peer_map_handler(soc->dp_soc, mlo_peer_id,
				   mlo_peer_mac_addr,
				   mlo_flow_info, mlo_link_info);
}
3135 
3136 static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
3137 					  uint32_t *msg_word)
3138 {
3139 	uint16_t mlo_peer_id;
3140 
3141 	mlo_peer_id = HTT_RX_MLO_PEER_UNMAP_MLO_PEER_ID_GET(*msg_word);
3142 	dp_rx_mlo_peer_unmap_handler(soc->dp_soc, mlo_peer_id);
3143 }
3144 
/*
 * dp_rx_mlo_timestamp_ind_handler() - handle MLO timestamp offset indication
 * @soc: DP SOC handle
 * @msg_word: Pointer to T2H message payload
 *
 * Validates the target pdev id, notifies WDI subscribers, snapshots the
 * timestamp/offset fields from the message into pdev->timestamp under the
 * htt_stats lock, and finally pushes the new MLO offset down via CDP.
 *
 * Return: None
 */
static void
dp_rx_mlo_timestamp_ind_handler(struct dp_soc *soc,
				uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint8_t target_pdev_id;
	struct dp_pdev *pdev;

	if (!soc)
		return;

	target_pdev_id = HTT_T2H_MLO_TIMESTAMP_OFFSET_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT) {
		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
		return;
	}

	pdev = (struct dp_pdev *)soc->pdev_list[pdev_id];

	if (!pdev) {
		dp_err("Invalid pdev");
		return;
	}
	dp_wdi_event_handler(WDI_EVENT_MLO_TSTMP, soc,
			     msg_word, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);

	/* Snapshot all timestamp fields atomically w.r.t. stats readers */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	pdev->timestamp.msg_type =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MSG_TYPE_GET(*msg_word);
	pdev->timestamp.pdev_id = pdev_id;
	pdev->timestamp.chip_id =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_CHIP_ID_GET(*msg_word);
	pdev->timestamp.mac_clk_freq =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MAC_CLK_FREQ_MHZ_GET(*msg_word);
	/* Words 1-5 carry full 32-bit values; no bitfield extraction */
	pdev->timestamp.sync_tstmp_lo_us = *(msg_word + 1);
	pdev->timestamp.sync_tstmp_hi_us = *(msg_word + 2);
	pdev->timestamp.mlo_offset_lo_us = *(msg_word + 3);
	pdev->timestamp.mlo_offset_hi_us = *(msg_word + 4);
	pdev->timestamp.mlo_offset_clks  = *(msg_word + 5);
	pdev->timestamp.mlo_comp_us =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_US_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_clks =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_CLKS_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_timer =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_PERIOD_US_GET(
							*(msg_word + 7));

	dp_htt_debug("tsf_lo=%d tsf_hi=%d, mlo_ofst_lo=%d, mlo_ofst_hi=%d\n",
		     pdev->timestamp.sync_tstmp_lo_us,
		     pdev->timestamp.sync_tstmp_hi_us,
		     pdev->timestamp.mlo_offset_lo_us,
		     pdev->timestamp.mlo_offset_hi_us);

	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	dp_update_mlo_ts_offset(soc,
				pdev->timestamp.mlo_offset_lo_us,
				pdev->timestamp.mlo_offset_hi_us);
}
3210 #else
/* Non-MLO build stub: an MLO peer map must never arrive, assert hard */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	qdf_assert_always(0);
}
3216 
/* Non-MLO build stub: an MLO peer unmap must never arrive, assert hard */
static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
					 uint32_t *msg_word)
{
	qdf_assert_always(0);
}
3222 
/*
 * Non-MLO build stub: a timestamp offset indication must never arrive,
 * assert hard. Parameter type aligned with the MLO variant
 * (struct dp_soc *) so both configurations expose the same prototype
 * to the caller.
 */
static void
dp_rx_mlo_timestamp_ind_handler(struct dp_soc *soc,
				uint32_t *msg_word)
{
	qdf_assert_always(0);
}
3229 #endif
3230 
3231 /*
3232  * dp_htt_rx_addba_handler() - RX Addba HTT msg handler
3233  * @soc: DP Soc handler
3234  * @peer_id: ID of peer
3235  * @tid: TID number
3236  * @win_sz: BA window size
3237  *
3238  * Return: None
3239  */
3240 static void
3241 dp_htt_rx_addba_handler(struct dp_soc *soc, uint16_t peer_id,
3242 			uint8_t tid, uint16_t win_sz)
3243 {
3244 	uint16_t status;
3245 	struct dp_peer *peer;
3246 
3247 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3248 
3249 	if (!peer) {
3250 		dp_err("Peer not found peer id %d", peer_id);
3251 		return;
3252 	}
3253 
3254 	status = dp_addba_requestprocess_wifi3((struct cdp_soc_t *)soc,
3255 					       peer->mac_addr.raw,
3256 					       peer->vdev->vdev_id, 0,
3257 					       tid, 0, win_sz, 0xffff);
3258 
3259 	dp_addba_resp_tx_completion_wifi3(
3260 		(struct cdp_soc_t *)soc,
3261 		peer->mac_addr.raw, peer->vdev->vdev_id,
3262 		tid,
3263 		status);
3264 
3265 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3266 
3267 	dp_info("PeerID %d BAW %d TID %d stat %d",
3268 		peer_id, win_sz, tid, status);
3269 }
3270 
3271 /*
3272  * dp_htt_ppdu_id_fmt_handler() - PPDU ID Format handler
3273  * @htt_soc: HTT SOC handle
3274  * @msg_word: Pointer to payload
3275  *
3276  * Return: None
3277  */
3278 static void
3279 dp_htt_ppdu_id_fmt_handler(struct dp_soc *soc, uint32_t *msg_word)
3280 {
3281 	uint8_t msg_type, valid, bits, offset;
3282 
3283 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3284 
3285 	msg_word += HTT_PPDU_ID_FMT_IND_LINK_ID_OFFSET;
3286 	valid = HTT_PPDU_ID_FMT_IND_VALID_GET_BITS31_16(*msg_word);
3287 	bits = HTT_PPDU_ID_FMT_IND_BITS_GET_BITS31_16(*msg_word);
3288 	offset = HTT_PPDU_ID_FMT_IND_OFFSET_GET_BITS31_16(*msg_word);
3289 
3290 	dp_info("link_id: valid %u bits %u offset %u", valid, bits, offset);
3291 
3292 	if (valid) {
3293 		soc->link_id_offset = offset;
3294 		soc->link_id_bits = bits;
3295 	}
3296 }
3297 
3298 /*
3299  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
3300  * @context:	Opaque context (HTT SOC handle)
3301  * @pkt:	HTC packet
3302  */
3303 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
3304 {
3305 	struct htt_soc *soc = (struct htt_soc *) context;
3306 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
3307 	u_int32_t *msg_word;
3308 	enum htt_t2h_msg_type msg_type;
3309 	bool free_buf = true;
3310 
3311 	/* check for successful message reception */
3312 	if (pkt->Status != QDF_STATUS_SUCCESS) {
3313 		if (pkt->Status != QDF_STATUS_E_CANCELED)
3314 			soc->stats.htc_err_cnt++;
3315 
3316 		qdf_nbuf_free(htt_t2h_msg);
3317 		return;
3318 	}
3319 
3320 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
3321 
3322 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
3323 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3324 	htt_event_record(soc->htt_logger_handle,
3325 			 msg_type, (uint8_t *)msg_word);
3326 	switch (msg_type) {
3327 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
3328 	{
3329 		dp_htt_bkp_event_alert(msg_word, soc);
3330 		break;
3331 	}
3332 	case HTT_T2H_MSG_TYPE_PEER_MAP:
3333 		{
3334 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3335 			u_int8_t *peer_mac_addr;
3336 			u_int16_t peer_id;
3337 			u_int16_t hw_peer_id;
3338 			u_int8_t vdev_id;
3339 			u_int8_t is_wds;
3340 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
3341 
3342 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
3343 			hw_peer_id =
3344 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
3345 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
3346 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
3347 				(u_int8_t *) (msg_word+1),
3348 				&mac_addr_deswizzle_buf[0]);
3349 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3350 				QDF_TRACE_LEVEL_DEBUG,
3351 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3352 				peer_id, vdev_id);
3353 
3354 			/*
3355 			 * check if peer already exists for this peer_id, if so
3356 			 * this peer map event is in response for a wds peer add
3357 			 * wmi command sent during wds source port learning.
3358 			 * in this case just add the ast entry to the existing
3359 			 * peer ast_list.
3360 			 */
3361 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
3362 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
3363 					       vdev_id, peer_mac_addr, 0,
3364 					       is_wds);
3365 			break;
3366 		}
3367 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
3368 		{
3369 			u_int16_t peer_id;
3370 			u_int8_t vdev_id;
3371 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3372 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
3373 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
3374 
3375 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3376 						 vdev_id, mac_addr, 0,
3377 						 DP_PEER_WDS_COUNT_INVALID);
3378 			break;
3379 		}
3380 	case HTT_T2H_MSG_TYPE_SEC_IND:
3381 		{
3382 			u_int16_t peer_id;
3383 			enum cdp_sec_type sec_type;
3384 			int is_unicast;
3385 
3386 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
3387 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
3388 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
3389 			/* point to the first part of the Michael key */
3390 			msg_word++;
3391 			dp_rx_sec_ind_handler(
3392 				soc->dp_soc, peer_id, sec_type, is_unicast,
3393 				msg_word, msg_word + 2);
3394 			break;
3395 		}
3396 
3397 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
3398 		{
3399 			free_buf =
3400 				dp_monitor_ppdu_stats_ind_handler(soc,
3401 								  msg_word,
3402 								  htt_t2h_msg);
3403 			break;
3404 		}
3405 
3406 	case HTT_T2H_MSG_TYPE_PKTLOG:
3407 		{
3408 			dp_pktlog_msg_handler(soc, msg_word);
3409 			break;
3410 		}
3411 
3412 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3413 		{
3414 			/*
3415 			 * HTC maintains runtime pm count for H2T messages that
3416 			 * have a response msg from FW. This count ensures that
3417 			 * in the case FW does not sent out the response or host
3418 			 * did not process this indication runtime_put happens
3419 			 * properly in the cleanup path.
3420 			 */
3421 			if (htc_dec_return_htt_runtime_cnt(soc->htc_soc) >= 0)
3422 				htc_pm_runtime_put(soc->htc_soc);
3423 			else
3424 				soc->stats.htt_ver_req_put_skip++;
3425 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3426 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3427 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
3428 				"target uses HTT version %d.%d; host uses %d.%d",
3429 				soc->tgt_ver.major, soc->tgt_ver.minor,
3430 				HTT_CURRENT_VERSION_MAJOR,
3431 				HTT_CURRENT_VERSION_MINOR);
3432 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3433 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3434 					QDF_TRACE_LEVEL_WARN,
3435 					"*** Incompatible host/target HTT versions!");
3436 			}
3437 			/* abort if the target is incompatible with the host */
3438 			qdf_assert(soc->tgt_ver.major ==
3439 				HTT_CURRENT_VERSION_MAJOR);
3440 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3441 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3442 					QDF_TRACE_LEVEL_INFO_LOW,
3443 					"*** Warning: host/target HTT versions"
3444 					" are different, though compatible!");
3445 			}
3446 			break;
3447 		}
3448 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3449 		{
3450 			uint16_t peer_id;
3451 			uint8_t tid;
3452 			uint16_t win_sz;
3453 
3454 			/*
3455 			 * Update REO Queue Desc with new values
3456 			 */
3457 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3458 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3459 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3460 
3461 			/*
3462 			 * Window size needs to be incremented by 1
3463 			 * since fw needs to represent a value of 256
3464 			 * using just 8 bits
3465 			 */
3466 			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
3467 						tid, win_sz + 1);
3468 			break;
3469 		}
3470 	case HTT_T2H_MSG_TYPE_RX_ADDBA_EXTN:
3471 		{
3472 			uint16_t peer_id;
3473 			uint8_t tid;
3474 			uint16_t win_sz;
3475 
3476 			peer_id = HTT_RX_ADDBA_EXTN_PEER_ID_GET(*msg_word);
3477 			tid = HTT_RX_ADDBA_EXTN_TID_GET(*msg_word);
3478 
3479 			msg_word++;
3480 			win_sz = HTT_RX_ADDBA_EXTN_WIN_SIZE_GET(*msg_word);
3481 
3482 			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
3483 						tid, win_sz);
3484 			break;
3485 		}
3486 	case HTT_T2H_PPDU_ID_FMT_IND:
3487 		{
3488 			dp_htt_ppdu_id_fmt_handler(soc->dp_soc, msg_word);
3489 			break;
3490 		}
3491 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3492 		{
3493 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3494 			break;
3495 		}
3496 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
3497 		{
3498 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3499 			u_int8_t *peer_mac_addr;
3500 			u_int16_t peer_id;
3501 			u_int16_t hw_peer_id;
3502 			u_int8_t vdev_id;
3503 			bool is_wds;
3504 			u_int16_t ast_hash;
3505 			struct dp_ast_flow_override_info ast_flow_info;
3506 
3507 			qdf_mem_set(&ast_flow_info, 0,
3508 					    sizeof(struct dp_ast_flow_override_info));
3509 
3510 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
3511 			hw_peer_id =
3512 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
3513 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
3514 			peer_mac_addr =
3515 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3516 						   &mac_addr_deswizzle_buf[0]);
3517 			is_wds =
3518 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
3519 			ast_hash =
3520 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
3521 			/*
3522 			 * Update 4 ast_index per peer, ast valid mask
3523 			 * and TID flow valid mask.
3524 			 * AST valid mask is 3 bit field corresponds to
3525 			 * ast_index[3:1]. ast_index 0 is always valid.
3526 			 */
3527 			ast_flow_info.ast_valid_mask =
3528 			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
3529 			ast_flow_info.ast_idx[0] = hw_peer_id;
3530 			ast_flow_info.ast_flow_mask[0] =
3531 			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
3532 			ast_flow_info.ast_idx[1] =
3533 			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
3534 			ast_flow_info.ast_flow_mask[1] =
3535 			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
3536 			ast_flow_info.ast_idx[2] =
3537 			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
3538 			ast_flow_info.ast_flow_mask[2] =
3539 			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
3540 			ast_flow_info.ast_idx[3] =
3541 			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
3542 			ast_flow_info.ast_flow_mask[3] =
3543 			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
3544 			/*
3545 			 * TID valid mask is applicable only
3546 			 * for HI and LOW priority flows.
3547 			 * tid_valid_mas is 8 bit field corresponds
3548 			 * to TID[7:0]
3549 			 */
3550 			ast_flow_info.tid_valid_low_pri_mask =
3551 			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
3552 			ast_flow_info.tid_valid_hi_pri_mask =
3553 			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));
3554 
3555 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3556 				  QDF_TRACE_LEVEL_DEBUG,
3557 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3558 				  peer_id, vdev_id);
3559 
3560 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3561 				  QDF_TRACE_LEVEL_INFO,
3562 				  "ast_idx[0] %d ast_idx[1] %d ast_idx[2] %d ast_idx[3] %d n",
3563 				  ast_flow_info.ast_idx[0],
3564 				  ast_flow_info.ast_idx[1],
3565 				  ast_flow_info.ast_idx[2],
3566 				  ast_flow_info.ast_idx[3]);
3567 
3568 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3569 					       hw_peer_id, vdev_id,
3570 					       peer_mac_addr, ast_hash,
3571 					       is_wds);
3572 
3573 			/*
3574 			 * Update ast indexes for flow override support
3575 			 * Applicable only for non wds peers
3576 			 */
3577 			if (!soc->dp_soc->ast_offload_support)
3578 				dp_peer_ast_index_flow_queue_map_create(
3579 						soc->dp_soc, is_wds,
3580 						peer_id, peer_mac_addr,
3581 						&ast_flow_info);
3582 
3583 			break;
3584 		}
3585 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
3586 		{
3587 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3588 			u_int8_t *mac_addr;
3589 			u_int16_t peer_id;
3590 			u_int8_t vdev_id;
3591 			u_int8_t is_wds;
3592 			u_int32_t free_wds_count;
3593 
3594 			peer_id =
3595 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
3596 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
3597 			mac_addr =
3598 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3599 						   &mac_addr_deswizzle_buf[0]);
3600 			is_wds =
3601 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
3602 			free_wds_count =
3603 			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));
3604 
3605 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3606 				  QDF_TRACE_LEVEL_INFO,
3607 				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
3608 				  peer_id, vdev_id);
3609 
3610 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3611 						 vdev_id, mac_addr,
3612 						 is_wds, free_wds_count);
3613 			break;
3614 		}
3615 	case HTT_T2H_MSG_TYPE_RX_DELBA:
3616 		{
3617 			uint16_t peer_id;
3618 			uint8_t tid;
3619 			uint8_t win_sz;
3620 			QDF_STATUS status;
3621 
3622 			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
3623 			tid = HTT_RX_DELBA_TID_GET(*msg_word);
3624 			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);
3625 
3626 			status = dp_rx_delba_ind_handler(
3627 				soc->dp_soc,
3628 				peer_id, tid, win_sz);
3629 
3630 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3631 				  QDF_TRACE_LEVEL_INFO,
3632 				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
3633 				  peer_id, win_sz, tid, status);
3634 			break;
3635 		}
3636 	case HTT_T2H_MSG_TYPE_RX_DELBA_EXTN:
3637 		{
3638 			uint16_t peer_id;
3639 			uint8_t tid;
3640 			uint16_t win_sz;
3641 			QDF_STATUS status;
3642 
3643 			peer_id = HTT_RX_DELBA_EXTN_PEER_ID_GET(*msg_word);
3644 			tid = HTT_RX_DELBA_EXTN_TID_GET(*msg_word);
3645 
3646 			msg_word++;
3647 			win_sz = HTT_RX_DELBA_EXTN_WIN_SIZE_GET(*msg_word);
3648 
3649 			status = dp_rx_delba_ind_handler(soc->dp_soc,
3650 							 peer_id, tid,
3651 							 win_sz);
3652 
3653 			dp_info("DELBA PeerID %d BAW %d TID %d stat %d",
3654 				peer_id, win_sz, tid, status);
3655 			break;
3656 		}
3657 	case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
3658 		{
3659 			uint16_t num_entries;
3660 			uint32_t cmem_ba_lo;
3661 			uint32_t cmem_ba_hi;
3662 
3663 			num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
3664 			cmem_ba_lo = *(msg_word + 1);
3665 			cmem_ba_hi = *(msg_word + 2);
3666 
3667 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3668 				  FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
3669 				  num_entries, cmem_ba_lo, cmem_ba_hi);
3670 
3671 			dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
3672 						     cmem_ba_lo, cmem_ba_hi);
3673 			break;
3674 		}
3675 	case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
3676 		{
3677 			dp_offload_ind_handler(soc, msg_word);
3678 			break;
3679 		}
3680 	case HTT_T2H_MSG_TYPE_PEER_MAP_V3:
3681 	{
3682 		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3683 		u_int8_t *peer_mac_addr;
3684 		u_int16_t peer_id;
3685 		u_int16_t hw_peer_id;
3686 		u_int8_t vdev_id;
3687 		uint8_t is_wds;
3688 		u_int16_t ast_hash = 0;
3689 
3690 		peer_id = HTT_RX_PEER_MAP_V3_SW_PEER_ID_GET(*msg_word);
3691 		vdev_id = HTT_RX_PEER_MAP_V3_VDEV_ID_GET(*msg_word);
3692 		peer_mac_addr =
3693 		htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3694 					   &mac_addr_deswizzle_buf[0]);
3695 		hw_peer_id = HTT_RX_PEER_MAP_V3_HW_PEER_ID_GET(*(msg_word + 3));
3696 		ast_hash = HTT_RX_PEER_MAP_V3_CACHE_SET_NUM_GET(*(msg_word + 3));
3697 		is_wds = HTT_RX_PEER_MAP_V3_NEXT_HOP_GET(*(msg_word + 4));
3698 
3699 		dp_htt_info("HTT_T2H_MSG_TYPE_PEER_MAP_V3 msg for peer id %d vdev id %d n",
3700 			    peer_id, vdev_id);
3701 
3702 		dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3703 				       hw_peer_id, vdev_id,
3704 				       peer_mac_addr, ast_hash,
3705 				       is_wds);
3706 
3707 		break;
3708 	}
3709 	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP:
3710 	{
3711 		dp_htt_mlo_peer_map_handler(soc, msg_word);
3712 		break;
3713 	}
3714 	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_UNMAP:
3715 	{
3716 		dp_htt_mlo_peer_unmap_handler(soc, msg_word);
3717 		break;
3718 	}
3719 	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
3720 	{
3721 		dp_rx_mlo_timestamp_ind_handler(soc->dp_soc, msg_word);
3722 		break;
3723 	}
3724 	case HTT_T2H_MSG_TYPE_VDEVS_TXRX_STATS_PERIODIC_IND:
3725 	{
3726 		dp_vdev_txrx_hw_stats_handler(soc, msg_word);
3727 		break;
3728 	}
3729 	case HTT_T2H_SAWF_DEF_QUEUES_MAP_REPORT_CONF:
3730 	{
3731 		dp_sawf_def_queues_update_map_report_conf(soc, msg_word,
3732 							  htt_t2h_msg);
3733 		break;
3734 	}
3735 	case HTT_T2H_SAWF_MSDUQ_INFO_IND:
3736 	{
3737 		dp_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
3738 		break;
3739 	}
3740 	case HTT_T2H_MSG_TYPE_STREAMING_STATS_IND:
3741 	{
3742 		dp_sawf_mpdu_stats_handler(soc, htt_t2h_msg);
3743 		break;
3744 	}
3745 
3746 	default:
3747 		break;
3748 	};
3749 
3750 	/* Free the indication buffer */
3751 	if (free_buf)
3752 		qdf_nbuf_free(htt_t2h_msg);
3753 }
3754 
3755 /*
3756  * dp_htt_h2t_full() - Send full handler (called from HTC)
3757  * @context:	Opaque context (HTT SOC handle)
3758  * @pkt:	HTC packet
3759  *
3760  * Return: enum htc_send_full_action
3761  */
3762 static enum htc_send_full_action
3763 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
3764 {
3765 	return HTC_SEND_FULL_KEEP;
3766 }
3767 
3768 /*
3769  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3770  * @context:	Opaque context (HTT SOC handle)
3771  * @nbuf:	nbuf containing T2H message
3772  * @pipe_id:	HIF pipe ID
3773  *
3774  * Return: QDF_STATUS
3775  *
3776  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3777  * will be used for packet log and other high-priority HTT messages. Proper
3778  * HTC connection to be added later once required FW changes are available
3779  */
3780 static QDF_STATUS
3781 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3782 {
3783 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3784 	HTC_PACKET htc_pkt;
3785 
3786 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3787 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3788 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3789 	htc_pkt.pPktContext = (void *)nbuf;
3790 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3791 
3792 	return rc;
3793 }
3794 
3795 /*
3796  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3797  * @htt_soc:	HTT SOC handle
3798  *
3799  * Return: QDF_STATUS
3800  */
3801 static QDF_STATUS
3802 htt_htc_soc_attach(struct htt_soc *soc)
3803 {
3804 	struct htc_service_connect_req connect;
3805 	struct htc_service_connect_resp response;
3806 	QDF_STATUS status;
3807 	struct dp_soc *dpsoc = soc->dp_soc;
3808 
3809 	qdf_mem_zero(&connect, sizeof(connect));
3810 	qdf_mem_zero(&response, sizeof(response));
3811 
3812 	connect.pMetaData = NULL;
3813 	connect.MetaDataLength = 0;
3814 	connect.EpCallbacks.pContext = soc;
3815 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3816 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3817 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3818 
3819 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3820 	connect.EpCallbacks.EpRecvRefill = NULL;
3821 
3822 	/* N/A, fill is done by HIF */
3823 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3824 
3825 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3826 	/*
3827 	 * Specify how deep to let a queue get before htc_send_pkt will
3828 	 * call the EpSendFull function due to excessive send queue depth.
3829 	 */
3830 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3831 
3832 	/* disable flow control for HTT data message service */
3833 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3834 
3835 	/* connect to control service */
3836 	connect.service_id = HTT_DATA_MSG_SVC;
3837 
3838 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3839 
3840 	if (status != QDF_STATUS_SUCCESS)
3841 		return status;
3842 
3843 	soc->htc_endpoint = response.Endpoint;
3844 
3845 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3846 
3847 	htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc);
3848 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3849 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3850 
3851 	return QDF_STATUS_SUCCESS; /* success */
3852 }
3853 
3854 /*
3855  * htt_soc_initialize() - SOC level HTT initialization
3856  * @htt_soc: Opaque htt SOC handle
3857  * @ctrl_psoc: Opaque ctrl SOC handle
3858  * @htc_soc: SOC level HTC handle
3859  * @hal_soc: Opaque HAL SOC handle
3860  * @osdev: QDF device
3861  *
3862  * Return: HTT handle on success; NULL on failure
3863  */
3864 void *
3865 htt_soc_initialize(struct htt_soc *htt_soc,
3866 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
3867 		   HTC_HANDLE htc_soc,
3868 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
3869 {
3870 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3871 
3872 	soc->osdev = osdev;
3873 	soc->ctrl_psoc = ctrl_psoc;
3874 	soc->htc_soc = htc_soc;
3875 	soc->hal_soc = hal_soc_hdl;
3876 
3877 	if (htt_htc_soc_attach(soc))
3878 		goto fail2;
3879 
3880 	return soc;
3881 
3882 fail2:
3883 	return NULL;
3884 }
3885 
/*
 * htt_soc_htc_dealloc() - release HTC-related resources held by the HTT SOC:
 * interface logging context, misc pkt pool and the HTC_PACKET freelist.
 * @htt_handle: HTT SOC handle
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
3892 
3893 /*
3894  * htt_soc_htc_prealloc() - HTC memory prealloc
3895  * @htt_soc: SOC level HTT handle
3896  *
3897  * Return: QDF_STATUS_SUCCESS on Success or
3898  * QDF_STATUS_E_NOMEM on allocation failure
3899  */
3900 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
3901 {
3902 	int i;
3903 
3904 	soc->htt_htc_pkt_freelist = NULL;
3905 	/* pre-allocate some HTC_PACKET objects */
3906 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3907 		struct dp_htt_htc_pkt_union *pkt;
3908 		pkt = qdf_mem_malloc(sizeof(*pkt));
3909 		if (!pkt)
3910 			return QDF_STATUS_E_NOMEM;
3911 
3912 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3913 	}
3914 	return QDF_STATUS_SUCCESS;
3915 }
3916 
3917 /*
3918  * htt_soc_detach() - Free SOC level HTT handle
3919  * @htt_hdl: HTT SOC handle
3920  */
3921 void htt_soc_detach(struct htt_soc *htt_hdl)
3922 {
3923 	int i;
3924 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
3925 
3926 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3927 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_path);
3928 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_path);
3929 	}
3930 
3931 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
3932 	qdf_mem_free(htt_handle);
3933 
3934 }
3935 
3936 /**
3937  * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
3938  * @pdev: DP PDEV handle
3939  * @stats_type_upload_mask: stats type requested by user
3940  * @config_param_0: extra configuration parameters
3941  * @config_param_1: extra configuration parameters
3942  * @config_param_2: extra configuration parameters
3943  * @config_param_3: extra configuration parameters
3944  * @mac_id: mac number
3945  *
3946  * return: QDF STATUS
3947  */
3948 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3949 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3950 		uint32_t config_param_1, uint32_t config_param_2,
3951 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3952 		uint8_t mac_id)
3953 {
3954 	struct htt_soc *soc = pdev->soc->htt_handle;
3955 	struct dp_htt_htc_pkt *pkt;
3956 	qdf_nbuf_t msg;
3957 	uint32_t *msg_word;
3958 	uint8_t pdev_mask = 0;
3959 	uint8_t *htt_logger_bufp;
3960 	int mac_for_pdev;
3961 	int target_pdev_id;
3962 	QDF_STATUS status;
3963 
3964 	msg = qdf_nbuf_alloc(
3965 			soc->osdev,
3966 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3967 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3968 
3969 	if (!msg)
3970 		return QDF_STATUS_E_NOMEM;
3971 
3972 	/*TODO:Add support for SOC stats
3973 	 * Bit 0: SOC Stats
3974 	 * Bit 1: Pdev stats for pdev id 0
3975 	 * Bit 2: Pdev stats for pdev id 1
3976 	 * Bit 3: Pdev stats for pdev id 2
3977 	 */
3978 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3979 	target_pdev_id =
3980 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
3981 
3982 	pdev_mask = 1 << target_pdev_id;
3983 
3984 	/*
3985 	 * Set the length of the message.
3986 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3987 	 * separately during the below call to qdf_nbuf_push_head.
3988 	 * The contribution from the HTC header is added separately inside HTC.
3989 	 */
3990 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3991 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3992 				"Failed to expand head for HTT_EXT_STATS");
3993 		qdf_nbuf_free(msg);
3994 		return QDF_STATUS_E_FAILURE;
3995 	}
3996 
3997 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3998 
3999 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4000 	htt_logger_bufp = (uint8_t *)msg_word;
4001 	*msg_word = 0;
4002 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
4003 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
4004 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
4005 
4006 	/* word 1 */
4007 	msg_word++;
4008 	*msg_word = 0;
4009 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
4010 
4011 	/* word 2 */
4012 	msg_word++;
4013 	*msg_word = 0;
4014 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
4015 
4016 	/* word 3 */
4017 	msg_word++;
4018 	*msg_word = 0;
4019 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
4020 
4021 	/* word 4 */
4022 	msg_word++;
4023 	*msg_word = 0;
4024 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
4025 
4026 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
4027 
4028 	/* word 5 */
4029 	msg_word++;
4030 
4031 	/* word 6 */
4032 	msg_word++;
4033 	*msg_word = 0;
4034 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
4035 
4036 	/* word 7 */
4037 	msg_word++;
4038 	*msg_word = 0;
4039 	/* Currently Using last 2 bits for pdev_id
4040 	 * For future reference, reserving 3 bits in cookie_msb for pdev_id
4041 	 */
4042 	cookie_msb = (cookie_msb | pdev->pdev_id);
4043 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
4044 
4045 	pkt = htt_htc_pkt_alloc(soc);
4046 	if (!pkt) {
4047 		qdf_nbuf_free(msg);
4048 		return QDF_STATUS_E_NOMEM;
4049 	}
4050 
4051 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4052 
4053 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4054 			dp_htt_h2t_send_complete_free_netbuf,
4055 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4056 			soc->htc_endpoint,
4057 			/* tag for FW response msg not guaranteed */
4058 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4059 
4060 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4061 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
4062 				     htt_logger_bufp);
4063 
4064 	if (status != QDF_STATUS_SUCCESS) {
4065 		qdf_nbuf_free(msg);
4066 		htt_htc_pkt_free(soc, pkt);
4067 	}
4068 
4069 	return status;
4070 }
4071 
4072 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
4073 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK 0xFFFFFFFF
4074 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK 0xFFFFFFFF00000000
4075 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT 32
4076 
/**
 * dp_h2t_hw_vdev_stats_config_send() - send HW vdev txrx stats config to FW
 * @dpsoc: DP SOC handle
 * @pdev_id: pdev id, or INVALID_PDEV_ID (then target pdev id 0 is used)
 * @enable: enable/disable the HW vdev txrx stats offload
 * @reset: request FW to reset the selected stats
 * @reset_bitmask: 64-bit mask of stats to reset; lower 32 bits go in message
 *	word 1, upper 32 bits in word 2
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	struct htt_soc *soc = dpsoc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;
	int duration;
	uint32_t bitmask;
	int target_pdev_id;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_vdevs_txrx_stats_cfg)),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate "
		"HTT_H2T_HW_VDEV_TXRX_STATS_CFG_MSG_SZ msg buffer", dpsoc);
		return QDF_STATUS_E_NOMEM;
	}

	if (pdev_id != INVALID_PDEV_ID)
		target_pdev_id = DP_SW2HW_MACID(pdev_id);
	else
		target_pdev_id = 0;

	duration =
	wlan_cfg_get_vdev_stats_hw_offload_timer(dpsoc->wlan_cfg_ctx);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_vdevs_txrx_stats_cfg))) {
		dp_htt_err("%pK: Failed to expand head for HTT_HW_VDEV_STATS"
			   , dpsoc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* word 0: msg type, target pdev id, enable, interval and reset bit */
	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;

	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG);
	HTT_RX_VDEVS_TXRX_STATS_PDEV_ID_SET(*msg_word, target_pdev_id);

	HTT_RX_VDEVS_TXRX_STATS_ENABLE_SET(*msg_word, enable);

	/* interval is sent as (duration >> 3); units are defined by the
	 * FW interface - TODO confirm against htt.h
	 */
	HTT_RX_VDEVS_TXRX_STATS_PERIODIC_INTERVAL_SET(*msg_word,
						      (duration >> 3));

	HTT_RX_VDEVS_TXRX_STATS_RESET_STATS_BITS_SET(*msg_word, reset);

	/* word 1: lower 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask = (reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK);
	*msg_word = bitmask;

	/* word 2: upper 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask =
		((reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK) >>
		 HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT);
	*msg_word = bitmask;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer",
			   dpsoc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG,
				     htt_logger_bufp);

	/* on send failure the netbuf and the HTC pkt are still owned here */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4182 #else
/* Stub when QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT is not compiled in:
 * report success without sending anything to FW.
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	return QDF_STATUS_SUCCESS;
}
4189 #endif
4190 
4191 /**
4192  * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration
4193  * HTT message to pass to FW
4194  * @pdev: DP PDEV handle
4195  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
4196  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
4197  *
4198  * tuple_mask[1:0]:
4199  *   00 - Do not report 3 tuple hash value
4200  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
4201  *   01 - Report 3 tuple hash value in flow_id_toeplitz
4202  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
4203  *
4204  * return: QDF STATUS
4205  */
4206 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
4207 				     uint32_t tuple_mask, uint8_t mac_id)
4208 {
4209 	struct htt_soc *soc = pdev->soc->htt_handle;
4210 	struct dp_htt_htc_pkt *pkt;
4211 	qdf_nbuf_t msg;
4212 	uint32_t *msg_word;
4213 	uint8_t *htt_logger_bufp;
4214 	int mac_for_pdev;
4215 	int target_pdev_id;
4216 
4217 	msg = qdf_nbuf_alloc(
4218 			soc->osdev,
4219 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
4220 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4221 
4222 	if (!msg)
4223 		return QDF_STATUS_E_NOMEM;
4224 
4225 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4226 	target_pdev_id =
4227 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
4228 
4229 	/*
4230 	 * Set the length of the message.
4231 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4232 	 * separately during the below call to qdf_nbuf_push_head.
4233 	 * The contribution from the HTC header is added separately inside HTC.
4234 	 */
4235 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
4236 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4237 			  "Failed to expand head for HTT_3TUPLE_CONFIG");
4238 		qdf_nbuf_free(msg);
4239 		return QDF_STATUS_E_FAILURE;
4240 	}
4241 
4242 	dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------",
4243 		    pdev->soc, tuple_mask, target_pdev_id);
4244 
4245 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
4246 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4247 	htt_logger_bufp = (uint8_t *)msg_word;
4248 
4249 	*msg_word = 0;
4250 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
4251 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
4252 
4253 	msg_word++;
4254 	*msg_word = 0;
4255 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4256 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4257 
4258 	pkt = htt_htc_pkt_alloc(soc);
4259 	if (!pkt) {
4260 		qdf_nbuf_free(msg);
4261 		return QDF_STATUS_E_NOMEM;
4262 	}
4263 
4264 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4265 
4266 	SET_HTC_PACKET_INFO_TX(
4267 			&pkt->htc_pkt,
4268 			dp_htt_h2t_send_complete_free_netbuf,
4269 			qdf_nbuf_data(msg),
4270 			qdf_nbuf_len(msg),
4271 			soc->htc_endpoint,
4272 			/* tag for no FW response msg */
4273 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4274 
4275 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4276 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
4277 			    htt_logger_bufp);
4278 
4279 	return QDF_STATUS_SUCCESS;
4280 }
4281 
/* This workaround can be removed once the HTT header (htt.h) properly
 * defines HTT_H2T_MSG_TYPE_PPDU_STATS_CFG.
 */
4285 #if defined(WDI_EVENT_ENABLE)
4286 /**
4287  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
4288  * @pdev: DP PDEV handle
4289  * @stats_type_upload_mask: stats type requested by user
4290  * @mac_id: Mac id number
4291  *
4292  * return: QDF STATUS
4293  */
4294 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
4295 		uint32_t stats_type_upload_mask, uint8_t mac_id)
4296 {
4297 	struct htt_soc *soc = pdev->soc->htt_handle;
4298 	struct dp_htt_htc_pkt *pkt;
4299 	qdf_nbuf_t msg;
4300 	uint32_t *msg_word;
4301 	uint8_t pdev_mask;
4302 	QDF_STATUS status;
4303 
4304 	msg = qdf_nbuf_alloc(
4305 			soc->osdev,
4306 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
4307 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
4308 
4309 	if (!msg) {
4310 		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer"
4311 			   , pdev->soc);
4312 		qdf_assert(0);
4313 		return QDF_STATUS_E_NOMEM;
4314 	}
4315 
4316 	/*TODO:Add support for SOC stats
4317 	 * Bit 0: SOC Stats
4318 	 * Bit 1: Pdev stats for pdev id 0
4319 	 * Bit 2: Pdev stats for pdev id 1
4320 	 * Bit 3: Pdev stats for pdev id 2
4321 	 */
4322 	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
4323 								mac_id);
4324 
4325 	/*
4326 	 * Set the length of the message.
4327 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4328 	 * separately during the below call to qdf_nbuf_push_head.
4329 	 * The contribution from the HTC header is added separately inside HTC.
4330 	 */
4331 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
4332 		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS"
4333 			   , pdev->soc);
4334 		qdf_nbuf_free(msg);
4335 		return QDF_STATUS_E_FAILURE;
4336 	}
4337 
4338 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
4339 
4340 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4341 	*msg_word = 0;
4342 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
4343 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
4344 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
4345 			stats_type_upload_mask);
4346 
4347 	pkt = htt_htc_pkt_alloc(soc);
4348 	if (!pkt) {
4349 		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer", pdev->soc);
4350 		qdf_assert(0);
4351 		qdf_nbuf_free(msg);
4352 		return QDF_STATUS_E_NOMEM;
4353 	}
4354 
4355 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4356 
4357 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4358 			dp_htt_h2t_send_complete_free_netbuf,
4359 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4360 			soc->htc_endpoint,
4361 			/* tag for no FW response msg */
4362 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4363 
4364 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4365 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
4366 				     (uint8_t *)msg_word);
4367 
4368 	if (status != QDF_STATUS_SUCCESS) {
4369 		qdf_nbuf_free(msg);
4370 		htt_htc_pkt_free(soc, pkt);
4371 	}
4372 
4373 	return status;
4374 }
4375 
4376 qdf_export_symbol(dp_h2t_cfg_stats_msg_send);
4377 #endif
4378 
4379 void
4380 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
4381 			     uint32_t *tag_buf)
4382 {
4383 	struct dp_peer *peer = NULL;
4384 	switch (tag_type) {
4385 	case HTT_STATS_PEER_DETAILS_TAG:
4386 	{
4387 		htt_peer_details_tlv *dp_stats_buf =
4388 			(htt_peer_details_tlv *)tag_buf;
4389 
4390 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
4391 	}
4392 	break;
4393 	case HTT_STATS_PEER_STATS_CMN_TAG:
4394 	{
4395 		htt_peer_stats_cmn_tlv *dp_stats_buf =
4396 			(htt_peer_stats_cmn_tlv *)tag_buf;
4397 
4398 		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
4399 					     DP_MOD_ID_HTT);
4400 
4401 		if (peer && !peer->bss_peer) {
4402 			peer->stats.tx.inactive_time =
4403 				dp_stats_buf->inactive_time;
4404 			qdf_event_set(&pdev->fw_peer_stats_event);
4405 		}
4406 		if (peer)
4407 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4408 	}
4409 	break;
4410 	default:
4411 		qdf_err("Invalid tag_type");
4412 	}
4413 }
4414 
4415 /**
4416  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
4417  * @pdev: DP pdev handle
4418  * @fse_setup_info: FST setup parameters
4419  *
4420  * Return: Success when HTT message is sent, error on failure
4421  */
4422 QDF_STATUS
4423 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
4424 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
4425 {
4426 	struct htt_soc *soc = pdev->soc->htt_handle;
4427 	struct dp_htt_htc_pkt *pkt;
4428 	qdf_nbuf_t msg;
4429 	u_int32_t *msg_word;
4430 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4431 	uint8_t *htt_logger_bufp;
4432 	u_int32_t *key;
4433 	QDF_STATUS status;
4434 
4435 	msg = qdf_nbuf_alloc(
4436 		soc->osdev,
4437 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4438 		/* reserve room for the HTC header */
4439 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4440 
4441 	if (!msg)
4442 		return QDF_STATUS_E_NOMEM;
4443 
4444 	/*
4445 	 * Set the length of the message.
4446 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4447 	 * separately during the below call to qdf_nbuf_push_head.
4448 	 * The contribution from the HTC header is added separately inside HTC.
4449 	 */
4450 	if (!qdf_nbuf_put_tail(msg,
4451 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4452 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
4453 		return QDF_STATUS_E_FAILURE;
4454 	}
4455 
4456 	/* fill in the message contents */
4457 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4458 
4459 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4460 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4461 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4462 	htt_logger_bufp = (uint8_t *)msg_word;
4463 
4464 	*msg_word = 0;
4465 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4466 
4467 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4468 
4469 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4470 
4471 	msg_word++;
4472 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4473 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4474 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4475 					     fse_setup_info->ip_da_sa_prefix);
4476 
4477 	msg_word++;
4478 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4479 					  fse_setup_info->base_addr_lo);
4480 	msg_word++;
4481 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4482 					  fse_setup_info->base_addr_hi);
4483 
4484 	key = (u_int32_t *)fse_setup_info->hash_key;
4485 	fse_setup->toeplitz31_0 = *key++;
4486 	fse_setup->toeplitz63_32 = *key++;
4487 	fse_setup->toeplitz95_64 = *key++;
4488 	fse_setup->toeplitz127_96 = *key++;
4489 	fse_setup->toeplitz159_128 = *key++;
4490 	fse_setup->toeplitz191_160 = *key++;
4491 	fse_setup->toeplitz223_192 = *key++;
4492 	fse_setup->toeplitz255_224 = *key++;
4493 	fse_setup->toeplitz287_256 = *key++;
4494 	fse_setup->toeplitz314_288 = *key;
4495 
4496 	msg_word++;
4497 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4498 	msg_word++;
4499 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4500 	msg_word++;
4501 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4502 	msg_word++;
4503 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4504 	msg_word++;
4505 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4506 	msg_word++;
4507 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4508 	msg_word++;
4509 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4510 	msg_word++;
4511 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4512 	msg_word++;
4513 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4514 	msg_word++;
4515 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4516 					  fse_setup->toeplitz314_288);
4517 
4518 	pkt = htt_htc_pkt_alloc(soc);
4519 	if (!pkt) {
4520 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4521 		qdf_assert(0);
4522 		qdf_nbuf_free(msg);
4523 		return QDF_STATUS_E_RESOURCES; /* failure */
4524 	}
4525 
4526 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4527 
4528 	SET_HTC_PACKET_INFO_TX(
4529 		&pkt->htc_pkt,
4530 		dp_htt_h2t_send_complete_free_netbuf,
4531 		qdf_nbuf_data(msg),
4532 		qdf_nbuf_len(msg),
4533 		soc->htc_endpoint,
4534 		/* tag for no FW response msg */
4535 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4536 
4537 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4538 
4539 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4540 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4541 				     htt_logger_bufp);
4542 
4543 	if (status == QDF_STATUS_SUCCESS) {
4544 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4545 			fse_setup_info->pdev_id);
4546 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4547 				   (void *)fse_setup_info->hash_key,
4548 				   fse_setup_info->hash_key_len);
4549 	} else {
4550 		qdf_nbuf_free(msg);
4551 		htt_htc_pkt_free(soc, pkt);
4552 	}
4553 
4554 	return status;
4555 }
4556 
4557 /**
4558  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
4559  * add/del a flow in HW
4560  * @pdev: DP pdev handle
4561  * @fse_op_info: Flow entry parameters
4562  *
4563  * Return: Success when HTT message is sent, error on failure
4564  */
4565 QDF_STATUS
4566 dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
4567 			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
4568 {
4569 	struct htt_soc *soc = pdev->soc->htt_handle;
4570 	struct dp_htt_htc_pkt *pkt;
4571 	qdf_nbuf_t msg;
4572 	u_int32_t *msg_word;
4573 	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
4574 	uint8_t *htt_logger_bufp;
4575 	QDF_STATUS status;
4576 
4577 	msg = qdf_nbuf_alloc(
4578 		soc->osdev,
4579 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
4580 		/* reserve room for the HTC header */
4581 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4582 	if (!msg)
4583 		return QDF_STATUS_E_NOMEM;
4584 
4585 	/*
4586 	 * Set the length of the message.
4587 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4588 	 * separately during the below call to qdf_nbuf_push_head.
4589 	 * The contribution from the HTC header is added separately inside HTC.
4590 	 */
4591 	if (!qdf_nbuf_put_tail(msg,
4592 			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
4593 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4594 		qdf_nbuf_free(msg);
4595 		return QDF_STATUS_E_FAILURE;
4596 	}
4597 
4598 	/* fill in the message contents */
4599 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4600 
4601 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
4602 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4603 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4604 	htt_logger_bufp = (uint8_t *)msg_word;
4605 
4606 	*msg_word = 0;
4607 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
4608 
4609 	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
4610 
4611 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
4612 	msg_word++;
4613 	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
4614 	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
4615 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4616 					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
4617 		msg_word++;
4618 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4619 		*msg_word,
4620 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
4621 		msg_word++;
4622 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4623 		*msg_word,
4624 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
4625 		msg_word++;
4626 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4627 		*msg_word,
4628 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
4629 		msg_word++;
4630 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4631 		*msg_word,
4632 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
4633 		msg_word++;
4634 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4635 		*msg_word,
4636 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
4637 		msg_word++;
4638 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4639 		*msg_word,
4640 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
4641 		msg_word++;
4642 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4643 		*msg_word,
4644 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
4645 		msg_word++;
4646 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4647 		*msg_word,
4648 		qdf_htonl(
4649 		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
4650 		msg_word++;
4651 		HTT_RX_FSE_SOURCEPORT_SET(
4652 			*msg_word,
4653 			fse_op_info->rx_flow->flow_tuple_info.src_port);
4654 		HTT_RX_FSE_DESTPORT_SET(
4655 			*msg_word,
4656 			fse_op_info->rx_flow->flow_tuple_info.dest_port);
4657 		msg_word++;
4658 		HTT_RX_FSE_L4_PROTO_SET(
4659 			*msg_word,
4660 			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
4661 	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
4662 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4663 					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
4664 	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
4665 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
4666 	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
4667 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
4668 	}
4669 
4670 	pkt = htt_htc_pkt_alloc(soc);
4671 	if (!pkt) {
4672 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4673 		qdf_assert(0);
4674 		qdf_nbuf_free(msg);
4675 		return QDF_STATUS_E_RESOURCES; /* failure */
4676 	}
4677 
4678 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4679 
4680 	SET_HTC_PACKET_INFO_TX(
4681 		&pkt->htc_pkt,
4682 		dp_htt_h2t_send_complete_free_netbuf,
4683 		qdf_nbuf_data(msg),
4684 		qdf_nbuf_len(msg),
4685 		soc->htc_endpoint,
4686 		/* tag for no FW response msg */
4687 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4688 
4689 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4690 
4691 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4692 				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
4693 				     htt_logger_bufp);
4694 
4695 	if (status == QDF_STATUS_SUCCESS) {
4696 		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
4697 			fse_op_info->pdev_id);
4698 	} else {
4699 		qdf_nbuf_free(msg);
4700 		htt_htc_pkt_free(soc, pkt);
4701 	}
4702 
4703 	return status;
4704 }
4705 
4706 /**
4707  * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
4708  * @pdev: DP pdev handle
4709  * @fse_op_info: Flow entry parameters
4710  *
4711  * Return: Success when HTT message is sent, error on failure
4712  */
4713 QDF_STATUS
4714 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
4715 		      struct dp_htt_rx_fisa_cfg *fisa_config)
4716 {
4717 	struct htt_soc *soc = pdev->soc->htt_handle;
4718 	struct dp_htt_htc_pkt *pkt;
4719 	qdf_nbuf_t msg;
4720 	u_int32_t *msg_word;
4721 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
4722 	uint8_t *htt_logger_bufp;
4723 	uint32_t len;
4724 	QDF_STATUS status;
4725 
4726 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
4727 
4728 	msg = qdf_nbuf_alloc(soc->osdev,
4729 			     len,
4730 			     /* reserve room for the HTC header */
4731 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
4732 			     4,
4733 			     TRUE);
4734 	if (!msg)
4735 		return QDF_STATUS_E_NOMEM;
4736 
4737 	/*
4738 	 * Set the length of the message.
4739 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4740 	 * separately during the below call to qdf_nbuf_push_head.
4741 	 * The contribution from the HTC header is added separately inside HTC.
4742 	 */
4743 	if (!qdf_nbuf_put_tail(msg,
4744 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
4745 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4746 		qdf_nbuf_free(msg);
4747 		return QDF_STATUS_E_FAILURE;
4748 	}
4749 
4750 	/* fill in the message contents */
4751 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4752 
4753 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
4754 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4755 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4756 	htt_logger_bufp = (uint8_t *)msg_word;
4757 
4758 	*msg_word = 0;
4759 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
4760 
4761 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
4762 
4763 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);
4764 
4765 	msg_word++;
4766 	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
4767 	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);
4768 
4769 	msg_word++;
4770 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
4771 
4772 	pkt = htt_htc_pkt_alloc(soc);
4773 	if (!pkt) {
4774 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4775 		qdf_assert(0);
4776 		qdf_nbuf_free(msg);
4777 		return QDF_STATUS_E_RESOURCES; /* failure */
4778 	}
4779 
4780 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4781 
4782 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4783 			       dp_htt_h2t_send_complete_free_netbuf,
4784 			       qdf_nbuf_data(msg),
4785 			       qdf_nbuf_len(msg),
4786 			       soc->htc_endpoint,
4787 			       /* tag for no FW response msg */
4788 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
4789 
4790 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4791 
4792 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
4793 				     htt_logger_bufp);
4794 
4795 	if (status == QDF_STATUS_SUCCESS) {
4796 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
4797 			fisa_config->pdev_id);
4798 	} else {
4799 		qdf_nbuf_free(msg);
4800 		htt_htc_pkt_free(soc, pkt);
4801 	}
4802 
4803 	return status;
4804 }
4805 
#ifdef WLAN_SUPPORT_PPEDS
/**
 * dp_htt_rxdma_rxole_ppe_cfg_set() - Send RxOLE and RxDMA PPE config
 * @soc: Data path SoC handle
 * @cfg: RxDMA and RxOLE PPE config
 *
 * Builds an HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG message from @cfg and
 * hands it to HTC for transmission to the target.  On any failure after
 * allocation, the netbuf (and HTC packet, if allocated) are freed here;
 * on success, ownership passes to the send-done callback.
 *
 * Return: QDF_STATUS_SUCCESS when the HTT message is queued to HTC;
 *         QDF_STATUS_E_NOMEM, QDF_STATUS_E_FAILURE or
 *         QDF_STATUS_E_RESOURCES on allocation/send failure.
 */
QDF_STATUS
dp_htt_rxdma_rxole_ppe_cfg_set(struct dp_soc *soc,
			       struct dp_htt_rxdma_rxole_ppe_config *cfg)
{
	struct htt_soc *htt_handle = soc->htt_handle;
	uint32_t len;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	QDF_STATUS status;
	uint8_t *htt_logger_bufp;
	struct dp_htt_htc_pkt *pkt;

	len = HTT_MSG_BUF_SIZE(
	      sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(
		msg, sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t))) {
		qdf_err("Failed to expand head for HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0,
	       sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));

	/* Rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* Logger snapshot starts at the HTT message, not the HTC header */
	htt_logger_bufp = (uint8_t *)msg_word;

	/* Word 0: message type plus all PPE override knobs from @cfg */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG);
	HTT_PPE_CFG_OVERRIDE_SET(*msg_word, cfg->override);
	HTT_PPE_CFG_REO_DEST_IND_SET(
			*msg_word, cfg->reo_destination_indication);
	HTT_PPE_CFG_MULTI_BUF_MSDU_OVERRIDE_EN_SET(
			*msg_word, cfg->multi_buffer_msdu_override_en);
	HTT_PPE_CFG_INTRA_BSS_OVERRIDE_EN_SET(
			*msg_word, cfg->intra_bss_override);
	HTT_PPE_CFG_DECAP_RAW_OVERRIDE_EN_SET(
			*msg_word, cfg->decap_raw_override);
	HTT_PPE_CFG_DECAP_NWIFI_OVERRIDE_EN_SET(
			*msg_word, cfg->decap_nwifi_override);
	HTT_PPE_CFG_IP_FRAG_OVERRIDE_EN_SET(
			*msg_word, cfg->ip_frag_override);

	pkt = htt_htc_pkt_alloc(htt_handle);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       htt_handle->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(htt_handle, pkt,
				     HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		/* Send failed: reclaim both the netbuf and the HTC packet */
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(htt_handle, pkt);
		return status;
	}

	dp_info("HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG sent");
	return status;
}
#endif /* WLAN_SUPPORT_PPEDS */
4911 
4912 /**
4913  * dp_bk_pressure_stats_handler(): worker function to print back pressure
4914  *				   stats
4915  *
4916  * @context : argument to work function
4917  */
4918 static void dp_bk_pressure_stats_handler(void *context)
4919 {
4920 	struct dp_pdev *pdev = (struct dp_pdev *)context;
4921 	struct dp_soc_srngs_state *soc_srngs_state = NULL;
4922 	const char *ring_name;
4923 	int i;
4924 	struct dp_srng_ring_state *ring_state;
4925 	bool empty_flag;
4926 
4927 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4928 
4929 	/* Extract only first entry for printing in one work event */
4930 	if (pdev->bkp_stats.queue_depth &&
4931 	    !TAILQ_EMPTY(&pdev->bkp_stats.list)) {
4932 		soc_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
4933 		TAILQ_REMOVE(&pdev->bkp_stats.list, soc_srngs_state,
4934 			     list_elem);
4935 		pdev->bkp_stats.queue_depth--;
4936 	}
4937 
4938 	empty_flag = TAILQ_EMPTY(&pdev->bkp_stats.list);
4939 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4940 
4941 	if (soc_srngs_state) {
4942 		DP_PRINT_STATS("### BKP stats for seq_num %u START ###",
4943 			       soc_srngs_state->seq_num);
4944 		for (i = 0; i < soc_srngs_state->max_ring_id; i++) {
4945 			ring_state = &soc_srngs_state->ring_state[i];
4946 			ring_name = dp_srng_get_str_from_hal_ring_type
4947 						(ring_state->ring_type);
4948 			DP_PRINT_STATS("%s: SW:Head pointer = %d Tail Pointer = %d\n",
4949 				       ring_name,
4950 				       ring_state->sw_head,
4951 				       ring_state->sw_tail);
4952 
4953 			DP_PRINT_STATS("%s: HW:Head pointer = %d Tail Pointer = %d\n",
4954 				       ring_name,
4955 				       ring_state->hw_head,
4956 				       ring_state->hw_tail);
4957 		}
4958 
4959 		DP_PRINT_STATS("### BKP stats for seq_num %u COMPLETE ###",
4960 			       soc_srngs_state->seq_num);
4961 		qdf_mem_free(soc_srngs_state);
4962 	}
4963 	dp_print_napi_stats(pdev->soc);
4964 
4965 	/* Schedule work again if queue is not empty */
4966 	if (!empty_flag)
4967 		qdf_queue_work(0, pdev->bkp_stats.work_queue,
4968 			       &pdev->bkp_stats.work);
4969 }
4970 
4971 /*
4972  * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
4973  *				processing
4974  * @pdev: Datapath PDEV handle
4975  *
4976  */
4977 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev)
4978 {
4979 	struct dp_soc_srngs_state *ring_state, *ring_state_next;
4980 
4981 	if (!pdev->bkp_stats.work_queue)
4982 		return;
4983 
4984 	qdf_flush_workqueue(0, pdev->bkp_stats.work_queue);
4985 	qdf_destroy_workqueue(0, pdev->bkp_stats.work_queue);
4986 	qdf_flush_work(&pdev->bkp_stats.work);
4987 	qdf_disable_work(&pdev->bkp_stats.work);
4988 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4989 	TAILQ_FOREACH_SAFE(ring_state, &pdev->bkp_stats.list,
4990 			   list_elem, ring_state_next) {
4991 		TAILQ_REMOVE(&pdev->bkp_stats.list, ring_state,
4992 			     list_elem);
4993 		qdf_mem_free(ring_state);
4994 	}
4995 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4996 	qdf_spinlock_destroy(&pdev->bkp_stats.list_lock);
4997 }
4998 
4999 /*
5000  * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
5001  *				processing
5002  * @pdev: Datapath PDEV handle
5003  *
5004  * Return: QDF_STATUS_SUCCESS: Success
5005  *         QDF_STATUS_E_NOMEM: Error
5006  */
5007 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev)
5008 {
5009 	TAILQ_INIT(&pdev->bkp_stats.list);
5010 	pdev->bkp_stats.seq_num = 0;
5011 	pdev->bkp_stats.queue_depth = 0;
5012 
5013 	qdf_create_work(0, &pdev->bkp_stats.work,
5014 			dp_bk_pressure_stats_handler, pdev);
5015 
5016 	pdev->bkp_stats.work_queue =
5017 		qdf_alloc_unbound_workqueue("dp_bkp_work_queue");
5018 	if (!pdev->bkp_stats.work_queue)
5019 		goto fail;
5020 
5021 	qdf_spinlock_create(&pdev->bkp_stats.list_lock);
5022 	return QDF_STATUS_SUCCESS;
5023 
5024 fail:
5025 	dp_htt_alert("BKP stats attach failed");
5026 	qdf_flush_work(&pdev->bkp_stats.work);
5027 	qdf_disable_work(&pdev->bkp_stats.work);
5028 	return QDF_STATUS_E_FAILURE;
5029 }
5030 
5031 #ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_htt_umac_reset_send_setup_cmd() - Send UMAC hang recovery prerequisite
 *	setup command to the target
 * @soc: Data path SoC handle
 * @setup_params: MSI data and shared-memory address used for the UMAC hang
 *	recovery handshake
 *
 * Builds an HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP message
 * (T2H via MSI, H2T via polling) carrying the MSI data, the shared-memory
 * region size and its 64-bit address split into low/high words, and queues
 * it to HTC.
 *
 * Return: QDF_STATUS_SUCCESS when the message is queued to HTC;
 *         QDF_STATUS_E_NOMEM or QDF_STATUS_E_FAILURE on allocation/send
 *         failure.
 */
QDF_STATUS dp_htt_umac_reset_send_setup_cmd(
		struct dp_soc *soc,
		const struct dp_htt_umac_reset_setup_cmd_params *setup_params)
{
	struct htt_soc *htt_handle = soc->htt_handle;
	uint32_t len;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	QDF_STATUS status;
	uint8_t *htt_logger_bufp;
	struct dp_htt_htc_pkt *pkt;

	len = HTT_MSG_BUF_SIZE(
		HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES);

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(
		msg, HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES)) {
		dp_htt_err("Failed to expand head");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	/* Rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* Logger snapshot starts at the HTT message, not the HTC header */
	htt_logger_bufp = (uint8_t *)msg_word;

	qdf_mem_zero(msg_word,
		     HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES);

	/* Word 0: message type and T2H/H2T notification methods */
	HTT_H2T_MSG_TYPE_SET(
		*msg_word,
		HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP);
	HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_T2H_MSG_METHOD_SET(
		*msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling);
	HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_H2T_MSG_METHOD_SET(
		*msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling);

	/* Word 1: MSI data the target uses for T2H interrupts */
	msg_word++;
	*msg_word = setup_params->msi_data;

	/* Word 2: size of the shared-memory handshake region */
	msg_word++;
	*msg_word = sizeof(htt_umac_hang_recovery_msg_shmem_t);

	/* Words 3-4: 64-bit shared-memory address, low word first */
	msg_word++;
	*msg_word = setup_params->shmem_addr_low;

	msg_word++;
	*msg_word = setup_params->shmem_addr_high;

	pkt = htt_htc_pkt_alloc(htt_handle);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       htt_handle->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(
			htt_handle, pkt,
			HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP,
			htt_logger_bufp);

	if (QDF_IS_STATUS_ERROR(status)) {
		/* Send failed: reclaim both the netbuf and the HTC packet */
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(htt_handle, pkt);
		return status;
	}

	dp_info("HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP sent");
	return status;
}
5133 #endif
5134