xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision b118e31770646189d507115705e6a8341392c990)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <htt.h>
21 #include <hal_hw_headers.h>
22 #include <hal_api.h>
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "dp_htt.h"
30 #ifdef WIFI_MONITOR_SUPPORT
31 #include <dp_mon.h>
32 #endif
33 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
34 #include "cdp_txrx_cmn_struct.h"
35 
36 #ifdef FEATURE_PERPKT_INFO
37 #include "dp_ratetable.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef CONFIG_SAWF_DEF_QUEUES
41 #include <dp_sawf_htt.h>
42 #endif
43 
44 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
45 
46 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
47 
48 #define HTT_MSG_BUF_SIZE(msg_bytes) \
49 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
50 
51 #define HTT_PID_BIT_MASK 0x3
52 
53 #define DP_EXT_MSG_LENGTH 2048
54 #define HTT_HEADER_LEN 16
55 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
56 
57 #define HTT_SHIFT_UPPER_TIMESTAMP 32
58 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
59 #define HTT_BKP_STATS_MAX_QUEUE_DEPTH 16
60 
61 struct dp_htt_htc_pkt *
62 htt_htc_pkt_alloc(struct htt_soc *soc)
63 {
64 	struct dp_htt_htc_pkt_union *pkt = NULL;
65 
66 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
67 	if (soc->htt_htc_pkt_freelist) {
68 		pkt = soc->htt_htc_pkt_freelist;
69 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
70 	}
71 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
72 
73 	if (!pkt)
74 		pkt = qdf_mem_malloc(sizeof(*pkt));
75 
76 	if (!pkt)
77 		return NULL;
78 
79 	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
80 
81 	return &pkt->u.pkt; /* not actually a dereference */
82 }
83 
84 qdf_export_symbol(htt_htc_pkt_alloc);
85 
86 void
87 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
88 {
89 	struct dp_htt_htc_pkt_union *u_pkt =
90 		(struct dp_htt_htc_pkt_union *)pkt;
91 
92 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
93 	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
94 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
95 	soc->htt_htc_pkt_freelist = u_pkt;
96 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
97 }
98 
99 qdf_export_symbol(htt_htc_pkt_free);
100 
101 /*
102  * htt_htc_pkt_pool_free() - Free HTC packet pool
103  * @htt_soc:	HTT SOC handle
104  */
105 void
106 htt_htc_pkt_pool_free(struct htt_soc *soc)
107 {
108 	struct dp_htt_htc_pkt_union *pkt, *next;
109 	pkt = soc->htt_htc_pkt_freelist;
110 	while (pkt) {
111 		next = pkt->u.next;
112 		qdf_mem_free(pkt);
113 		pkt = next;
114 	}
115 	soc->htt_htc_pkt_freelist = NULL;
116 }
117 
118 
119 #ifndef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
120 
/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no. of pkts in list
 *
 * Walks the misclist under the TX mutex and frees every node beyond the
 * first @level entries, unmapping and freeing the network buffer
 * attached to each freed node.  The node at position @level becomes the
 * new tail (its next pointer is cleared via @prev on the first trimmed
 * iteration).
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			/* pkt was freed; NULL it so prev is not advanced
			 * onto freed memory below.
			 */
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
153 
/*
 * htt_htc_misc_pkt_list_add() - Add pkt to misc list
 * @soc: HTT SOC handle
 * @pkt: pkt to be added to list
 *
 * Prepends @pkt to the misclist and then trims the list down to the
 * current HTC TX queue depth plus DP_HTT_HTC_PKT_MISCLIST_SIZE, so only
 * packets that could still be in flight are retained.
 */
void
htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
{
	struct dp_htt_htc_pkt_union *u_pkt =
				(struct dp_htt_htc_pkt_union *)pkt;
	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
							pkt->htc_pkt.Endpoint)
				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	if (soc->htt_htc_pkt_misclist) {
		u_pkt->u.next = soc->htt_htc_pkt_misclist;
		soc->htt_htc_pkt_misclist = u_pkt;
	} else {
		/* NOTE(review): u_pkt->u.next is left untouched here even
		 * though the two branches look mergeable; u.next aliases the
		 * start of htc_pkt in the union and the packet may still be
		 * referenced by HTC at this point — confirm before
		 * simplifying.
		 */
		soc->htt_htc_pkt_misclist = u_pkt;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);

	/* only ce pipe size + tx_queue_depth could possibly be in use
	 * free older packets in the misclist
	 */
	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
}

qdf_export_symbol(htt_htc_misc_pkt_list_add);
184 #endif  /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
185 
186 /*
187  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
188  * @htt_soc:	HTT SOC handle
189  */
190 static void
191 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
192 {
193 	struct dp_htt_htc_pkt_union *pkt, *next;
194 	qdf_nbuf_t netbuf;
195 
196 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
197 	pkt = soc->htt_htc_pkt_misclist;
198 
199 	while (pkt) {
200 		next = pkt->u.next;
201 		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
202 		    HTC_PACKET_MAGIC_COOKIE) {
203 			pkt = next;
204 			soc->stats.skip_count++;
205 			continue;
206 		}
207 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
208 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
209 
210 		soc->stats.htc_pkt_free++;
211 		dp_htt_info("%pK: Pkt free count %d",
212 			    soc->dp_soc, soc->stats.htc_pkt_free);
213 
214 		qdf_nbuf_free(netbuf);
215 		qdf_mem_free(pkt);
216 		pkt = next;
217 	}
218 	soc->htt_htc_pkt_misclist = NULL;
219 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
220 	dp_info("HTC Packets, fail count = %d, skip count = %d",
221 		soc->stats.fail_count, soc->stats.skip_count);
222 }
223 
/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
 * @tgt_mac_addr: Target MAC
 * @buffer: Output buffer
 *
 * Return: pointer to the de-swizzled MAC address (@buffer on big-endian
 * hosts, @tgt_mac_addr otherwise)
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The target byte-swaps each u_int32_t of the uploaded message for
	 * a big-endian host, which scrambles byte-array fields such as the
	 * MAC address.  Undo it here: MAC bytes 0-3 sit reversed in the
	 * first word, bytes 4-5 reversed at the start of the second word.
	 */
	int i;

	for (i = 0; i < 4; i++)
		buffer[i] = tgt_mac_addr[3 - i];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * Host and target endianness match - the MAC address can be used
	 * directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
257 
/*
 * dp_htt_h2t_send_complete_free_netbuf() - Send-done callback that only
 * frees the message buffer
 * @soc:	SOC handle (unused)
 * @status:	Completion status (unused)
 * @netbuf:	HTT message buffer to release
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
270 
271 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
272 /*
273  * dp_htt_h2t_send_complete() - H2T completion handler
274  * @context:	Opaque context (HTT SOC handle)
275  * @htc_pkt:	HTC packet
276  */
277 static void
278 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
279 {
280 	struct htt_soc *soc =  (struct htt_soc *) context;
281 	struct dp_htt_htc_pkt *htt_pkt;
282 	qdf_nbuf_t netbuf;
283 
284 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
285 
286 	/* process (free or keep) the netbuf that held the message */
287 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
288 	/*
289 	 * adf sendcomplete is required for windows only
290 	 */
291 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
292 	/* free the htt_htc_pkt / HTC_PACKET object */
293 	qdf_nbuf_free(netbuf);
294 	htt_htc_pkt_free(soc, htt_pkt);
295 }
296 
297 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
298 
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context:    Opaque context (HTT SOC handle)
 * @htc_pkt:    HTC packet
 */
304 static void
305 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
306 {
307 	void (*send_complete_part2)(
308 	     void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
309 	struct htt_soc *soc =  (struct htt_soc *) context;
310 	struct dp_htt_htc_pkt *htt_pkt;
311 	qdf_nbuf_t netbuf;
312 
313 	send_complete_part2 = htc_pkt->pPktContext;
314 
315 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
316 
317 	/* process (free or keep) the netbuf that held the message */
318 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
319 	/*
320 	 * adf sendcomplete is required for windows only
321 	*/
322 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
323 	if (send_complete_part2){
324 		send_complete_part2(
325 		    htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
326 	}
327 	/* free the htt_htc_pkt / HTC_PACKET object */
328 	htt_htc_pkt_free(soc, htt_pkt);
329 }
330 
331 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
332 
333 /*
334  * dp_htt_h2t_add_tcl_metadata_ver_v1() - Add tcl_metadata verion V1
335  * @htt_soc:	HTT SOC handle
336  * @msg:	Pointer to nbuf
337  *
338  * Return: 0 on success; error code on failure
339  */
340 static int dp_htt_h2t_add_tcl_metadata_ver_v1(struct htt_soc *soc,
341 					      qdf_nbuf_t *msg)
342 {
343 	uint32_t *msg_word;
344 
345 	*msg = qdf_nbuf_alloc(
346 		soc->osdev,
347 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
348 		/* reserve room for the HTC header */
349 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
350 	if (!*msg)
351 		return QDF_STATUS_E_NOMEM;
352 
353 	/*
354 	 * Set the length of the message.
355 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
356 	 * separately during the below call to qdf_nbuf_push_head.
357 	 * The contribution from the HTC header is added separately inside HTC.
358 	 */
359 	if (!qdf_nbuf_put_tail(*msg, HTT_VER_REQ_BYTES)) {
360 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
361 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
362 			  __func__);
363 		return QDF_STATUS_E_FAILURE;
364 	}
365 
366 	/* fill in the message contents */
367 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
368 
369 	/* rewind beyond alignment pad to get to the HTC header reserved area */
370 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
371 
372 	*msg_word = 0;
373 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
374 
375 	return QDF_STATUS_SUCCESS;
376 }
377 
378 #ifdef QCA_DP_TX_FW_METADATA_V2
379 /*
380  * dp_htt_h2t_add_tcl_metadata_ver_v2() - Add tcl_metadata verion V2
381  * @htt_soc:	HTT SOC handle
382  * @msg:	Pointer to nbuf
383  *
384  * Return: 0 on success; error code on failure
385  */
386 static int dp_htt_h2t_add_tcl_metadata_ver_v2(struct htt_soc *soc,
387 					      qdf_nbuf_t *msg)
388 {
389 	uint32_t *msg_word;
390 
391 	*msg = qdf_nbuf_alloc(
392 		soc->osdev,
393 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ),
394 		/* reserve room for the HTC header */
395 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
396 	if (!*msg)
397 		return QDF_STATUS_E_NOMEM;
398 
399 	/*
400 	 * Set the length of the message.
401 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
402 	 * separately during the below call to qdf_nbuf_push_head.
403 	 * The contribution from the HTC header is added separately inside HTC.
404 	 */
405 	if (!qdf_nbuf_put_tail(*msg,
406 			       HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ)) {
407 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
408 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
409 			  __func__);
410 		return QDF_STATUS_E_FAILURE;
411 	}
412 
413 	/* fill in the message contents */
414 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
415 
416 	/* rewind beyond alignment pad to get to the HTC header reserved area */
417 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
418 
419 	*msg_word = 0;
420 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
421 
422 	/* word 1 */
423 	msg_word++;
424 	*msg_word = 0;
425 	HTT_OPTION_TLV_TAG_SET(*msg_word, HTT_OPTION_TLV_TAG_TCL_METADATA_VER);
426 	HTT_OPTION_TLV_LENGTH_SET(*msg_word, HTT_TCL_METADATA_VER_SZ);
427 	HTT_OPTION_TLV_TCL_METADATA_VER_SET(*msg_word,
428 					    HTT_OPTION_TLV_TCL_METADATA_V2);
429 
430 	return QDF_STATUS_SUCCESS;
431 }
432 
433 /*
434  * dp_htt_h2t_add_tcl_metadata_ver() - Add tcl_metadata verion
435  * @htt_soc:	HTT SOC handle
436  * @msg:	Pointer to nbuf
437  *
438  * Return: 0 on success; error code on failure
439  */
440 static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
441 {
442 	/* Use tcl_metadata_v1 when NSS offload is enabled */
443 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->dp_soc->wlan_cfg_ctx) ||
444 	    soc->dp_soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
445 		return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
446 	else
447 		return dp_htt_h2t_add_tcl_metadata_ver_v2(soc, msg);
448 }
449 #else
/* Without QCA_DP_TX_FW_METADATA_V2, only tcl_metadata v1 is available. */
static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
{
	return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
}
454 #endif
455 
456 /*
457  * htt_h2t_ver_req_msg() - Send HTT version request message to target
458  * @htt_soc:	HTT SOC handle
459  *
460  * Return: 0 on success; error code on failure
461  */
462 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
463 {
464 	struct dp_htt_htc_pkt *pkt;
465 	qdf_nbuf_t msg = NULL;
466 	QDF_STATUS status;
467 
468 	status = dp_htt_h2t_add_tcl_metadata_ver(soc, &msg);
469 	if (status != QDF_STATUS_SUCCESS)
470 		return status;
471 
472 	pkt = htt_htc_pkt_alloc(soc);
473 	if (!pkt) {
474 		qdf_nbuf_free(msg);
475 		return QDF_STATUS_E_FAILURE;
476 	}
477 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
478 
479 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
480 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
481 		qdf_nbuf_len(msg), soc->htc_endpoint,
482 		HTC_TX_PACKET_TAG_RTPM_PUT_RC);
483 
484 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
485 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
486 				     NULL);
487 
488 	if (status != QDF_STATUS_SUCCESS) {
489 		qdf_nbuf_free(msg);
490 		htt_htc_pkt_free(soc, pkt);
491 	}
492 
493 	return status;
494 }
495 
496 /*
497  * htt_srng_setup() - Send SRNG setup message to target
498  * @htt_soc:	HTT SOC handle
499  * @mac_id:	MAC Id
500  * @hal_srng:	Opaque HAL SRNG pointer
501  * @hal_ring_type:	SRNG ring type
502  *
503  * Return: 0 on success; error code on failure
504  */
505 int htt_srng_setup(struct htt_soc *soc, int mac_id,
506 		   hal_ring_handle_t hal_ring_hdl,
507 		   int hal_ring_type)
508 {
509 	struct dp_htt_htc_pkt *pkt;
510 	qdf_nbuf_t htt_msg;
511 	uint32_t *msg_word;
512 	struct hal_srng_params srng_params;
513 	qdf_dma_addr_t hp_addr, tp_addr;
514 	uint32_t ring_entry_size =
515 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
516 	int htt_ring_type, htt_ring_id;
517 	uint8_t *htt_logger_bufp;
518 	int target_pdev_id;
519 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
520 	QDF_STATUS status;
521 
522 	/* Sizes should be set in 4-byte words */
523 	ring_entry_size = ring_entry_size >> 2;
524 
525 	htt_msg = qdf_nbuf_alloc(soc->osdev,
526 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
527 		/* reserve room for the HTC header */
528 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
529 	if (!htt_msg) {
530 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
531 		goto fail0;
532 	}
533 
534 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
535 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
536 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
537 
538 	switch (hal_ring_type) {
539 	case RXDMA_BUF:
540 #ifdef QCA_HOST2FW_RXBUF_RING
541 		if (srng_params.ring_id ==
542 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
543 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
544 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
545 			htt_ring_type = HTT_SW_TO_SW_RING;
546 #ifdef IPA_OFFLOAD
547 		} else if (srng_params.ring_id ==
548 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 +
549 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
550 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
551 			htt_ring_type = HTT_SW_TO_SW_RING;
552 #endif
553 #else
554 		if (srng_params.ring_id ==
555 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
556 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
557 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
558 			htt_ring_type = HTT_SW_TO_HW_RING;
559 #endif
560 		} else if (srng_params.ring_id ==
561 #ifdef IPA_OFFLOAD
562 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
563 #else
564 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
565 #endif
566 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
567 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
568 			htt_ring_type = HTT_SW_TO_HW_RING;
569 		} else {
570 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
571 				   "%s: Ring %d currently not supported",
572 				   __func__, srng_params.ring_id);
573 			goto fail1;
574 		}
575 
576 		break;
577 	case RXDMA_MONITOR_BUF:
578 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
579 							 RXDMA_MONITOR_BUF);
580 		htt_ring_type = HTT_SW_TO_HW_RING;
581 		break;
582 	case RXDMA_MONITOR_STATUS:
583 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
584 		htt_ring_type = HTT_SW_TO_HW_RING;
585 		break;
586 	case RXDMA_MONITOR_DST:
587 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
588 							 RXDMA_MONITOR_DST);
589 		htt_ring_type = HTT_HW_TO_SW_RING;
590 		break;
591 	case RXDMA_MONITOR_DESC:
592 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
593 		htt_ring_type = HTT_SW_TO_HW_RING;
594 		break;
595 	case RXDMA_DST:
596 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
597 		htt_ring_type = HTT_HW_TO_SW_RING;
598 		break;
599 	case TX_MONITOR_BUF:
600 		htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
601 		htt_ring_type = HTT_SW_TO_HW_RING;
602 		break;
603 	case TX_MONITOR_DST:
604 		htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
605 		htt_ring_type = HTT_HW_TO_SW_RING;
606 		break;
607 
608 	default:
609 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
610 			"%s: Ring currently not supported", __func__);
611 			goto fail1;
612 	}
613 
614 	dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
615 		hal_ring_type, srng_params.ring_id, htt_ring_id,
616 		(uint64_t)hp_addr,
617 		(uint64_t)tp_addr);
618 	/*
619 	 * Set the length of the message.
620 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
621 	 * separately during the below call to qdf_nbuf_push_head.
622 	 * The contribution from the HTC header is added separately inside HTC.
623 	 */
624 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
625 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
626 			"%s: Failed to expand head for SRING_SETUP msg",
627 			__func__);
628 		return QDF_STATUS_E_FAILURE;
629 	}
630 
631 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
632 
633 	/* rewind beyond alignment pad to get to the HTC header reserved area */
634 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
635 
636 	/* word 0 */
637 	*msg_word = 0;
638 	htt_logger_bufp = (uint8_t *)msg_word;
639 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
640 	target_pdev_id =
641 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
642 
643 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
644 			(htt_ring_type == HTT_HW_TO_SW_RING))
645 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
646 	else
647 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
648 
649 	dp_info("mac_id %d", mac_id);
650 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
651 	/* TODO: Discuss with FW on changing this to unique ID and using
652 	 * htt_ring_type to send the type of ring
653 	 */
654 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
655 
656 	/* word 1 */
657 	msg_word++;
658 	*msg_word = 0;
659 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
660 		srng_params.ring_base_paddr & 0xffffffff);
661 
662 	/* word 2 */
663 	msg_word++;
664 	*msg_word = 0;
665 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
666 		(uint64_t)srng_params.ring_base_paddr >> 32);
667 
668 	/* word 3 */
669 	msg_word++;
670 	*msg_word = 0;
671 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
672 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
673 		(ring_entry_size * srng_params.num_entries));
674 	dp_info("entry_size %d", ring_entry_size);
675 	dp_info("num_entries %d", srng_params.num_entries);
676 	dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
677 	if (htt_ring_type == HTT_SW_TO_HW_RING)
678 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
679 						*msg_word, 1);
680 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
681 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
682 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
683 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
684 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
685 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
686 
687 	/* word 4 */
688 	msg_word++;
689 	*msg_word = 0;
690 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
691 		hp_addr & 0xffffffff);
692 
693 	/* word 5 */
694 	msg_word++;
695 	*msg_word = 0;
696 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
697 		(uint64_t)hp_addr >> 32);
698 
699 	/* word 6 */
700 	msg_word++;
701 	*msg_word = 0;
702 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
703 		tp_addr & 0xffffffff);
704 
705 	/* word 7 */
706 	msg_word++;
707 	*msg_word = 0;
708 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
709 		(uint64_t)tp_addr >> 32);
710 
711 	/* word 8 */
712 	msg_word++;
713 	*msg_word = 0;
714 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
715 		srng_params.msi_addr & 0xffffffff);
716 
717 	/* word 9 */
718 	msg_word++;
719 	*msg_word = 0;
720 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
721 		(uint64_t)(srng_params.msi_addr) >> 32);
722 
723 	/* word 10 */
724 	msg_word++;
725 	*msg_word = 0;
726 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
727 		qdf_cpu_to_le32(srng_params.msi_data));
728 
729 	/* word 11 */
730 	msg_word++;
731 	*msg_word = 0;
732 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
733 		srng_params.intr_batch_cntr_thres_entries *
734 		ring_entry_size);
735 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
736 		srng_params.intr_timer_thres_us >> 3);
737 
738 	/* word 12 */
739 	msg_word++;
740 	*msg_word = 0;
741 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
742 		/* TODO: Setting low threshold to 1/8th of ring size - see
743 		 * if this needs to be configurable
744 		 */
745 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
746 			srng_params.low_threshold);
747 	}
748 	/* "response_required" field should be set if a HTT response message is
749 	 * required after setting up the ring.
750 	 */
751 	pkt = htt_htc_pkt_alloc(soc);
752 	if (!pkt) {
753 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
754 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
755 		goto fail1;
756 	}
757 
758 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
759 
760 	SET_HTC_PACKET_INFO_TX(
761 		&pkt->htc_pkt,
762 		dp_htt_h2t_send_complete_free_netbuf,
763 		qdf_nbuf_data(htt_msg),
764 		qdf_nbuf_len(htt_msg),
765 		soc->htc_endpoint,
766 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
767 
768 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
769 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
770 				     htt_logger_bufp);
771 
772 	if (status != QDF_STATUS_SUCCESS) {
773 		qdf_nbuf_free(htt_msg);
774 		htt_htc_pkt_free(soc, pkt);
775 	}
776 
777 	return status;
778 
779 fail1:
780 	qdf_nbuf_free(htt_msg);
781 fail0:
782 	return QDF_STATUS_E_FAILURE;
783 }
784 
785 qdf_export_symbol(htt_srng_setup);
786 
787 #ifdef QCA_SUPPORT_FULL_MON
788 /**
789  * htt_h2t_full_mon_cfg() - Send full monitor configuarion msg to FW
790  *
791  * @htt_soc: HTT Soc handle
792  * @pdev_id: Radio id
793  * @dp_full_mon_config: enabled/disable configuration
794  *
795  * Return: Success when HTT message is sent, error on failure
796  */
797 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
798 			 uint8_t pdev_id,
799 			 enum dp_full_mon_config config)
800 {
801 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
802 	struct dp_htt_htc_pkt *pkt;
803 	qdf_nbuf_t htt_msg;
804 	uint32_t *msg_word;
805 	uint8_t *htt_logger_bufp;
806 
807 	htt_msg = qdf_nbuf_alloc(soc->osdev,
808 				 HTT_MSG_BUF_SIZE(
809 				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
810 				 /* reserve room for the HTC header */
811 				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
812 				 4,
813 				 TRUE);
814 	if (!htt_msg)
815 		return QDF_STATUS_E_FAILURE;
816 
817 	/*
818 	 * Set the length of the message.
819 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
820 	 * separately during the below call to qdf_nbuf_push_head.
821 	 * The contribution from the HTC header is added separately inside HTC.
822 	 */
823 	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
824 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
825 			  "%s: Failed to expand head for RX Ring Cfg msg",
826 			  __func__);
827 		goto fail1;
828 	}
829 
830 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
831 
832 	/* rewind beyond alignment pad to get to the HTC header reserved area */
833 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
834 
835 	/* word 0 */
836 	*msg_word = 0;
837 	htt_logger_bufp = (uint8_t *)msg_word;
838 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
839 	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
840 			*msg_word, DP_SW2HW_MACID(pdev_id));
841 
842 	msg_word++;
843 	*msg_word = 0;
844 	/* word 1 */
845 	if (config == DP_FULL_MON_ENABLE) {
846 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
847 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
848 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
849 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
850 	} else if (config == DP_FULL_MON_DISABLE) {
851 		/* As per MAC team's suggestion, While disbaling full monitor
852 		 * mode, Set 'en' bit to true in full monitor mode register.
853 		 */
854 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
855 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
856 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
857 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
858 	}
859 
860 	pkt = htt_htc_pkt_alloc(soc);
861 	if (!pkt) {
862 		qdf_err("HTC packet allocation failed");
863 		goto fail1;
864 	}
865 
866 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
867 
868 	SET_HTC_PACKET_INFO_TX(
869 		&pkt->htc_pkt,
870 		dp_htt_h2t_send_complete_free_netbuf,
871 		qdf_nbuf_data(htt_msg),
872 		qdf_nbuf_len(htt_msg),
873 		soc->htc_endpoint,
874 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
875 
876 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
877 	qdf_debug("config: %d", config);
878 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
879 			    htt_logger_bufp);
880 	return QDF_STATUS_SUCCESS;
881 fail1:
882 	qdf_nbuf_free(htt_msg);
883 	return QDF_STATUS_E_FAILURE;
884 }
885 
886 qdf_export_symbol(htt_h2t_full_mon_cfg);
887 #else
/* Stub: full monitor mode not compiled in; nothing to configure. */
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	return 0;
}

qdf_export_symbol(htt_h2t_full_mon_cfg);
896 #endif
897 
898 #ifdef QCA_UNDECODED_METADATA_SUPPORT
899 static inline void
900 dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
901 			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
902 {
903 	if (htt_tlv_filter->phy_err_filter_valid) {
904 		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_SET
905 			(*msg_word, htt_tlv_filter->fp_phy_err);
906 		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_SRC_SET
907 			(*msg_word, htt_tlv_filter->fp_phy_err_buf_src);
908 		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_DEST_SET
909 			(*msg_word, htt_tlv_filter->fp_phy_err_buf_dest);
910 
911 		/* word 12*/
912 		msg_word++;
913 		*msg_word = 0;
914 		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_SET
915 			(*msg_word, htt_tlv_filter->phy_err_mask);
916 
917 		/* word 13*/
918 		msg_word++;
919 		*msg_word = 0;
920 		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_CONT_SET
921 			(*msg_word, htt_tlv_filter->phy_err_mask_cont);
922 	}
923 }
924 #else
/* Stub: PHY error filtering unsupported in this build; intentionally empty. */
static inline void
dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
}
930 #endif
931 
932 /*
933  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
934  * config message to target
935  * @htt_soc:	HTT SOC handle
936  * @pdev_id:	WIN- PDEV Id, MCL- mac id
937  * @hal_srng:	Opaque HAL SRNG pointer
938  * @hal_ring_type:	SRNG ring type
939  * @ring_buf_size:	SRNG buffer size
940  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
941  * Return: 0 on success; error code on failure
942  */
943 int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
944 			hal_ring_handle_t hal_ring_hdl,
945 			int hal_ring_type, int ring_buf_size,
946 			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
947 {
948 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
949 	struct dp_htt_htc_pkt *pkt;
950 	qdf_nbuf_t htt_msg;
951 	uint32_t *msg_word;
952 	uint32_t *msg_word_data;
953 	struct hal_srng_params srng_params;
954 	uint32_t htt_ring_type, htt_ring_id;
955 	uint32_t tlv_filter;
956 	uint8_t *htt_logger_bufp;
957 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
958 	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
959 	int target_pdev_id;
960 	QDF_STATUS status;
961 
962 	htt_msg = qdf_nbuf_alloc(soc->osdev,
963 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
964 	/* reserve room for the HTC header */
965 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
966 	if (!htt_msg) {
967 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
968 		goto fail0;
969 	}
970 
971 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
972 
973 	switch (hal_ring_type) {
974 	case RXDMA_BUF:
975 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
976 		htt_ring_type = HTT_SW_TO_HW_RING;
977 		break;
978 	case RXDMA_MONITOR_BUF:
979 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
980 							 RXDMA_MONITOR_BUF);
981 		htt_ring_type = HTT_SW_TO_HW_RING;
982 		break;
983 	case RXDMA_MONITOR_STATUS:
984 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
985 		htt_ring_type = HTT_SW_TO_HW_RING;
986 		break;
987 	case RXDMA_MONITOR_DST:
988 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
989 							 RXDMA_MONITOR_DST);
990 		htt_ring_type = HTT_HW_TO_SW_RING;
991 		break;
992 	case RXDMA_MONITOR_DESC:
993 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
994 		htt_ring_type = HTT_SW_TO_HW_RING;
995 		break;
996 	case RXDMA_DST:
997 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
998 		htt_ring_type = HTT_HW_TO_SW_RING;
999 		break;
1000 
1001 	default:
1002 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1003 			"%s: Ring currently not supported", __func__);
1004 		goto fail1;
1005 	}
1006 
1007 	dp_info("ring_type %d ring_id %d htt_ring_id %d",
1008 		hal_ring_type, srng_params.ring_id, htt_ring_id);
1009 
1010 	/*
1011 	 * Set the length of the message.
1012 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1013 	 * separately during the below call to qdf_nbuf_push_head.
1014 	 * The contribution from the HTC header is added separately inside HTC.
1015 	 */
1016 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
1017 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1018 			"%s: Failed to expand head for RX Ring Cfg msg",
1019 			__func__);
1020 		goto fail1; /* failure */
1021 	}
1022 
1023 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1024 
1025 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1026 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1027 
1028 	/* word 0 */
1029 	htt_logger_bufp = (uint8_t *)msg_word;
1030 	*msg_word = 0;
1031 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
1032 
1033 	/* applicable only for post Li */
1034 	dp_rx_mon_enable(soc->dp_soc, msg_word, htt_tlv_filter);
1035 
1036 	/*
1037 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
1038 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
1039 	 */
1040 	target_pdev_id =
1041 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);
1042 
1043 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
1044 			htt_ring_type == HTT_SW_TO_HW_RING ||
1045 			htt_ring_type == HTT_HW_TO_SW_RING)
1046 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
1047 						      target_pdev_id);
1048 
1049 	/* TODO: Discuss with FW on changing this to unique ID and using
1050 	 * htt_ring_type to send the type of ring
1051 	 */
1052 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
1053 
1054 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
1055 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
1056 
1057 	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
1058 						htt_tlv_filter->offset_valid);
1059 
1060 	if (mon_drop_th > 0)
1061 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1062 								   1);
1063 	else
1064 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1065 								   0);
1066 
1067 	/* word 1 */
1068 	msg_word++;
1069 	*msg_word = 0;
1070 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
1071 		ring_buf_size);
1072 
1073 	dp_mon_rx_packet_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
1074 	dp_mon_rx_hdr_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
1075 
1076 	/* word 2 */
1077 	msg_word++;
1078 	*msg_word = 0;
1079 
1080 	if (htt_tlv_filter->enable_fp) {
1081 		/* TYPE: MGMT */
1082 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1083 			FP, MGMT, 0000,
1084 			(htt_tlv_filter->fp_mgmt_filter &
1085 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1086 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1087 			FP, MGMT, 0001,
1088 			(htt_tlv_filter->fp_mgmt_filter &
1089 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1090 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1091 			FP, MGMT, 0010,
1092 			(htt_tlv_filter->fp_mgmt_filter &
1093 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1094 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1095 			FP, MGMT, 0011,
1096 			(htt_tlv_filter->fp_mgmt_filter &
1097 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1098 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1099 			FP, MGMT, 0100,
1100 			(htt_tlv_filter->fp_mgmt_filter &
1101 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1102 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1103 			FP, MGMT, 0101,
1104 			(htt_tlv_filter->fp_mgmt_filter &
1105 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1106 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1107 			FP, MGMT, 0110,
1108 			(htt_tlv_filter->fp_mgmt_filter &
1109 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1110 		/* reserved */
1111 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
1112 			MGMT, 0111,
1113 			(htt_tlv_filter->fp_mgmt_filter &
1114 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1115 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1116 			FP, MGMT, 1000,
1117 			(htt_tlv_filter->fp_mgmt_filter &
1118 			FILTER_MGMT_BEACON) ? 1 : 0);
1119 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1120 			FP, MGMT, 1001,
1121 			(htt_tlv_filter->fp_mgmt_filter &
1122 			FILTER_MGMT_ATIM) ? 1 : 0);
1123 	}
1124 
1125 	if (htt_tlv_filter->enable_md) {
1126 			/* TYPE: MGMT */
1127 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1128 			MD, MGMT, 0000,
1129 			(htt_tlv_filter->md_mgmt_filter &
1130 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1131 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1132 			MD, MGMT, 0001,
1133 			(htt_tlv_filter->md_mgmt_filter &
1134 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1135 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1136 			MD, MGMT, 0010,
1137 			(htt_tlv_filter->md_mgmt_filter &
1138 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1139 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1140 			MD, MGMT, 0011,
1141 			(htt_tlv_filter->md_mgmt_filter &
1142 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1143 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1144 			MD, MGMT, 0100,
1145 			(htt_tlv_filter->md_mgmt_filter &
1146 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1147 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1148 			MD, MGMT, 0101,
1149 			(htt_tlv_filter->md_mgmt_filter &
1150 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1151 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1152 			MD, MGMT, 0110,
1153 			(htt_tlv_filter->md_mgmt_filter &
1154 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1155 		/* reserved */
1156 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1157 			MGMT, 0111,
1158 			(htt_tlv_filter->md_mgmt_filter &
1159 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1160 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1161 			MD, MGMT, 1000,
1162 			(htt_tlv_filter->md_mgmt_filter &
1163 			FILTER_MGMT_BEACON) ? 1 : 0);
1164 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1165 			MD, MGMT, 1001,
1166 			(htt_tlv_filter->md_mgmt_filter &
1167 			FILTER_MGMT_ATIM) ? 1 : 0);
1168 	}
1169 
1170 	if (htt_tlv_filter->enable_mo) {
1171 		/* TYPE: MGMT */
1172 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1173 			MO, MGMT, 0000,
1174 			(htt_tlv_filter->mo_mgmt_filter &
1175 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1176 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1177 			MO, MGMT, 0001,
1178 			(htt_tlv_filter->mo_mgmt_filter &
1179 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1180 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1181 			MO, MGMT, 0010,
1182 			(htt_tlv_filter->mo_mgmt_filter &
1183 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1184 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1185 			MO, MGMT, 0011,
1186 			(htt_tlv_filter->mo_mgmt_filter &
1187 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1188 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1189 			MO, MGMT, 0100,
1190 			(htt_tlv_filter->mo_mgmt_filter &
1191 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1192 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1193 			MO, MGMT, 0101,
1194 			(htt_tlv_filter->mo_mgmt_filter &
1195 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1196 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1197 			MO, MGMT, 0110,
1198 			(htt_tlv_filter->mo_mgmt_filter &
1199 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1200 		/* reserved */
1201 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1202 			MGMT, 0111,
1203 			(htt_tlv_filter->mo_mgmt_filter &
1204 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1205 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1206 			MO, MGMT, 1000,
1207 			(htt_tlv_filter->mo_mgmt_filter &
1208 			FILTER_MGMT_BEACON) ? 1 : 0);
1209 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1210 			MO, MGMT, 1001,
1211 			(htt_tlv_filter->mo_mgmt_filter &
1212 			FILTER_MGMT_ATIM) ? 1 : 0);
1213 	}
1214 
1215 	/* word 3 */
1216 	msg_word++;
1217 	*msg_word = 0;
1218 
1219 	if (htt_tlv_filter->enable_fp) {
1220 		/* TYPE: MGMT */
1221 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1222 			FP, MGMT, 1010,
1223 			(htt_tlv_filter->fp_mgmt_filter &
1224 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1225 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1226 			FP, MGMT, 1011,
1227 			(htt_tlv_filter->fp_mgmt_filter &
1228 			FILTER_MGMT_AUTH) ? 1 : 0);
1229 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1230 			FP, MGMT, 1100,
1231 			(htt_tlv_filter->fp_mgmt_filter &
1232 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1233 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1234 			FP, MGMT, 1101,
1235 			(htt_tlv_filter->fp_mgmt_filter &
1236 			FILTER_MGMT_ACTION) ? 1 : 0);
1237 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1238 			FP, MGMT, 1110,
1239 			(htt_tlv_filter->fp_mgmt_filter &
1240 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1241 		/* reserved*/
1242 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1243 			MGMT, 1111,
1244 			(htt_tlv_filter->fp_mgmt_filter &
1245 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1246 	}
1247 
1248 	if (htt_tlv_filter->enable_md) {
1249 			/* TYPE: MGMT */
1250 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1251 			MD, MGMT, 1010,
1252 			(htt_tlv_filter->md_mgmt_filter &
1253 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1254 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1255 			MD, MGMT, 1011,
1256 			(htt_tlv_filter->md_mgmt_filter &
1257 			FILTER_MGMT_AUTH) ? 1 : 0);
1258 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1259 			MD, MGMT, 1100,
1260 			(htt_tlv_filter->md_mgmt_filter &
1261 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1262 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1263 			MD, MGMT, 1101,
1264 			(htt_tlv_filter->md_mgmt_filter &
1265 			FILTER_MGMT_ACTION) ? 1 : 0);
1266 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1267 			MD, MGMT, 1110,
1268 			(htt_tlv_filter->md_mgmt_filter &
1269 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1270 	}
1271 
1272 	if (htt_tlv_filter->enable_mo) {
1273 		/* TYPE: MGMT */
1274 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1275 			MO, MGMT, 1010,
1276 			(htt_tlv_filter->mo_mgmt_filter &
1277 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1278 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1279 			MO, MGMT, 1011,
1280 			(htt_tlv_filter->mo_mgmt_filter &
1281 			FILTER_MGMT_AUTH) ? 1 : 0);
1282 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1283 			MO, MGMT, 1100,
1284 			(htt_tlv_filter->mo_mgmt_filter &
1285 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1286 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1287 			MO, MGMT, 1101,
1288 			(htt_tlv_filter->mo_mgmt_filter &
1289 			FILTER_MGMT_ACTION) ? 1 : 0);
1290 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1291 			MO, MGMT, 1110,
1292 			(htt_tlv_filter->mo_mgmt_filter &
1293 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1294 		/* reserved*/
1295 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1296 			MGMT, 1111,
1297 			(htt_tlv_filter->mo_mgmt_filter &
1298 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1299 	}
1300 
1301 	/* word 4 */
1302 	msg_word++;
1303 	*msg_word = 0;
1304 
1305 	if (htt_tlv_filter->enable_fp) {
1306 		/* TYPE: CTRL */
1307 		/* reserved */
1308 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1309 			CTRL, 0000,
1310 			(htt_tlv_filter->fp_ctrl_filter &
1311 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1312 		/* reserved */
1313 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1314 			CTRL, 0001,
1315 			(htt_tlv_filter->fp_ctrl_filter &
1316 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1317 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1318 			CTRL, 0010,
1319 			(htt_tlv_filter->fp_ctrl_filter &
1320 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1321 		/* reserved */
1322 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1323 			CTRL, 0011,
1324 			(htt_tlv_filter->fp_ctrl_filter &
1325 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1326 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1327 			CTRL, 0100,
1328 			(htt_tlv_filter->fp_ctrl_filter &
1329 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1330 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1331 			CTRL, 0101,
1332 			(htt_tlv_filter->fp_ctrl_filter &
1333 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1334 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1335 			CTRL, 0110,
1336 			(htt_tlv_filter->fp_ctrl_filter &
1337 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1338 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1339 			CTRL, 0111,
1340 			(htt_tlv_filter->fp_ctrl_filter &
1341 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1342 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1343 			CTRL, 1000,
1344 			(htt_tlv_filter->fp_ctrl_filter &
1345 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1346 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1347 			CTRL, 1001,
1348 			(htt_tlv_filter->fp_ctrl_filter &
1349 			FILTER_CTRL_BA) ? 1 : 0);
1350 	}
1351 
1352 	if (htt_tlv_filter->enable_md) {
1353 		/* TYPE: CTRL */
1354 		/* reserved */
1355 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1356 			CTRL, 0000,
1357 			(htt_tlv_filter->md_ctrl_filter &
1358 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1359 		/* reserved */
1360 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1361 			CTRL, 0001,
1362 			(htt_tlv_filter->md_ctrl_filter &
1363 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1364 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1365 			CTRL, 0010,
1366 			(htt_tlv_filter->md_ctrl_filter &
1367 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1368 		/* reserved */
1369 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1370 			CTRL, 0011,
1371 			(htt_tlv_filter->md_ctrl_filter &
1372 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1373 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1374 			CTRL, 0100,
1375 			(htt_tlv_filter->md_ctrl_filter &
1376 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1377 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1378 			CTRL, 0101,
1379 			(htt_tlv_filter->md_ctrl_filter &
1380 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1381 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1382 			CTRL, 0110,
1383 			(htt_tlv_filter->md_ctrl_filter &
1384 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1385 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1386 			CTRL, 0111,
1387 			(htt_tlv_filter->md_ctrl_filter &
1388 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1389 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1390 			CTRL, 1000,
1391 			(htt_tlv_filter->md_ctrl_filter &
1392 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1393 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1394 			CTRL, 1001,
1395 			(htt_tlv_filter->md_ctrl_filter &
1396 			FILTER_CTRL_BA) ? 1 : 0);
1397 	}
1398 
1399 	if (htt_tlv_filter->enable_mo) {
1400 		/* TYPE: CTRL */
1401 		/* reserved */
1402 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1403 			CTRL, 0000,
1404 			(htt_tlv_filter->mo_ctrl_filter &
1405 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1406 		/* reserved */
1407 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1408 			CTRL, 0001,
1409 			(htt_tlv_filter->mo_ctrl_filter &
1410 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1411 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1412 			CTRL, 0010,
1413 			(htt_tlv_filter->mo_ctrl_filter &
1414 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1415 		/* reserved */
1416 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1417 			CTRL, 0011,
1418 			(htt_tlv_filter->mo_ctrl_filter &
1419 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1420 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1421 			CTRL, 0100,
1422 			(htt_tlv_filter->mo_ctrl_filter &
1423 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1424 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1425 			CTRL, 0101,
1426 			(htt_tlv_filter->mo_ctrl_filter &
1427 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1428 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1429 			CTRL, 0110,
1430 			(htt_tlv_filter->mo_ctrl_filter &
1431 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1432 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1433 			CTRL, 0111,
1434 			(htt_tlv_filter->mo_ctrl_filter &
1435 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1436 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1437 			CTRL, 1000,
1438 			(htt_tlv_filter->mo_ctrl_filter &
1439 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1440 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1441 			CTRL, 1001,
1442 			(htt_tlv_filter->mo_ctrl_filter &
1443 			FILTER_CTRL_BA) ? 1 : 0);
1444 	}
1445 
1446 	/* word 5 */
1447 	msg_word++;
1448 	*msg_word = 0;
1449 	if (htt_tlv_filter->enable_fp) {
1450 		/* TYPE: CTRL */
1451 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1452 			CTRL, 1010,
1453 			(htt_tlv_filter->fp_ctrl_filter &
1454 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1455 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1456 			CTRL, 1011,
1457 			(htt_tlv_filter->fp_ctrl_filter &
1458 			FILTER_CTRL_RTS) ? 1 : 0);
1459 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1460 			CTRL, 1100,
1461 			(htt_tlv_filter->fp_ctrl_filter &
1462 			FILTER_CTRL_CTS) ? 1 : 0);
1463 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1464 			CTRL, 1101,
1465 			(htt_tlv_filter->fp_ctrl_filter &
1466 			FILTER_CTRL_ACK) ? 1 : 0);
1467 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1468 			CTRL, 1110,
1469 			(htt_tlv_filter->fp_ctrl_filter &
1470 			FILTER_CTRL_CFEND) ? 1 : 0);
1471 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1472 			CTRL, 1111,
1473 			(htt_tlv_filter->fp_ctrl_filter &
1474 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1475 		/* TYPE: DATA */
1476 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1477 			DATA, MCAST,
1478 			(htt_tlv_filter->fp_data_filter &
1479 			FILTER_DATA_MCAST) ? 1 : 0);
1480 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1481 			DATA, UCAST,
1482 			(htt_tlv_filter->fp_data_filter &
1483 			FILTER_DATA_UCAST) ? 1 : 0);
1484 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1485 			DATA, NULL,
1486 			(htt_tlv_filter->fp_data_filter &
1487 			FILTER_DATA_NULL) ? 1 : 0);
1488 	}
1489 
1490 	if (htt_tlv_filter->enable_md) {
1491 		/* TYPE: CTRL */
1492 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1493 			CTRL, 1010,
1494 			(htt_tlv_filter->md_ctrl_filter &
1495 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1496 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1497 			CTRL, 1011,
1498 			(htt_tlv_filter->md_ctrl_filter &
1499 			FILTER_CTRL_RTS) ? 1 : 0);
1500 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1501 			CTRL, 1100,
1502 			(htt_tlv_filter->md_ctrl_filter &
1503 			FILTER_CTRL_CTS) ? 1 : 0);
1504 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1505 			CTRL, 1101,
1506 			(htt_tlv_filter->md_ctrl_filter &
1507 			FILTER_CTRL_ACK) ? 1 : 0);
1508 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1509 			CTRL, 1110,
1510 			(htt_tlv_filter->md_ctrl_filter &
1511 			FILTER_CTRL_CFEND) ? 1 : 0);
1512 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1513 			CTRL, 1111,
1514 			(htt_tlv_filter->md_ctrl_filter &
1515 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1516 		/* TYPE: DATA */
1517 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1518 			DATA, MCAST,
1519 			(htt_tlv_filter->md_data_filter &
1520 			FILTER_DATA_MCAST) ? 1 : 0);
1521 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1522 			DATA, UCAST,
1523 			(htt_tlv_filter->md_data_filter &
1524 			FILTER_DATA_UCAST) ? 1 : 0);
1525 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1526 			DATA, NULL,
1527 			(htt_tlv_filter->md_data_filter &
1528 			FILTER_DATA_NULL) ? 1 : 0);
1529 	}
1530 
1531 	if (htt_tlv_filter->enable_mo) {
1532 		/* TYPE: CTRL */
1533 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1534 			CTRL, 1010,
1535 			(htt_tlv_filter->mo_ctrl_filter &
1536 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1537 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1538 			CTRL, 1011,
1539 			(htt_tlv_filter->mo_ctrl_filter &
1540 			FILTER_CTRL_RTS) ? 1 : 0);
1541 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1542 			CTRL, 1100,
1543 			(htt_tlv_filter->mo_ctrl_filter &
1544 			FILTER_CTRL_CTS) ? 1 : 0);
1545 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1546 			CTRL, 1101,
1547 			(htt_tlv_filter->mo_ctrl_filter &
1548 			FILTER_CTRL_ACK) ? 1 : 0);
1549 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1550 			CTRL, 1110,
1551 			(htt_tlv_filter->mo_ctrl_filter &
1552 			FILTER_CTRL_CFEND) ? 1 : 0);
1553 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1554 			CTRL, 1111,
1555 			(htt_tlv_filter->mo_ctrl_filter &
1556 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1557 		/* TYPE: DATA */
1558 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1559 			DATA, MCAST,
1560 			(htt_tlv_filter->mo_data_filter &
1561 			FILTER_DATA_MCAST) ? 1 : 0);
1562 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1563 			DATA, UCAST,
1564 			(htt_tlv_filter->mo_data_filter &
1565 			FILTER_DATA_UCAST) ? 1 : 0);
1566 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1567 			DATA, NULL,
1568 			(htt_tlv_filter->mo_data_filter &
1569 			FILTER_DATA_NULL) ? 1 : 0);
1570 	}
1571 
1572 	/* word 6 */
1573 	msg_word++;
1574 	*msg_word = 0;
1575 	tlv_filter = 0;
1576 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1577 		htt_tlv_filter->mpdu_start);
1578 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1579 		htt_tlv_filter->msdu_start);
1580 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1581 		htt_tlv_filter->packet);
1582 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1583 		htt_tlv_filter->msdu_end);
1584 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1585 		htt_tlv_filter->mpdu_end);
1586 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1587 		htt_tlv_filter->packet_header);
1588 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1589 		htt_tlv_filter->attention);
1590 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1591 		htt_tlv_filter->ppdu_start);
1592 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1593 		htt_tlv_filter->ppdu_end);
1594 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1595 		htt_tlv_filter->ppdu_end_user_stats);
1596 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1597 		PPDU_END_USER_STATS_EXT,
1598 		htt_tlv_filter->ppdu_end_user_stats_ext);
1599 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1600 		htt_tlv_filter->ppdu_end_status_done);
1601 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START_USER_INFO,
1602 		htt_tlv_filter->ppdu_start_user_info);
1603 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1604 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1605 		 htt_tlv_filter->header_per_msdu);
1606 
1607 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1608 
1609 	msg_word_data = (uint32_t *)qdf_nbuf_data(htt_msg);
1610 	dp_info("config_data: [0x%x][0x%x][0x%x][0x%x][0x%x][0x%x][0x%x]",
1611 		msg_word_data[0], msg_word_data[1], msg_word_data[2],
1612 		msg_word_data[3], msg_word_data[4], msg_word_data[5],
1613 		msg_word_data[6]);
1614 
1615 	/* word 7 */
1616 	msg_word++;
1617 	*msg_word = 0;
1618 	if (htt_tlv_filter->offset_valid) {
1619 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1620 					htt_tlv_filter->rx_packet_offset);
1621 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1622 					htt_tlv_filter->rx_header_offset);
1623 
1624 		/* word 8 */
1625 		msg_word++;
1626 		*msg_word = 0;
1627 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1628 					htt_tlv_filter->rx_mpdu_end_offset);
1629 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1630 					htt_tlv_filter->rx_mpdu_start_offset);
1631 
1632 		/* word 9 */
1633 		msg_word++;
1634 		*msg_word = 0;
1635 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1636 					htt_tlv_filter->rx_msdu_end_offset);
1637 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1638 					htt_tlv_filter->rx_msdu_start_offset);
1639 
1640 		/* word 10 */
1641 		msg_word++;
1642 		*msg_word = 0;
1643 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1644 					htt_tlv_filter->rx_attn_offset);
1645 
1646 		/* word 11 */
1647 		msg_word++;
1648 		*msg_word = 0;
1649 	} else {
1650 		/* word 11 */
1651 		msg_word += 4;
1652 		*msg_word = 0;
1653 	}
1654 
1655 	if (mon_drop_th > 0)
1656 		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1657 								mon_drop_th);
1658 	dp_mon_rx_enable_mpdu_logging(soc->dp_soc, msg_word, htt_tlv_filter);
1659 
1660 	dp_mon_rx_enable_phy_errors(msg_word, htt_tlv_filter);
1661 
1662 	/* word 14*/
1663 	msg_word += 3;
1664 	*msg_word = 0;
1665 
1666 	dp_mon_rx_wmask_subscribe(soc->dp_soc, msg_word, htt_tlv_filter);
1667 
1668 	/* "response_required" field should be set if a HTT response message is
1669 	 * required after setting up the ring.
1670 	 */
1671 	pkt = htt_htc_pkt_alloc(soc);
1672 	if (!pkt) {
1673 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
1674 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
1675 		goto fail1;
1676 	}
1677 
1678 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1679 
1680 	SET_HTC_PACKET_INFO_TX(
1681 		&pkt->htc_pkt,
1682 		dp_htt_h2t_send_complete_free_netbuf,
1683 		qdf_nbuf_data(htt_msg),
1684 		qdf_nbuf_len(htt_msg),
1685 		soc->htc_endpoint,
1686 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1687 
1688 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1689 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
1690 				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1691 				     htt_logger_bufp);
1692 
1693 	if (status != QDF_STATUS_SUCCESS) {
1694 		qdf_nbuf_free(htt_msg);
1695 		htt_htc_pkt_free(soc, pkt);
1696 	}
1697 
1698 	return status;
1699 
1700 fail1:
1701 	qdf_nbuf_free(htt_msg);
1702 fail0:
1703 	return QDF_STATUS_E_FAILURE;
1704 }
1705 
/* export htt_h2t_rx_ring_cfg for use by other kernel modules */
qdf_export_symbol(htt_h2t_rx_ring_cfg);
1707 
1708 #if defined(HTT_STATS_ENABLE)
1709 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1710 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1711 
1712 {
1713 	uint32_t pdev_id;
1714 	uint32_t *msg_word = NULL;
1715 	uint32_t msg_remain_len = 0;
1716 
1717 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1718 
1719 	/*COOKIE MSB*/
1720 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1721 
1722 	/* stats message length + 16 size of HTT header*/
1723 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1724 				(uint32_t)DP_EXT_MSG_LENGTH);
1725 
1726 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1727 			msg_word,  msg_remain_len,
1728 			WDI_NO_VAL, pdev_id);
1729 
1730 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1731 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1732 	}
1733 	/* Need to be freed here as WDI handler will
1734 	 * make a copy of pkt to send data to application
1735 	 */
1736 	qdf_nbuf_free(htt_msg);
1737 	return QDF_STATUS_SUCCESS;
1738 }
1739 #else
1740 static inline QDF_STATUS
1741 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1742 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
1743 {
1744 	return QDF_STATUS_E_NOSUPPORT;
1745 }
1746 #endif
1747 
1748 #ifdef HTT_STATS_DEBUGFS_SUPPORT
1749 /* dp_send_htt_stats_dbgfs_msg() - Function to send htt data to upper layer.
1750  * @pdev: dp pdev handle
1751  * @msg_word: HTT msg
1752  * @msg_len: Length of HTT msg sent
1753  *
1754  * Return: none
1755  */
1756 static inline void
1757 dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
1758 			    uint32_t msg_len)
1759 {
1760 	struct htt_dbgfs_cfg dbgfs_cfg;
1761 	int done = 0;
1762 
1763 	/* send 5th word of HTT msg to upper layer */
1764 	dbgfs_cfg.msg_word = (msg_word + 4);
1765 	dbgfs_cfg.m = pdev->dbgfs_cfg->m;
1766 
1767 	/* stats message length + 16 size of HTT header*/
1768 	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);
1769 
1770 	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
1771 		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
1772 							     (msg_len - HTT_HEADER_LEN));
1773 
1774 	/* Get TLV Done bit from 4th msg word */
1775 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1776 	if (done) {
1777 		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
1778 			dp_htt_err("%pK: Failed to set event for debugfs htt stats"
1779 				   , pdev->soc);
1780 	}
1781 }
1782 #else
1783 static inline void
1784 dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
1785 			    uint32_t msg_len)
1786 {
1787 }
1788 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
1789 
1790 #ifdef WLAN_SYSFS_DP_STATS
1791 /* dp_htt_stats_sysfs_update_config() - Function to send htt data to upper layer.
1792  * @pdev: dp pdev handle
1793  *
1794  * This function sets the process id and printing mode within the sysfs config
1795  * struct. which enables DP_PRINT statements within this process to write to the
1796  * console buffer provided by the user space.
1797  *
1798  * Return: None
1799  */
1800 static inline void
1801 dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
1802 {
1803 	struct dp_soc *soc = pdev->soc;
1804 
1805 	if (!soc) {
1806 		dp_htt_err("soc is null");
1807 		return;
1808 	}
1809 
1810 	if (!soc->sysfs_config) {
1811 		dp_htt_err("soc->sysfs_config is NULL");
1812 		return;
1813 	}
1814 
1815 	/* set sysfs config parameters */
1816 	soc->sysfs_config->process_id = qdf_get_current_pid();
1817 	soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
1818 }
1819 
1820 /*
1821  * dp_htt_stats_sysfs_set_event() - Set sysfs stats event.
1822  * @soc: soc handle.
1823  * @msg_word: Pointer to htt msg word.
1824  *
1825  * @return: void
1826  */
1827 static inline void
1828 dp_htt_stats_sysfs_set_event(struct dp_soc *soc, uint32_t *msg_word)
1829 {
1830 	int done = 0;
1831 
1832 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1833 	if (done) {
1834 		if (qdf_event_set(&soc->sysfs_config->sysfs_txrx_fw_request_done))
1835 			dp_htt_err("%pK:event compl Fail to set event ",
1836 				   soc);
1837 	}
1838 }
1839 #else /* WLAN_SYSFS_DP_STATS */
1840 static inline void
1841 dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
1842 {
1843 }
1844 
1845 static inline void
1846 dp_htt_stats_sysfs_set_event(struct dp_soc *dp_soc, uint32_t *msg_word)
1847 {
1848 }
1849 #endif /* WLAN_SYSFS_DP_STATS */
1850 
1851 /**
1852  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
 * @htt_stats: htt stats info
 * @soc: DP SOC handle
1854  *
1855  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1856  * contains sub messages which are identified by a TLV header.
1857  * In this function we will process the stream of T2H messages and read all the
1858  * TLV contained in the message.
1859  *
 * The following cases have been taken care of:
1861  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1862  *		In this case the buffer will contain multiple tlvs.
1863  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1864  *		Only one tlv will be contained in the HTT message and this tag
1865  *		will extend onto the next buffer.
1866  * Case 3: When the buffer is the continuation of the previous message
1867  * Case 4: tlv length is 0. which will indicate the end of message
1868  *
1869  * return: void
1870  */
1871 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
1872 					struct dp_soc *soc)
1873 {
1874 	htt_tlv_tag_t tlv_type = 0xff;
1875 	qdf_nbuf_t htt_msg = NULL;
1876 	uint32_t *msg_word;
1877 	uint8_t *tlv_buf_head = NULL;
1878 	uint8_t *tlv_buf_tail = NULL;
1879 	uint32_t msg_remain_len = 0;
1880 	uint32_t tlv_remain_len = 0;
1881 	uint32_t *tlv_start;
1882 	int cookie_val = 0;
1883 	int cookie_msb = 0;
1884 	int pdev_id;
1885 	bool copy_stats = false;
1886 	struct dp_pdev *pdev;
1887 
1888 	/* Process node in the HTT message queue */
1889 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1890 		!= NULL) {
1891 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1892 		cookie_val = *(msg_word + 1);
1893 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
1894 					*(msg_word +
1895 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
1896 
1897 		if (cookie_val) {
1898 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
1899 					== QDF_STATUS_SUCCESS) {
1900 				continue;
1901 			}
1902 		}
1903 
1904 		cookie_msb = *(msg_word + 2);
1905 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1906 		pdev = soc->pdev_list[pdev_id];
1907 
1908 		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
1909 			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
1910 						    htt_stats->msg_len);
1911 			qdf_nbuf_free(htt_msg);
1912 			continue;
1913 		}
1914 
1915 		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
1916 			dp_htt_stats_sysfs_update_config(pdev);
1917 
1918 		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
1919 			copy_stats = true;
1920 
1921 		/* read 5th word */
1922 		msg_word = msg_word + 4;
1923 		msg_remain_len = qdf_min(htt_stats->msg_len,
1924 				(uint32_t) DP_EXT_MSG_LENGTH);
1925 		/* Keep processing the node till node length is 0 */
1926 		while (msg_remain_len) {
1927 			/*
1928 			 * if message is not a continuation of previous message
1929 			 * read the tlv type and tlv length
1930 			 */
1931 			if (!tlv_buf_head) {
1932 				tlv_type = HTT_STATS_TLV_TAG_GET(
1933 						*msg_word);
1934 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
1935 						*msg_word);
1936 			}
1937 
1938 			if (tlv_remain_len == 0) {
1939 				msg_remain_len = 0;
1940 
1941 				if (tlv_buf_head) {
1942 					qdf_mem_free(tlv_buf_head);
1943 					tlv_buf_head = NULL;
1944 					tlv_buf_tail = NULL;
1945 				}
1946 
1947 				goto error;
1948 			}
1949 
1950 			if (!tlv_buf_head)
1951 				tlv_remain_len += HTT_TLV_HDR_LEN;
1952 
1953 			if ((tlv_remain_len <= msg_remain_len)) {
1954 				/* Case 3 */
1955 				if (tlv_buf_head) {
1956 					qdf_mem_copy(tlv_buf_tail,
1957 							(uint8_t *)msg_word,
1958 							tlv_remain_len);
1959 					tlv_start = (uint32_t *)tlv_buf_head;
1960 				} else {
1961 					/* Case 1 */
1962 					tlv_start = msg_word;
1963 				}
1964 
1965 				if (copy_stats)
1966 					dp_htt_stats_copy_tag(pdev,
1967 							      tlv_type,
1968 							      tlv_start);
1969 				else
1970 					dp_htt_stats_print_tag(pdev,
1971 							       tlv_type,
1972 							       tlv_start);
1973 
1974 				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
1975 				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
1976 					dp_peer_update_inactive_time(pdev,
1977 								     tlv_type,
1978 								     tlv_start);
1979 
1980 				msg_remain_len -= tlv_remain_len;
1981 
1982 				msg_word = (uint32_t *)
1983 					(((uint8_t *)msg_word) +
1984 					tlv_remain_len);
1985 
1986 				tlv_remain_len = 0;
1987 
1988 				if (tlv_buf_head) {
1989 					qdf_mem_free(tlv_buf_head);
1990 					tlv_buf_head = NULL;
1991 					tlv_buf_tail = NULL;
1992 				}
1993 
1994 			} else { /* tlv_remain_len > msg_remain_len */
1995 				/* Case 2 & 3 */
1996 				if (!tlv_buf_head) {
1997 					tlv_buf_head = qdf_mem_malloc(
1998 							tlv_remain_len);
1999 
2000 					if (!tlv_buf_head) {
2001 						QDF_TRACE(QDF_MODULE_ID_TXRX,
2002 								QDF_TRACE_LEVEL_ERROR,
2003 								"Alloc failed");
2004 						goto error;
2005 					}
2006 
2007 					tlv_buf_tail = tlv_buf_head;
2008 				}
2009 
2010 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
2011 						msg_remain_len);
2012 				tlv_remain_len -= msg_remain_len;
2013 				tlv_buf_tail += msg_remain_len;
2014 			}
2015 		}
2016 
2017 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
2018 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
2019 		}
2020 
2021 		/* indicate event completion in case the event is done */
2022 		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
2023 			dp_htt_stats_sysfs_set_event(soc, msg_word);
2024 
2025 		qdf_nbuf_free(htt_msg);
2026 	}
2027 	return;
2028 
2029 error:
2030 	qdf_nbuf_free(htt_msg);
2031 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
2032 			!= NULL)
2033 		qdf_nbuf_free(htt_msg);
2034 }
2035 
/**
 * htt_t2h_stats_handler() - Work handler that drains one completed HTT EXT
 * stats response from soc->htt_stats.msg and processes it.
 * @context: opaque pointer to the dp_soc
 *
 * Pulls buffers off the shared queue (under htt_stats.lock) until the buffer
 * with the DONE bit set is found, then parses them outside the lock via
 * dp_process_htt_stat_msg(). Reschedules itself if more completed responses
 * remain queued.
 */
void htt_t2h_stats_handler(void *context)
{
	struct dp_soc *soc = (struct dp_soc *)context;
	struct htt_stats_context htt_stats;
	uint32_t *msg_word;
	qdf_nbuf_t htt_msg = NULL;
	uint8_t done;
	uint32_t rem_stats;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc is NULL");
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc: 0x%pK, init_done: %d", soc,
			  qdf_atomic_read(&soc->cmn_init_done));
		return;
	}

	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
	qdf_nbuf_queue_init(&htt_stats.msg);

	/* pull one completed stats from soc->htt_stats_msg and process */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	if (!soc->htt_stats.num_stats) {
		qdf_spin_unlock_bh(&soc->htt_stats.lock);
		return;
	}
	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
		/*
		 * Done bit signifies that this is the last T2H buffer in the
		 * stream of HTT EXT STATS message
		 */
		if (done)
			break;
	}
	/*
	 * One completed response consumed. NOTE(review): assumes num_stats > 0
	 * implies a DONE-terminated set is fully queued (producer increments
	 * num_stats only on DONE) — confirm against dp_txrx_fw_stats_handler.
	 */
	rem_stats = --soc->htt_stats.num_stats;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	/* If there are more stats to process, schedule stats work again.
	 * Scheduling prior to processing ht_stats to queue with early
	 * index
	 */
	if (rem_stats)
		qdf_sched_work(0, &soc->htt_stats.work);

	dp_process_htt_stat_msg(&htt_stats, soc);
}
2091 
2092 /**
2093  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2094  * @soc: DP SOC handle
2095  * @htt_t2h_msg: HTT message nbuf
2096  *
2097  * return:void
2098  */
2099 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2100 					    qdf_nbuf_t htt_t2h_msg)
2101 {
2102 	uint8_t done;
2103 	qdf_nbuf_t msg_copy;
2104 	uint32_t *msg_word;
2105 
2106 	msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
2107 	msg_word = msg_word + 3;
2108 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2109 
2110 	/*
2111 	 * HTT EXT stats response comes as stream of TLVs which span over
2112 	 * multiple T2H messages.
2113 	 * The first message will carry length of the response.
2114 	 * For rest of the messages length will be zero.
2115 	 *
2116 	 * Clone the T2H message buffer and store it in a list to process
2117 	 * it later.
2118 	 *
2119 	 * The original T2H message buffers gets freed in the T2H HTT event
2120 	 * handler
2121 	 */
2122 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2123 
2124 	if (!msg_copy) {
2125 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2126 			  "T2H messge clone failed for HTT EXT STATS");
2127 		goto error;
2128 	}
2129 
2130 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2131 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2132 	/*
2133 	 * Done bit signifies that this is the last T2H buffer in the stream of
2134 	 * HTT EXT STATS message
2135 	 */
2136 	if (done) {
2137 		soc->htt_stats.num_stats++;
2138 		qdf_sched_work(0, &soc->htt_stats.work);
2139 	}
2140 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2141 
2142 	return;
2143 
2144 error:
2145 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2146 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2147 			!= NULL) {
2148 		qdf_nbuf_free(msg_copy);
2149 	}
2150 	soc->htt_stats.num_stats = 0;
2151 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2152 	return;
2153 }
2154 
2155 /*
2156  * htt_soc_attach_target() - SOC level HTT setup
2157  * @htt_soc:	HTT SOC handle
2158  *
2159  * Return: 0 on success; error code on failure
2160  */
2161 int htt_soc_attach_target(struct htt_soc *htt_soc)
2162 {
2163 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
2164 
2165 	return htt_h2t_ver_req_msg(soc);
2166 }
2167 
/* htt_set_htc_handle() - store the HTC handle used for HTT H2T messaging */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}
2172 
/* htt_get_htc_handle() - return the HTC handle stored in the HTT soc */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
2177 
2178 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
2179 {
2180 	int i;
2181 	int j;
2182 	int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
2183 	struct htt_soc *htt_soc = NULL;
2184 
2185 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
2186 	if (!htt_soc) {
2187 		dp_err("HTT attach failed");
2188 		return NULL;
2189 	}
2190 
2191 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2192 		htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
2193 		if (!htt_soc->pdevid_tt[i].umac_ttt)
2194 			break;
2195 		qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
2196 		htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
2197 		if (!htt_soc->pdevid_tt[i].lmac_ttt) {
2198 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
2199 			break;
2200 		}
2201 		qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
2202 	}
2203 	if (i != MAX_PDEV_CNT) {
2204 		for (j = 0; j < i; j++) {
2205 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt);
2206 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt);
2207 		}
2208 		qdf_mem_free(htt_soc);
2209 		return NULL;
2210 	}
2211 
2212 	htt_soc->dp_soc = soc;
2213 	htt_soc->htc_soc = htc_handle;
2214 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
2215 
2216 	return htt_soc;
2217 }
2218 
2219 #if defined(WDI_EVENT_ENABLE) && \
2220 	!defined(REMOVE_PKT_LOG)
2221 /*
2222  * dp_pktlog_msg_handler() - Pktlog msg handler
2223  * @htt_soc:	 HTT SOC handle
2224  * @msg_word:    Pointer to payload
2225  *
2226  * Return: None
2227  */
2228 static void
2229 dp_pktlog_msg_handler(struct htt_soc *soc,
2230 		      uint32_t *msg_word)
2231 {
2232 	uint8_t pdev_id;
2233 	uint8_t target_pdev_id;
2234 	uint32_t *pl_hdr;
2235 
2236 	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2237 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2238 							 target_pdev_id);
2239 	pl_hdr = (msg_word + 1);
2240 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2241 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2242 		pdev_id);
2243 }
2244 #else
/* Stub when WDI events or pktlog are compiled out: pktlog msgs are ignored. */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
2250 #endif
2251 
2252 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
2253 /*
2254  * dp_vdev_txrx_hw_stats_handler - Handle vdev stats received from FW
2255  * @soc - htt soc handle
2256  * @ msg_word - buffer containing stats
2257  *
2258  * Return: void
2259  */
2260 static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
2261 					  uint32_t *msg_word)
2262 {
2263 	struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
2264 	uint8_t pdev_id;
2265 	uint8_t vdev_id;
2266 	uint8_t target_pdev_id;
2267 	uint16_t payload_size;
2268 	struct dp_pdev *pdev;
2269 	struct dp_vdev *vdev;
2270 	uint8_t *tlv_buf;
2271 	uint32_t *tlv_buf_temp;
2272 	uint32_t *tag_buf;
2273 	htt_tlv_tag_t tlv_type;
2274 	uint16_t tlv_length;
2275 	uint64_t pkt_count = 0;
2276 	uint64_t byte_count = 0;
2277 	uint64_t soc_drop_cnt = 0;
2278 	struct cdp_pkt_info tx_comp = { 0 };
2279 	struct cdp_pkt_info tx_failed =  { 0 };
2280 
2281 	target_pdev_id =
2282 		HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PDEV_ID_GET(*msg_word);
2283 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(dpsoc,
2284 							 target_pdev_id);
2285 
2286 	if (pdev_id >= MAX_PDEV_CNT)
2287 		return;
2288 
2289 	pdev = dpsoc->pdev_list[pdev_id];
2290 	if (!pdev) {
2291 		dp_err("PDEV is NULL for pdev_id:%d", pdev_id);
2292 		return;
2293 	}
2294 
2295 	payload_size =
2296 	HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PAYLOAD_SIZE_GET(*msg_word);
2297 
2298 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2299 			   (void *)msg_word, payload_size + 16);
2300 
2301 	/* Adjust msg_word to point to the first TLV in buffer */
2302 	msg_word = msg_word + 4;
2303 
2304 	/* Parse the received buffer till payload size reaches 0 */
2305 	while (payload_size > 0) {
2306 		tlv_buf = (uint8_t *)msg_word;
2307 		tlv_buf_temp = msg_word;
2308 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
2309 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
2310 
2311 		/* Add header size to tlv length*/
2312 		tlv_length += 4;
2313 
2314 		switch (tlv_type) {
2315 		case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
2316 		{
2317 			tag_buf = tlv_buf_temp +
2318 					HTT_VDEV_STATS_GET_INDEX(SOC_DROP_CNT);
2319 			soc_drop_cnt = HTT_VDEV_GET_STATS_U64(tag_buf);
2320 			DP_STATS_UPD(dpsoc, tx.tqm_drop_no_peer, soc_drop_cnt);
2321 			break;
2322 		}
2323 		case HTT_STATS_VDEV_TXRX_STATS_HW_STATS_TAG:
2324 		{
2325 			tag_buf = tlv_buf_temp +
2326 					HTT_VDEV_STATS_GET_INDEX(VDEV_ID);
2327 			vdev_id = (uint8_t)(*tag_buf);
2328 			vdev = dp_vdev_get_ref_by_id(dpsoc, vdev_id,
2329 						     DP_MOD_ID_HTT);
2330 
2331 			if (!vdev)
2332 				goto invalid_vdev;
2333 
2334 			/* Extract received packet count from buffer */
2335 			tag_buf = tlv_buf_temp +
2336 					HTT_VDEV_STATS_GET_INDEX(RX_PKT_CNT);
2337 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2338 			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.num, pkt_count);
2339 
2340 			/* Extract received packet byte count from buffer */
2341 			tag_buf = tlv_buf_temp +
2342 					HTT_VDEV_STATS_GET_INDEX(RX_BYTE_CNT);
2343 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2344 			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.bytes, byte_count);
2345 
2346 			/* Extract tx success packet count from buffer */
2347 			tag_buf = tlv_buf_temp +
2348 				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_PKT_CNT);
2349 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2350 			tx_comp.num = pkt_count;
2351 
2352 			/* Extract tx success packet byte count from buffer */
2353 			tag_buf = tlv_buf_temp +
2354 				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_BYTE_CNT);
2355 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2356 			tx_comp.bytes = byte_count;
2357 
2358 			/* Extract tx retry packet count from buffer */
2359 			tag_buf = tlv_buf_temp +
2360 				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_PKT_CNT);
2361 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2362 			tx_comp.num += pkt_count;
2363 			tx_failed.num = pkt_count;
2364 
2365 			/* Extract tx retry packet byte count from buffer */
2366 			tag_buf = tlv_buf_temp +
2367 				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_BYTE_CNT);
2368 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2369 			tx_comp.bytes += byte_count;
2370 			tx_failed.bytes = byte_count;
2371 
2372 			/* Extract tx drop packet count from buffer */
2373 			tag_buf = tlv_buf_temp +
2374 				HTT_VDEV_STATS_GET_INDEX(TX_DROP_PKT_CNT);
2375 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2376 			tx_comp.num += pkt_count;
2377 			tx_failed.num += pkt_count;
2378 
2379 			/* Extract tx drop packet byte count from buffer */
2380 			tag_buf = tlv_buf_temp +
2381 				HTT_VDEV_STATS_GET_INDEX(TX_DROP_BYTE_CNT);
2382 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2383 			tx_comp.bytes += byte_count;
2384 			tx_failed.bytes += byte_count;
2385 
2386 			/* Extract tx age-out packet count from buffer */
2387 			tag_buf = tlv_buf_temp +
2388 				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_PKT_CNT);
2389 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2390 			tx_comp.num += pkt_count;
2391 			tx_failed.num += pkt_count;
2392 
2393 			/* Extract tx age-out packet byte count from buffer */
2394 			tag_buf = tlv_buf_temp +
2395 				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_BYTE_CNT);
2396 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2397 			tx_comp.bytes += byte_count;
2398 			tx_failed.bytes += byte_count;
2399 
2400 			/* Extract tqm bypass packet count from buffer */
2401 			tag_buf = tlv_buf_temp +
2402 				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_PKT_CNT);
2403 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2404 			tx_comp.num += pkt_count;
2405 
2406 			/* Extract tx bypass packet byte count from buffer */
2407 			tag_buf = tlv_buf_temp +
2408 				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_BYTE_CNT);
2409 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2410 			tx_comp.bytes += byte_count;
2411 
2412 			DP_STATS_UPD(vdev, tx.comp_pkt.num, tx_comp.num);
2413 			DP_STATS_UPD(vdev, tx.comp_pkt.bytes, tx_comp.bytes);
2414 
2415 			DP_STATS_UPD(vdev, tx.tx_failed, tx_failed.num);
2416 
2417 			dp_vdev_unref_delete(dpsoc, vdev, DP_MOD_ID_HTT);
2418 			break;
2419 		}
2420 		default:
2421 			qdf_assert(0);
2422 		}
2423 invalid_vdev:
2424 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2425 		payload_size -= tlv_length;
2426 	}
2427 }
2428 #else
/* Stub when QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT is disabled */
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{}
2432 #endif
2433 
#ifdef CONFIG_SAWF_DEF_QUEUES
/* Forward the SAWF default-queues map-report configuration message to the
 * SAWF HTT handler.
 */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_def_queues_map_report_conf(soc, msg_word, htt_t2h_msg);
}
#else
/* Stub when CONFIG_SAWF_DEF_QUEUES is disabled: message is ignored. */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{}
#endif
2447 
#ifdef CONFIG_SAWF
/*
 * dp_sawf_msduq_map() - Msdu queue creation information received
 * from target
 * @soc: soc handle.
 * @msg_word: Pointer to htt msg word.
 * @htt_t2h_msg: HTT message nbuf
 *
 * @return: void
 */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
}

/*
 * dp_sawf_mpdu_stats_handler() - HTT message handler for MPDU stats
 * @soc: soc handle.
 * @htt_t2h_msg: HTT message nbuf
 *
 * @return: void
 */
static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
				       qdf_nbuf_t htt_t2h_msg)
{
	dp_sawf_htt_mpdu_stats_handler(soc, htt_t2h_msg);
}
#else
/* Stubs when CONFIG_SAWF is disabled: SAWF HTT messages are ignored. */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{}

static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
				       qdf_nbuf_t htt_t2h_msg)
{}
#endif
2485 
2486 /*
2487  * time_allow_print() - time allow print
2488  * @htt_ring_tt:	ringi_id array of timestamps
2489  * @ring_id:		ring_id (index)
2490  *
2491  * Return: 1 for successfully saving timestamp in array
2492  *	and 0 for timestamp falling within 2 seconds after last one
2493  */
2494 static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
2495 {
2496 	unsigned long tstamp;
2497 	unsigned long delta;
2498 
2499 	tstamp = qdf_get_system_timestamp();
2500 
2501 	if (!htt_ring_tt)
2502 		return 0; //unable to print backpressure messages
2503 
2504 	if (htt_ring_tt[ring_id] == -1) {
2505 		htt_ring_tt[ring_id] = tstamp;
2506 		return 1;
2507 	}
2508 	delta = tstamp - htt_ring_tt[ring_id];
2509 	if (delta >= 2000) {
2510 		htt_ring_tt[ring_id] = tstamp;
2511 		return 1;
2512 	}
2513 
2514 	return 0;
2515 }
2516 
/*
 * dp_htt_alert_print() - emit the two-line backpressure alert
 * @msg_type:	HTT T2H message type
 * @pdev:	DP pdev the event belongs to
 * @ring_id:	backpressured ring id
 * @hp_idx:	head pointer index from the event
 * @tp_idx:	tail pointer index from the event
 * @bkp_time:	backpressure duration in ms
 * @ring_stype:	printable SW ring type name
 *
 * Return: void
 */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       struct dp_pdev *pdev, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time, char *ring_stype)
{
	dp_alert("seq_num %u msg_type: %d pdev_id: %d ring_type: %s ",
		 pdev->bkp_stats.seq_num, msg_type, pdev->pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
}
2527 
2528 /**
2529  * dp_get_srng_ring_state_from_hal(): Get hal level ring stats
2530  * @soc: DP_SOC handle
2531  * @srng: DP_SRNG handle
2532  * @ring_type: srng src/dst ring
2533  *
2534  * Return: void
2535  */
2536 static QDF_STATUS
2537 dp_get_srng_ring_state_from_hal(struct dp_soc *soc,
2538 				struct dp_pdev *pdev,
2539 				struct dp_srng *srng,
2540 				enum hal_ring_type ring_type,
2541 				struct dp_srng_ring_state *state)
2542 {
2543 	struct hal_soc *hal_soc;
2544 
2545 	if (!soc || !srng || !srng->hal_srng || !state)
2546 		return QDF_STATUS_E_INVAL;
2547 
2548 	hal_soc = (struct hal_soc *)soc->hal_soc;
2549 
2550 	hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &state->sw_tail,
2551 			&state->sw_head);
2552 
2553 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &state->hw_head,
2554 			&state->hw_tail, ring_type);
2555 
2556 	state->ring_type = ring_type;
2557 
2558 	return QDF_STATUS_SUCCESS;
2559 }
2560 
#ifdef QCA_MONITOR_PKT_SUPPORT
/*
 * dp_queue_mon_ring_stats() - snapshot monitor-mode ring states
 * @pdev: DP pdev handle
 * @lmac_id: lmac index whose monitor rings are snapshotted
 * @num_srng: in/out count of filled ring_state slots
 * @soc_srngs_state: destination snapshot buffer
 *
 * Fills the monitor buf/dst/desc ring states when rxdma1 is enabled.
 * NOTE(review): the slot counter is incremented inside qdf_assert_always();
 * this relies on that macro always evaluating its expression in every build
 * configuration — confirm.
 */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
	QDF_STATUS status;

	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_buf_ring[lmac_id],
			 RXDMA_MONITOR_BUF,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);

		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_dst_ring[lmac_id],
			 RXDMA_MONITOR_DST,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);

		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_desc_ring[lmac_id],
			 RXDMA_MONITOR_DESC,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
	}
}
#else
/* Stub when QCA_MONITOR_PKT_SUPPORT is disabled: no monitor rings to queue. */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
}
#endif
2606 
#ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
/* Snapshot the TCL command/credit ring state via hal. */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_cmd_credit_ring,
					       TCL_CMD_CREDIT, ring_state);
}
#else
/* TCL command/credit SRNG compiled out.
 * NOTE(review): returning SUCCESS without filling @ring_state makes the
 * caller count an unfilled snapshot slot — confirm this is intended.
 */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
#endif
2624 
#ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
/* Snapshot the TCL status ring state via hal. */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_status_ring,
					       TCL_STATUS, ring_state);
}
#else
/* TCL status SRNG compiled out.
 * NOTE(review): returning SUCCESS without filling @ring_state makes the
 * caller count an unfilled snapshot slot — confirm this is intended.
 */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
#endif
2642 
2643 /**
2644  * dp_queue_srng_ring_stats(): Print pdev hal level ring stats
2645  * @pdev: DP_pdev handle
2646  *
2647  * Return: void
2648  */
2649 static void dp_queue_ring_stats(struct dp_pdev *pdev)
2650 {
2651 	uint32_t i;
2652 	int mac_id;
2653 	int lmac_id;
2654 	uint32_t j = 0;
2655 	struct dp_soc *soc = pdev->soc;
2656 	struct dp_soc_srngs_state * soc_srngs_state = NULL;
2657 	struct dp_soc_srngs_state *drop_srngs_state = NULL;
2658 	QDF_STATUS status;
2659 
2660 	soc_srngs_state = qdf_mem_malloc(sizeof(struct dp_soc_srngs_state));
2661 	if (!soc_srngs_state) {
2662 		dp_htt_alert("Memory alloc failed for back pressure event");
2663 		return;
2664 	}
2665 
2666 	status = dp_get_srng_ring_state_from_hal
2667 				(pdev->soc, pdev,
2668 				 &pdev->soc->reo_exception_ring,
2669 				 REO_EXCEPTION,
2670 				 &soc_srngs_state->ring_state[j]);
2671 
2672 	if (status == QDF_STATUS_SUCCESS)
2673 		qdf_assert_always(++j < DP_MAX_SRNGS);
2674 
2675 	status = dp_get_srng_ring_state_from_hal
2676 				(pdev->soc, pdev,
2677 				 &pdev->soc->reo_reinject_ring,
2678 				 REO_REINJECT,
2679 				 &soc_srngs_state->ring_state[j]);
2680 
2681 	if (status == QDF_STATUS_SUCCESS)
2682 		qdf_assert_always(++j < DP_MAX_SRNGS);
2683 
2684 	status = dp_get_srng_ring_state_from_hal
2685 				(pdev->soc, pdev,
2686 				 &pdev->soc->reo_cmd_ring,
2687 				 REO_CMD,
2688 				 &soc_srngs_state->ring_state[j]);
2689 
2690 	if (status == QDF_STATUS_SUCCESS)
2691 		qdf_assert_always(++j < DP_MAX_SRNGS);
2692 
2693 	status = dp_get_srng_ring_state_from_hal
2694 				(pdev->soc, pdev,
2695 				 &pdev->soc->reo_status_ring,
2696 				 REO_STATUS,
2697 				 &soc_srngs_state->ring_state[j]);
2698 
2699 	if (status == QDF_STATUS_SUCCESS)
2700 		qdf_assert_always(++j < DP_MAX_SRNGS);
2701 
2702 	status = dp_get_srng_ring_state_from_hal
2703 				(pdev->soc, pdev,
2704 				 &pdev->soc->rx_rel_ring,
2705 				 WBM2SW_RELEASE,
2706 				 &soc_srngs_state->ring_state[j]);
2707 
2708 	if (status == QDF_STATUS_SUCCESS)
2709 		qdf_assert_always(++j < DP_MAX_SRNGS);
2710 
2711 	status = dp_get_tcl_cmd_cred_ring_state_from_hal
2712 				(pdev, &soc_srngs_state->ring_state[j]);
2713 	if (status == QDF_STATUS_SUCCESS)
2714 		qdf_assert_always(++j < DP_MAX_SRNGS);
2715 
2716 	status = dp_get_tcl_status_ring_state_from_hal
2717 				(pdev, &soc_srngs_state->ring_state[j]);
2718 	if (status == QDF_STATUS_SUCCESS)
2719 		qdf_assert_always(++j < DP_MAX_SRNGS);
2720 
2721 	status = dp_get_srng_ring_state_from_hal
2722 				(pdev->soc, pdev,
2723 				 &pdev->soc->wbm_desc_rel_ring,
2724 				 SW2WBM_RELEASE,
2725 				 &soc_srngs_state->ring_state[j]);
2726 
2727 	if (status == QDF_STATUS_SUCCESS)
2728 		qdf_assert_always(++j < DP_MAX_SRNGS);
2729 
2730 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
2731 		status = dp_get_srng_ring_state_from_hal
2732 				(pdev->soc, pdev,
2733 				 &pdev->soc->reo_dest_ring[i],
2734 				 REO_DST,
2735 				 &soc_srngs_state->ring_state[j]);
2736 
2737 		if (status == QDF_STATUS_SUCCESS)
2738 			qdf_assert_always(++j < DP_MAX_SRNGS);
2739 	}
2740 
2741 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
2742 		status = dp_get_srng_ring_state_from_hal
2743 				(pdev->soc, pdev,
2744 				 &pdev->soc->tcl_data_ring[i],
2745 				 TCL_DATA,
2746 				 &soc_srngs_state->ring_state[j]);
2747 
2748 		if (status == QDF_STATUS_SUCCESS)
2749 			qdf_assert_always(++j < DP_MAX_SRNGS);
2750 	}
2751 
2752 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
2753 		status = dp_get_srng_ring_state_from_hal
2754 				(pdev->soc, pdev,
2755 				 &pdev->soc->tx_comp_ring[i],
2756 				 WBM2SW_RELEASE,
2757 				 &soc_srngs_state->ring_state[j]);
2758 
2759 		if (status == QDF_STATUS_SUCCESS)
2760 			qdf_assert_always(++j < DP_MAX_SRNGS);
2761 	}
2762 
2763 	lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id);
2764 	status = dp_get_srng_ring_state_from_hal
2765 				(pdev->soc, pdev,
2766 				 &pdev->soc->rx_refill_buf_ring
2767 				 [lmac_id],
2768 				 RXDMA_BUF,
2769 				 &soc_srngs_state->ring_state[j]);
2770 
2771 	if (status == QDF_STATUS_SUCCESS)
2772 		qdf_assert_always(++j < DP_MAX_SRNGS);
2773 
2774 	status = dp_get_srng_ring_state_from_hal
2775 				(pdev->soc, pdev,
2776 				 &pdev->rx_refill_buf_ring2,
2777 				 RXDMA_BUF,
2778 				 &soc_srngs_state->ring_state[j]);
2779 
2780 	if (status == QDF_STATUS_SUCCESS)
2781 		qdf_assert_always(++j < DP_MAX_SRNGS);
2782 
2783 
2784 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2785 		dp_get_srng_ring_state_from_hal
2786 				(pdev->soc, pdev,
2787 				 &pdev->rx_mac_buf_ring[i],
2788 				 RXDMA_BUF,
2789 				 &soc_srngs_state->ring_state[j]);
2790 
2791 		if (status == QDF_STATUS_SUCCESS)
2792 			qdf_assert_always(++j < DP_MAX_SRNGS);
2793 	}
2794 
2795 	for (mac_id = 0;
2796 	     mac_id  < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
2797 	     mac_id++) {
2798 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2799 						     mac_id, pdev->pdev_id);
2800 
2801 		dp_queue_mon_ring_stats(pdev, lmac_id, &j,
2802 					soc_srngs_state);
2803 
2804 		status = dp_get_srng_ring_state_from_hal
2805 			(pdev->soc, pdev,
2806 			 &pdev->soc->rxdma_mon_status_ring[lmac_id],
2807 			 RXDMA_MONITOR_STATUS,
2808 			 &soc_srngs_state->ring_state[j]);
2809 
2810 		if (status == QDF_STATUS_SUCCESS)
2811 			qdf_assert_always(++j < DP_MAX_SRNGS);
2812 	}
2813 
2814 	for (i = 0; i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
2815 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2816 						     i, pdev->pdev_id);
2817 
2818 		status = dp_get_srng_ring_state_from_hal
2819 				(pdev->soc, pdev,
2820 				 &pdev->soc->rxdma_err_dst_ring
2821 				 [lmac_id],
2822 				 RXDMA_DST,
2823 				 &soc_srngs_state->ring_state[j]);
2824 
2825 		if (status == QDF_STATUS_SUCCESS)
2826 			qdf_assert_always(++j < DP_MAX_SRNGS);
2827 	}
2828 	soc_srngs_state->max_ring_id = j;
2829 
2830 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
2831 
2832 	soc_srngs_state->seq_num = pdev->bkp_stats.seq_num;
2833 
2834 	if (pdev->bkp_stats.queue_depth >= HTT_BKP_STATS_MAX_QUEUE_DEPTH) {
2835 		drop_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
2836 		qdf_assert_always(drop_srngs_state);
2837 		TAILQ_REMOVE(&pdev->bkp_stats.list, drop_srngs_state,
2838 			     list_elem);
2839 		qdf_mem_free(drop_srngs_state);
2840 		pdev->bkp_stats.queue_depth--;
2841 	}
2842 
2843 	pdev->bkp_stats.queue_depth++;
2844 	TAILQ_INSERT_TAIL(&pdev->bkp_stats.list, soc_srngs_state,
2845 			  list_elem);
2846 	pdev->bkp_stats.seq_num++;
2847 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
2848 
2849 	qdf_queue_work(0, pdev->bkp_stats.work_queue,
2850 		       &pdev->bkp_stats.work);
2851 }
2852 
2853 /*
2854  * dp_htt_bkp_event_alert() - htt backpressure event alert
2855  * @msg_word:	htt packet context
2856  * @htt_soc:	HTT SOC handle
2857  *
2858  * Return: after attempting to print stats
2859  */
2860 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
2861 {
2862 	u_int8_t ring_type;
2863 	u_int8_t pdev_id;
2864 	uint8_t target_pdev_id;
2865 	u_int8_t ring_id;
2866 	u_int16_t hp_idx;
2867 	u_int16_t tp_idx;
2868 	u_int32_t bkp_time;
2869 	enum htt_t2h_msg_type msg_type;
2870 	struct dp_soc *dpsoc;
2871 	struct dp_pdev *pdev;
2872 	struct dp_htt_timestamp *radio_tt;
2873 
2874 	if (!soc)
2875 		return;
2876 
2877 	dpsoc = (struct dp_soc *)soc->dp_soc;
2878 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2879 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
2880 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
2881 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2882 							 target_pdev_id);
2883 	if (pdev_id >= MAX_PDEV_CNT) {
2884 		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
2885 		return;
2886 	}
2887 
2888 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
2889 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
2890 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
2891 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
2892 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
2893 	radio_tt = &soc->pdevid_tt[pdev_id];
2894 
2895 	switch (ring_type) {
2896 	case HTT_SW_RING_TYPE_UMAC:
2897 		if (!time_allow_print(radio_tt->umac_ttt, ring_id))
2898 			return;
2899 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2900 				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
2901 	break;
2902 	case HTT_SW_RING_TYPE_LMAC:
2903 		if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
2904 			return;
2905 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2906 				   bkp_time, "HTT_SW_RING_TYPE_LMAC");
2907 	break;
2908 	default:
2909 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2910 				   bkp_time, "UNKNOWN");
2911 	break;
2912 	}
2913 
2914 	dp_queue_ring_stats(pdev);
2915 }
2916 
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/*
 * dp_offload_ind_handler() - offload msg handler
 * @htt_soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Translates the target pdev id carried in the message to the host pdev
 * id and forwards the payload to the WDI packet-capture offload event.
 *
 * Return: None
 */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
	u_int8_t pdev_id;
	u_int8_t target_pdev_id;

	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	/* Hand the raw msg words to WDI subscribers of the offload event */
	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA, soc->dp_soc,
			     msg_word, HTT_INVALID_VDEV, WDI_NO_VAL,
			     pdev_id);
}
#else
/* Stub: offload indications are ignored when pkt capture v2 is compiled out */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
}
#endif
2944 
2945 #ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
/*
 * dp_update_mlo_ts_offset() - publish the MLO timestamp offset upward
 * @soc: DP SOC handle
 * @ts_lo: lower 32 bits of the MLO offset
 * @ts_hi: upper 32 bits of the MLO offset
 *
 * Combines the two halves into a 64-bit offset and passes it to the
 * registered cdp mlo_ops callback.
 * NOTE(review): mlo_ops and the callback are dereferenced without a NULL
 * check — presumably guaranteed registered on multi-chip builds; confirm.
 */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{
	uint64_t mlo_offset;

	mlo_offset = ((uint64_t)(ts_hi) << 32 | ts_lo);
	soc->cdp_soc.ops->mlo_ops->mlo_update_mlo_ts_offset
		((struct cdp_soc_t *)soc, mlo_offset);
}
#else
/* Stub: no cross-chip timestamp offset to publish on single-chip builds */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{}
#endif
/*
 * dp_htt_mlo_peer_map_handler() - handle HTT_T2H MLO RX peer map message
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Parses the MLO peer id, logical link count and deswizzled MLO peer MAC
 * address from the fixed part of the message, fills the primary-AST flow
 * override info, then walks the trailing TLVs (starting at word 8) to
 * collect per-link chip-id/vdev-id pairs before handing everything to
 * dp_rx_mlo_peer_map_handler().
 *
 * Return: None
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
	uint8_t *mlo_peer_mac_addr;
	uint16_t mlo_peer_id;
	uint8_t num_links;
	struct dp_mlo_flow_override_info mlo_flow_info[DP_MLO_FLOW_INFO_MAX];
	struct dp_mlo_link_info mlo_link_info[DP_MAX_MLO_LINKS];
	MLO_PEER_MAP_TLV_TAG_ID tlv_type = 0xff;
	uint16_t tlv_len = 0;
	int i = 0;

	mlo_peer_id = HTT_RX_MLO_PEER_MAP_MLO_PEER_ID_GET(*msg_word);
	num_links =
		HTT_RX_MLO_PEER_MAP_NUM_LOGICAL_LINKS_GET(*msg_word);
	/* MAC address occupies words 1-2 in target byte order */
	mlo_peer_mac_addr =
	htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
				   &mac_addr_deswizzle_buf[0]);

	/*
	 * NOTE(review): all three flow-info entries below decode the same
	 * word (*(msg_word + 3)); entries [1] and [2] therefore duplicate
	 * entry [0]. Verify against the HTT message definition whether
	 * they should read subsequent words instead.
	 */
	mlo_flow_info[0].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[0].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	mlo_flow_info[1].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[1].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	mlo_flow_info[2].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[2].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	/* TLV section follows the fixed 8-word header */
	msg_word = msg_word + 8;
	while (msg_word && (i < DP_MAX_MLO_LINKS)) {
		/* Defaults mark the link slot as unpopulated */
		mlo_link_info[i].peer_chip_id = 0xFF;
		mlo_link_info[i].vdev_id = 0xFF;

		tlv_type = HTT_RX_MLO_PEER_MAP_TLV_TAG_GET(*msg_word);
		tlv_len = HTT_RX_MLO_PEER_MAP_TLV_LENGTH_GET(*msg_word);

		if (tlv_len == 0) {
			dp_err("TLV Length is 0");
			break;
		}

		if (tlv_type == MLO_PEER_MAP_TLV_STRUCT_SOC_VDEV_PEER_IDS) {
			mlo_link_info[i].peer_chip_id =
				HTT_RX_MLO_PEER_MAP_CHIP_ID_GET(
							*(msg_word + 1));
			mlo_link_info[i].vdev_id =
				HTT_RX_MLO_PEER_MAP_VDEV_ID_GET(
							*(msg_word + 1));
		}
		/* Add header size to tlv length */
		tlv_len = tlv_len + HTT_TLV_HDR_LEN;
		msg_word = (uint32_t *)(((uint8_t *)msg_word) + tlv_len);
		i++;
	}

	dp_rx_mlo_peer_map_handler(soc->dp_soc, mlo_peer_id,
				   mlo_peer_mac_addr,
				   mlo_flow_info, mlo_link_info);
}
3045 
3046 static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
3047 					  uint32_t *msg_word)
3048 {
3049 	uint16_t mlo_peer_id;
3050 
3051 	mlo_peer_id = HTT_RX_MLO_PEER_UNMAP_MLO_PEER_ID_GET(*msg_word);
3052 	dp_rx_mlo_peer_unmap_handler(soc->dp_soc, mlo_peer_id);
3053 }
3054 
/*
 * dp_rx_mlo_timestamp_ind_handler() - handle MLO timestamp offset indication
 * @soc: DP SOC handle
 * @msg_word: Pointer to payload
 *
 * Validates the pdev derived from the message, notifies WDI subscribers,
 * then caches the sync timestamp, MLO offset and compensation values from
 * payload words 1-7 into pdev->timestamp under the htt_stats lock, and
 * finally publishes the combined 64-bit MLO offset upward.
 *
 * Return: None
 */
static void
dp_rx_mlo_timestamp_ind_handler(struct dp_soc *soc,
				uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint8_t target_pdev_id;
	struct dp_pdev *pdev;

	if (!soc)
		return;

	target_pdev_id = HTT_T2H_MLO_TIMESTAMP_OFFSET_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT) {
		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
		return;
	}

	pdev = (struct dp_pdev *)soc->pdev_list[pdev_id];

	if (!pdev) {
		dp_err("Invalid pdev");
		return;
	}
	/* Raw message words go to WDI subscribers of the MLO tstamp event */
	dp_wdi_event_handler(WDI_EVENT_MLO_TSTMP, soc,
			     msg_word, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);

	/* Cache the decoded fields atomically w.r.t. htt stats readers */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	pdev->timestamp.msg_type =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MSG_TYPE_GET(*msg_word);
	pdev->timestamp.pdev_id = pdev_id;
	pdev->timestamp.chip_id =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_CHIP_ID_GET(*msg_word);
	pdev->timestamp.mac_clk_freq =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MAC_CLK_FREQ_MHZ_GET(*msg_word);
	pdev->timestamp.sync_tstmp_lo_us = *(msg_word + 1);
	pdev->timestamp.sync_tstmp_hi_us = *(msg_word + 2);
	pdev->timestamp.mlo_offset_lo_us = *(msg_word + 3);
	pdev->timestamp.mlo_offset_hi_us = *(msg_word + 4);
	pdev->timestamp.mlo_offset_clks  = *(msg_word + 5);
	pdev->timestamp.mlo_comp_us =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_US_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_clks =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_CLKS_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_timer =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_PERIOD_US_GET(
							*(msg_word + 7));

	dp_htt_debug("tsf_lo=%d tsf_hi=%d, mlo_ofst_lo=%d, mlo_ofst_hi=%d\n",
		     pdev->timestamp.sync_tstmp_lo_us,
		     pdev->timestamp.sync_tstmp_hi_us,
		     pdev->timestamp.mlo_offset_lo_us,
		     pdev->timestamp.mlo_offset_hi_us);

	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	dp_update_mlo_ts_offset(soc,
				pdev->timestamp.mlo_offset_lo_us,
				pdev->timestamp.mlo_offset_hi_us);
}
3120 #else
/*
 * Stubs for builds without WLAN_FEATURE_11BE_MLO: the firmware must never
 * send these MLO messages when the feature is compiled out, so reaching
 * any of them is treated as a fatal logic error.
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
					 uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void
dp_rx_mlo_timestamp_ind_handler(void *soc_handle,
				uint32_t *msg_word)
{
	qdf_assert_always(0);
}
3139 #endif
3140 
3141 /*
3142  * dp_htt_rx_addba_handler() - RX Addba HTT msg handler
3143  * @soc: DP Soc handler
3144  * @peer_id: ID of peer
3145  * @tid: TID number
3146  * @win_sz: BA window size
3147  *
3148  * Return: None
3149  */
3150 static void
3151 dp_htt_rx_addba_handler(struct dp_soc *soc, uint16_t peer_id,
3152 			uint8_t tid, uint16_t win_sz)
3153 {
3154 	uint16_t status;
3155 	struct dp_peer *peer;
3156 
3157 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3158 
3159 	if (!peer) {
3160 		dp_err("Peer not found peer id %d", peer_id);
3161 		return;
3162 	}
3163 
3164 	status = dp_addba_requestprocess_wifi3((struct cdp_soc_t *)soc,
3165 					       peer->mac_addr.raw,
3166 					       peer->vdev->vdev_id, 0,
3167 					       tid, 0, win_sz, 0xffff);
3168 
3169 	dp_addba_resp_tx_completion_wifi3(
3170 		(struct cdp_soc_t *)soc,
3171 		peer->mac_addr.raw, peer->vdev->vdev_id,
3172 		tid,
3173 		status);
3174 
3175 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3176 
3177 	dp_info("PeerID %d BAW %d TID %d stat %d",
3178 		peer_id, win_sz, tid, status);
3179 }
3180 
3181 /*
3182  * dp_htt_ppdu_id_fmt_handler() - PPDU ID Format handler
3183  * @htt_soc: HTT SOC handle
3184  * @msg_word: Pointer to payload
3185  *
3186  * Return: None
3187  */
3188 static void
3189 dp_htt_ppdu_id_fmt_handler(struct dp_soc *soc, uint32_t *msg_word)
3190 {
3191 	uint8_t msg_type, valid, bits, offset;
3192 
3193 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3194 
3195 	msg_word += HTT_PPDU_ID_FMT_IND_LINK_ID_OFFSET;
3196 	valid = HTT_PPDU_ID_FMT_IND_VALID_GET_BITS31_16(*msg_word);
3197 	bits = HTT_PPDU_ID_FMT_IND_BITS_GET_BITS31_16(*msg_word);
3198 	offset = HTT_PPDU_ID_FMT_IND_OFFSET_GET_BITS31_16(*msg_word);
3199 
3200 	dp_info("link_id: valid %u bits %u offset %u", valid, bits, offset);
3201 
3202 	if (valid) {
3203 		soc->link_id_offset = offset;
3204 		soc->link_id_bits = bits;
3205 	}
3206 }
3207 
3208 /*
3209  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
3210  * @context:	Opaque context (HTT SOC handle)
3211  * @pkt:	HTC packet
3212  */
3213 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
3214 {
3215 	struct htt_soc *soc = (struct htt_soc *) context;
3216 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
3217 	u_int32_t *msg_word;
3218 	enum htt_t2h_msg_type msg_type;
3219 	bool free_buf = true;
3220 
3221 	/* check for successful message reception */
3222 	if (pkt->Status != QDF_STATUS_SUCCESS) {
3223 		if (pkt->Status != QDF_STATUS_E_CANCELED)
3224 			soc->stats.htc_err_cnt++;
3225 
3226 		qdf_nbuf_free(htt_t2h_msg);
3227 		return;
3228 	}
3229 
3230 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
3231 
3232 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
3233 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3234 	htt_event_record(soc->htt_logger_handle,
3235 			 msg_type, (uint8_t *)msg_word);
3236 	switch (msg_type) {
3237 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
3238 	{
3239 		dp_htt_bkp_event_alert(msg_word, soc);
3240 		break;
3241 	}
3242 	case HTT_T2H_MSG_TYPE_PEER_MAP:
3243 		{
3244 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3245 			u_int8_t *peer_mac_addr;
3246 			u_int16_t peer_id;
3247 			u_int16_t hw_peer_id;
3248 			u_int8_t vdev_id;
3249 			u_int8_t is_wds;
3250 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
3251 
3252 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
3253 			hw_peer_id =
3254 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
3255 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
3256 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
3257 				(u_int8_t *) (msg_word+1),
3258 				&mac_addr_deswizzle_buf[0]);
3259 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3260 				QDF_TRACE_LEVEL_DEBUG,
3261 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3262 				peer_id, vdev_id);
3263 
3264 			/*
3265 			 * check if peer already exists for this peer_id, if so
3266 			 * this peer map event is in response for a wds peer add
3267 			 * wmi command sent during wds source port learning.
3268 			 * in this case just add the ast entry to the existing
3269 			 * peer ast_list.
3270 			 */
3271 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
3272 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
3273 					       vdev_id, peer_mac_addr, 0,
3274 					       is_wds);
3275 			break;
3276 		}
3277 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
3278 		{
3279 			u_int16_t peer_id;
3280 			u_int8_t vdev_id;
3281 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3282 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
3283 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
3284 
3285 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3286 						 vdev_id, mac_addr, 0,
3287 						 DP_PEER_WDS_COUNT_INVALID);
3288 			break;
3289 		}
3290 	case HTT_T2H_MSG_TYPE_SEC_IND:
3291 		{
3292 			u_int16_t peer_id;
3293 			enum cdp_sec_type sec_type;
3294 			int is_unicast;
3295 
3296 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
3297 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
3298 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
3299 			/* point to the first part of the Michael key */
3300 			msg_word++;
3301 			dp_rx_sec_ind_handler(
3302 				soc->dp_soc, peer_id, sec_type, is_unicast,
3303 				msg_word, msg_word + 2);
3304 			break;
3305 		}
3306 
3307 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
3308 		{
3309 			free_buf =
3310 				dp_monitor_ppdu_stats_ind_handler(soc,
3311 								  msg_word,
3312 								  htt_t2h_msg);
3313 			break;
3314 		}
3315 
3316 	case HTT_T2H_MSG_TYPE_PKTLOG:
3317 		{
3318 			dp_pktlog_msg_handler(soc, msg_word);
3319 			break;
3320 		}
3321 
3322 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3323 		{
3324 			/*
3325 			 * HTC maintains runtime pm count for H2T messages that
3326 			 * have a response msg from FW. This count ensures that
3327 			 * in the case FW does not sent out the response or host
3328 			 * did not process this indication runtime_put happens
3329 			 * properly in the cleanup path.
3330 			 */
3331 			if (htc_dec_return_runtime_cnt(soc->htc_soc) >= 0)
3332 				htc_pm_runtime_put(soc->htc_soc);
3333 			else
3334 				soc->stats.htt_ver_req_put_skip++;
3335 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3336 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3337 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
3338 				"target uses HTT version %d.%d; host uses %d.%d",
3339 				soc->tgt_ver.major, soc->tgt_ver.minor,
3340 				HTT_CURRENT_VERSION_MAJOR,
3341 				HTT_CURRENT_VERSION_MINOR);
3342 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3343 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3344 					QDF_TRACE_LEVEL_WARN,
3345 					"*** Incompatible host/target HTT versions!");
3346 			}
3347 			/* abort if the target is incompatible with the host */
3348 			qdf_assert(soc->tgt_ver.major ==
3349 				HTT_CURRENT_VERSION_MAJOR);
3350 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3351 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3352 					QDF_TRACE_LEVEL_INFO_LOW,
3353 					"*** Warning: host/target HTT versions"
3354 					" are different, though compatible!");
3355 			}
3356 			break;
3357 		}
3358 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3359 		{
3360 			uint16_t peer_id;
3361 			uint8_t tid;
3362 			uint16_t win_sz;
3363 
3364 			/*
3365 			 * Update REO Queue Desc with new values
3366 			 */
3367 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3368 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3369 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3370 
3371 			/*
3372 			 * Window size needs to be incremented by 1
3373 			 * since fw needs to represent a value of 256
3374 			 * using just 8 bits
3375 			 */
3376 			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
3377 						tid, win_sz + 1);
3378 			break;
3379 		}
3380 	case HTT_T2H_MSG_TYPE_RX_ADDBA_EXTN:
3381 		{
3382 			uint16_t peer_id;
3383 			uint8_t tid;
3384 			uint16_t win_sz;
3385 
3386 			peer_id = HTT_RX_ADDBA_EXTN_PEER_ID_GET(*msg_word);
3387 			tid = HTT_RX_ADDBA_EXTN_TID_GET(*msg_word);
3388 
3389 			msg_word++;
3390 			win_sz = HTT_RX_ADDBA_EXTN_WIN_SIZE_GET(*msg_word);
3391 
3392 			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
3393 						tid, win_sz);
3394 			break;
3395 		}
3396 	case HTT_T2H_PPDU_ID_FMT_IND:
3397 		{
3398 			dp_htt_ppdu_id_fmt_handler(soc->dp_soc, msg_word);
3399 			break;
3400 		}
3401 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3402 		{
3403 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3404 			break;
3405 		}
3406 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
3407 		{
3408 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3409 			u_int8_t *peer_mac_addr;
3410 			u_int16_t peer_id;
3411 			u_int16_t hw_peer_id;
3412 			u_int8_t vdev_id;
3413 			bool is_wds;
3414 			u_int16_t ast_hash;
3415 			struct dp_ast_flow_override_info ast_flow_info;
3416 
3417 			qdf_mem_set(&ast_flow_info, 0,
3418 					    sizeof(struct dp_ast_flow_override_info));
3419 
3420 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
3421 			hw_peer_id =
3422 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
3423 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
3424 			peer_mac_addr =
3425 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3426 						   &mac_addr_deswizzle_buf[0]);
3427 			is_wds =
3428 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
3429 			ast_hash =
3430 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
3431 			/*
3432 			 * Update 4 ast_index per peer, ast valid mask
3433 			 * and TID flow valid mask.
3434 			 * AST valid mask is 3 bit field corresponds to
3435 			 * ast_index[3:1]. ast_index 0 is always valid.
3436 			 */
3437 			ast_flow_info.ast_valid_mask =
3438 			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
3439 			ast_flow_info.ast_idx[0] = hw_peer_id;
3440 			ast_flow_info.ast_flow_mask[0] =
3441 			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
3442 			ast_flow_info.ast_idx[1] =
3443 			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
3444 			ast_flow_info.ast_flow_mask[1] =
3445 			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
3446 			ast_flow_info.ast_idx[2] =
3447 			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
3448 			ast_flow_info.ast_flow_mask[2] =
3449 			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
3450 			ast_flow_info.ast_idx[3] =
3451 			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
3452 			ast_flow_info.ast_flow_mask[3] =
3453 			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
3454 			/*
3455 			 * TID valid mask is applicable only
3456 			 * for HI and LOW priority flows.
3457 			 * tid_valid_mas is 8 bit field corresponds
3458 			 * to TID[7:0]
3459 			 */
3460 			ast_flow_info.tid_valid_low_pri_mask =
3461 			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
3462 			ast_flow_info.tid_valid_hi_pri_mask =
3463 			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));
3464 
3465 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3466 				  QDF_TRACE_LEVEL_DEBUG,
3467 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3468 				  peer_id, vdev_id);
3469 
3470 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3471 				  QDF_TRACE_LEVEL_INFO,
3472 				  "ast_idx[0] %d ast_idx[1] %d ast_idx[2] %d ast_idx[3] %d n",
3473 				  ast_flow_info.ast_idx[0],
3474 				  ast_flow_info.ast_idx[1],
3475 				  ast_flow_info.ast_idx[2],
3476 				  ast_flow_info.ast_idx[3]);
3477 
3478 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3479 					       hw_peer_id, vdev_id,
3480 					       peer_mac_addr, ast_hash,
3481 					       is_wds);
3482 
3483 			/*
3484 			 * Update ast indexes for flow override support
3485 			 * Applicable only for non wds peers
3486 			 */
3487 			if (!soc->dp_soc->ast_offload_support)
3488 				dp_peer_ast_index_flow_queue_map_create(
3489 						soc->dp_soc, is_wds,
3490 						peer_id, peer_mac_addr,
3491 						&ast_flow_info);
3492 
3493 			break;
3494 		}
3495 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
3496 		{
3497 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3498 			u_int8_t *mac_addr;
3499 			u_int16_t peer_id;
3500 			u_int8_t vdev_id;
3501 			u_int8_t is_wds;
3502 			u_int32_t free_wds_count;
3503 
3504 			peer_id =
3505 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
3506 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
3507 			mac_addr =
3508 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3509 						   &mac_addr_deswizzle_buf[0]);
3510 			is_wds =
3511 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
3512 			free_wds_count =
3513 			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));
3514 
3515 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3516 				  QDF_TRACE_LEVEL_INFO,
3517 				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
3518 				  peer_id, vdev_id);
3519 
3520 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3521 						 vdev_id, mac_addr,
3522 						 is_wds, free_wds_count);
3523 			break;
3524 		}
3525 	case HTT_T2H_MSG_TYPE_RX_DELBA:
3526 		{
3527 			uint16_t peer_id;
3528 			uint8_t tid;
3529 			uint8_t win_sz;
3530 			QDF_STATUS status;
3531 
3532 			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
3533 			tid = HTT_RX_DELBA_TID_GET(*msg_word);
3534 			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);
3535 
3536 			status = dp_rx_delba_ind_handler(
3537 				soc->dp_soc,
3538 				peer_id, tid, win_sz);
3539 
3540 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3541 				  QDF_TRACE_LEVEL_INFO,
3542 				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
3543 				  peer_id, win_sz, tid, status);
3544 			break;
3545 		}
3546 	case HTT_T2H_MSG_TYPE_RX_DELBA_EXTN:
3547 		{
3548 			uint16_t peer_id;
3549 			uint8_t tid;
3550 			uint16_t win_sz;
3551 			QDF_STATUS status;
3552 
3553 			peer_id = HTT_RX_DELBA_EXTN_PEER_ID_GET(*msg_word);
3554 			tid = HTT_RX_DELBA_EXTN_TID_GET(*msg_word);
3555 
3556 			msg_word++;
3557 			win_sz = HTT_RX_DELBA_EXTN_WIN_SIZE_GET(*msg_word);
3558 
3559 			status = dp_rx_delba_ind_handler(soc->dp_soc,
3560 							 peer_id, tid,
3561 							 win_sz);
3562 
3563 			dp_info("DELBA PeerID %d BAW %d TID %d stat %d",
3564 				peer_id, win_sz, tid, status);
3565 			break;
3566 		}
3567 	case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
3568 		{
3569 			uint16_t num_entries;
3570 			uint32_t cmem_ba_lo;
3571 			uint32_t cmem_ba_hi;
3572 
3573 			num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
3574 			cmem_ba_lo = *(msg_word + 1);
3575 			cmem_ba_hi = *(msg_word + 2);
3576 
3577 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3578 				  FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
3579 				  num_entries, cmem_ba_lo, cmem_ba_hi);
3580 
3581 			dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
3582 						     cmem_ba_lo, cmem_ba_hi);
3583 			break;
3584 		}
3585 	case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
3586 		{
3587 			dp_offload_ind_handler(soc, msg_word);
3588 			break;
3589 		}
3590 	case HTT_T2H_MSG_TYPE_PEER_MAP_V3:
3591 	{
3592 		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3593 		u_int8_t *peer_mac_addr;
3594 		u_int16_t peer_id;
3595 		u_int16_t hw_peer_id;
3596 		u_int8_t vdev_id;
3597 		uint8_t is_wds;
3598 		u_int16_t ast_hash = 0;
3599 
3600 		peer_id = HTT_RX_PEER_MAP_V3_SW_PEER_ID_GET(*msg_word);
3601 		vdev_id = HTT_RX_PEER_MAP_V3_VDEV_ID_GET(*msg_word);
3602 		peer_mac_addr =
3603 		htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3604 					   &mac_addr_deswizzle_buf[0]);
3605 		hw_peer_id = HTT_RX_PEER_MAP_V3_HW_PEER_ID_GET(*(msg_word + 3));
3606 		ast_hash = HTT_RX_PEER_MAP_V3_CACHE_SET_NUM_GET(*(msg_word + 3));
3607 		is_wds = HTT_RX_PEER_MAP_V3_NEXT_HOP_GET(*(msg_word + 4));
3608 
3609 		dp_htt_info("HTT_T2H_MSG_TYPE_PEER_MAP_V3 msg for peer id %d vdev id %d n",
3610 			    peer_id, vdev_id);
3611 
3612 		dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3613 				       hw_peer_id, vdev_id,
3614 				       peer_mac_addr, ast_hash,
3615 				       is_wds);
3616 
3617 		break;
3618 	}
3619 	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP:
3620 	{
3621 		dp_htt_mlo_peer_map_handler(soc, msg_word);
3622 		break;
3623 	}
3624 	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_UNMAP:
3625 	{
3626 		dp_htt_mlo_peer_unmap_handler(soc, msg_word);
3627 		break;
3628 	}
3629 	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
3630 	{
3631 		dp_rx_mlo_timestamp_ind_handler(soc->dp_soc, msg_word);
3632 		break;
3633 	}
3634 	case HTT_T2H_MSG_TYPE_VDEVS_TXRX_STATS_PERIODIC_IND:
3635 	{
3636 		dp_vdev_txrx_hw_stats_handler(soc, msg_word);
3637 		break;
3638 	}
3639 	case HTT_T2H_SAWF_DEF_QUEUES_MAP_REPORT_CONF:
3640 	{
3641 		dp_sawf_def_queues_update_map_report_conf(soc, msg_word,
3642 							  htt_t2h_msg);
3643 		break;
3644 	}
3645 	case HTT_T2H_SAWF_MSDUQ_INFO_IND:
3646 	{
3647 		dp_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
3648 		break;
3649 	}
3650 	case HTT_T2H_MSG_TYPE_STREAMING_STATS_IND:
3651 	{
3652 		dp_sawf_mpdu_stats_handler(soc, htt_t2h_msg);
3653 		break;
3654 	}
3655 
3656 	default:
3657 		break;
3658 	};
3659 
3660 	/* Free the indication buffer */
3661 	if (free_buf)
3662 		qdf_nbuf_free(htt_t2h_msg);
3663 }
3664 
3665 /*
3666  * dp_htt_h2t_full() - Send full handler (called from HTC)
3667  * @context:	Opaque context (HTT SOC handle)
3668  * @pkt:	HTC packet
3669  *
3670  * Return: enum htc_send_full_action
3671  */
3672 static enum htc_send_full_action
3673 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
3674 {
3675 	return HTC_SEND_FULL_KEEP;
3676 }
3677 
3678 /*
3679  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3680  * @context:	Opaque context (HTT SOC handle)
3681  * @nbuf:	nbuf containing T2H message
3682  * @pipe_id:	HIF pipe ID
3683  *
3684  * Return: QDF_STATUS
3685  *
3686  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3687  * will be used for packet log and other high-priority HTT messages. Proper
3688  * HTC connection to be added later once required FW changes are available
3689  */
3690 static QDF_STATUS
3691 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3692 {
3693 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3694 	HTC_PACKET htc_pkt;
3695 
3696 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3697 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3698 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3699 	htc_pkt.pPktContext = (void *)nbuf;
3700 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3701 
3702 	return rc;
3703 }
3704 
3705 /*
3706  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3707  * @htt_soc:	HTT SOC handle
3708  *
3709  * Return: QDF_STATUS
3710  */
3711 static QDF_STATUS
3712 htt_htc_soc_attach(struct htt_soc *soc)
3713 {
3714 	struct htc_service_connect_req connect;
3715 	struct htc_service_connect_resp response;
3716 	QDF_STATUS status;
3717 	struct dp_soc *dpsoc = soc->dp_soc;
3718 
3719 	qdf_mem_zero(&connect, sizeof(connect));
3720 	qdf_mem_zero(&response, sizeof(response));
3721 
3722 	connect.pMetaData = NULL;
3723 	connect.MetaDataLength = 0;
3724 	connect.EpCallbacks.pContext = soc;
3725 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3726 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3727 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3728 
3729 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3730 	connect.EpCallbacks.EpRecvRefill = NULL;
3731 
3732 	/* N/A, fill is done by HIF */
3733 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3734 
3735 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3736 	/*
3737 	 * Specify how deep to let a queue get before htc_send_pkt will
3738 	 * call the EpSendFull function due to excessive send queue depth.
3739 	 */
3740 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3741 
3742 	/* disable flow control for HTT data message service */
3743 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3744 
3745 	/* connect to control service */
3746 	connect.service_id = HTT_DATA_MSG_SVC;
3747 
3748 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3749 
3750 	if (status != QDF_STATUS_SUCCESS)
3751 		return status;
3752 
3753 	soc->htc_endpoint = response.Endpoint;
3754 
3755 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3756 
3757 	htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc);
3758 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3759 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3760 
3761 	return QDF_STATUS_SUCCESS; /* success */
3762 }
3763 
3764 /*
3765  * htt_soc_initialize() - SOC level HTT initialization
3766  * @htt_soc: Opaque htt SOC handle
3767  * @ctrl_psoc: Opaque ctrl SOC handle
3768  * @htc_soc: SOC level HTC handle
3769  * @hal_soc: Opaque HAL SOC handle
3770  * @osdev: QDF device
3771  *
3772  * Return: HTT handle on success; NULL on failure
3773  */
3774 void *
3775 htt_soc_initialize(struct htt_soc *htt_soc,
3776 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
3777 		   HTC_HANDLE htc_soc,
3778 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
3779 {
3780 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3781 
3782 	soc->osdev = osdev;
3783 	soc->ctrl_psoc = ctrl_psoc;
3784 	soc->htc_soc = htc_soc;
3785 	soc->hal_soc = hal_soc_hdl;
3786 
3787 	if (htt_htc_soc_attach(soc))
3788 		goto fail2;
3789 
3790 	return soc;
3791 
3792 fail2:
3793 	return NULL;
3794 }
3795 
/*
 * htt_soc_htc_dealloc() - release HTC-related HTT SOC resources
 * @htt_handle: HTT SOC handle
 *
 * Deinitializes HTT event logging, then frees the misc and the
 * pre-allocated HTC packet pools.
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
3802 
3803 /*
3804  * htt_soc_htc_prealloc() - HTC memory prealloc
3805  * @htt_soc: SOC level HTT handle
3806  *
3807  * Return: QDF_STATUS_SUCCESS on Success or
3808  * QDF_STATUS_E_NOMEM on allocation failure
3809  */
3810 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
3811 {
3812 	int i;
3813 
3814 	soc->htt_htc_pkt_freelist = NULL;
3815 	/* pre-allocate some HTC_PACKET objects */
3816 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3817 		struct dp_htt_htc_pkt_union *pkt;
3818 		pkt = qdf_mem_malloc(sizeof(*pkt));
3819 		if (!pkt)
3820 			return QDF_STATUS_E_NOMEM;
3821 
3822 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3823 	}
3824 	return QDF_STATUS_SUCCESS;
3825 }
3826 
3827 /*
3828  * htt_soc_detach() - Free SOC level HTT handle
3829  * @htt_hdl: HTT SOC handle
3830  */
3831 void htt_soc_detach(struct htt_soc *htt_hdl)
3832 {
3833 	int i;
3834 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
3835 
3836 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3837 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
3838 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
3839 	}
3840 
3841 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
3842 	qdf_mem_free(htt_handle);
3843 
3844 }
3845 
3846 /**
3847  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
3848  * @pdev: DP PDEV handle
3849  * @stats_type_upload_mask: stats type requested by user
3850  * @config_param_0: extra configuration parameters
3851  * @config_param_1: extra configuration parameters
3852  * @config_param_2: extra configuration parameters
3853  * @config_param_3: extra configuration parameters
3854  * @mac_id: mac number
3855  *
3856  * return: QDF STATUS
3857  */
3858 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3859 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3860 		uint32_t config_param_1, uint32_t config_param_2,
3861 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3862 		uint8_t mac_id)
3863 {
3864 	struct htt_soc *soc = pdev->soc->htt_handle;
3865 	struct dp_htt_htc_pkt *pkt;
3866 	qdf_nbuf_t msg;
3867 	uint32_t *msg_word;
3868 	uint8_t pdev_mask = 0;
3869 	uint8_t *htt_logger_bufp;
3870 	int mac_for_pdev;
3871 	int target_pdev_id;
3872 	QDF_STATUS status;
3873 
3874 	msg = qdf_nbuf_alloc(
3875 			soc->osdev,
3876 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3877 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3878 
3879 	if (!msg)
3880 		return QDF_STATUS_E_NOMEM;
3881 
3882 	/*TODO:Add support for SOC stats
3883 	 * Bit 0: SOC Stats
3884 	 * Bit 1: Pdev stats for pdev id 0
3885 	 * Bit 2: Pdev stats for pdev id 1
3886 	 * Bit 3: Pdev stats for pdev id 2
3887 	 */
3888 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3889 	target_pdev_id =
3890 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
3891 
3892 	pdev_mask = 1 << target_pdev_id;
3893 
3894 	/*
3895 	 * Set the length of the message.
3896 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3897 	 * separately during the below call to qdf_nbuf_push_head.
3898 	 * The contribution from the HTC header is added separately inside HTC.
3899 	 */
3900 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3901 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3902 				"Failed to expand head for HTT_EXT_STATS");
3903 		qdf_nbuf_free(msg);
3904 		return QDF_STATUS_E_FAILURE;
3905 	}
3906 
3907 	dp_htt_tx_stats_info("%pK: cookie <-> %d\n config_param_0 %u\n"
3908 			     "config_param_1 %u\n config_param_2 %u\n"
3909 			     "config_param_4 %u\n -------------",
3910 			     pdev->soc, cookie_val,
3911 			     config_param_0,
3912 			     config_param_1, config_param_2, config_param_3);
3913 
3914 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3915 
3916 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3917 	htt_logger_bufp = (uint8_t *)msg_word;
3918 	*msg_word = 0;
3919 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
3920 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
3921 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
3922 
3923 	/* word 1 */
3924 	msg_word++;
3925 	*msg_word = 0;
3926 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
3927 
3928 	/* word 2 */
3929 	msg_word++;
3930 	*msg_word = 0;
3931 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
3932 
3933 	/* word 3 */
3934 	msg_word++;
3935 	*msg_word = 0;
3936 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
3937 
3938 	/* word 4 */
3939 	msg_word++;
3940 	*msg_word = 0;
3941 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
3942 
3943 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
3944 
3945 	/* word 5 */
3946 	msg_word++;
3947 
3948 	/* word 6 */
3949 	msg_word++;
3950 	*msg_word = 0;
3951 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
3952 
3953 	/* word 7 */
3954 	msg_word++;
3955 	*msg_word = 0;
3956 	/* Currently Using last 2 bits for pdev_id
3957 	 * For future reference, reserving 3 bits in cookie_msb for pdev_id
3958 	 */
3959 	cookie_msb = (cookie_msb | pdev->pdev_id);
3960 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
3961 
3962 	pkt = htt_htc_pkt_alloc(soc);
3963 	if (!pkt) {
3964 		qdf_nbuf_free(msg);
3965 		return QDF_STATUS_E_NOMEM;
3966 	}
3967 
3968 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3969 
3970 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3971 			dp_htt_h2t_send_complete_free_netbuf,
3972 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3973 			soc->htc_endpoint,
3974 			/* tag for FW response msg not guaranteed */
3975 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
3976 
3977 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3978 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
3979 				     htt_logger_bufp);
3980 
3981 	if (status != QDF_STATUS_SUCCESS) {
3982 		qdf_nbuf_free(msg);
3983 		htt_htc_pkt_free(soc, pkt);
3984 	}
3985 
3986 	return status;
3987 }
3988 
3989 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
3990 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK 0xFFFFFFFF
3991 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK 0xFFFFFFFF00000000
3992 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT 32
3993 
/**
 * dp_h2t_hw_vdev_stats_config_send() - configure HW-offloaded vdev txrx
 * stats collection in FW
 * @dpsoc: DP SOC handle
 * @pdev_id: host pdev id, or INVALID_PDEV_ID for a non-pdev-specific request
 * @enable: enable/disable the periodic HW vdev stats upload
 * @reset: request FW to reset the stats selected by @reset_bitmask
 * @reset_bitmask: 64-bit bitmask of vdev stats to reset, split across
 *                 two message words (low 32 bits first)
 *
 * Return: QDF_STATUS_SUCCESS when the HTT message was handed to HTC,
 *         error code otherwise
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	struct htt_soc *soc = dpsoc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;
	int duration;
	uint32_t bitmask;
	int target_pdev_id;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_vdevs_txrx_stats_cfg)),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate "
		"HTT_H2T_HW_VDEV_TXRX_STATS_CFG_MSG_SZ msg buffer", dpsoc);
		return QDF_STATUS_E_NOMEM;
	}

	/* NOTE(review): target pdev id 0 is used when no specific pdev is
	 * requested — presumably interpreted soc-wide by FW; confirm against
	 * the HTT interface spec.
	 */
	if (pdev_id != INVALID_PDEV_ID)
		target_pdev_id = DP_SW2HW_MACID(pdev_id);
	else
		target_pdev_id = 0;

	duration =
	wlan_cfg_get_vdev_stats_hw_offload_timer(dpsoc->wlan_cfg_ctx);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_vdevs_txrx_stats_cfg))) {
		dp_htt_err("%pK: Failed to expand head for HTT_HW_VDEV_STATS"
			   , dpsoc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area;
	 * msg_word still points at the start of the HTT payload
	 */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;

	/* word 0: msg type, pdev id, enable flag, interval, reset flag */
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG);
	HTT_RX_VDEVS_TXRX_STATS_PDEV_ID_SET(*msg_word, target_pdev_id);

	HTT_RX_VDEVS_TXRX_STATS_ENABLE_SET(*msg_word, enable);

	/* interval field is programmed as duration >> 3 — presumably the FW
	 * expects units of 8 of the configured timer value; TODO confirm
	 */
	HTT_RX_VDEVS_TXRX_STATS_PERIODIC_INTERVAL_SET(*msg_word,
						      (duration >> 3));

	HTT_RX_VDEVS_TXRX_STATS_RESET_STATS_BITS_SET(*msg_word, reset);

	/* word 1: low 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask = (reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK);
	*msg_word = bitmask;

	/* word 2: high 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask =
		((reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK) >>
		 HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT);
	*msg_word = bitmask;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer",
			   dpsoc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG,
				     htt_logger_bufp);

	/* on failure the send-done callback will not run; free both here */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4099 #else
/* Stub used when QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT is not compiled in:
 * HW vdev stats offload is unavailable, so the request is a no-op that
 * reports success.
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	return QDF_STATUS_SUCCESS;
}
4106 #endif
4107 
4108 /**
4109  * dp_h2t_3tuple_config_send(): function to contruct 3 tuple configuration
4110  * HTT message to pass to FW
4111  * @pdev: DP PDEV handle
4112  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
4113  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
4114  *
4115  * tuple_mask[1:0]:
4116  *   00 - Do not report 3 tuple hash value
4117  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
4118  *   01 - Report 3 tuple hash value in flow_id_toeplitz
4119  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
4120  *
4121  * return: QDF STATUS
4122  */
4123 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
4124 				     uint32_t tuple_mask, uint8_t mac_id)
4125 {
4126 	struct htt_soc *soc = pdev->soc->htt_handle;
4127 	struct dp_htt_htc_pkt *pkt;
4128 	qdf_nbuf_t msg;
4129 	uint32_t *msg_word;
4130 	uint8_t *htt_logger_bufp;
4131 	int mac_for_pdev;
4132 	int target_pdev_id;
4133 
4134 	msg = qdf_nbuf_alloc(
4135 			soc->osdev,
4136 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
4137 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4138 
4139 	if (!msg)
4140 		return QDF_STATUS_E_NOMEM;
4141 
4142 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4143 	target_pdev_id =
4144 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
4145 
4146 	/*
4147 	 * Set the length of the message.
4148 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4149 	 * separately during the below call to qdf_nbuf_push_head.
4150 	 * The contribution from the HTC header is added separately inside HTC.
4151 	 */
4152 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
4153 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4154 			  "Failed to expand head for HTT_3TUPLE_CONFIG");
4155 		qdf_nbuf_free(msg);
4156 		return QDF_STATUS_E_FAILURE;
4157 	}
4158 
4159 	dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------",
4160 		    pdev->soc, tuple_mask, target_pdev_id);
4161 
4162 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
4163 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4164 	htt_logger_bufp = (uint8_t *)msg_word;
4165 
4166 	*msg_word = 0;
4167 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
4168 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
4169 
4170 	msg_word++;
4171 	*msg_word = 0;
4172 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4173 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4174 
4175 	pkt = htt_htc_pkt_alloc(soc);
4176 	if (!pkt) {
4177 		qdf_nbuf_free(msg);
4178 		return QDF_STATUS_E_NOMEM;
4179 	}
4180 
4181 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4182 
4183 	SET_HTC_PACKET_INFO_TX(
4184 			&pkt->htc_pkt,
4185 			dp_htt_h2t_send_complete_free_netbuf,
4186 			qdf_nbuf_data(msg),
4187 			qdf_nbuf_len(msg),
4188 			soc->htc_endpoint,
4189 			/* tag for no FW response msg */
4190 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4191 
4192 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4193 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
4194 			    htt_logger_bufp);
4195 
4196 	return QDF_STATUS_SUCCESS;
4197 }
4198 
/* This macro will be reverted once the proper HTT header defines
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the htt.h file.
 */
4202 #if defined(WDI_EVENT_ENABLE)
4203 /**
4204  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
4205  * @pdev: DP PDEV handle
4206  * @stats_type_upload_mask: stats type requested by user
4207  * @mac_id: Mac id number
4208  *
4209  * return: QDF STATUS
4210  */
4211 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
4212 		uint32_t stats_type_upload_mask, uint8_t mac_id)
4213 {
4214 	struct htt_soc *soc = pdev->soc->htt_handle;
4215 	struct dp_htt_htc_pkt *pkt;
4216 	qdf_nbuf_t msg;
4217 	uint32_t *msg_word;
4218 	uint8_t pdev_mask;
4219 	QDF_STATUS status;
4220 
4221 	msg = qdf_nbuf_alloc(
4222 			soc->osdev,
4223 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
4224 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
4225 
4226 	if (!msg) {
4227 		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer"
4228 			   , pdev->soc);
4229 		qdf_assert(0);
4230 		return QDF_STATUS_E_NOMEM;
4231 	}
4232 
4233 	/*TODO:Add support for SOC stats
4234 	 * Bit 0: SOC Stats
4235 	 * Bit 1: Pdev stats for pdev id 0
4236 	 * Bit 2: Pdev stats for pdev id 1
4237 	 * Bit 3: Pdev stats for pdev id 2
4238 	 */
4239 	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
4240 								mac_id);
4241 
4242 	/*
4243 	 * Set the length of the message.
4244 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4245 	 * separately during the below call to qdf_nbuf_push_head.
4246 	 * The contribution from the HTC header is added separately inside HTC.
4247 	 */
4248 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
4249 		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS"
4250 			   , pdev->soc);
4251 		qdf_nbuf_free(msg);
4252 		return QDF_STATUS_E_FAILURE;
4253 	}
4254 
4255 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
4256 
4257 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4258 	*msg_word = 0;
4259 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
4260 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
4261 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
4262 			stats_type_upload_mask);
4263 
4264 	pkt = htt_htc_pkt_alloc(soc);
4265 	if (!pkt) {
4266 		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer", pdev->soc);
4267 		qdf_assert(0);
4268 		qdf_nbuf_free(msg);
4269 		return QDF_STATUS_E_NOMEM;
4270 	}
4271 
4272 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4273 
4274 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4275 			dp_htt_h2t_send_complete_free_netbuf,
4276 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4277 			soc->htc_endpoint,
4278 			/* tag for no FW response msg */
4279 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4280 
4281 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4282 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
4283 				     (uint8_t *)msg_word);
4284 
4285 	if (status != QDF_STATUS_SUCCESS) {
4286 		qdf_nbuf_free(msg);
4287 		htt_htc_pkt_free(soc, pkt);
4288 	}
4289 
4290 	return status;
4291 }
4292 
4293 qdf_export_symbol(dp_h2t_cfg_stats_msg_send);
4294 #endif
4295 
4296 void
4297 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
4298 			     uint32_t *tag_buf)
4299 {
4300 	struct dp_peer *peer = NULL;
4301 	switch (tag_type) {
4302 	case HTT_STATS_PEER_DETAILS_TAG:
4303 	{
4304 		htt_peer_details_tlv *dp_stats_buf =
4305 			(htt_peer_details_tlv *)tag_buf;
4306 
4307 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
4308 	}
4309 	break;
4310 	case HTT_STATS_PEER_STATS_CMN_TAG:
4311 	{
4312 		htt_peer_stats_cmn_tlv *dp_stats_buf =
4313 			(htt_peer_stats_cmn_tlv *)tag_buf;
4314 
4315 		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
4316 					     DP_MOD_ID_HTT);
4317 
4318 		if (peer && !peer->bss_peer) {
4319 			peer->stats.tx.inactive_time =
4320 				dp_stats_buf->inactive_time;
4321 			qdf_event_set(&pdev->fw_peer_stats_event);
4322 		}
4323 		if (peer)
4324 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4325 	}
4326 	break;
4327 	default:
4328 		qdf_err("Invalid tag_type");
4329 	}
4330 }
4331 
4332 /**
4333  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
4334  * @pdev: DP pdev handle
4335  * @fse_setup_info: FST setup parameters
4336  *
4337  * Return: Success when HTT message is sent, error on failure
4338  */
4339 QDF_STATUS
4340 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
4341 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
4342 {
4343 	struct htt_soc *soc = pdev->soc->htt_handle;
4344 	struct dp_htt_htc_pkt *pkt;
4345 	qdf_nbuf_t msg;
4346 	u_int32_t *msg_word;
4347 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4348 	uint8_t *htt_logger_bufp;
4349 	u_int32_t *key;
4350 	QDF_STATUS status;
4351 
4352 	msg = qdf_nbuf_alloc(
4353 		soc->osdev,
4354 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4355 		/* reserve room for the HTC header */
4356 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4357 
4358 	if (!msg)
4359 		return QDF_STATUS_E_NOMEM;
4360 
4361 	/*
4362 	 * Set the length of the message.
4363 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4364 	 * separately during the below call to qdf_nbuf_push_head.
4365 	 * The contribution from the HTC header is added separately inside HTC.
4366 	 */
4367 	if (!qdf_nbuf_put_tail(msg,
4368 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4369 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
4370 		return QDF_STATUS_E_FAILURE;
4371 	}
4372 
4373 	/* fill in the message contents */
4374 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4375 
4376 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4377 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4378 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4379 	htt_logger_bufp = (uint8_t *)msg_word;
4380 
4381 	*msg_word = 0;
4382 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4383 
4384 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4385 
4386 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4387 
4388 	msg_word++;
4389 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4390 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4391 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4392 					     fse_setup_info->ip_da_sa_prefix);
4393 
4394 	msg_word++;
4395 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4396 					  fse_setup_info->base_addr_lo);
4397 	msg_word++;
4398 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4399 					  fse_setup_info->base_addr_hi);
4400 
4401 	key = (u_int32_t *)fse_setup_info->hash_key;
4402 	fse_setup->toeplitz31_0 = *key++;
4403 	fse_setup->toeplitz63_32 = *key++;
4404 	fse_setup->toeplitz95_64 = *key++;
4405 	fse_setup->toeplitz127_96 = *key++;
4406 	fse_setup->toeplitz159_128 = *key++;
4407 	fse_setup->toeplitz191_160 = *key++;
4408 	fse_setup->toeplitz223_192 = *key++;
4409 	fse_setup->toeplitz255_224 = *key++;
4410 	fse_setup->toeplitz287_256 = *key++;
4411 	fse_setup->toeplitz314_288 = *key;
4412 
4413 	msg_word++;
4414 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4415 	msg_word++;
4416 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4417 	msg_word++;
4418 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4419 	msg_word++;
4420 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4421 	msg_word++;
4422 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4423 	msg_word++;
4424 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4425 	msg_word++;
4426 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4427 	msg_word++;
4428 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4429 	msg_word++;
4430 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4431 	msg_word++;
4432 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4433 					  fse_setup->toeplitz314_288);
4434 
4435 	pkt = htt_htc_pkt_alloc(soc);
4436 	if (!pkt) {
4437 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4438 		qdf_assert(0);
4439 		qdf_nbuf_free(msg);
4440 		return QDF_STATUS_E_RESOURCES; /* failure */
4441 	}
4442 
4443 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4444 
4445 	SET_HTC_PACKET_INFO_TX(
4446 		&pkt->htc_pkt,
4447 		dp_htt_h2t_send_complete_free_netbuf,
4448 		qdf_nbuf_data(msg),
4449 		qdf_nbuf_len(msg),
4450 		soc->htc_endpoint,
4451 		/* tag for no FW response msg */
4452 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4453 
4454 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4455 
4456 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4457 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4458 				     htt_logger_bufp);
4459 
4460 	if (status == QDF_STATUS_SUCCESS) {
4461 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4462 			fse_setup_info->pdev_id);
4463 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4464 				   (void *)fse_setup_info->hash_key,
4465 				   fse_setup_info->hash_key_len);
4466 	} else {
4467 		qdf_nbuf_free(msg);
4468 		htt_htc_pkt_free(soc, pkt);
4469 	}
4470 
4471 	return status;
4472 }
4473 
4474 /**
4475  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
4476  * add/del a flow in HW
4477  * @pdev: DP pdev handle
4478  * @fse_op_info: Flow entry parameters
4479  *
4480  * Return: Success when HTT message is sent, error on failure
4481  */
4482 QDF_STATUS
4483 dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
4484 			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
4485 {
4486 	struct htt_soc *soc = pdev->soc->htt_handle;
4487 	struct dp_htt_htc_pkt *pkt;
4488 	qdf_nbuf_t msg;
4489 	u_int32_t *msg_word;
4490 	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
4491 	uint8_t *htt_logger_bufp;
4492 	QDF_STATUS status;
4493 
4494 	msg = qdf_nbuf_alloc(
4495 		soc->osdev,
4496 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
4497 		/* reserve room for the HTC header */
4498 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4499 	if (!msg)
4500 		return QDF_STATUS_E_NOMEM;
4501 
4502 	/*
4503 	 * Set the length of the message.
4504 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4505 	 * separately during the below call to qdf_nbuf_push_head.
4506 	 * The contribution from the HTC header is added separately inside HTC.
4507 	 */
4508 	if (!qdf_nbuf_put_tail(msg,
4509 			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
4510 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4511 		qdf_nbuf_free(msg);
4512 		return QDF_STATUS_E_FAILURE;
4513 	}
4514 
4515 	/* fill in the message contents */
4516 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4517 
4518 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
4519 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4520 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4521 	htt_logger_bufp = (uint8_t *)msg_word;
4522 
4523 	*msg_word = 0;
4524 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
4525 
4526 	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
4527 
4528 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
4529 	msg_word++;
4530 	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
4531 	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
4532 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4533 					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
4534 		msg_word++;
4535 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4536 		*msg_word,
4537 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
4538 		msg_word++;
4539 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4540 		*msg_word,
4541 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
4542 		msg_word++;
4543 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4544 		*msg_word,
4545 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
4546 		msg_word++;
4547 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4548 		*msg_word,
4549 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
4550 		msg_word++;
4551 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4552 		*msg_word,
4553 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
4554 		msg_word++;
4555 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4556 		*msg_word,
4557 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
4558 		msg_word++;
4559 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4560 		*msg_word,
4561 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
4562 		msg_word++;
4563 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4564 		*msg_word,
4565 		qdf_htonl(
4566 		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
4567 		msg_word++;
4568 		HTT_RX_FSE_SOURCEPORT_SET(
4569 			*msg_word,
4570 			fse_op_info->rx_flow->flow_tuple_info.src_port);
4571 		HTT_RX_FSE_DESTPORT_SET(
4572 			*msg_word,
4573 			fse_op_info->rx_flow->flow_tuple_info.dest_port);
4574 		msg_word++;
4575 		HTT_RX_FSE_L4_PROTO_SET(
4576 			*msg_word,
4577 			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
4578 	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
4579 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4580 					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
4581 	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
4582 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
4583 	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
4584 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
4585 	}
4586 
4587 	pkt = htt_htc_pkt_alloc(soc);
4588 	if (!pkt) {
4589 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4590 		qdf_assert(0);
4591 		qdf_nbuf_free(msg);
4592 		return QDF_STATUS_E_RESOURCES; /* failure */
4593 	}
4594 
4595 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4596 
4597 	SET_HTC_PACKET_INFO_TX(
4598 		&pkt->htc_pkt,
4599 		dp_htt_h2t_send_complete_free_netbuf,
4600 		qdf_nbuf_data(msg),
4601 		qdf_nbuf_len(msg),
4602 		soc->htc_endpoint,
4603 		/* tag for no FW response msg */
4604 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4605 
4606 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4607 
4608 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4609 				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
4610 				     htt_logger_bufp);
4611 
4612 	if (status == QDF_STATUS_SUCCESS) {
4613 		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
4614 			fse_op_info->pdev_id);
4615 	} else {
4616 		qdf_nbuf_free(msg);
4617 		htt_htc_pkt_free(soc, pkt);
4618 	}
4619 
4620 	return status;
4621 }
4622 
4623 /**
4624  * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
4625  * @pdev: DP pdev handle
4626  * @fse_op_info: Flow entry parameters
4627  *
4628  * Return: Success when HTT message is sent, error on failure
4629  */
4630 QDF_STATUS
4631 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
4632 		      struct dp_htt_rx_fisa_cfg *fisa_config)
4633 {
4634 	struct htt_soc *soc = pdev->soc->htt_handle;
4635 	struct dp_htt_htc_pkt *pkt;
4636 	qdf_nbuf_t msg;
4637 	u_int32_t *msg_word;
4638 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
4639 	uint8_t *htt_logger_bufp;
4640 	uint32_t len;
4641 	QDF_STATUS status;
4642 
4643 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
4644 
4645 	msg = qdf_nbuf_alloc(soc->osdev,
4646 			     len,
4647 			     /* reserve room for the HTC header */
4648 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
4649 			     4,
4650 			     TRUE);
4651 	if (!msg)
4652 		return QDF_STATUS_E_NOMEM;
4653 
4654 	/*
4655 	 * Set the length of the message.
4656 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4657 	 * separately during the below call to qdf_nbuf_push_head.
4658 	 * The contribution from the HTC header is added separately inside HTC.
4659 	 */
4660 	if (!qdf_nbuf_put_tail(msg,
4661 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
4662 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4663 		qdf_nbuf_free(msg);
4664 		return QDF_STATUS_E_FAILURE;
4665 	}
4666 
4667 	/* fill in the message contents */
4668 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4669 
4670 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
4671 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4672 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4673 	htt_logger_bufp = (uint8_t *)msg_word;
4674 
4675 	*msg_word = 0;
4676 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
4677 
4678 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
4679 
4680 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);
4681 
4682 	msg_word++;
4683 	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
4684 	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);
4685 
4686 	msg_word++;
4687 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
4688 
4689 	pkt = htt_htc_pkt_alloc(soc);
4690 	if (!pkt) {
4691 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4692 		qdf_assert(0);
4693 		qdf_nbuf_free(msg);
4694 		return QDF_STATUS_E_RESOURCES; /* failure */
4695 	}
4696 
4697 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4698 
4699 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4700 			       dp_htt_h2t_send_complete_free_netbuf,
4701 			       qdf_nbuf_data(msg),
4702 			       qdf_nbuf_len(msg),
4703 			       soc->htc_endpoint,
4704 			       /* tag for no FW response msg */
4705 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
4706 
4707 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4708 
4709 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
4710 				     htt_logger_bufp);
4711 
4712 	if (status == QDF_STATUS_SUCCESS) {
4713 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
4714 			fisa_config->pdev_id);
4715 	} else {
4716 		qdf_nbuf_free(msg);
4717 		htt_htc_pkt_free(soc, pkt);
4718 	}
4719 
4720 	return status;
4721 }
4722 
4723 #ifdef WLAN_SUPPORT_PPEDS
4724 /**
4725  * dp_htt_rxdma_rxole_ppe_cfg_set() - Send RxOLE and RxDMA PPE config
4726  * @dp_osc: Data path SoC handle
4727  * @cfg: RxDMA and RxOLE PPE config
4728  *
4729  * Return: Success when HTT message is sent, error on failure
4730  */
4731 QDF_STATUS
4732 dp_htt_rxdma_rxole_ppe_cfg_set(struct dp_soc *soc,
4733 			       struct dp_htt_rxdma_rxole_ppe_config *cfg)
4734 {
4735 	struct htt_soc *htt_handle = soc->htt_handle;
4736 	uint32_t len;
4737 	qdf_nbuf_t msg;
4738 	u_int32_t *msg_word;
4739 	QDF_STATUS status;
4740 	uint8_t *htt_logger_bufp;
4741 	struct dp_htt_htc_pkt *pkt;
4742 
4743 	len = HTT_MSG_BUF_SIZE(
4744 	      sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));
4745 
4746 	msg = qdf_nbuf_alloc(soc->osdev,
4747 			     len,
4748 			     /* reserve room for the HTC header */
4749 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
4750 			     4,
4751 			     TRUE);
4752 	if (!msg)
4753 		return QDF_STATUS_E_NOMEM;
4754 
4755 	/*
4756 	 * Set the length of the message.
4757 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4758 	 * separately during the below call to qdf_nbuf_push_head.
4759 	 * The contribution from the HTC header is added separately inside HTC.
4760 	 */
4761 	if (!qdf_nbuf_put_tail(
4762 		msg, sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t))) {
4763 		qdf_err("Failed to expand head for HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG msg");
4764 		qdf_nbuf_free(msg);
4765 		return QDF_STATUS_E_FAILURE;
4766 	}
4767 
4768 	/* fill in the message contents */
4769 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4770 
4771 	memset(msg_word, 0,
4772 	       sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));
4773 
4774 	/* Rewind beyond alignment pad to get to the HTC header reserved area */
4775 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4776 	htt_logger_bufp = (uint8_t *)msg_word;
4777 
4778 	*msg_word = 0;
4779 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG);
4780 	HTT_PPE_CFG_OVERRIDE_SET(*msg_word, cfg->override);
4781 	HTT_PPE_CFG_REO_DEST_IND_SET(
4782 			*msg_word, cfg->reo_destination_indication);
4783 	HTT_PPE_CFG_MULTI_BUF_MSDU_OVERRIDE_EN_SET(
4784 			*msg_word, cfg->multi_buffer_msdu_override_en);
4785 	HTT_PPE_CFG_INTRA_BSS_OVERRIDE_EN_SET(
4786 			*msg_word, cfg->intra_bss_override);
4787 	HTT_PPE_CFG_DECAP_RAW_OVERRIDE_EN_SET(
4788 			*msg_word, cfg->decap_raw_override);
4789 	HTT_PPE_CFG_DECAP_NWIFI_OVERRIDE_EN_SET(
4790 			*msg_word, cfg->decap_nwifi_override);
4791 	HTT_PPE_CFG_IP_FRAG_OVERRIDE_EN_SET(
4792 			*msg_word, cfg->ip_frag_override);
4793 
4794 	pkt = htt_htc_pkt_alloc(htt_handle);
4795 	if (!pkt) {
4796 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4797 		qdf_assert(0);
4798 		qdf_nbuf_free(msg);
4799 		return QDF_STATUS_E_RESOURCES; /* failure */
4800 	}
4801 
4802 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4803 
4804 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4805 			       dp_htt_h2t_send_complete_free_netbuf,
4806 			       qdf_nbuf_data(msg),
4807 			       qdf_nbuf_len(msg),
4808 			       htt_handle->htc_endpoint,
4809 			       /* tag for no FW response msg */
4810 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
4811 
4812 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4813 
4814 	status = DP_HTT_SEND_HTC_PKT(htt_handle, pkt,
4815 				     HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG,
4816 				     htt_logger_bufp);
4817 
4818 	if (status != QDF_STATUS_SUCCESS) {
4819 		qdf_nbuf_free(msg);
4820 		htt_htc_pkt_free(htt_handle, pkt);
4821 		return status;
4822 	}
4823 
4824 	dp_info("HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG sent");
4825 	return status;
4826 }
4827 #endif /* WLAN_SUPPORT_PPEDS */
4828 
4829 /**
4830  * dp_bk_pressure_stats_handler(): worker function to print back pressure
4831  *				   stats
4832  *
4833  * @context : argument to work function
4834  */
4835 static void dp_bk_pressure_stats_handler(void *context)
4836 {
4837 	struct dp_pdev *pdev = (struct dp_pdev *)context;
4838 	struct dp_soc_srngs_state *soc_srngs_state = NULL;
4839 	const char *ring_name;
4840 	int i;
4841 	struct dp_srng_ring_state *ring_state;
4842 	bool empty_flag;
4843 
4844 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4845 
4846 	/* Extract only first entry for printing in one work event */
4847 	if (pdev->bkp_stats.queue_depth &&
4848 	    !TAILQ_EMPTY(&pdev->bkp_stats.list)) {
4849 		soc_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
4850 		TAILQ_REMOVE(&pdev->bkp_stats.list, soc_srngs_state,
4851 			     list_elem);
4852 		pdev->bkp_stats.queue_depth--;
4853 	}
4854 
4855 	empty_flag = TAILQ_EMPTY(&pdev->bkp_stats.list);
4856 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4857 
4858 	if (soc_srngs_state) {
4859 		DP_PRINT_STATS("### BKP stats for seq_num %u START ###",
4860 			       soc_srngs_state->seq_num);
4861 		for (i = 0; i < soc_srngs_state->max_ring_id; i++) {
4862 			ring_state = &soc_srngs_state->ring_state[i];
4863 			ring_name = dp_srng_get_str_from_hal_ring_type
4864 						(ring_state->ring_type);
4865 			DP_PRINT_STATS("%s: SW:Head pointer = %d Tail Pointer = %d\n",
4866 				       ring_name,
4867 				       ring_state->sw_head,
4868 				       ring_state->sw_tail);
4869 
4870 			DP_PRINT_STATS("%s: HW:Head pointer = %d Tail Pointer = %d\n",
4871 				       ring_name,
4872 				       ring_state->hw_head,
4873 				       ring_state->hw_tail);
4874 		}
4875 
4876 		DP_PRINT_STATS("### BKP stats for seq_num %u COMPLETE ###",
4877 			       soc_srngs_state->seq_num);
4878 		qdf_mem_free(soc_srngs_state);
4879 	}
4880 	dp_print_napi_stats(pdev->soc);
4881 
4882 	/* Schedule work again if queue is not empty */
4883 	if (!empty_flag)
4884 		qdf_queue_work(0, pdev->bkp_stats.work_queue,
4885 			       &pdev->bkp_stats.work);
4886 }
4887 
4888 /*
4889  * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
4890  *				processing
4891  * @pdev: Datapath PDEV handle
4892  *
4893  */
4894 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev)
4895 {
4896 	struct dp_soc_srngs_state *ring_state, *ring_state_next;
4897 
4898 	if (!pdev->bkp_stats.work_queue)
4899 		return;
4900 
4901 	qdf_flush_workqueue(0, pdev->bkp_stats.work_queue);
4902 	qdf_destroy_workqueue(0, pdev->bkp_stats.work_queue);
4903 	qdf_flush_work(&pdev->bkp_stats.work);
4904 	qdf_disable_work(&pdev->bkp_stats.work);
4905 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4906 	TAILQ_FOREACH_SAFE(ring_state, &pdev->bkp_stats.list,
4907 			   list_elem, ring_state_next) {
4908 		TAILQ_REMOVE(&pdev->bkp_stats.list, ring_state,
4909 			     list_elem);
4910 		qdf_mem_free(ring_state);
4911 	}
4912 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4913 	qdf_spinlock_destroy(&pdev->bkp_stats.list_lock);
4914 }
4915 
4916 /*
4917  * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
4918  *				processing
4919  * @pdev: Datapath PDEV handle
4920  *
4921  * Return: QDF_STATUS_SUCCESS: Success
4922  *         QDF_STATUS_E_NOMEM: Error
4923  */
4924 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev)
4925 {
4926 	TAILQ_INIT(&pdev->bkp_stats.list);
4927 	pdev->bkp_stats.seq_num = 0;
4928 	pdev->bkp_stats.queue_depth = 0;
4929 
4930 	qdf_create_work(0, &pdev->bkp_stats.work,
4931 			dp_bk_pressure_stats_handler, pdev);
4932 
4933 	pdev->bkp_stats.work_queue =
4934 		qdf_alloc_unbound_workqueue("dp_bkp_work_queue");
4935 	if (!pdev->bkp_stats.work_queue)
4936 		goto fail;
4937 
4938 	qdf_spinlock_create(&pdev->bkp_stats.list_lock);
4939 	return QDF_STATUS_SUCCESS;
4940 
4941 fail:
4942 	dp_htt_alert("BKP stats attach failed");
4943 	qdf_flush_work(&pdev->bkp_stats.work);
4944 	qdf_disable_work(&pdev->bkp_stats.work);
4945 	return QDF_STATUS_E_FAILURE;
4946 }
4947