xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 67011a392c017971b1c0e7b8f6621fd5613f6075)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <htt.h>
21 #include <hal_hw_headers.h>
22 #include <hal_api.h>
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "dp_htt.h"
30 #ifdef WIFI_MONITOR_SUPPORT
31 #include <dp_mon.h>
32 #endif
33 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
34 #include "cdp_txrx_cmn_struct.h"
35 
36 #ifdef FEATURE_PERPKT_INFO
37 #include "dp_ratetable.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef CONFIG_SAWF_DEF_QUEUES
41 #include <dp_sawf_htt.h>
42 #endif
43 
44 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
45 
46 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
47 
48 #define HTT_MSG_BUF_SIZE(msg_bytes) \
49 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
50 
51 #define HTT_PID_BIT_MASK 0x3
52 
53 #define DP_EXT_MSG_LENGTH 2048
54 #define HTT_HEADER_LEN 16
55 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
56 
57 #define HTT_SHIFT_UPPER_TIMESTAMP 32
58 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
59 #define HTT_BKP_STATS_MAX_QUEUE_DEPTH 16
60 
61 struct dp_htt_htc_pkt *
62 htt_htc_pkt_alloc(struct htt_soc *soc)
63 {
64 	struct dp_htt_htc_pkt_union *pkt = NULL;
65 
66 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
67 	if (soc->htt_htc_pkt_freelist) {
68 		pkt = soc->htt_htc_pkt_freelist;
69 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
70 	}
71 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
72 
73 	if (!pkt)
74 		pkt = qdf_mem_malloc(sizeof(*pkt));
75 
76 	if (!pkt)
77 		return NULL;
78 
79 	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
80 
81 	return &pkt->u.pkt; /* not actually a dereference */
82 }
83 
84 qdf_export_symbol(htt_htc_pkt_alloc);
85 
86 void
87 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
88 {
89 	struct dp_htt_htc_pkt_union *u_pkt =
90 		(struct dp_htt_htc_pkt_union *)pkt;
91 
92 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
93 	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
94 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
95 	soc->htt_htc_pkt_freelist = u_pkt;
96 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
97 }
98 
99 qdf_export_symbol(htt_htc_pkt_free);
100 
101 /*
102  * htt_htc_pkt_pool_free() - Free HTC packet pool
103  * @htt_soc:	HTT SOC handle
104  */
105 void
106 htt_htc_pkt_pool_free(struct htt_soc *soc)
107 {
108 	struct dp_htt_htc_pkt_union *pkt, *next;
109 	pkt = soc->htt_htc_pkt_freelist;
110 	while (pkt) {
111 		next = pkt->u.next;
112 		qdf_mem_free(pkt);
113 		pkt = next;
114 	}
115 	soc->htt_htc_pkt_freelist = NULL;
116 }
117 
118 
119 #ifndef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
120 
/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no. of pkts allowed to stay on the list
 *
 * Walks the misclist under the tx mutex; once more than @level entries
 * have been seen, every subsequent entry is torn down: the network
 * buffer is DMA-unmapped and freed, then the wrapper itself is freed.
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			/* NULL pkt so that 'prev' below does not end up
			 * pointing at freed memory; the list is cut at
			 * the first trimmed entry via prev->u.next, and
			 * prev then stays NULL for the rest of the walk.
			 */
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
153 
154 /*
155  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
156  * @htt_soc:	HTT SOC handle
157  * @dp_htt_htc_pkt: pkt to be added to list
158  */
159 void
160 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
161 {
162 	struct dp_htt_htc_pkt_union *u_pkt =
163 				(struct dp_htt_htc_pkt_union *)pkt;
164 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
165 							pkt->htc_pkt.Endpoint)
166 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
167 
168 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
169 	if (soc->htt_htc_pkt_misclist) {
170 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
171 		soc->htt_htc_pkt_misclist = u_pkt;
172 	} else {
173 		soc->htt_htc_pkt_misclist = u_pkt;
174 	}
175 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
176 
177 	/* only ce pipe size + tx_queue_depth could possibly be in use
178 	 * free older packets in the misclist
179 	 */
180 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
181 }
182 
183 qdf_export_symbol(htt_htc_misc_pkt_list_add);
184 #endif  /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
185 
186 /*
187  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
188  * @htt_soc:	HTT SOC handle
189  */
190 static void
191 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
192 {
193 	struct dp_htt_htc_pkt_union *pkt, *next;
194 	qdf_nbuf_t netbuf;
195 
196 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
197 	pkt = soc->htt_htc_pkt_misclist;
198 
199 	while (pkt) {
200 		next = pkt->u.next;
201 		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
202 		    HTC_PACKET_MAGIC_COOKIE) {
203 			pkt = next;
204 			soc->stats.skip_count++;
205 			continue;
206 		}
207 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
208 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
209 
210 		soc->stats.htc_pkt_free++;
211 		dp_htt_info("%pK: Pkt free count %d",
212 			    soc->dp_soc, soc->stats.htc_pkt_free);
213 
214 		qdf_nbuf_free(netbuf);
215 		qdf_mem_free(pkt);
216 		pkt = next;
217 	}
218 	soc->htt_htc_pkt_misclist = NULL;
219 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
220 	dp_info("HTC Packets, fail count = %d, skip count = %d",
221 		soc->stats.fail_count, soc->stats.skip_count);
222 }
223 
/*
 * htt_t2h_mac_addr_deswizzle() - undo per-word byte swizzling of a MAC addr
 * @tgt_mac_addr: MAC address as carried in the T2H message
 * @buffer: scratch output buffer (used only on big-endian hosts)
 *
 * Return: pointer to the de-swizzled 6-byte MAC address
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	int i;

	/*
	 * Host and target endianness differ, so the target->host upload
	 * byte-swapped each u_int32_t of the message. That swap puts the
	 * bytes of a byte-array field (like this MAC address) in the
	 * wrong order; reverse each 4-byte word to restore it. The MAC
	 * occupies bytes 0-3 of the first word and bytes 0-1 of the
	 * second (which arrive at offsets 7 and 6 after swizzling).
	 */
	for (i = 0; i < 4; i++)
		buffer[i] = tgt_mac_addr[3 - i];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * Host endianness matches the target's, so the address can be
	 * used straight out of the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
257 
/*
 * dp_htt_h2t_send_complete_free_netbuf() - free the message buffer once
 *	HTC has finished transmitting it
 * @soc: opaque SOC handle (unused)
 * @status: transmit completion status (unused)
 * @netbuf: HTT message buffer to release
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
270 
271 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
272 /*
273  * dp_htt_h2t_send_complete() - H2T completion handler
274  * @context:	Opaque context (HTT SOC handle)
275  * @htc_pkt:	HTC packet
276  */
277 static void
278 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
279 {
280 	struct htt_soc *soc =  (struct htt_soc *) context;
281 	struct dp_htt_htc_pkt *htt_pkt;
282 	qdf_nbuf_t netbuf;
283 
284 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
285 
286 	/* process (free or keep) the netbuf that held the message */
287 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
288 	/*
289 	 * adf sendcomplete is required for windows only
290 	 */
291 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
292 	/* free the htt_htc_pkt / HTC_PACKET object */
293 	qdf_nbuf_free(netbuf);
294 	htt_htc_pkt_free(soc, htt_pkt);
295 }
296 
297 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
298 
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context: Opaque context (HTT SOC handle)
 * @htc_pkt: HTC packet
 */
304 static void
305 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
306 {
307 	void (*send_complete_part2)(
308 	     void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
309 	struct htt_soc *soc =  (struct htt_soc *) context;
310 	struct dp_htt_htc_pkt *htt_pkt;
311 	qdf_nbuf_t netbuf;
312 
313 	send_complete_part2 = htc_pkt->pPktContext;
314 
315 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
316 
317 	/* process (free or keep) the netbuf that held the message */
318 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
319 	/*
320 	 * adf sendcomplete is required for windows only
321 	*/
322 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
323 	if (send_complete_part2){
324 		send_complete_part2(
325 		    htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
326 	}
327 	/* free the htt_htc_pkt / HTC_PACKET object */
328 	htt_htc_pkt_free(soc, htt_pkt);
329 }
330 
331 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
332 
333 /*
334  * dp_htt_h2t_add_tcl_metadata_ver_v1() - Add tcl_metadata verion V1
335  * @htt_soc:	HTT SOC handle
336  * @msg:	Pointer to nbuf
337  *
338  * Return: 0 on success; error code on failure
339  */
340 static int dp_htt_h2t_add_tcl_metadata_ver_v1(struct htt_soc *soc,
341 					      qdf_nbuf_t *msg)
342 {
343 	uint32_t *msg_word;
344 
345 	*msg = qdf_nbuf_alloc(
346 		soc->osdev,
347 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
348 		/* reserve room for the HTC header */
349 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
350 	if (!*msg)
351 		return QDF_STATUS_E_NOMEM;
352 
353 	/*
354 	 * Set the length of the message.
355 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
356 	 * separately during the below call to qdf_nbuf_push_head.
357 	 * The contribution from the HTC header is added separately inside HTC.
358 	 */
359 	if (!qdf_nbuf_put_tail(*msg, HTT_VER_REQ_BYTES)) {
360 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
361 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
362 			  __func__);
363 		return QDF_STATUS_E_FAILURE;
364 	}
365 
366 	/* fill in the message contents */
367 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
368 
369 	/* rewind beyond alignment pad to get to the HTC header reserved area */
370 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
371 
372 	*msg_word = 0;
373 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
374 
375 	return QDF_STATUS_SUCCESS;
376 }
377 
378 #ifdef QCA_DP_TX_FW_METADATA_V2
379 /*
380  * dp_htt_h2t_add_tcl_metadata_ver_v2() - Add tcl_metadata verion V2
381  * @htt_soc:	HTT SOC handle
382  * @msg:	Pointer to nbuf
383  *
384  * Return: 0 on success; error code on failure
385  */
386 static int dp_htt_h2t_add_tcl_metadata_ver_v2(struct htt_soc *soc,
387 					      qdf_nbuf_t *msg)
388 {
389 	uint32_t *msg_word;
390 
391 	*msg = qdf_nbuf_alloc(
392 		soc->osdev,
393 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ),
394 		/* reserve room for the HTC header */
395 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
396 	if (!*msg)
397 		return QDF_STATUS_E_NOMEM;
398 
399 	/*
400 	 * Set the length of the message.
401 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
402 	 * separately during the below call to qdf_nbuf_push_head.
403 	 * The contribution from the HTC header is added separately inside HTC.
404 	 */
405 	if (!qdf_nbuf_put_tail(*msg,
406 			       HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ)) {
407 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
408 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
409 			  __func__);
410 		return QDF_STATUS_E_FAILURE;
411 	}
412 
413 	/* fill in the message contents */
414 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
415 
416 	/* rewind beyond alignment pad to get to the HTC header reserved area */
417 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
418 
419 	*msg_word = 0;
420 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
421 
422 	/* word 1 */
423 	msg_word++;
424 	*msg_word = 0;
425 	HTT_OPTION_TLV_TAG_SET(*msg_word, HTT_OPTION_TLV_TAG_TCL_METADATA_VER);
426 	HTT_OPTION_TLV_LENGTH_SET(*msg_word, HTT_TCL_METADATA_VER_SZ);
427 	HTT_OPTION_TLV_TCL_METADATA_VER_SET(*msg_word,
428 					    HTT_OPTION_TLV_TCL_METADATA_V2);
429 
430 	return QDF_STATUS_SUCCESS;
431 }
432 
433 /*
434  * dp_htt_h2t_add_tcl_metadata_ver() - Add tcl_metadata verion
435  * @htt_soc:	HTT SOC handle
436  * @msg:	Pointer to nbuf
437  *
438  * Return: 0 on success; error code on failure
439  */
440 static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
441 {
442 	/* Use tcl_metadata_v1 when NSS offload is enabled */
443 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->dp_soc->wlan_cfg_ctx) ||
444 	    soc->dp_soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
445 		return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
446 	else
447 		return dp_htt_h2t_add_tcl_metadata_ver_v2(soc, msg);
448 }
449 #else
/* TCL metadata V2 not compiled in: always request version V1 */
static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
{
	return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
}
454 #endif
455 
/*
 * htt_h2t_ver_req_msg() - Send HTT version request message to target
 * @soc: HTT SOC handle
 *
 * Builds the VERSION_REQ message (via dp_htt_h2t_add_tcl_metadata_ver),
 * wraps it in an HTC packet and queues it for transmit. On a send
 * failure both the message nbuf and the packet wrapper are released
 * here before the error is returned.
 *
 * Return: 0 on success; error code on failure
 */
static int htt_h2t_ver_req_msg(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg = NULL;
	QDF_STATUS status;

	status = dp_htt_h2t_add_tcl_metadata_ver(soc, &msg);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		/* wrapper alloc failed: drop the already-built message */
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}
	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	/* completion callback frees the nbuf once HTC is done with it */
	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
		qdf_nbuf_len(msg), soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RTPM_PUT_RC);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
				     NULL);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
495 
496 /*
497  * htt_srng_setup() - Send SRNG setup message to target
498  * @htt_soc:	HTT SOC handle
499  * @mac_id:	MAC Id
500  * @hal_srng:	Opaque HAL SRNG pointer
501  * @hal_ring_type:	SRNG ring type
502  *
503  * Return: 0 on success; error code on failure
504  */
505 int htt_srng_setup(struct htt_soc *soc, int mac_id,
506 		   hal_ring_handle_t hal_ring_hdl,
507 		   int hal_ring_type)
508 {
509 	struct dp_htt_htc_pkt *pkt;
510 	qdf_nbuf_t htt_msg;
511 	uint32_t *msg_word;
512 	struct hal_srng_params srng_params;
513 	qdf_dma_addr_t hp_addr, tp_addr;
514 	uint32_t ring_entry_size =
515 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
516 	int htt_ring_type, htt_ring_id;
517 	uint8_t *htt_logger_bufp;
518 	int target_pdev_id;
519 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
520 	QDF_STATUS status;
521 
522 	/* Sizes should be set in 4-byte words */
523 	ring_entry_size = ring_entry_size >> 2;
524 
525 	htt_msg = qdf_nbuf_alloc(soc->osdev,
526 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
527 		/* reserve room for the HTC header */
528 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
529 	if (!htt_msg) {
530 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
531 		goto fail0;
532 	}
533 
534 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
535 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
536 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
537 
538 	switch (hal_ring_type) {
539 	case RXDMA_BUF:
540 #ifdef QCA_HOST2FW_RXBUF_RING
541 		if (srng_params.ring_id ==
542 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
543 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
544 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
545 			htt_ring_type = HTT_SW_TO_SW_RING;
546 #ifdef IPA_OFFLOAD
547 		} else if (srng_params.ring_id ==
548 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 +
549 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
550 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
551 			htt_ring_type = HTT_SW_TO_SW_RING;
552 #ifdef IPA_WDI3_VLAN_SUPPORT
553 		} else if (srng_params.ring_id ==
554 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF3 +
555 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
556 			htt_ring_id = HTT_HOST3_TO_FW_RXBUF_RING;
557 			htt_ring_type = HTT_SW_TO_SW_RING;
558 #endif
559 #endif
560 #else
561 		if (srng_params.ring_id ==
562 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
563 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
564 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
565 			htt_ring_type = HTT_SW_TO_HW_RING;
566 #endif
567 		} else if (srng_params.ring_id ==
568 #ifdef IPA_OFFLOAD
569 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
570 #else
571 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
572 #endif
573 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
574 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
575 			htt_ring_type = HTT_SW_TO_HW_RING;
576 		} else {
577 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
578 				   "%s: Ring %d currently not supported",
579 				   __func__, srng_params.ring_id);
580 			goto fail1;
581 		}
582 
583 		break;
584 	case RXDMA_MONITOR_BUF:
585 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
586 							 RXDMA_MONITOR_BUF);
587 		htt_ring_type = HTT_SW_TO_HW_RING;
588 		break;
589 	case RXDMA_MONITOR_STATUS:
590 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
591 		htt_ring_type = HTT_SW_TO_HW_RING;
592 		break;
593 	case RXDMA_MONITOR_DST:
594 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
595 							 RXDMA_MONITOR_DST);
596 		htt_ring_type = HTT_HW_TO_SW_RING;
597 		break;
598 	case RXDMA_MONITOR_DESC:
599 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
600 		htt_ring_type = HTT_SW_TO_HW_RING;
601 		break;
602 	case RXDMA_DST:
603 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
604 		htt_ring_type = HTT_HW_TO_SW_RING;
605 		break;
606 	case TX_MONITOR_BUF:
607 		htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
608 		htt_ring_type = HTT_SW_TO_HW_RING;
609 		break;
610 	case TX_MONITOR_DST:
611 		htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
612 		htt_ring_type = HTT_HW_TO_SW_RING;
613 		break;
614 
615 	default:
616 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
617 			"%s: Ring currently not supported", __func__);
618 			goto fail1;
619 	}
620 
621 	dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
622 		hal_ring_type, srng_params.ring_id, htt_ring_id,
623 		(uint64_t)hp_addr,
624 		(uint64_t)tp_addr);
625 	/*
626 	 * Set the length of the message.
627 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
628 	 * separately during the below call to qdf_nbuf_push_head.
629 	 * The contribution from the HTC header is added separately inside HTC.
630 	 */
631 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
632 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
633 			"%s: Failed to expand head for SRING_SETUP msg",
634 			__func__);
635 		return QDF_STATUS_E_FAILURE;
636 	}
637 
638 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
639 
640 	/* rewind beyond alignment pad to get to the HTC header reserved area */
641 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
642 
643 	/* word 0 */
644 	*msg_word = 0;
645 	htt_logger_bufp = (uint8_t *)msg_word;
646 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
647 	target_pdev_id =
648 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
649 
650 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
651 			(htt_ring_type == HTT_HW_TO_SW_RING))
652 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
653 	else
654 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
655 
656 	dp_info("mac_id %d", mac_id);
657 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
658 	/* TODO: Discuss with FW on changing this to unique ID and using
659 	 * htt_ring_type to send the type of ring
660 	 */
661 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
662 
663 	/* word 1 */
664 	msg_word++;
665 	*msg_word = 0;
666 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
667 		srng_params.ring_base_paddr & 0xffffffff);
668 
669 	/* word 2 */
670 	msg_word++;
671 	*msg_word = 0;
672 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
673 		(uint64_t)srng_params.ring_base_paddr >> 32);
674 
675 	/* word 3 */
676 	msg_word++;
677 	*msg_word = 0;
678 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
679 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
680 		(ring_entry_size * srng_params.num_entries));
681 	dp_info("entry_size %d", ring_entry_size);
682 	dp_info("num_entries %d", srng_params.num_entries);
683 	dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
684 	if (htt_ring_type == HTT_SW_TO_HW_RING)
685 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
686 						*msg_word, 1);
687 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
688 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
689 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
690 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
691 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
692 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
693 
694 	/* word 4 */
695 	msg_word++;
696 	*msg_word = 0;
697 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
698 		hp_addr & 0xffffffff);
699 
700 	/* word 5 */
701 	msg_word++;
702 	*msg_word = 0;
703 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
704 		(uint64_t)hp_addr >> 32);
705 
706 	/* word 6 */
707 	msg_word++;
708 	*msg_word = 0;
709 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
710 		tp_addr & 0xffffffff);
711 
712 	/* word 7 */
713 	msg_word++;
714 	*msg_word = 0;
715 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
716 		(uint64_t)tp_addr >> 32);
717 
718 	/* word 8 */
719 	msg_word++;
720 	*msg_word = 0;
721 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
722 		srng_params.msi_addr & 0xffffffff);
723 
724 	/* word 9 */
725 	msg_word++;
726 	*msg_word = 0;
727 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
728 		(uint64_t)(srng_params.msi_addr) >> 32);
729 
730 	/* word 10 */
731 	msg_word++;
732 	*msg_word = 0;
733 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
734 		qdf_cpu_to_le32(srng_params.msi_data));
735 
736 	/* word 11 */
737 	msg_word++;
738 	*msg_word = 0;
739 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
740 		srng_params.intr_batch_cntr_thres_entries *
741 		ring_entry_size);
742 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
743 		srng_params.intr_timer_thres_us >> 3);
744 
745 	/* word 12 */
746 	msg_word++;
747 	*msg_word = 0;
748 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
749 		/* TODO: Setting low threshold to 1/8th of ring size - see
750 		 * if this needs to be configurable
751 		 */
752 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
753 			srng_params.low_threshold);
754 	}
755 	/* "response_required" field should be set if a HTT response message is
756 	 * required after setting up the ring.
757 	 */
758 	pkt = htt_htc_pkt_alloc(soc);
759 	if (!pkt) {
760 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
761 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
762 		goto fail1;
763 	}
764 
765 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
766 
767 	SET_HTC_PACKET_INFO_TX(
768 		&pkt->htc_pkt,
769 		dp_htt_h2t_send_complete_free_netbuf,
770 		qdf_nbuf_data(htt_msg),
771 		qdf_nbuf_len(htt_msg),
772 		soc->htc_endpoint,
773 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
774 
775 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
776 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
777 				     htt_logger_bufp);
778 
779 	if (status != QDF_STATUS_SUCCESS) {
780 		qdf_nbuf_free(htt_msg);
781 		htt_htc_pkt_free(soc, pkt);
782 	}
783 
784 	return status;
785 
786 fail1:
787 	qdf_nbuf_free(htt_msg);
788 fail0:
789 	return QDF_STATUS_E_FAILURE;
790 }
791 
792 qdf_export_symbol(htt_srng_setup);
793 
794 #ifdef QCA_SUPPORT_FULL_MON
795 /**
796  * htt_h2t_full_mon_cfg() - Send full monitor configuarion msg to FW
797  *
798  * @htt_soc: HTT Soc handle
799  * @pdev_id: Radio id
800  * @dp_full_mon_config: enabled/disable configuration
801  *
802  * Return: Success when HTT message is sent, error on failure
803  */
804 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
805 			 uint8_t pdev_id,
806 			 enum dp_full_mon_config config)
807 {
808 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
809 	struct dp_htt_htc_pkt *pkt;
810 	qdf_nbuf_t htt_msg;
811 	uint32_t *msg_word;
812 	uint8_t *htt_logger_bufp;
813 
814 	htt_msg = qdf_nbuf_alloc(soc->osdev,
815 				 HTT_MSG_BUF_SIZE(
816 				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
817 				 /* reserve room for the HTC header */
818 				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
819 				 4,
820 				 TRUE);
821 	if (!htt_msg)
822 		return QDF_STATUS_E_FAILURE;
823 
824 	/*
825 	 * Set the length of the message.
826 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
827 	 * separately during the below call to qdf_nbuf_push_head.
828 	 * The contribution from the HTC header is added separately inside HTC.
829 	 */
830 	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
831 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
832 			  "%s: Failed to expand head for RX Ring Cfg msg",
833 			  __func__);
834 		goto fail1;
835 	}
836 
837 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
838 
839 	/* rewind beyond alignment pad to get to the HTC header reserved area */
840 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
841 
842 	/* word 0 */
843 	*msg_word = 0;
844 	htt_logger_bufp = (uint8_t *)msg_word;
845 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
846 	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
847 			*msg_word, DP_SW2HW_MACID(pdev_id));
848 
849 	msg_word++;
850 	*msg_word = 0;
851 	/* word 1 */
852 	if (config == DP_FULL_MON_ENABLE) {
853 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
854 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
855 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
856 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
857 	} else if (config == DP_FULL_MON_DISABLE) {
858 		/* As per MAC team's suggestion, While disbaling full monitor
859 		 * mode, Set 'en' bit to true in full monitor mode register.
860 		 */
861 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
862 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
863 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
864 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
865 	}
866 
867 	pkt = htt_htc_pkt_alloc(soc);
868 	if (!pkt) {
869 		qdf_err("HTC packet allocation failed");
870 		goto fail1;
871 	}
872 
873 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
874 
875 	SET_HTC_PACKET_INFO_TX(
876 		&pkt->htc_pkt,
877 		dp_htt_h2t_send_complete_free_netbuf,
878 		qdf_nbuf_data(htt_msg),
879 		qdf_nbuf_len(htt_msg),
880 		soc->htc_endpoint,
881 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
882 
883 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
884 	qdf_debug("config: %d", config);
885 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
886 			    htt_logger_bufp);
887 	return QDF_STATUS_SUCCESS;
888 fail1:
889 	qdf_nbuf_free(htt_msg);
890 	return QDF_STATUS_E_FAILURE;
891 }
892 
893 qdf_export_symbol(htt_h2t_full_mon_cfg);
894 #else
/* Full monitor mode not compiled in: nothing to configure, report success */
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	return 0;
}
901 
902 qdf_export_symbol(htt_h2t_full_mon_cfg);
903 #endif
904 
905 #ifdef QCA_UNDECODED_METADATA_SUPPORT
/*
 * dp_mon_rx_enable_phy_errors() - fill the PHY error filter words of an
 *	RX ring selection cfg message
 * @msg_word: message word holding the FP PHY error fields; the two
 *	      words that follow it receive the PHY error masks
 * @htt_tlv_filter: Rx SRNG TLV and filter setting
 *
 * Does nothing unless phy_err_filter_valid is set. msg_word is only
 * advanced locally (passed by value), so the caller's pointer is
 * unchanged.
 */
static inline void
dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
	if (htt_tlv_filter->phy_err_filter_valid) {
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_SET
			(*msg_word, htt_tlv_filter->fp_phy_err);
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_SRC_SET
			(*msg_word, htt_tlv_filter->fp_phy_err_buf_src);
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_DEST_SET
			(*msg_word, htt_tlv_filter->fp_phy_err_buf_dest);

		/* word 12*/
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_SET
			(*msg_word, htt_tlv_filter->phy_err_mask);

		/* word 13*/
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_CONT_SET
			(*msg_word, htt_tlv_filter->phy_err_mask_cont);
	}
}
931 #else
/* Undecoded-metadata support not compiled in: PHY error config is a no-op */
static inline void
dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
}
937 #endif
938 
939 /*
940  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
941  * config message to target
942  * @htt_soc:	HTT SOC handle
943  * @pdev_id:	WIN- PDEV Id, MCL- mac id
944  * @hal_srng:	Opaque HAL SRNG pointer
945  * @hal_ring_type:	SRNG ring type
946  * @ring_buf_size:	SRNG buffer size
947  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
948  * Return: 0 on success; error code on failure
949  */
950 int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
951 			hal_ring_handle_t hal_ring_hdl,
952 			int hal_ring_type, int ring_buf_size,
953 			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
954 {
955 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
956 	struct dp_htt_htc_pkt *pkt;
957 	qdf_nbuf_t htt_msg;
958 	uint32_t *msg_word;
959 	uint32_t *msg_word_data;
960 	struct hal_srng_params srng_params;
961 	uint32_t htt_ring_type, htt_ring_id;
962 	uint32_t tlv_filter;
963 	uint8_t *htt_logger_bufp;
964 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
965 	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
966 	int target_pdev_id;
967 	QDF_STATUS status;
968 
969 	htt_msg = qdf_nbuf_alloc(soc->osdev,
970 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
971 	/* reserve room for the HTC header */
972 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
973 	if (!htt_msg) {
974 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
975 		goto fail0;
976 	}
977 
978 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
979 
980 	switch (hal_ring_type) {
981 	case RXDMA_BUF:
982 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
983 		htt_ring_type = HTT_SW_TO_HW_RING;
984 		break;
985 	case RXDMA_MONITOR_BUF:
986 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
987 							 RXDMA_MONITOR_BUF);
988 		htt_ring_type = HTT_SW_TO_HW_RING;
989 		break;
990 	case RXDMA_MONITOR_STATUS:
991 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
992 		htt_ring_type = HTT_SW_TO_HW_RING;
993 		break;
994 	case RXDMA_MONITOR_DST:
995 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
996 							 RXDMA_MONITOR_DST);
997 		htt_ring_type = HTT_HW_TO_SW_RING;
998 		break;
999 	case RXDMA_MONITOR_DESC:
1000 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
1001 		htt_ring_type = HTT_SW_TO_HW_RING;
1002 		break;
1003 	case RXDMA_DST:
1004 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
1005 		htt_ring_type = HTT_HW_TO_SW_RING;
1006 		break;
1007 
1008 	default:
1009 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1010 			"%s: Ring currently not supported", __func__);
1011 		goto fail1;
1012 	}
1013 
1014 	dp_info("ring_type %d ring_id %d htt_ring_id %d",
1015 		hal_ring_type, srng_params.ring_id, htt_ring_id);
1016 
1017 	/*
1018 	 * Set the length of the message.
1019 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1020 	 * separately during the below call to qdf_nbuf_push_head.
1021 	 * The contribution from the HTC header is added separately inside HTC.
1022 	 */
1023 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
1024 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1025 			"%s: Failed to expand head for RX Ring Cfg msg",
1026 			__func__);
1027 		goto fail1; /* failure */
1028 	}
1029 
1030 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1031 
1032 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1033 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1034 
1035 	/* word 0 */
1036 	htt_logger_bufp = (uint8_t *)msg_word;
1037 	*msg_word = 0;
1038 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
1039 
1040 	/* applicable only for post Li */
1041 	dp_rx_mon_enable(soc->dp_soc, msg_word, htt_tlv_filter);
1042 
1043 	/*
1044 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
1045 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
1046 	 */
1047 	target_pdev_id =
1048 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);
1049 
1050 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
1051 			htt_ring_type == HTT_SW_TO_HW_RING ||
1052 			htt_ring_type == HTT_HW_TO_SW_RING)
1053 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
1054 						      target_pdev_id);
1055 
1056 	/* TODO: Discuss with FW on changing this to unique ID and using
1057 	 * htt_ring_type to send the type of ring
1058 	 */
1059 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
1060 
1061 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
1062 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
1063 
1064 	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
1065 						htt_tlv_filter->offset_valid);
1066 
1067 	if (mon_drop_th > 0)
1068 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1069 								   1);
1070 	else
1071 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1072 								   0);
1073 
1074 	/* word 1 */
1075 	msg_word++;
1076 	*msg_word = 0;
1077 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
1078 		ring_buf_size);
1079 
1080 	dp_mon_rx_packet_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
1081 	dp_mon_rx_hdr_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
1082 
1083 	/* word 2 */
1084 	msg_word++;
1085 	*msg_word = 0;
1086 
1087 	if (htt_tlv_filter->enable_fp) {
1088 		/* TYPE: MGMT */
1089 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1090 			FP, MGMT, 0000,
1091 			(htt_tlv_filter->fp_mgmt_filter &
1092 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1093 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1094 			FP, MGMT, 0001,
1095 			(htt_tlv_filter->fp_mgmt_filter &
1096 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1097 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1098 			FP, MGMT, 0010,
1099 			(htt_tlv_filter->fp_mgmt_filter &
1100 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1101 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1102 			FP, MGMT, 0011,
1103 			(htt_tlv_filter->fp_mgmt_filter &
1104 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1105 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1106 			FP, MGMT, 0100,
1107 			(htt_tlv_filter->fp_mgmt_filter &
1108 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1109 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1110 			FP, MGMT, 0101,
1111 			(htt_tlv_filter->fp_mgmt_filter &
1112 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1113 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1114 			FP, MGMT, 0110,
1115 			(htt_tlv_filter->fp_mgmt_filter &
1116 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1117 		/* reserved */
1118 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
1119 			MGMT, 0111,
1120 			(htt_tlv_filter->fp_mgmt_filter &
1121 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1122 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1123 			FP, MGMT, 1000,
1124 			(htt_tlv_filter->fp_mgmt_filter &
1125 			FILTER_MGMT_BEACON) ? 1 : 0);
1126 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1127 			FP, MGMT, 1001,
1128 			(htt_tlv_filter->fp_mgmt_filter &
1129 			FILTER_MGMT_ATIM) ? 1 : 0);
1130 	}
1131 
1132 	if (htt_tlv_filter->enable_md) {
1133 			/* TYPE: MGMT */
1134 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1135 			MD, MGMT, 0000,
1136 			(htt_tlv_filter->md_mgmt_filter &
1137 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1138 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1139 			MD, MGMT, 0001,
1140 			(htt_tlv_filter->md_mgmt_filter &
1141 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1142 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1143 			MD, MGMT, 0010,
1144 			(htt_tlv_filter->md_mgmt_filter &
1145 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1146 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1147 			MD, MGMT, 0011,
1148 			(htt_tlv_filter->md_mgmt_filter &
1149 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1150 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1151 			MD, MGMT, 0100,
1152 			(htt_tlv_filter->md_mgmt_filter &
1153 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1154 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1155 			MD, MGMT, 0101,
1156 			(htt_tlv_filter->md_mgmt_filter &
1157 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1158 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1159 			MD, MGMT, 0110,
1160 			(htt_tlv_filter->md_mgmt_filter &
1161 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1162 		/* reserved */
1163 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1164 			MGMT, 0111,
1165 			(htt_tlv_filter->md_mgmt_filter &
1166 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1167 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1168 			MD, MGMT, 1000,
1169 			(htt_tlv_filter->md_mgmt_filter &
1170 			FILTER_MGMT_BEACON) ? 1 : 0);
1171 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1172 			MD, MGMT, 1001,
1173 			(htt_tlv_filter->md_mgmt_filter &
1174 			FILTER_MGMT_ATIM) ? 1 : 0);
1175 	}
1176 
1177 	if (htt_tlv_filter->enable_mo) {
1178 		/* TYPE: MGMT */
1179 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1180 			MO, MGMT, 0000,
1181 			(htt_tlv_filter->mo_mgmt_filter &
1182 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1183 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1184 			MO, MGMT, 0001,
1185 			(htt_tlv_filter->mo_mgmt_filter &
1186 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1187 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1188 			MO, MGMT, 0010,
1189 			(htt_tlv_filter->mo_mgmt_filter &
1190 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1191 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1192 			MO, MGMT, 0011,
1193 			(htt_tlv_filter->mo_mgmt_filter &
1194 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1195 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1196 			MO, MGMT, 0100,
1197 			(htt_tlv_filter->mo_mgmt_filter &
1198 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1199 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1200 			MO, MGMT, 0101,
1201 			(htt_tlv_filter->mo_mgmt_filter &
1202 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1203 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1204 			MO, MGMT, 0110,
1205 			(htt_tlv_filter->mo_mgmt_filter &
1206 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1207 		/* reserved */
1208 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1209 			MGMT, 0111,
1210 			(htt_tlv_filter->mo_mgmt_filter &
1211 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1212 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1213 			MO, MGMT, 1000,
1214 			(htt_tlv_filter->mo_mgmt_filter &
1215 			FILTER_MGMT_BEACON) ? 1 : 0);
1216 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1217 			MO, MGMT, 1001,
1218 			(htt_tlv_filter->mo_mgmt_filter &
1219 			FILTER_MGMT_ATIM) ? 1 : 0);
1220 	}
1221 
1222 	/* word 3 */
1223 	msg_word++;
1224 	*msg_word = 0;
1225 
1226 	if (htt_tlv_filter->enable_fp) {
1227 		/* TYPE: MGMT */
1228 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1229 			FP, MGMT, 1010,
1230 			(htt_tlv_filter->fp_mgmt_filter &
1231 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1232 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1233 			FP, MGMT, 1011,
1234 			(htt_tlv_filter->fp_mgmt_filter &
1235 			FILTER_MGMT_AUTH) ? 1 : 0);
1236 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1237 			FP, MGMT, 1100,
1238 			(htt_tlv_filter->fp_mgmt_filter &
1239 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1240 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1241 			FP, MGMT, 1101,
1242 			(htt_tlv_filter->fp_mgmt_filter &
1243 			FILTER_MGMT_ACTION) ? 1 : 0);
1244 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1245 			FP, MGMT, 1110,
1246 			(htt_tlv_filter->fp_mgmt_filter &
1247 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1248 		/* reserved*/
1249 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1250 			MGMT, 1111,
1251 			(htt_tlv_filter->fp_mgmt_filter &
1252 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1253 	}
1254 
1255 	if (htt_tlv_filter->enable_md) {
1256 			/* TYPE: MGMT */
1257 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1258 			MD, MGMT, 1010,
1259 			(htt_tlv_filter->md_mgmt_filter &
1260 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1261 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1262 			MD, MGMT, 1011,
1263 			(htt_tlv_filter->md_mgmt_filter &
1264 			FILTER_MGMT_AUTH) ? 1 : 0);
1265 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1266 			MD, MGMT, 1100,
1267 			(htt_tlv_filter->md_mgmt_filter &
1268 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1269 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1270 			MD, MGMT, 1101,
1271 			(htt_tlv_filter->md_mgmt_filter &
1272 			FILTER_MGMT_ACTION) ? 1 : 0);
1273 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1274 			MD, MGMT, 1110,
1275 			(htt_tlv_filter->md_mgmt_filter &
1276 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1277 	}
1278 
1279 	if (htt_tlv_filter->enable_mo) {
1280 		/* TYPE: MGMT */
1281 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1282 			MO, MGMT, 1010,
1283 			(htt_tlv_filter->mo_mgmt_filter &
1284 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1285 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1286 			MO, MGMT, 1011,
1287 			(htt_tlv_filter->mo_mgmt_filter &
1288 			FILTER_MGMT_AUTH) ? 1 : 0);
1289 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1290 			MO, MGMT, 1100,
1291 			(htt_tlv_filter->mo_mgmt_filter &
1292 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1293 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1294 			MO, MGMT, 1101,
1295 			(htt_tlv_filter->mo_mgmt_filter &
1296 			FILTER_MGMT_ACTION) ? 1 : 0);
1297 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1298 			MO, MGMT, 1110,
1299 			(htt_tlv_filter->mo_mgmt_filter &
1300 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1301 		/* reserved*/
1302 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1303 			MGMT, 1111,
1304 			(htt_tlv_filter->mo_mgmt_filter &
1305 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1306 	}
1307 
1308 	/* word 4 */
1309 	msg_word++;
1310 	*msg_word = 0;
1311 
1312 	if (htt_tlv_filter->enable_fp) {
1313 		/* TYPE: CTRL */
1314 		/* reserved */
1315 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1316 			CTRL, 0000,
1317 			(htt_tlv_filter->fp_ctrl_filter &
1318 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1319 		/* reserved */
1320 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1321 			CTRL, 0001,
1322 			(htt_tlv_filter->fp_ctrl_filter &
1323 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1324 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1325 			CTRL, 0010,
1326 			(htt_tlv_filter->fp_ctrl_filter &
1327 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1328 		/* reserved */
1329 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1330 			CTRL, 0011,
1331 			(htt_tlv_filter->fp_ctrl_filter &
1332 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1333 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1334 			CTRL, 0100,
1335 			(htt_tlv_filter->fp_ctrl_filter &
1336 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1337 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1338 			CTRL, 0101,
1339 			(htt_tlv_filter->fp_ctrl_filter &
1340 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1341 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1342 			CTRL, 0110,
1343 			(htt_tlv_filter->fp_ctrl_filter &
1344 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1345 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1346 			CTRL, 0111,
1347 			(htt_tlv_filter->fp_ctrl_filter &
1348 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1349 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1350 			CTRL, 1000,
1351 			(htt_tlv_filter->fp_ctrl_filter &
1352 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1353 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1354 			CTRL, 1001,
1355 			(htt_tlv_filter->fp_ctrl_filter &
1356 			FILTER_CTRL_BA) ? 1 : 0);
1357 	}
1358 
1359 	if (htt_tlv_filter->enable_md) {
1360 		/* TYPE: CTRL */
1361 		/* reserved */
1362 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1363 			CTRL, 0000,
1364 			(htt_tlv_filter->md_ctrl_filter &
1365 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1366 		/* reserved */
1367 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1368 			CTRL, 0001,
1369 			(htt_tlv_filter->md_ctrl_filter &
1370 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1371 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1372 			CTRL, 0010,
1373 			(htt_tlv_filter->md_ctrl_filter &
1374 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1375 		/* reserved */
1376 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1377 			CTRL, 0011,
1378 			(htt_tlv_filter->md_ctrl_filter &
1379 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1380 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1381 			CTRL, 0100,
1382 			(htt_tlv_filter->md_ctrl_filter &
1383 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1384 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1385 			CTRL, 0101,
1386 			(htt_tlv_filter->md_ctrl_filter &
1387 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1388 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1389 			CTRL, 0110,
1390 			(htt_tlv_filter->md_ctrl_filter &
1391 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1392 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1393 			CTRL, 0111,
1394 			(htt_tlv_filter->md_ctrl_filter &
1395 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1396 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1397 			CTRL, 1000,
1398 			(htt_tlv_filter->md_ctrl_filter &
1399 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1400 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1401 			CTRL, 1001,
1402 			(htt_tlv_filter->md_ctrl_filter &
1403 			FILTER_CTRL_BA) ? 1 : 0);
1404 	}
1405 
1406 	if (htt_tlv_filter->enable_mo) {
1407 		/* TYPE: CTRL */
1408 		/* reserved */
1409 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1410 			CTRL, 0000,
1411 			(htt_tlv_filter->mo_ctrl_filter &
1412 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1413 		/* reserved */
1414 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1415 			CTRL, 0001,
1416 			(htt_tlv_filter->mo_ctrl_filter &
1417 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1418 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1419 			CTRL, 0010,
1420 			(htt_tlv_filter->mo_ctrl_filter &
1421 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1422 		/* reserved */
1423 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1424 			CTRL, 0011,
1425 			(htt_tlv_filter->mo_ctrl_filter &
1426 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1427 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1428 			CTRL, 0100,
1429 			(htt_tlv_filter->mo_ctrl_filter &
1430 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1431 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1432 			CTRL, 0101,
1433 			(htt_tlv_filter->mo_ctrl_filter &
1434 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1435 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1436 			CTRL, 0110,
1437 			(htt_tlv_filter->mo_ctrl_filter &
1438 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1439 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1440 			CTRL, 0111,
1441 			(htt_tlv_filter->mo_ctrl_filter &
1442 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1443 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1444 			CTRL, 1000,
1445 			(htt_tlv_filter->mo_ctrl_filter &
1446 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1447 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1448 			CTRL, 1001,
1449 			(htt_tlv_filter->mo_ctrl_filter &
1450 			FILTER_CTRL_BA) ? 1 : 0);
1451 	}
1452 
1453 	/* word 5 */
1454 	msg_word++;
1455 	*msg_word = 0;
1456 	if (htt_tlv_filter->enable_fp) {
1457 		/* TYPE: CTRL */
1458 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1459 			CTRL, 1010,
1460 			(htt_tlv_filter->fp_ctrl_filter &
1461 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1462 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1463 			CTRL, 1011,
1464 			(htt_tlv_filter->fp_ctrl_filter &
1465 			FILTER_CTRL_RTS) ? 1 : 0);
1466 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1467 			CTRL, 1100,
1468 			(htt_tlv_filter->fp_ctrl_filter &
1469 			FILTER_CTRL_CTS) ? 1 : 0);
1470 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1471 			CTRL, 1101,
1472 			(htt_tlv_filter->fp_ctrl_filter &
1473 			FILTER_CTRL_ACK) ? 1 : 0);
1474 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1475 			CTRL, 1110,
1476 			(htt_tlv_filter->fp_ctrl_filter &
1477 			FILTER_CTRL_CFEND) ? 1 : 0);
1478 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1479 			CTRL, 1111,
1480 			(htt_tlv_filter->fp_ctrl_filter &
1481 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1482 		/* TYPE: DATA */
1483 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1484 			DATA, MCAST,
1485 			(htt_tlv_filter->fp_data_filter &
1486 			FILTER_DATA_MCAST) ? 1 : 0);
1487 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1488 			DATA, UCAST,
1489 			(htt_tlv_filter->fp_data_filter &
1490 			FILTER_DATA_UCAST) ? 1 : 0);
1491 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1492 			DATA, NULL,
1493 			(htt_tlv_filter->fp_data_filter &
1494 			FILTER_DATA_NULL) ? 1 : 0);
1495 	}
1496 
1497 	if (htt_tlv_filter->enable_md) {
1498 		/* TYPE: CTRL */
1499 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1500 			CTRL, 1010,
1501 			(htt_tlv_filter->md_ctrl_filter &
1502 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1503 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1504 			CTRL, 1011,
1505 			(htt_tlv_filter->md_ctrl_filter &
1506 			FILTER_CTRL_RTS) ? 1 : 0);
1507 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1508 			CTRL, 1100,
1509 			(htt_tlv_filter->md_ctrl_filter &
1510 			FILTER_CTRL_CTS) ? 1 : 0);
1511 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1512 			CTRL, 1101,
1513 			(htt_tlv_filter->md_ctrl_filter &
1514 			FILTER_CTRL_ACK) ? 1 : 0);
1515 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1516 			CTRL, 1110,
1517 			(htt_tlv_filter->md_ctrl_filter &
1518 			FILTER_CTRL_CFEND) ? 1 : 0);
1519 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1520 			CTRL, 1111,
1521 			(htt_tlv_filter->md_ctrl_filter &
1522 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1523 		/* TYPE: DATA */
1524 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1525 			DATA, MCAST,
1526 			(htt_tlv_filter->md_data_filter &
1527 			FILTER_DATA_MCAST) ? 1 : 0);
1528 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1529 			DATA, UCAST,
1530 			(htt_tlv_filter->md_data_filter &
1531 			FILTER_DATA_UCAST) ? 1 : 0);
1532 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1533 			DATA, NULL,
1534 			(htt_tlv_filter->md_data_filter &
1535 			FILTER_DATA_NULL) ? 1 : 0);
1536 	}
1537 
1538 	if (htt_tlv_filter->enable_mo) {
1539 		/* TYPE: CTRL */
1540 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1541 			CTRL, 1010,
1542 			(htt_tlv_filter->mo_ctrl_filter &
1543 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1544 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1545 			CTRL, 1011,
1546 			(htt_tlv_filter->mo_ctrl_filter &
1547 			FILTER_CTRL_RTS) ? 1 : 0);
1548 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1549 			CTRL, 1100,
1550 			(htt_tlv_filter->mo_ctrl_filter &
1551 			FILTER_CTRL_CTS) ? 1 : 0);
1552 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1553 			CTRL, 1101,
1554 			(htt_tlv_filter->mo_ctrl_filter &
1555 			FILTER_CTRL_ACK) ? 1 : 0);
1556 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1557 			CTRL, 1110,
1558 			(htt_tlv_filter->mo_ctrl_filter &
1559 			FILTER_CTRL_CFEND) ? 1 : 0);
1560 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1561 			CTRL, 1111,
1562 			(htt_tlv_filter->mo_ctrl_filter &
1563 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1564 		/* TYPE: DATA */
1565 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1566 			DATA, MCAST,
1567 			(htt_tlv_filter->mo_data_filter &
1568 			FILTER_DATA_MCAST) ? 1 : 0);
1569 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1570 			DATA, UCAST,
1571 			(htt_tlv_filter->mo_data_filter &
1572 			FILTER_DATA_UCAST) ? 1 : 0);
1573 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1574 			DATA, NULL,
1575 			(htt_tlv_filter->mo_data_filter &
1576 			FILTER_DATA_NULL) ? 1 : 0);
1577 	}
1578 
1579 	/* word 6 */
1580 	msg_word++;
1581 	*msg_word = 0;
1582 	tlv_filter = 0;
1583 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1584 		htt_tlv_filter->mpdu_start);
1585 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1586 		htt_tlv_filter->msdu_start);
1587 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1588 		htt_tlv_filter->packet);
1589 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1590 		htt_tlv_filter->msdu_end);
1591 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1592 		htt_tlv_filter->mpdu_end);
1593 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1594 		htt_tlv_filter->packet_header);
1595 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1596 		htt_tlv_filter->attention);
1597 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1598 		htt_tlv_filter->ppdu_start);
1599 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1600 		htt_tlv_filter->ppdu_end);
1601 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1602 		htt_tlv_filter->ppdu_end_user_stats);
1603 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1604 		PPDU_END_USER_STATS_EXT,
1605 		htt_tlv_filter->ppdu_end_user_stats_ext);
1606 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1607 		htt_tlv_filter->ppdu_end_status_done);
1608 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START_USER_INFO,
1609 		htt_tlv_filter->ppdu_start_user_info);
1610 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1611 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1612 		 htt_tlv_filter->header_per_msdu);
1613 
1614 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1615 
1616 	msg_word_data = (uint32_t *)qdf_nbuf_data(htt_msg);
1617 	dp_info("config_data: [0x%x][0x%x][0x%x][0x%x][0x%x][0x%x][0x%x]",
1618 		msg_word_data[0], msg_word_data[1], msg_word_data[2],
1619 		msg_word_data[3], msg_word_data[4], msg_word_data[5],
1620 		msg_word_data[6]);
1621 
1622 	/* word 7 */
1623 	msg_word++;
1624 	*msg_word = 0;
1625 	if (htt_tlv_filter->offset_valid) {
1626 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1627 					htt_tlv_filter->rx_packet_offset);
1628 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1629 					htt_tlv_filter->rx_header_offset);
1630 
1631 		/* word 8 */
1632 		msg_word++;
1633 		*msg_word = 0;
1634 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1635 					htt_tlv_filter->rx_mpdu_end_offset);
1636 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1637 					htt_tlv_filter->rx_mpdu_start_offset);
1638 
1639 		/* word 9 */
1640 		msg_word++;
1641 		*msg_word = 0;
1642 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1643 					htt_tlv_filter->rx_msdu_end_offset);
1644 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1645 					htt_tlv_filter->rx_msdu_start_offset);
1646 
1647 		/* word 10 */
1648 		msg_word++;
1649 		*msg_word = 0;
1650 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1651 					htt_tlv_filter->rx_attn_offset);
1652 
1653 		/* word 11 */
1654 		msg_word++;
1655 		*msg_word = 0;
1656 	} else {
1657 		/* word 11 */
1658 		msg_word += 4;
1659 		*msg_word = 0;
1660 	}
1661 
1662 	if (mon_drop_th > 0)
1663 		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1664 								mon_drop_th);
1665 	dp_mon_rx_enable_mpdu_logging(soc->dp_soc, msg_word, htt_tlv_filter);
1666 
1667 	dp_mon_rx_enable_phy_errors(msg_word, htt_tlv_filter);
1668 
1669 	/* word 14*/
1670 	msg_word += 3;
1671 	*msg_word = 0;
1672 
1673 	dp_mon_rx_wmask_subscribe(soc->dp_soc, msg_word, htt_tlv_filter);
1674 
1675 #ifdef FW_SUPPORT_NOT_YET
1676 	/* word 17*/
1677 	msg_word += 3;
1678 	*msg_word = 0;
1679 
1680 	dp_mon_rx_enable_fpmo(soc->dp_soc, msg_word, htt_tlv_filter);
1681 #endif/* FW_SUPPORT_NOT_YET */
1682 
1683 	/* "response_required" field should be set if a HTT response message is
1684 	 * required after setting up the ring.
1685 	 */
1686 	pkt = htt_htc_pkt_alloc(soc);
1687 	if (!pkt) {
1688 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
1689 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
1690 		goto fail1;
1691 	}
1692 
1693 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1694 
1695 	SET_HTC_PACKET_INFO_TX(
1696 		&pkt->htc_pkt,
1697 		dp_htt_h2t_send_complete_free_netbuf,
1698 		qdf_nbuf_data(htt_msg),
1699 		qdf_nbuf_len(htt_msg),
1700 		soc->htc_endpoint,
1701 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1702 
1703 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1704 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
1705 				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1706 				     htt_logger_bufp);
1707 
1708 	if (status != QDF_STATUS_SUCCESS) {
1709 		qdf_nbuf_free(htt_msg);
1710 		htt_htc_pkt_free(soc, pkt);
1711 	}
1712 
1713 	return status;
1714 
1715 fail1:
1716 	qdf_nbuf_free(htt_msg);
1717 fail0:
1718 	return QDF_STATUS_E_FAILURE;
1719 }
1720 
1721 qdf_export_symbol(htt_h2t_rx_ring_cfg);
1722 
1723 #if defined(HTT_STATS_ENABLE)
1724 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1725 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1726 
1727 {
1728 	uint32_t pdev_id;
1729 	uint32_t *msg_word = NULL;
1730 	uint32_t msg_remain_len = 0;
1731 
1732 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1733 
1734 	/*COOKIE MSB*/
1735 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1736 
1737 	/* stats message length + 16 size of HTT header*/
1738 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1739 				(uint32_t)DP_EXT_MSG_LENGTH);
1740 
1741 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1742 			msg_word,  msg_remain_len,
1743 			WDI_NO_VAL, pdev_id);
1744 
1745 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1746 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1747 	}
1748 	/* Need to be freed here as WDI handler will
1749 	 * make a copy of pkt to send data to application
1750 	 */
1751 	qdf_nbuf_free(htt_msg);
1752 	return QDF_STATUS_SUCCESS;
1753 }
1754 #else
/* Stub: HTT stats disabled; report unsupported so the caller does not
 * treat the message as consumed and continues normal TLV processing.
 */
static inline QDF_STATUS
dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	return QDF_STATUS_E_NOSUPPORT;
}
1761 #endif
1762 
1763 #ifdef HTT_STATS_DEBUGFS_SUPPORT
1764 /* dp_send_htt_stats_dbgfs_msg() - Function to send htt data to upper layer.
1765  * @pdev: dp pdev handle
1766  * @msg_word: HTT msg
1767  * @msg_len: Length of HTT msg sent
1768  *
1769  * Return: none
1770  */
1771 static inline void
1772 dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
1773 			    uint32_t msg_len)
1774 {
1775 	struct htt_dbgfs_cfg dbgfs_cfg;
1776 	int done = 0;
1777 
1778 	/* send 5th word of HTT msg to upper layer */
1779 	dbgfs_cfg.msg_word = (msg_word + 4);
1780 	dbgfs_cfg.m = pdev->dbgfs_cfg->m;
1781 
1782 	/* stats message length + 16 size of HTT header*/
1783 	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);
1784 
1785 	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
1786 		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
1787 							     (msg_len - HTT_HEADER_LEN));
1788 
1789 	/* Get TLV Done bit from 4th msg word */
1790 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1791 	if (done) {
1792 		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
1793 			dp_htt_err("%pK: Failed to set event for debugfs htt stats"
1794 				   , pdev->soc);
1795 	}
1796 }
1797 #else
/* Stub used when HTT_STATS_DEBUGFS_SUPPORT is not compiled in. */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
}
1803 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
1804 
1805 #ifdef WLAN_SYSFS_DP_STATS
1806 /* dp_htt_stats_sysfs_update_config() - Function to send htt data to upper layer.
1807  * @pdev: dp pdev handle
1808  *
1809  * This function sets the process id and printing mode within the sysfs config
1810  * struct. which enables DP_PRINT statements within this process to write to the
1811  * console buffer provided by the user space.
1812  *
1813  * Return: None
1814  */
1815 static inline void
1816 dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
1817 {
1818 	struct dp_soc *soc = pdev->soc;
1819 
1820 	if (!soc) {
1821 		dp_htt_err("soc is null");
1822 		return;
1823 	}
1824 
1825 	if (!soc->sysfs_config) {
1826 		dp_htt_err("soc->sysfs_config is NULL");
1827 		return;
1828 	}
1829 
1830 	/* set sysfs config parameters */
1831 	soc->sysfs_config->process_id = qdf_get_current_pid();
1832 	soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
1833 }
1834 
1835 /*
1836  * dp_htt_stats_sysfs_set_event() - Set sysfs stats event.
1837  * @soc: soc handle.
1838  * @msg_word: Pointer to htt msg word.
1839  *
1840  * @return: void
1841  */
1842 static inline void
1843 dp_htt_stats_sysfs_set_event(struct dp_soc *soc, uint32_t *msg_word)
1844 {
1845 	int done = 0;
1846 
1847 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1848 	if (done) {
1849 		if (qdf_event_set(&soc->sysfs_config->sysfs_txrx_fw_request_done))
1850 			dp_htt_err("%pK:event compl Fail to set event ",
1851 				   soc);
1852 	}
1853 }
1854 #else /* WLAN_SYSFS_DP_STATS */
/* Stub used when WLAN_SYSFS_DP_STATS is not compiled in. */
static inline void
dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
{
}
1859 
/* Stub used when WLAN_SYSFS_DP_STATS is not compiled in. */
static inline void
dp_htt_stats_sysfs_set_event(struct dp_soc *dp_soc, uint32_t *msg_word)
{
}
1864 #endif /* WLAN_SYSFS_DP_STATS */
1865 
1866 /**
1867  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1868  * @htt_stats: htt stats info
1869  *
1870  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1871  * contains sub messages which are identified by a TLV header.
1872  * In this function we will process the stream of T2H messages and read all the
1873  * TLV contained in the message.
1874  *
1875  * THe following cases have been taken care of
1876  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1877  *		In this case the buffer will contain multiple tlvs.
1878  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1879  *		Only one tlv will be contained in the HTT message and this tag
1880  *		will extend onto the next buffer.
1881  * Case 3: When the buffer is the continuation of the previous message
1882  * Case 4: tlv length is 0. which will indicate the end of message
1883  *
1884  * return: void
1885  */
1886 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
1887 					struct dp_soc *soc)
1888 {
1889 	htt_tlv_tag_t tlv_type = 0xff;
1890 	qdf_nbuf_t htt_msg = NULL;
1891 	uint32_t *msg_word;
1892 	uint8_t *tlv_buf_head = NULL;
1893 	uint8_t *tlv_buf_tail = NULL;
1894 	uint32_t msg_remain_len = 0;
1895 	uint32_t tlv_remain_len = 0;
1896 	uint32_t *tlv_start;
1897 	int cookie_val = 0;
1898 	int cookie_msb = 0;
1899 	int pdev_id;
1900 	bool copy_stats = false;
1901 	struct dp_pdev *pdev;
1902 
1903 	/* Process node in the HTT message queue */
1904 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1905 		!= NULL) {
1906 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1907 		cookie_val = *(msg_word + 1);
1908 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
1909 					*(msg_word +
1910 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
1911 
1912 		if (cookie_val) {
1913 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
1914 					== QDF_STATUS_SUCCESS) {
1915 				continue;
1916 			}
1917 		}
1918 
1919 		cookie_msb = *(msg_word + 2);
1920 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1921 		pdev = soc->pdev_list[pdev_id];
1922 
1923 		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
1924 			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
1925 						    htt_stats->msg_len);
1926 			qdf_nbuf_free(htt_msg);
1927 			continue;
1928 		}
1929 
1930 		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
1931 			dp_htt_stats_sysfs_update_config(pdev);
1932 
1933 		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
1934 			copy_stats = true;
1935 
1936 		/* read 5th word */
1937 		msg_word = msg_word + 4;
1938 		msg_remain_len = qdf_min(htt_stats->msg_len,
1939 				(uint32_t) DP_EXT_MSG_LENGTH);
1940 		/* Keep processing the node till node length is 0 */
1941 		while (msg_remain_len) {
1942 			/*
1943 			 * if message is not a continuation of previous message
1944 			 * read the tlv type and tlv length
1945 			 */
1946 			if (!tlv_buf_head) {
1947 				tlv_type = HTT_STATS_TLV_TAG_GET(
1948 						*msg_word);
1949 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
1950 						*msg_word);
1951 			}
1952 
1953 			if (tlv_remain_len == 0) {
1954 				msg_remain_len = 0;
1955 
1956 				if (tlv_buf_head) {
1957 					qdf_mem_free(tlv_buf_head);
1958 					tlv_buf_head = NULL;
1959 					tlv_buf_tail = NULL;
1960 				}
1961 
1962 				goto error;
1963 			}
1964 
1965 			if (!tlv_buf_head)
1966 				tlv_remain_len += HTT_TLV_HDR_LEN;
1967 
1968 			if ((tlv_remain_len <= msg_remain_len)) {
1969 				/* Case 3 */
1970 				if (tlv_buf_head) {
1971 					qdf_mem_copy(tlv_buf_tail,
1972 							(uint8_t *)msg_word,
1973 							tlv_remain_len);
1974 					tlv_start = (uint32_t *)tlv_buf_head;
1975 				} else {
1976 					/* Case 1 */
1977 					tlv_start = msg_word;
1978 				}
1979 
1980 				if (copy_stats)
1981 					dp_htt_stats_copy_tag(pdev,
1982 							      tlv_type,
1983 							      tlv_start);
1984 				else
1985 					dp_htt_stats_print_tag(pdev,
1986 							       tlv_type,
1987 							       tlv_start);
1988 
1989 				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
1990 				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
1991 					dp_peer_update_inactive_time(pdev,
1992 								     tlv_type,
1993 								     tlv_start);
1994 
1995 				msg_remain_len -= tlv_remain_len;
1996 
1997 				msg_word = (uint32_t *)
1998 					(((uint8_t *)msg_word) +
1999 					tlv_remain_len);
2000 
2001 				tlv_remain_len = 0;
2002 
2003 				if (tlv_buf_head) {
2004 					qdf_mem_free(tlv_buf_head);
2005 					tlv_buf_head = NULL;
2006 					tlv_buf_tail = NULL;
2007 				}
2008 
2009 			} else { /* tlv_remain_len > msg_remain_len */
2010 				/* Case 2 & 3 */
2011 				if (!tlv_buf_head) {
2012 					tlv_buf_head = qdf_mem_malloc(
2013 							tlv_remain_len);
2014 
2015 					if (!tlv_buf_head) {
2016 						QDF_TRACE(QDF_MODULE_ID_TXRX,
2017 								QDF_TRACE_LEVEL_ERROR,
2018 								"Alloc failed");
2019 						goto error;
2020 					}
2021 
2022 					tlv_buf_tail = tlv_buf_head;
2023 				}
2024 
2025 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
2026 						msg_remain_len);
2027 				tlv_remain_len -= msg_remain_len;
2028 				tlv_buf_tail += msg_remain_len;
2029 			}
2030 		}
2031 
2032 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
2033 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
2034 		}
2035 
2036 		/* indicate event completion in case the event is done */
2037 		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
2038 			dp_htt_stats_sysfs_set_event(soc, msg_word);
2039 
2040 		qdf_nbuf_free(htt_msg);
2041 	}
2042 	return;
2043 
2044 error:
2045 	qdf_nbuf_free(htt_msg);
2046 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
2047 			!= NULL)
2048 		qdf_nbuf_free(htt_msg);
2049 }
2050 
/**
 * htt_t2h_stats_handler() - Deferred worker that processes one HTT EXT stats
 * response
 * @context: opaque pointer to the dp_soc
 *
 * Pulls buffers belonging to one completed stats response (up to and
 * including the buffer carrying the DONE bit) from soc->htt_stats.msg under
 * the stats lock, reschedules itself if further completed responses remain,
 * then parses the collected TLV stream.
 *
 * Return: void
 */
void htt_t2h_stats_handler(void *context)
{
	struct dp_soc *soc = (struct dp_soc *)context;
	struct htt_stats_context htt_stats;
	uint32_t *msg_word;
	qdf_nbuf_t htt_msg = NULL;
	uint8_t done;
	uint32_t rem_stats;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc is NULL");
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc: 0x%pK, init_done: %d", soc,
			  qdf_atomic_read(&soc->cmn_init_done));
		return;
	}

	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
	qdf_nbuf_queue_init(&htt_stats.msg);

	/* pull one completed stats from soc->htt_stats_msg and process */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	if (!soc->htt_stats.num_stats) {
		qdf_spin_unlock_bh(&soc->htt_stats.lock);
		return;
	}
	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
		/*
		 * Done bit signifies that this is the last T2H buffer in the
		 * stream of HTT EXT STATS message
		 */
		if (done)
			break;
	}
	rem_stats = --soc->htt_stats.num_stats;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	/* If there are more stats to process, schedule stats work again.
	 * Scheduling prior to processing ht_stats to queue with early
	 * index
	 */
	if (rem_stats)
		qdf_sched_work(0, &soc->htt_stats.work);

	dp_process_htt_stat_msg(&htt_stats, soc);
}
2106 
2107 /**
2108  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2109  * @soc: DP SOC handle
2110  * @htt_t2h_msg: HTT message nbuf
2111  *
2112  * return:void
2113  */
2114 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2115 					    qdf_nbuf_t htt_t2h_msg)
2116 {
2117 	uint8_t done;
2118 	qdf_nbuf_t msg_copy;
2119 	uint32_t *msg_word;
2120 
2121 	msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
2122 	msg_word = msg_word + 3;
2123 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2124 
2125 	/*
2126 	 * HTT EXT stats response comes as stream of TLVs which span over
2127 	 * multiple T2H messages.
2128 	 * The first message will carry length of the response.
2129 	 * For rest of the messages length will be zero.
2130 	 *
2131 	 * Clone the T2H message buffer and store it in a list to process
2132 	 * it later.
2133 	 *
2134 	 * The original T2H message buffers gets freed in the T2H HTT event
2135 	 * handler
2136 	 */
2137 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2138 
2139 	if (!msg_copy) {
2140 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2141 			  "T2H messge clone failed for HTT EXT STATS");
2142 		goto error;
2143 	}
2144 
2145 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2146 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2147 	/*
2148 	 * Done bit signifies that this is the last T2H buffer in the stream of
2149 	 * HTT EXT STATS message
2150 	 */
2151 	if (done) {
2152 		soc->htt_stats.num_stats++;
2153 		qdf_sched_work(0, &soc->htt_stats.work);
2154 	}
2155 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2156 
2157 	return;
2158 
2159 error:
2160 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2161 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2162 			!= NULL) {
2163 		qdf_nbuf_free(msg_copy);
2164 	}
2165 	soc->htt_stats.num_stats = 0;
2166 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2167 	return;
2168 }
2169 
2170 /*
2171  * htt_soc_attach_target() - SOC level HTT setup
2172  * @htt_soc:	HTT SOC handle
2173  *
2174  * Return: 0 on success; error code on failure
2175  */
2176 int htt_soc_attach_target(struct htt_soc *htt_soc)
2177 {
2178 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
2179 
2180 	return htt_h2t_ver_req_msg(soc);
2181 }
2182 
/**
 * htt_set_htc_handle() - Store the HTC handle in the HTT SOC
 * @htt_soc: HTT SOC handle
 * @htc_soc: HTC handle to store
 */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}
2187 
/**
 * htt_get_htc_handle() - Retrieve the HTC handle stored in the HTT SOC
 * @htt_soc: HTT SOC handle
 *
 * Return: the stored HTC handle
 */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
2192 
2193 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
2194 {
2195 	int i;
2196 	int j;
2197 	int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
2198 	struct htt_soc *htt_soc = NULL;
2199 
2200 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
2201 	if (!htt_soc) {
2202 		dp_err("HTT attach failed");
2203 		return NULL;
2204 	}
2205 
2206 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2207 		htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
2208 		if (!htt_soc->pdevid_tt[i].umac_ttt)
2209 			break;
2210 		qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
2211 		htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
2212 		if (!htt_soc->pdevid_tt[i].lmac_ttt) {
2213 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
2214 			break;
2215 		}
2216 		qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
2217 	}
2218 	if (i != MAX_PDEV_CNT) {
2219 		for (j = 0; j < i; j++) {
2220 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt);
2221 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt);
2222 		}
2223 		qdf_mem_free(htt_soc);
2224 		return NULL;
2225 	}
2226 
2227 	htt_soc->dp_soc = soc;
2228 	htt_soc->htc_soc = htc_handle;
2229 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
2230 
2231 	return htt_soc;
2232 }
2233 
#if defined(WDI_EVENT_ENABLE) && \
	!defined(REMOVE_PKT_LOG)
/*
 * dp_pktlog_msg_handler() - Pktlog msg handler
 * @htt_soc:	 HTT SOC handle
 * @msg_word:    Pointer to payload
 *
 * Maps the target pdev id in the message header to the host pdev id and
 * forwards the pktlog payload (which starts at the next word) to all
 * WDI_EVENT_OFFLOAD_ALL subscribers.
 *
 * Return: None
 */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint8_t target_pdev_id;
	uint32_t *pl_hdr;

	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	pl_hdr = (msg_word + 1);
	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
		pdev_id);
}
#else
/* Stub when pktlog support is compiled out */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
#endif
2266 
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/*
 * dp_vdev_txrx_hw_stats_handler - Handle vdev stats received from FW
 * @soc - htt soc handle
 * @ msg_word - buffer containing stats
 *
 * Walks the TLV stream in the periodic vdev stats indication. Per TLV:
 * soc-level drop counters update the soc stats; per-vdev TLVs aggregate
 * rx/tx packet and byte counters (tx completion count includes success,
 * retry, drop, age-out and tqm-bypass buckets) into the vdev stats.
 *
 * Return: void
 */
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{
	struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
	uint8_t pdev_id;
	uint8_t vdev_id;
	uint8_t target_pdev_id;
	uint16_t payload_size;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	uint8_t *tlv_buf;
	uint32_t *tlv_buf_temp;
	uint32_t *tag_buf;
	htt_tlv_tag_t tlv_type;
	uint16_t tlv_length;
	uint64_t pkt_count = 0;
	uint64_t byte_count = 0;
	uint64_t soc_drop_cnt = 0;
	struct cdp_pkt_info tx_comp = { 0 };
	struct cdp_pkt_info tx_failed =  { 0 };

	target_pdev_id =
		HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(dpsoc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT)
		return;

	pdev = dpsoc->pdev_list[pdev_id];
	if (!pdev) {
		dp_err("PDEV is NULL for pdev_id:%d", pdev_id);
		return;
	}

	payload_size =
	HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PAYLOAD_SIZE_GET(*msg_word);

	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   (void *)msg_word, payload_size + 16);

	/* Adjust msg_word to point to the first TLV in buffer */
	msg_word = msg_word + 4;

	/* Parse the received buffer till payload size reaches 0 */
	while (payload_size > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_buf_temp = msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);

		/* Add header size to tlv length*/
		tlv_length += 4;

		switch (tlv_type) {
		case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
		{
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(SOC_DROP_CNT);
			soc_drop_cnt = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(dpsoc, tx.tqm_drop_no_peer, soc_drop_cnt);
			break;
		}
		case HTT_STATS_VDEV_TXRX_STATS_HW_STATS_TAG:
		{
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(VDEV_ID);
			vdev_id = (uint8_t)(*tag_buf);
			vdev = dp_vdev_get_ref_by_id(dpsoc, vdev_id,
						     DP_MOD_ID_HTT);

			/* unknown/stale vdev: skip this TLV entirely */
			if (!vdev)
				goto invalid_vdev;

			/* Extract received packet count from buffer */
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(RX_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.num, pkt_count);

			/* Extract received packet byte count from buffer */
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(RX_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.bytes, byte_count);

			/* Extract tx success packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num = pkt_count;

			/* Extract tx success packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes = byte_count;

			/* Extract tx retry packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num = pkt_count;

			/* Extract tx retry packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes = byte_count;

			/* Extract tx drop packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_DROP_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num += pkt_count;

			/* Extract tx drop packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_DROP_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes += byte_count;

			/* Extract tx age-out packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num += pkt_count;

			/* Extract tx age-out packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes += byte_count;

			/* Extract tqm bypass packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;

			/* Extract tx bypass packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;

			DP_STATS_UPD(vdev, tx.comp_pkt.num, tx_comp.num);
			DP_STATS_UPD(vdev, tx.comp_pkt.bytes, tx_comp.bytes);

			DP_STATS_UPD(vdev, tx.tx_failed, tx_failed.num);

			dp_vdev_unref_delete(dpsoc, vdev, DP_MOD_ID_HTT);
			break;
		}
		default:
			/* unexpected TLV tag: assert, then skip past it */
			qdf_assert(0);
		}
invalid_vdev:
		/* advance to the next TLV */
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		payload_size -= tlv_length;
	}
}
#else
/* Stub when vdev HW-offload stats support is compiled out */
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{}
#endif
2448 
#ifdef CONFIG_SAWF_DEF_QUEUES
/* Forward the SAWF default-queues map report configuration message */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_def_queues_map_report_conf(soc, msg_word, htt_t2h_msg);
}
#else
/* Stub when SAWF default-queues support is compiled out */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{}
#endif
2462 
#ifdef CONFIG_SAWF
/*
 * dp_sawf_msduq_map() - Msdu queue creation information received
 * from target
 * @soc: soc handle.
 * @msg_word: Pointer to htt msg word.
 * @htt_t2h_msg: HTT message nbuf
 *
 * Thin wrapper keeping the CONFIG_SAWF dependency in one place.
 *
 * @return: void
 */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
}

/*
 * dp_sawf_mpdu_stats_handler() - HTT message handler for MPDU stats
 * @soc: soc handle.
 * @htt_t2h_msg: HTT message nbuf
 *
 * @return: void
 */
static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
				       qdf_nbuf_t htt_t2h_msg)
{
	dp_sawf_htt_mpdu_stats_handler(soc, htt_t2h_msg);
}
#else
/* Stubs when SAWF support is compiled out */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{}

static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
				       qdf_nbuf_t htt_t2h_msg)
{}
#endif
2500 
2501 /*
2502  * time_allow_print() - time allow print
2503  * @htt_ring_tt:	ringi_id array of timestamps
2504  * @ring_id:		ring_id (index)
2505  *
2506  * Return: 1 for successfully saving timestamp in array
2507  *	and 0 for timestamp falling within 2 seconds after last one
2508  */
2509 static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
2510 {
2511 	unsigned long tstamp;
2512 	unsigned long delta;
2513 
2514 	tstamp = qdf_get_system_timestamp();
2515 
2516 	if (!htt_ring_tt)
2517 		return 0; //unable to print backpressure messages
2518 
2519 	if (htt_ring_tt[ring_id] == -1) {
2520 		htt_ring_tt[ring_id] = tstamp;
2521 		return 1;
2522 	}
2523 	delta = tstamp - htt_ring_tt[ring_id];
2524 	if (delta >= 2000) {
2525 		htt_ring_tt[ring_id] = tstamp;
2526 		return 1;
2527 	}
2528 
2529 	return 0;
2530 }
2531 
/*
 * dp_htt_alert_print() - Print one backpressure alert (two log lines)
 * @msg_type: HTT T2H message type that reported the event
 * @pdev: DP pdev the ring belongs to
 * @ring_id: index of the backpressured ring
 * @hp_idx: ring head pointer index
 * @tp_idx: ring tail pointer index
 * @bkp_time: backpressure duration in ms
 * @ring_stype: human-readable ring type string
 *
 * Return: None
 */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       struct dp_pdev *pdev, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time, char *ring_stype)
{
	dp_alert("seq_num %u msg_type: %d pdev_id: %d ring_type: %s ",
		 pdev->bkp_stats.seq_num, msg_type, pdev->pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
}
2542 
2543 /**
2544  * dp_get_srng_ring_state_from_hal(): Get hal level ring stats
2545  * @soc: DP_SOC handle
2546  * @srng: DP_SRNG handle
2547  * @ring_type: srng src/dst ring
2548  *
2549  * Return: void
2550  */
2551 static QDF_STATUS
2552 dp_get_srng_ring_state_from_hal(struct dp_soc *soc,
2553 				struct dp_pdev *pdev,
2554 				struct dp_srng *srng,
2555 				enum hal_ring_type ring_type,
2556 				struct dp_srng_ring_state *state)
2557 {
2558 	struct hal_soc *hal_soc;
2559 
2560 	if (!soc || !srng || !srng->hal_srng || !state)
2561 		return QDF_STATUS_E_INVAL;
2562 
2563 	hal_soc = (struct hal_soc *)soc->hal_soc;
2564 
2565 	hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &state->sw_tail,
2566 			&state->sw_head);
2567 
2568 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &state->hw_head,
2569 			&state->hw_tail, ring_type);
2570 
2571 	state->ring_type = ring_type;
2572 
2573 	return QDF_STATUS_SUCCESS;
2574 }
2575 
#ifdef QCA_MONITOR_PKT_SUPPORT
/*
 * dp_queue_mon_ring_stats() - Snapshot monitor-mode ring states
 * @pdev: DP pdev handle
 * @lmac_id: lmac whose monitor rings are queried
 * @num_srng: in/out index into soc_srngs_state->ring_state
 * @soc_srngs_state: destination ring-state container
 *
 * Records RXDMA monitor buf/dst/desc ring states when rxdma1 is enabled,
 * advancing *num_srng for each successful snapshot.
 *
 * NOTE(review): *num_srng is incremented inside qdf_assert_always(); this
 * relies on the "always" assert never being compiled out — confirm.
 *
 * Return: None
 */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
	QDF_STATUS status;

	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_buf_ring[lmac_id],
			 RXDMA_MONITOR_BUF,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);

		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_dst_ring[lmac_id],
			 RXDMA_MONITOR_DST,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);

		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_desc_ring[lmac_id],
			 RXDMA_MONITOR_DESC,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
	}
}
#else
/* Stub when monitor packet support is compiled out */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
}
#endif
2621 
#ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
/* Snapshot the TCL command/credit ring state (ring exists on this target) */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_cmd_credit_ring,
					       TCL_CMD_CREDIT, ring_state);
}
#else
/* TCL command/credit ring disabled: report success without filling state */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
#endif
2639 
#ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
/* Snapshot the TCL status ring state (ring exists on this target) */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_status_ring,
					       TCL_STATUS, ring_state);
}
#else
/* TCL status ring disabled: report success without filling state */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
#endif
2657 
2658 /**
2659  * dp_queue_srng_ring_stats(): Print pdev hal level ring stats
2660  * @pdev: DP_pdev handle
2661  *
2662  * Return: void
2663  */
2664 static void dp_queue_ring_stats(struct dp_pdev *pdev)
2665 {
2666 	uint32_t i;
2667 	int mac_id;
2668 	int lmac_id;
2669 	uint32_t j = 0;
2670 	struct dp_soc *soc = pdev->soc;
2671 	struct dp_soc_srngs_state * soc_srngs_state = NULL;
2672 	struct dp_soc_srngs_state *drop_srngs_state = NULL;
2673 	QDF_STATUS status;
2674 
2675 	soc_srngs_state = qdf_mem_malloc(sizeof(struct dp_soc_srngs_state));
2676 	if (!soc_srngs_state) {
2677 		dp_htt_alert("Memory alloc failed for back pressure event");
2678 		return;
2679 	}
2680 
2681 	status = dp_get_srng_ring_state_from_hal
2682 				(pdev->soc, pdev,
2683 				 &pdev->soc->reo_exception_ring,
2684 				 REO_EXCEPTION,
2685 				 &soc_srngs_state->ring_state[j]);
2686 
2687 	if (status == QDF_STATUS_SUCCESS)
2688 		qdf_assert_always(++j < DP_MAX_SRNGS);
2689 
2690 	status = dp_get_srng_ring_state_from_hal
2691 				(pdev->soc, pdev,
2692 				 &pdev->soc->reo_reinject_ring,
2693 				 REO_REINJECT,
2694 				 &soc_srngs_state->ring_state[j]);
2695 
2696 	if (status == QDF_STATUS_SUCCESS)
2697 		qdf_assert_always(++j < DP_MAX_SRNGS);
2698 
2699 	status = dp_get_srng_ring_state_from_hal
2700 				(pdev->soc, pdev,
2701 				 &pdev->soc->reo_cmd_ring,
2702 				 REO_CMD,
2703 				 &soc_srngs_state->ring_state[j]);
2704 
2705 	if (status == QDF_STATUS_SUCCESS)
2706 		qdf_assert_always(++j < DP_MAX_SRNGS);
2707 
2708 	status = dp_get_srng_ring_state_from_hal
2709 				(pdev->soc, pdev,
2710 				 &pdev->soc->reo_status_ring,
2711 				 REO_STATUS,
2712 				 &soc_srngs_state->ring_state[j]);
2713 
2714 	if (status == QDF_STATUS_SUCCESS)
2715 		qdf_assert_always(++j < DP_MAX_SRNGS);
2716 
2717 	status = dp_get_srng_ring_state_from_hal
2718 				(pdev->soc, pdev,
2719 				 &pdev->soc->rx_rel_ring,
2720 				 WBM2SW_RELEASE,
2721 				 &soc_srngs_state->ring_state[j]);
2722 
2723 	if (status == QDF_STATUS_SUCCESS)
2724 		qdf_assert_always(++j < DP_MAX_SRNGS);
2725 
2726 	status = dp_get_tcl_cmd_cred_ring_state_from_hal
2727 				(pdev, &soc_srngs_state->ring_state[j]);
2728 	if (status == QDF_STATUS_SUCCESS)
2729 		qdf_assert_always(++j < DP_MAX_SRNGS);
2730 
2731 	status = dp_get_tcl_status_ring_state_from_hal
2732 				(pdev, &soc_srngs_state->ring_state[j]);
2733 	if (status == QDF_STATUS_SUCCESS)
2734 		qdf_assert_always(++j < DP_MAX_SRNGS);
2735 
2736 	status = dp_get_srng_ring_state_from_hal
2737 				(pdev->soc, pdev,
2738 				 &pdev->soc->wbm_desc_rel_ring,
2739 				 SW2WBM_RELEASE,
2740 				 &soc_srngs_state->ring_state[j]);
2741 
2742 	if (status == QDF_STATUS_SUCCESS)
2743 		qdf_assert_always(++j < DP_MAX_SRNGS);
2744 
2745 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
2746 		status = dp_get_srng_ring_state_from_hal
2747 				(pdev->soc, pdev,
2748 				 &pdev->soc->reo_dest_ring[i],
2749 				 REO_DST,
2750 				 &soc_srngs_state->ring_state[j]);
2751 
2752 		if (status == QDF_STATUS_SUCCESS)
2753 			qdf_assert_always(++j < DP_MAX_SRNGS);
2754 	}
2755 
2756 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
2757 		status = dp_get_srng_ring_state_from_hal
2758 				(pdev->soc, pdev,
2759 				 &pdev->soc->tcl_data_ring[i],
2760 				 TCL_DATA,
2761 				 &soc_srngs_state->ring_state[j]);
2762 
2763 		if (status == QDF_STATUS_SUCCESS)
2764 			qdf_assert_always(++j < DP_MAX_SRNGS);
2765 	}
2766 
2767 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
2768 		status = dp_get_srng_ring_state_from_hal
2769 				(pdev->soc, pdev,
2770 				 &pdev->soc->tx_comp_ring[i],
2771 				 WBM2SW_RELEASE,
2772 				 &soc_srngs_state->ring_state[j]);
2773 
2774 		if (status == QDF_STATUS_SUCCESS)
2775 			qdf_assert_always(++j < DP_MAX_SRNGS);
2776 	}
2777 
2778 	lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id);
2779 	status = dp_get_srng_ring_state_from_hal
2780 				(pdev->soc, pdev,
2781 				 &pdev->soc->rx_refill_buf_ring
2782 				 [lmac_id],
2783 				 RXDMA_BUF,
2784 				 &soc_srngs_state->ring_state[j]);
2785 
2786 	if (status == QDF_STATUS_SUCCESS)
2787 		qdf_assert_always(++j < DP_MAX_SRNGS);
2788 
2789 	status = dp_get_srng_ring_state_from_hal
2790 				(pdev->soc, pdev,
2791 				 &pdev->rx_refill_buf_ring2,
2792 				 RXDMA_BUF,
2793 				 &soc_srngs_state->ring_state[j]);
2794 
2795 	if (status == QDF_STATUS_SUCCESS)
2796 		qdf_assert_always(++j < DP_MAX_SRNGS);
2797 
2798 
2799 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2800 		dp_get_srng_ring_state_from_hal
2801 				(pdev->soc, pdev,
2802 				 &pdev->rx_mac_buf_ring[i],
2803 				 RXDMA_BUF,
2804 				 &soc_srngs_state->ring_state[j]);
2805 
2806 		if (status == QDF_STATUS_SUCCESS)
2807 			qdf_assert_always(++j < DP_MAX_SRNGS);
2808 	}
2809 
2810 	for (mac_id = 0;
2811 	     mac_id  < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
2812 	     mac_id++) {
2813 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2814 						     mac_id, pdev->pdev_id);
2815 
2816 		dp_queue_mon_ring_stats(pdev, lmac_id, &j,
2817 					soc_srngs_state);
2818 
2819 		status = dp_get_srng_ring_state_from_hal
2820 			(pdev->soc, pdev,
2821 			 &pdev->soc->rxdma_mon_status_ring[lmac_id],
2822 			 RXDMA_MONITOR_STATUS,
2823 			 &soc_srngs_state->ring_state[j]);
2824 
2825 		if (status == QDF_STATUS_SUCCESS)
2826 			qdf_assert_always(++j < DP_MAX_SRNGS);
2827 	}
2828 
2829 	for (i = 0; i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
2830 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2831 						     i, pdev->pdev_id);
2832 
2833 		status = dp_get_srng_ring_state_from_hal
2834 				(pdev->soc, pdev,
2835 				 &pdev->soc->rxdma_err_dst_ring
2836 				 [lmac_id],
2837 				 RXDMA_DST,
2838 				 &soc_srngs_state->ring_state[j]);
2839 
2840 		if (status == QDF_STATUS_SUCCESS)
2841 			qdf_assert_always(++j < DP_MAX_SRNGS);
2842 	}
2843 	soc_srngs_state->max_ring_id = j;
2844 
2845 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
2846 
2847 	soc_srngs_state->seq_num = pdev->bkp_stats.seq_num;
2848 
2849 	if (pdev->bkp_stats.queue_depth >= HTT_BKP_STATS_MAX_QUEUE_DEPTH) {
2850 		drop_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
2851 		qdf_assert_always(drop_srngs_state);
2852 		TAILQ_REMOVE(&pdev->bkp_stats.list, drop_srngs_state,
2853 			     list_elem);
2854 		qdf_mem_free(drop_srngs_state);
2855 		pdev->bkp_stats.queue_depth--;
2856 	}
2857 
2858 	pdev->bkp_stats.queue_depth++;
2859 	TAILQ_INSERT_TAIL(&pdev->bkp_stats.list, soc_srngs_state,
2860 			  list_elem);
2861 	pdev->bkp_stats.seq_num++;
2862 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
2863 
2864 	qdf_queue_work(0, pdev->bkp_stats.work_queue,
2865 		       &pdev->bkp_stats.work);
2866 }
2867 
2868 /*
2869  * dp_htt_bkp_event_alert() - htt backpressure event alert
2870  * @msg_word:	htt packet context
2871  * @htt_soc:	HTT SOC handle
2872  *
2873  * Return: after attempting to print stats
2874  */
2875 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
2876 {
2877 	u_int8_t ring_type;
2878 	u_int8_t pdev_id;
2879 	uint8_t target_pdev_id;
2880 	u_int8_t ring_id;
2881 	u_int16_t hp_idx;
2882 	u_int16_t tp_idx;
2883 	u_int32_t bkp_time;
2884 	enum htt_t2h_msg_type msg_type;
2885 	struct dp_soc *dpsoc;
2886 	struct dp_pdev *pdev;
2887 	struct dp_htt_timestamp *radio_tt;
2888 
2889 	if (!soc)
2890 		return;
2891 
2892 	dpsoc = (struct dp_soc *)soc->dp_soc;
2893 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2894 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
2895 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
2896 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2897 							 target_pdev_id);
2898 	if (pdev_id >= MAX_PDEV_CNT) {
2899 		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
2900 		return;
2901 	}
2902 
2903 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
2904 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
2905 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
2906 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
2907 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
2908 	radio_tt = &soc->pdevid_tt[pdev_id];
2909 
2910 	switch (ring_type) {
2911 	case HTT_SW_RING_TYPE_UMAC:
2912 		if (!time_allow_print(radio_tt->umac_ttt, ring_id))
2913 			return;
2914 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2915 				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
2916 	break;
2917 	case HTT_SW_RING_TYPE_LMAC:
2918 		if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
2919 			return;
2920 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2921 				   bkp_time, "HTT_SW_RING_TYPE_LMAC");
2922 	break;
2923 	default:
2924 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2925 				   bkp_time, "UNKNOWN");
2926 	break;
2927 	}
2928 
2929 	dp_queue_ring_stats(pdev);
2930 }
2931 
2932 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
2933 /*
2934  * dp_offload_ind_handler() - offload msg handler
2935  * @htt_soc: HTT SOC handle
2936  * @msg_word: Pointer to payload
2937  *
2938  * Return: None
2939  */
2940 static void
2941 dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
2942 {
2943 	u_int8_t pdev_id;
2944 	u_int8_t target_pdev_id;
2945 
2946 	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
2947 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2948 							 target_pdev_id);
2949 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA, soc->dp_soc,
2950 			     msg_word, HTT_INVALID_VDEV, WDI_NO_VAL,
2951 			     pdev_id);
2952 }
2953 #else
2954 static void
2955 dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
2956 {
2957 }
2958 #endif
2959 
2960 #ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
/*
 * dp_update_mlo_ts_offset() - push the MLO timestamp offset to the MLO manager
 * @soc: DP SOC handle
 * @ts_lo: lower 32 bits of the MLO timestamp offset
 * @ts_hi: upper 32 bits of the MLO timestamp offset
 *
 * Combines the two 32-bit halves into a single 64-bit offset and forwards
 * it through the CDP mlo_ops callback.
 *
 * Return: None
 */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{
	uint64_t mlo_offset;

	mlo_offset = ((uint64_t)(ts_hi) << 32 | ts_lo);
	/* NOTE(review): mlo_ops is dereferenced without a NULL check —
	 * presumably always registered on multi-chip builds; confirm.
	 */
	soc->cdp_soc.ops->mlo_ops->mlo_update_mlo_ts_offset
		((struct cdp_soc_t *)soc, mlo_offset);
}
#else
/* Single-chip build: the MLO timestamp offset has no consumer; no-op stub */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{}
#endif
/*
 * dp_htt_mlo_peer_map_handler() - handle HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP
 * @soc: HTT SOC handle
 * @msg_word: pointer to the T2H message payload
 *
 * Parses the MLO peer id, the deswizzled peer MAC address, the per-flow
 * AST override info and the trailing per-link TLVs, then forwards the
 * result to dp_rx_mlo_peer_map_handler().
 *
 * Return: None
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
	uint8_t *mlo_peer_mac_addr;
	uint16_t mlo_peer_id;
	uint8_t num_links;
	struct dp_mlo_flow_override_info mlo_flow_info[DP_MLO_FLOW_INFO_MAX];
	struct dp_mlo_link_info mlo_link_info[DP_MAX_MLO_LINKS];
	MLO_PEER_MAP_TLV_TAG_ID tlv_type = 0xff;
	uint16_t tlv_len = 0;
	int i = 0;

	mlo_peer_id = HTT_RX_MLO_PEER_MAP_MLO_PEER_ID_GET(*msg_word);
	num_links =
		HTT_RX_MLO_PEER_MAP_NUM_LOGICAL_LINKS_GET(*msg_word);
	/* MAC address occupies words 1-2 in target byte order */
	mlo_peer_mac_addr =
	htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
				   &mac_addr_deswizzle_buf[0]);

	/* NOTE(review): all three flow-info entries below are parsed from
	 * *(msg_word + 3), so entries [1] and [2] duplicate entry [0].
	 * Verify against the HTT MLO_RX_PEER_MAP message layout whether
	 * they should instead read subsequent words.
	 */
	mlo_flow_info[0].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[0].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	mlo_flow_info[1].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[1].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	mlo_flow_info[2].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[2].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	/* TLV section starts at word 8; walk at most DP_MAX_MLO_LINKS TLVs.
	 * msg_word is never NULL here, so the loop is bounded only by i
	 * and by the zero-length-TLV break below.
	 */
	msg_word = msg_word + 8;
	while (msg_word && (i < DP_MAX_MLO_LINKS)) {
		/* 0xFF marks a link entry the firmware did not populate */
		mlo_link_info[i].peer_chip_id = 0xFF;
		mlo_link_info[i].vdev_id = 0xFF;

		tlv_type = HTT_RX_MLO_PEER_MAP_TLV_TAG_GET(*msg_word);
		tlv_len = HTT_RX_MLO_PEER_MAP_TLV_LENGTH_GET(*msg_word);

		if (tlv_len == 0) {
			dp_err("TLV Length is 0");
			break;
		}

		if (tlv_type == MLO_PEER_MAP_TLV_STRUCT_SOC_VDEV_PEER_IDS) {
			mlo_link_info[i].peer_chip_id =
				HTT_RX_MLO_PEER_MAP_CHIP_ID_GET(
							*(msg_word + 1));
			mlo_link_info[i].vdev_id =
				HTT_RX_MLO_PEER_MAP_VDEV_ID_GET(
							*(msg_word + 1));
		}
		/* Add header size to tlv length */
		tlv_len = tlv_len + HTT_TLV_HDR_LEN;
		msg_word = (uint32_t *)(((uint8_t *)msg_word) + tlv_len);
		i++;
	}

	dp_rx_mlo_peer_map_handler(soc->dp_soc, mlo_peer_id,
				   mlo_peer_mac_addr,
				   mlo_flow_info, mlo_link_info);
}
3060 
3061 static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
3062 					  uint32_t *msg_word)
3063 {
3064 	uint16_t mlo_peer_id;
3065 
3066 	mlo_peer_id = HTT_RX_MLO_PEER_UNMAP_MLO_PEER_ID_GET(*msg_word);
3067 	dp_rx_mlo_peer_unmap_handler(soc->dp_soc, mlo_peer_id);
3068 }
3069 
/*
 * dp_rx_mlo_timestamp_ind_handler() - handle MLO_TIMESTAMP_OFFSET_IND
 * @soc: DP SOC handle
 * @msg_word: pointer to the T2H message payload
 *
 * Validates the pdev, publishes the raw message to WDI subscribers,
 * caches the per-pdev MLO timestamp/offset fields under the htt_stats
 * lock, and pushes the new offset to the MLO manager.
 *
 * Return: None
 */
static void
dp_rx_mlo_timestamp_ind_handler(struct dp_soc *soc,
				uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint8_t target_pdev_id;
	struct dp_pdev *pdev;

	if (!soc)
		return;

	target_pdev_id = HTT_T2H_MLO_TIMESTAMP_OFFSET_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT) {
		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
		return;
	}

	pdev = (struct dp_pdev *)soc->pdev_list[pdev_id];

	if (!pdev) {
		dp_err("Invalid pdev");
		return;
	}
	/* Hand the raw indication to WDI subscribers before caching it */
	dp_wdi_event_handler(WDI_EVENT_MLO_TSTMP, soc,
			     msg_word, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);

	/* htt_stats lock guards the pdev->timestamp snapshot */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	pdev->timestamp.msg_type =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MSG_TYPE_GET(*msg_word);
	pdev->timestamp.pdev_id = pdev_id;
	pdev->timestamp.chip_id =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_CHIP_ID_GET(*msg_word);
	pdev->timestamp.mac_clk_freq =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MAC_CLK_FREQ_MHZ_GET(*msg_word);
	/* Words 1-5 carry raw 32-bit sync timestamp and offset values */
	pdev->timestamp.sync_tstmp_lo_us = *(msg_word + 1);
	pdev->timestamp.sync_tstmp_hi_us = *(msg_word + 2);
	pdev->timestamp.mlo_offset_lo_us = *(msg_word + 3);
	pdev->timestamp.mlo_offset_hi_us = *(msg_word + 4);
	pdev->timestamp.mlo_offset_clks  = *(msg_word + 5);
	pdev->timestamp.mlo_comp_us =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_US_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_clks =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_CLKS_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_timer =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_PERIOD_US_GET(
							*(msg_word + 7));

	dp_htt_debug("tsf_lo=%d tsf_hi=%d, mlo_ofst_lo=%d, mlo_ofst_hi=%d\n",
		     pdev->timestamp.sync_tstmp_lo_us,
		     pdev->timestamp.sync_tstmp_hi_us,
		     pdev->timestamp.mlo_offset_lo_us,
		     pdev->timestamp.mlo_offset_hi_us);

	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	dp_update_mlo_ts_offset(soc,
				pdev->timestamp.mlo_offset_lo_us,
				pdev->timestamp.mlo_offset_hi_us);
}
3135 #else
/* Stubs for builds without WLAN_FEATURE_11BE_MLO: these HTT messages must
 * never be delivered when MLO is compiled out, so receipt is treated as a
 * fatal firmware/host mismatch.
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
					 uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void
dp_rx_mlo_timestamp_ind_handler(void *soc_handle,
				uint32_t *msg_word)
{
	qdf_assert_always(0);
}
3154 #endif
3155 
3156 /*
3157  * dp_htt_rx_addba_handler() - RX Addba HTT msg handler
3158  * @soc: DP Soc handler
3159  * @peer_id: ID of peer
3160  * @tid: TID number
3161  * @win_sz: BA window size
3162  *
3163  * Return: None
3164  */
3165 static void
3166 dp_htt_rx_addba_handler(struct dp_soc *soc, uint16_t peer_id,
3167 			uint8_t tid, uint16_t win_sz)
3168 {
3169 	uint16_t status;
3170 	struct dp_peer *peer;
3171 
3172 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3173 
3174 	if (!peer) {
3175 		dp_err("Peer not found peer id %d", peer_id);
3176 		return;
3177 	}
3178 
3179 	status = dp_addba_requestprocess_wifi3((struct cdp_soc_t *)soc,
3180 					       peer->mac_addr.raw,
3181 					       peer->vdev->vdev_id, 0,
3182 					       tid, 0, win_sz, 0xffff);
3183 
3184 	dp_addba_resp_tx_completion_wifi3(
3185 		(struct cdp_soc_t *)soc,
3186 		peer->mac_addr.raw, peer->vdev->vdev_id,
3187 		tid,
3188 		status);
3189 
3190 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3191 
3192 	dp_info("PeerID %d BAW %d TID %d stat %d",
3193 		peer_id, win_sz, tid, status);
3194 }
3195 
3196 /*
3197  * dp_htt_ppdu_id_fmt_handler() - PPDU ID Format handler
 * @soc: DP SOC handle
3199  * @msg_word: Pointer to payload
3200  *
3201  * Return: None
3202  */
3203 static void
3204 dp_htt_ppdu_id_fmt_handler(struct dp_soc *soc, uint32_t *msg_word)
3205 {
3206 	uint8_t msg_type, valid, bits, offset;
3207 
3208 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3209 
3210 	msg_word += HTT_PPDU_ID_FMT_IND_LINK_ID_OFFSET;
3211 	valid = HTT_PPDU_ID_FMT_IND_VALID_GET_BITS31_16(*msg_word);
3212 	bits = HTT_PPDU_ID_FMT_IND_BITS_GET_BITS31_16(*msg_word);
3213 	offset = HTT_PPDU_ID_FMT_IND_OFFSET_GET_BITS31_16(*msg_word);
3214 
3215 	dp_info("link_id: valid %u bits %u offset %u", valid, bits, offset);
3216 
3217 	if (valid) {
3218 		soc->link_id_offset = offset;
3219 		soc->link_id_bits = bits;
3220 	}
3221 }
3222 
3223 /*
3224  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
3225  * @context:	Opaque context (HTT SOC handle)
3226  * @pkt:	HTC packet
3227  */
static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct htt_soc *soc = (struct htt_soc *) context;
	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	u_int32_t *msg_word;
	enum htt_t2h_msg_type msg_type;
	/* Handlers that take ownership of htt_t2h_msg clear free_buf so
	 * the buffer is not freed at the bottom of this function.
	 */
	bool free_buf = true;

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			soc->stats.htc_err_cnt++;

		qdf_nbuf_free(htt_t2h_msg);
		return;
	}

	/* TODO: Check if we should pop the HTC/HTT header alignment padding */

	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
	/* Record every T2H message for the HTT event logger before dispatch */
	htt_event_record(soc->htt_logger_handle,
			 msg_type, (uint8_t *)msg_word);
	switch (msg_type) {
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
	{
		dp_htt_bkp_event_alert(msg_word, soc);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *peer_mac_addr;
			u_int16_t peer_id;
			u_int16_t hw_peer_id;
			u_int8_t vdev_id;
			u_int8_t is_wds;
			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;

			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
			hw_peer_id =
				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
				(u_int8_t *) (msg_word+1),
				&mac_addr_deswizzle_buf[0]);
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_DEBUG,
				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
				peer_id, vdev_id);

			/*
			 * check if peer already exists for this peer_id, if so
			 * this peer map event is in response for a wds peer add
			 * wmi command sent during wds source port learning.
			 * in this case just add the ast entry to the existing
			 * peer ast_list.
			 */
			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
					       vdev_id, peer_mac_addr, 0,
					       is_wds);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
		{
			u_int16_t peer_id;
			u_int8_t vdev_id;
			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);

			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
						 vdev_id, mac_addr, 0,
						 DP_PEER_WDS_COUNT_INVALID);
			break;
		}
	case HTT_T2H_MSG_TYPE_SEC_IND:
		{
			u_int16_t peer_id;
			enum cdp_sec_type sec_type;
			int is_unicast;

			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
			/* point to the first part of the Michael key */
			msg_word++;
			dp_rx_sec_ind_handler(
				soc->dp_soc, peer_id, sec_type, is_unicast,
				msg_word, msg_word + 2);
			break;
		}

	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		{
			/* Handler may retain the nbuf; honor its free_buf
			 * decision instead of unconditionally freeing.
			 */
			free_buf =
				dp_monitor_ppdu_stats_ind_handler(soc,
								  msg_word,
								  htt_t2h_msg);
			break;
		}

	case HTT_T2H_MSG_TYPE_PKTLOG:
		{
			dp_pktlog_msg_handler(soc, msg_word);
			break;
		}

	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		{
			/*
			 * HTC maintains runtime pm count for H2T messages that
			 * have a response msg from FW. This count ensures that
			 * in the case FW does not sent out the response or host
			 * did not process this indication runtime_put happens
			 * properly in the cleanup path.
			 */
			if (htc_dec_return_runtime_cnt(soc->htc_soc) >= 0)
				htc_pm_runtime_put(soc->htc_soc);
			else
				soc->stats.htt_ver_req_put_skip++;
			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				"target uses HTT version %d.%d; host uses %d.%d",
				soc->tgt_ver.major, soc->tgt_ver.minor,
				HTT_CURRENT_VERSION_MAJOR,
				HTT_CURRENT_VERSION_MINOR);
			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_WARN,
					"*** Incompatible host/target HTT versions!");
			}
			/* abort if the target is incompatible with the host */
			qdf_assert(soc->tgt_ver.major ==
				HTT_CURRENT_VERSION_MAJOR);
			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_INFO_LOW,
					"*** Warning: host/target HTT versions"
					" are different, though compatible!");
			}
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint16_t win_sz;

			/*
			 * Update REO Queue Desc with new values
			 */
			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);

			/*
			 * Window size needs to be incremented by 1
			 * since fw needs to represent a value of 256
			 * using just 8 bits
			 */
			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
						tid, win_sz + 1);
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_ADDBA_EXTN:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint16_t win_sz;

			peer_id = HTT_RX_ADDBA_EXTN_PEER_ID_GET(*msg_word);
			tid = HTT_RX_ADDBA_EXTN_TID_GET(*msg_word);

			/* Extended variant carries the window size in word 1 */
			msg_word++;
			win_sz = HTT_RX_ADDBA_EXTN_WIN_SIZE_GET(*msg_word);

			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
						tid, win_sz);
			break;
		}
	case HTT_T2H_PPDU_ID_FMT_IND:
		{
			dp_htt_ppdu_id_fmt_handler(soc->dp_soc, msg_word);
			break;
		}
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		{
			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *peer_mac_addr;
			u_int16_t peer_id;
			u_int16_t hw_peer_id;
			u_int8_t vdev_id;
			bool is_wds;
			u_int16_t ast_hash;
			struct dp_ast_flow_override_info ast_flow_info;

			qdf_mem_set(&ast_flow_info, 0,
					    sizeof(struct dp_ast_flow_override_info));

			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
			hw_peer_id =
			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
			peer_mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
			is_wds =
			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
			ast_hash =
			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
			/*
			 * Update 4 ast_index per peer, ast valid mask
			 * and TID flow valid mask.
			 * AST valid mask is 3 bit field corresponds to
			 * ast_index[3:1]. ast_index 0 is always valid.
			 */
			ast_flow_info.ast_valid_mask =
			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
			ast_flow_info.ast_idx[0] = hw_peer_id;
			ast_flow_info.ast_flow_mask[0] =
			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
			ast_flow_info.ast_idx[1] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
			ast_flow_info.ast_flow_mask[1] =
			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
			ast_flow_info.ast_idx[2] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
			ast_flow_info.ast_flow_mask[2] =
			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
			ast_flow_info.ast_idx[3] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
			ast_flow_info.ast_flow_mask[3] =
			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
			/*
			 * TID valid mask is applicable only
			 * for HI and LOW priority flows.
			 * tid_valid_mas is 8 bit field corresponds
			 * to TID[7:0]
			 */
			ast_flow_info.tid_valid_low_pri_mask =
			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
			ast_flow_info.tid_valid_hi_pri_mask =
			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_DEBUG,
				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
				  peer_id, vdev_id);

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  "ast_idx[0] %d ast_idx[1] %d ast_idx[2] %d ast_idx[3] %d n",
				  ast_flow_info.ast_idx[0],
				  ast_flow_info.ast_idx[1],
				  ast_flow_info.ast_idx[2],
				  ast_flow_info.ast_idx[3]);

			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
					       hw_peer_id, vdev_id,
					       peer_mac_addr, ast_hash,
					       is_wds);

			/*
			 * Update ast indexes for flow override support
			 * Applicable only for non wds peers
			 */
			if (!soc->dp_soc->ast_offload_support)
				dp_peer_ast_index_flow_queue_map_create(
						soc->dp_soc, is_wds,
						peer_id, peer_mac_addr,
						&ast_flow_info);

			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *mac_addr;
			u_int16_t peer_id;
			u_int8_t vdev_id;
			u_int8_t is_wds;
			u_int32_t free_wds_count;

			peer_id =
			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
			mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
			is_wds =
			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
			free_wds_count =
			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
				  peer_id, vdev_id);

			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
						 vdev_id, mac_addr,
						 is_wds, free_wds_count);
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint8_t win_sz;
			QDF_STATUS status;

			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_DELBA_TID_GET(*msg_word);
			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);

			status = dp_rx_delba_ind_handler(
				soc->dp_soc,
				peer_id, tid, win_sz);

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
				  peer_id, win_sz, tid, status);
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_DELBA_EXTN:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint16_t win_sz;
			QDF_STATUS status;

			peer_id = HTT_RX_DELBA_EXTN_PEER_ID_GET(*msg_word);
			tid = HTT_RX_DELBA_EXTN_TID_GET(*msg_word);

			/* Extended variant carries the window size in word 1 */
			msg_word++;
			win_sz = HTT_RX_DELBA_EXTN_WIN_SIZE_GET(*msg_word);

			status = dp_rx_delba_ind_handler(soc->dp_soc,
							 peer_id, tid,
							 win_sz);

			dp_info("DELBA PeerID %d BAW %d TID %d stat %d",
				peer_id, win_sz, tid, status);
			break;
		}
	case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
		{
			uint16_t num_entries;
			uint32_t cmem_ba_lo;
			uint32_t cmem_ba_hi;

			num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
			cmem_ba_lo = *(msg_word + 1);
			cmem_ba_hi = *(msg_word + 2);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
				  num_entries, cmem_ba_lo, cmem_ba_hi);

			dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
						     cmem_ba_lo, cmem_ba_hi);
			break;
		}
	case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
		{
			dp_offload_ind_handler(soc, msg_word);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_MAP_V3:
	{
		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
		u_int8_t *peer_mac_addr;
		u_int16_t peer_id;
		u_int16_t hw_peer_id;
		u_int8_t vdev_id;
		uint8_t is_wds;
		u_int16_t ast_hash = 0;

		peer_id = HTT_RX_PEER_MAP_V3_SW_PEER_ID_GET(*msg_word);
		vdev_id = HTT_RX_PEER_MAP_V3_VDEV_ID_GET(*msg_word);
		peer_mac_addr =
		htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
					   &mac_addr_deswizzle_buf[0]);
		hw_peer_id = HTT_RX_PEER_MAP_V3_HW_PEER_ID_GET(*(msg_word + 3));
		ast_hash = HTT_RX_PEER_MAP_V3_CACHE_SET_NUM_GET(*(msg_word + 3));
		is_wds = HTT_RX_PEER_MAP_V3_NEXT_HOP_GET(*(msg_word + 4));

		dp_htt_info("HTT_T2H_MSG_TYPE_PEER_MAP_V3 msg for peer id %d vdev id %d n",
			    peer_id, vdev_id);

		dp_rx_peer_map_handler(soc->dp_soc, peer_id,
				       hw_peer_id, vdev_id,
				       peer_mac_addr, ast_hash,
				       is_wds);

		break;
	}
	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP:
	{
		dp_htt_mlo_peer_map_handler(soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_UNMAP:
	{
		dp_htt_mlo_peer_unmap_handler(soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
	{
		dp_rx_mlo_timestamp_ind_handler(soc->dp_soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_VDEVS_TXRX_STATS_PERIODIC_IND:
	{
		dp_vdev_txrx_hw_stats_handler(soc, msg_word);
		break;
	}
	case HTT_T2H_SAWF_DEF_QUEUES_MAP_REPORT_CONF:
	{
		dp_sawf_def_queues_update_map_report_conf(soc, msg_word,
							  htt_t2h_msg);
		break;
	}
	case HTT_T2H_SAWF_MSDUQ_INFO_IND:
	{
		dp_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
		break;
	}
	case HTT_T2H_MSG_TYPE_STREAMING_STATS_IND:
	{
		dp_sawf_mpdu_stats_handler(soc, htt_t2h_msg);
		break;
	}

	default:
		/* Unknown/unhandled T2H messages are silently dropped */
		break;
	};

	/* Free the indication buffer */
	if (free_buf)
		qdf_nbuf_free(htt_t2h_msg);
}
3679 
3680 /*
3681  * dp_htt_h2t_full() - Send full handler (called from HTC)
3682  * @context:	Opaque context (HTT SOC handle)
3683  * @pkt:	HTC packet
3684  *
3685  * Return: enum htc_send_full_action
3686  */
static enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	/* HTT never drops H2T messages on queue-full; always tell HTC to
	 * keep the packet queued.
	 */
	return HTC_SEND_FULL_KEEP;
}
3692 
3693 /*
3694  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3695  * @context:	Opaque context (HTT SOC handle)
3696  * @nbuf:	nbuf containing T2H message
3697  * @pipe_id:	HIF pipe ID
3698  *
3699  * Return: QDF_STATUS
3700  *
3701  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3702  * will be used for packet log and other high-priority HTT messages. Proper
3703  * HTC connection to be added later once required FW changes are available
3704  */
3705 static QDF_STATUS
3706 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3707 {
3708 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3709 	HTC_PACKET htc_pkt;
3710 
3711 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3712 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3713 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3714 	htc_pkt.pPktContext = (void *)nbuf;
3715 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3716 
3717 	return rc;
3718 }
3719 
3720 /*
3721  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3722  * @htt_soc:	HTT SOC handle
3723  *
3724  * Return: QDF_STATUS
3725  */
3726 static QDF_STATUS
3727 htt_htc_soc_attach(struct htt_soc *soc)
3728 {
3729 	struct htc_service_connect_req connect;
3730 	struct htc_service_connect_resp response;
3731 	QDF_STATUS status;
3732 	struct dp_soc *dpsoc = soc->dp_soc;
3733 
3734 	qdf_mem_zero(&connect, sizeof(connect));
3735 	qdf_mem_zero(&response, sizeof(response));
3736 
3737 	connect.pMetaData = NULL;
3738 	connect.MetaDataLength = 0;
3739 	connect.EpCallbacks.pContext = soc;
3740 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3741 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3742 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3743 
3744 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3745 	connect.EpCallbacks.EpRecvRefill = NULL;
3746 
3747 	/* N/A, fill is done by HIF */
3748 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3749 
3750 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3751 	/*
3752 	 * Specify how deep to let a queue get before htc_send_pkt will
3753 	 * call the EpSendFull function due to excessive send queue depth.
3754 	 */
3755 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3756 
3757 	/* disable flow control for HTT data message service */
3758 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3759 
3760 	/* connect to control service */
3761 	connect.service_id = HTT_DATA_MSG_SVC;
3762 
3763 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3764 
3765 	if (status != QDF_STATUS_SUCCESS)
3766 		return status;
3767 
3768 	soc->htc_endpoint = response.Endpoint;
3769 
3770 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3771 
3772 	htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc);
3773 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3774 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3775 
3776 	return QDF_STATUS_SUCCESS; /* success */
3777 }
3778 
3779 /*
3780  * htt_soc_initialize() - SOC level HTT initialization
3781  * @htt_soc: Opaque htt SOC handle
3782  * @ctrl_psoc: Opaque ctrl SOC handle
3783  * @htc_soc: SOC level HTC handle
3784  * @hal_soc: Opaque HAL SOC handle
3785  * @osdev: QDF device
3786  *
3787  * Return: HTT handle on success; NULL on failure
3788  */
3789 void *
3790 htt_soc_initialize(struct htt_soc *htt_soc,
3791 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
3792 		   HTC_HANDLE htc_soc,
3793 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
3794 {
3795 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3796 
3797 	soc->osdev = osdev;
3798 	soc->ctrl_psoc = ctrl_psoc;
3799 	soc->htc_soc = htc_soc;
3800 	soc->hal_soc = hal_soc_hdl;
3801 
3802 	if (htt_htc_soc_attach(soc))
3803 		goto fail2;
3804 
3805 	return soc;
3806 
3807 fail2:
3808 	return NULL;
3809 }
3810 
/*
 * htt_soc_htc_dealloc() - release HTC-related HTT SOC resources
 * @htt_handle: HTT SOC handle
 *
 * Tears down HTT event logging, then frees the misc packet list and the
 * preallocated HTC packet free-list.
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
3817 
3818 /*
3819  * htt_soc_htc_prealloc() - HTC memory prealloc
3820  * @htt_soc: SOC level HTT handle
3821  *
3822  * Return: QDF_STATUS_SUCCESS on Success or
3823  * QDF_STATUS_E_NOMEM on allocation failure
3824  */
3825 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
3826 {
3827 	int i;
3828 
3829 	soc->htt_htc_pkt_freelist = NULL;
3830 	/* pre-allocate some HTC_PACKET objects */
3831 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3832 		struct dp_htt_htc_pkt_union *pkt;
3833 		pkt = qdf_mem_malloc(sizeof(*pkt));
3834 		if (!pkt)
3835 			return QDF_STATUS_E_NOMEM;
3836 
3837 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3838 	}
3839 	return QDF_STATUS_SUCCESS;
3840 }
3841 
3842 /*
3843  * htt_soc_detach() - Free SOC level HTT handle
3844  * @htt_hdl: HTT SOC handle
3845  */
3846 void htt_soc_detach(struct htt_soc *htt_hdl)
3847 {
3848 	int i;
3849 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
3850 
3851 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3852 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
3853 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
3854 	}
3855 
3856 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
3857 	qdf_mem_free(htt_handle);
3858 
3859 }
3860 
3861 /**
 * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
3863  * @pdev: DP PDEV handle
3864  * @stats_type_upload_mask: stats type requested by user
3865  * @config_param_0: extra configuration parameters
3866  * @config_param_1: extra configuration parameters
3867  * @config_param_2: extra configuration parameters
3868  * @config_param_3: extra configuration parameters
3869  * @mac_id: mac number
3870  *
3871  * return: QDF STATUS
3872  */
QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint32_t config_param_0,
		uint32_t config_param_1, uint32_t config_param_2,
		uint32_t config_param_3, int cookie_val, int cookie_msb,
		uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask = 0;
	uint8_t *htt_logger_bufp;
	int mac_for_pdev;
	int target_pdev_id;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);

	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	/* Translate host (mac_id, pdev_id) into the target pdev id that the
	 * firmware understands; the request carries a per-target-pdev bitmap.
	 */
	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
	target_pdev_id =
	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);

	pdev_mask = 1 << target_pdev_id;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand head for HTT_EXT_STATS");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* word 0: msg type, target pdev mask, requested stats-type bitmap */
	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);

	/* word 2 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);

	/* word 3 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);

	/* word 4 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);

	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);

	/* word 5 */
	/* NOTE(review): word 5 is skipped without being zeroed; nbuf data is
	 * not guaranteed to be zero-initialized, so this word may carry stale
	 * bytes to the firmware — confirm this is intentional.
	 */
	msg_word++;

	/* word 6: cookie LSBs identifying this stats request */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);

	/* word 7: cookie MSBs */
	msg_word++;
	*msg_word = 0;
	/* Currently Using last 2 bits for pdev_id
	 * For future reference, reserving 3 bits in cookie_msb for pdev_id
	 */
	cookie_msb = (cookie_msb | pdev->pdev_id);
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for FW response msg not guaranteed */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
				     htt_logger_bufp);

	/* On send failure, ownership of msg and pkt remains here; free both */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
3996 
3997 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
3998 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK 0xFFFFFFFF
3999 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK 0xFFFFFFFF00000000
4000 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT 32
4001 
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	struct htt_soc *soc = dpsoc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;
	int duration;
	uint32_t bitmask;
	int target_pdev_id;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_vdevs_txrx_stats_cfg)),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate "
		"HTT_H2T_HW_VDEV_TXRX_STATS_CFG_MSG_SZ msg buffer", dpsoc);
		return QDF_STATUS_E_NOMEM;
	}

	/* INVALID_PDEV_ID requests a SOC-wide config; encoded as pdev id 0 */
	if (pdev_id != INVALID_PDEV_ID)
		target_pdev_id = DP_SW2HW_MACID(pdev_id);
	else
		target_pdev_id = 0;

	duration =
	wlan_cfg_get_vdev_stats_hw_offload_timer(dpsoc->wlan_cfg_ctx);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_vdevs_txrx_stats_cfg))) {
		dp_htt_err("%pK: Failed to expand head for HTT_HW_VDEV_STATS"
			   , dpsoc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* word 0: msg type, pdev id, enable, interval, reset flag */
	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;

	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG);
	HTT_RX_VDEVS_TXRX_STATS_PDEV_ID_SET(*msg_word, target_pdev_id);

	HTT_RX_VDEVS_TXRX_STATS_ENABLE_SET(*msg_word, enable);

	/* Interval field is programmed as duration >> 3; field units are
	 * defined by the HTT interface — TODO confirm against htt.h.
	 */
	HTT_RX_VDEVS_TXRX_STATS_PERIODIC_INTERVAL_SET(*msg_word,
						      (duration >> 3));

	HTT_RX_VDEVS_TXRX_STATS_RESET_STATS_BITS_SET(*msg_word, reset);

	/* word 1: lower 32 bits of the per-vdev reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask = (reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK);
	*msg_word = bitmask;

	/* word 2: upper 32 bits of the per-vdev reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask =
		((reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK) >>
		 HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT);
	*msg_word = bitmask;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer",
			   dpsoc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG,
				     htt_logger_bufp);

	/* On send failure, ownership of msg and pkt remains here; free both */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4107 #else
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	/* HW vdev stats offload not compiled in; nothing to configure */
	return QDF_STATUS_SUCCESS;
}
4114 #endif
4115 
4116 /**
 * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration
4118  * HTT message to pass to FW
4119  * @pdev: DP PDEV handle
4120  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
4121  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
4122  *
4123  * tuple_mask[1:0]:
4124  *   00 - Do not report 3 tuple hash value
4125  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
4126  *   01 - Report 3 tuple hash value in flow_id_toeplitz
4127  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
4128  *
4129  * return: QDF STATUS
4130  */
4131 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
4132 				     uint32_t tuple_mask, uint8_t mac_id)
4133 {
4134 	struct htt_soc *soc = pdev->soc->htt_handle;
4135 	struct dp_htt_htc_pkt *pkt;
4136 	qdf_nbuf_t msg;
4137 	uint32_t *msg_word;
4138 	uint8_t *htt_logger_bufp;
4139 	int mac_for_pdev;
4140 	int target_pdev_id;
4141 
4142 	msg = qdf_nbuf_alloc(
4143 			soc->osdev,
4144 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
4145 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4146 
4147 	if (!msg)
4148 		return QDF_STATUS_E_NOMEM;
4149 
4150 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4151 	target_pdev_id =
4152 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
4153 
4154 	/*
4155 	 * Set the length of the message.
4156 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4157 	 * separately during the below call to qdf_nbuf_push_head.
4158 	 * The contribution from the HTC header is added separately inside HTC.
4159 	 */
4160 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
4161 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4162 			  "Failed to expand head for HTT_3TUPLE_CONFIG");
4163 		qdf_nbuf_free(msg);
4164 		return QDF_STATUS_E_FAILURE;
4165 	}
4166 
4167 	dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------",
4168 		    pdev->soc, tuple_mask, target_pdev_id);
4169 
4170 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
4171 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4172 	htt_logger_bufp = (uint8_t *)msg_word;
4173 
4174 	*msg_word = 0;
4175 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
4176 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
4177 
4178 	msg_word++;
4179 	*msg_word = 0;
4180 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4181 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4182 
4183 	pkt = htt_htc_pkt_alloc(soc);
4184 	if (!pkt) {
4185 		qdf_nbuf_free(msg);
4186 		return QDF_STATUS_E_NOMEM;
4187 	}
4188 
4189 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4190 
4191 	SET_HTC_PACKET_INFO_TX(
4192 			&pkt->htc_pkt,
4193 			dp_htt_h2t_send_complete_free_netbuf,
4194 			qdf_nbuf_data(msg),
4195 			qdf_nbuf_len(msg),
4196 			soc->htc_endpoint,
4197 			/* tag for no FW response msg */
4198 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4199 
4200 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4201 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
4202 			    htt_logger_bufp);
4203 
4204 	return QDF_STATUS_SUCCESS;
4205 }
4206 
/* This workaround will be reverted once a proper HTT header defines
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in htt.h.
 */
4210 #if defined(WDI_EVENT_ENABLE)
4211 /**
4212  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
4213  * @pdev: DP PDEV handle
4214  * @stats_type_upload_mask: stats type requested by user
4215  * @mac_id: Mac id number
4216  *
4217  * return: QDF STATUS
4218  */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer"
			   , pdev->soc);
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
								mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS"
			   , pdev->soc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* word 0: msg type, target pdev mask and requested TLV bitmap */
	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
			stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer", pdev->soc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for no FW response msg */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
				     (uint8_t *)msg_word);

	/* On send failure, ownership of msg and pkt remains here; free both */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

qdf_export_symbol(dp_h2t_cfg_stats_msg_send);
4302 #endif
4303 
/**
 * dp_peer_update_inactive_time() - process FW stats TLVs to record a peer's
 * tx inactive time
 * @pdev: DP PDEV handle
 * @tag_type: HTT stats TLV tag
 * @tag_buf: TLV payload
 *
 * Assumes the PEER_DETAILS TLV (which carries sw_peer_id) arrives before the
 * PEER_STATS_CMN TLV for the same peer — TODO confirm firmware TLV ordering.
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf)
{
	struct dp_peer *peer = NULL;
	switch (tag_type) {
	case HTT_STATS_PEER_DETAILS_TAG:
	{
		htt_peer_details_tlv *dp_stats_buf =
			(htt_peer_details_tlv *)tag_buf;

		/* Remember which peer the subsequent stats TLVs refer to */
		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
	}
	break;
	case HTT_STATS_PEER_STATS_CMN_TAG:
	{
		htt_peer_stats_cmn_tlv *dp_stats_buf =
			(htt_peer_stats_cmn_tlv *)tag_buf;

		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
					     DP_MOD_ID_HTT);

		/* Record inactive time only for non-BSS peers and signal any
		 * waiter blocked on the fw_peer_stats_event completion.
		 */
		if (peer && !peer->bss_peer) {
			peer->stats.tx.inactive_time =
				dp_stats_buf->inactive_time;
			qdf_event_set(&pdev->fw_peer_stats_event);
		}
		/* Drop the reference taken above, if any */
		if (peer)
			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
	}
	break;
	default:
		qdf_err("Invalid tag_type");
	}
}
4339 
4340 /**
4341  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
4342  * @pdev: DP pdev handle
4343  * @fse_setup_info: FST setup parameters
4344  *
4345  * Return: Success when HTT message is sent, error on failure
4346  */
4347 QDF_STATUS
4348 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
4349 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
4350 {
4351 	struct htt_soc *soc = pdev->soc->htt_handle;
4352 	struct dp_htt_htc_pkt *pkt;
4353 	qdf_nbuf_t msg;
4354 	u_int32_t *msg_word;
4355 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4356 	uint8_t *htt_logger_bufp;
4357 	u_int32_t *key;
4358 	QDF_STATUS status;
4359 
4360 	msg = qdf_nbuf_alloc(
4361 		soc->osdev,
4362 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4363 		/* reserve room for the HTC header */
4364 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4365 
4366 	if (!msg)
4367 		return QDF_STATUS_E_NOMEM;
4368 
4369 	/*
4370 	 * Set the length of the message.
4371 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4372 	 * separately during the below call to qdf_nbuf_push_head.
4373 	 * The contribution from the HTC header is added separately inside HTC.
4374 	 */
4375 	if (!qdf_nbuf_put_tail(msg,
4376 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4377 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
4378 		return QDF_STATUS_E_FAILURE;
4379 	}
4380 
4381 	/* fill in the message contents */
4382 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4383 
4384 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4385 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4386 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4387 	htt_logger_bufp = (uint8_t *)msg_word;
4388 
4389 	*msg_word = 0;
4390 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4391 
4392 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4393 
4394 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4395 
4396 	msg_word++;
4397 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4398 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4399 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4400 					     fse_setup_info->ip_da_sa_prefix);
4401 
4402 	msg_word++;
4403 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4404 					  fse_setup_info->base_addr_lo);
4405 	msg_word++;
4406 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4407 					  fse_setup_info->base_addr_hi);
4408 
4409 	key = (u_int32_t *)fse_setup_info->hash_key;
4410 	fse_setup->toeplitz31_0 = *key++;
4411 	fse_setup->toeplitz63_32 = *key++;
4412 	fse_setup->toeplitz95_64 = *key++;
4413 	fse_setup->toeplitz127_96 = *key++;
4414 	fse_setup->toeplitz159_128 = *key++;
4415 	fse_setup->toeplitz191_160 = *key++;
4416 	fse_setup->toeplitz223_192 = *key++;
4417 	fse_setup->toeplitz255_224 = *key++;
4418 	fse_setup->toeplitz287_256 = *key++;
4419 	fse_setup->toeplitz314_288 = *key;
4420 
4421 	msg_word++;
4422 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4423 	msg_word++;
4424 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4425 	msg_word++;
4426 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4427 	msg_word++;
4428 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4429 	msg_word++;
4430 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4431 	msg_word++;
4432 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4433 	msg_word++;
4434 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4435 	msg_word++;
4436 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4437 	msg_word++;
4438 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4439 	msg_word++;
4440 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4441 					  fse_setup->toeplitz314_288);
4442 
4443 	pkt = htt_htc_pkt_alloc(soc);
4444 	if (!pkt) {
4445 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4446 		qdf_assert(0);
4447 		qdf_nbuf_free(msg);
4448 		return QDF_STATUS_E_RESOURCES; /* failure */
4449 	}
4450 
4451 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4452 
4453 	SET_HTC_PACKET_INFO_TX(
4454 		&pkt->htc_pkt,
4455 		dp_htt_h2t_send_complete_free_netbuf,
4456 		qdf_nbuf_data(msg),
4457 		qdf_nbuf_len(msg),
4458 		soc->htc_endpoint,
4459 		/* tag for no FW response msg */
4460 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4461 
4462 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4463 
4464 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4465 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4466 				     htt_logger_bufp);
4467 
4468 	if (status == QDF_STATUS_SUCCESS) {
4469 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4470 			fse_setup_info->pdev_id);
4471 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4472 				   (void *)fse_setup_info->hash_key,
4473 				   fse_setup_info->hash_key_len);
4474 	} else {
4475 		qdf_nbuf_free(msg);
4476 		htt_htc_pkt_free(soc, pkt);
4477 	}
4478 
4479 	return status;
4480 }
4481 
4482 /**
4483  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
4484  * add/del a flow in HW
4485  * @pdev: DP pdev handle
4486  * @fse_op_info: Flow entry parameters
4487  *
4488  * Return: Success when HTT message is sent, error on failure
4489  */
QDF_STATUS
dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);

	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;

	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
	msg_word++;
	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
		/* Per-entry invalidation: encode the full 5-tuple (128-bit
		 * src/dest IPs in network order, ports, L4 protocol) that
		 * identifies the flow to invalidate.
		 */
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(
		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
		msg_word++;
		HTT_RX_FSE_SOURCEPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.src_port);
		HTT_RX_FSE_DESTPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.dest_port);
		msg_word++;
		HTT_RX_FSE_L4_PROTO_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
	}

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(msg),
		qdf_nbuf_len(msg),
		soc->htc_endpoint,
		/* tag for no FW response msg */
		HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
				     htt_logger_bufp);

	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
			fse_op_info->pdev_id);
	} else {
		/* Send failed: ownership of msg and pkt remains here */
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4630 
4631 /**
4632  * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
4633  * @pdev: DP pdev handle
 * @fisa_config: FISA configuration parameters
4635  *
4636  * Return: Success when HTT message is sent, error on failure
4637  */
4638 QDF_STATUS
4639 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
4640 		      struct dp_htt_rx_fisa_cfg *fisa_config)
4641 {
4642 	struct htt_soc *soc = pdev->soc->htt_handle;
4643 	struct dp_htt_htc_pkt *pkt;
4644 	qdf_nbuf_t msg;
4645 	u_int32_t *msg_word;
4646 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
4647 	uint8_t *htt_logger_bufp;
4648 	uint32_t len;
4649 	QDF_STATUS status;
4650 
4651 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
4652 
4653 	msg = qdf_nbuf_alloc(soc->osdev,
4654 			     len,
4655 			     /* reserve room for the HTC header */
4656 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
4657 			     4,
4658 			     TRUE);
4659 	if (!msg)
4660 		return QDF_STATUS_E_NOMEM;
4661 
4662 	/*
4663 	 * Set the length of the message.
4664 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4665 	 * separately during the below call to qdf_nbuf_push_head.
4666 	 * The contribution from the HTC header is added separately inside HTC.
4667 	 */
4668 	if (!qdf_nbuf_put_tail(msg,
4669 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
4670 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4671 		qdf_nbuf_free(msg);
4672 		return QDF_STATUS_E_FAILURE;
4673 	}
4674 
4675 	/* fill in the message contents */
4676 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4677 
4678 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
4679 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4680 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4681 	htt_logger_bufp = (uint8_t *)msg_word;
4682 
4683 	*msg_word = 0;
4684 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
4685 
4686 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
4687 
4688 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);
4689 
4690 	msg_word++;
4691 	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
4692 	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);
4693 
4694 	msg_word++;
4695 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
4696 
4697 	pkt = htt_htc_pkt_alloc(soc);
4698 	if (!pkt) {
4699 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4700 		qdf_assert(0);
4701 		qdf_nbuf_free(msg);
4702 		return QDF_STATUS_E_RESOURCES; /* failure */
4703 	}
4704 
4705 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4706 
4707 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4708 			       dp_htt_h2t_send_complete_free_netbuf,
4709 			       qdf_nbuf_data(msg),
4710 			       qdf_nbuf_len(msg),
4711 			       soc->htc_endpoint,
4712 			       /* tag for no FW response msg */
4713 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
4714 
4715 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4716 
4717 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
4718 				     htt_logger_bufp);
4719 
4720 	if (status == QDF_STATUS_SUCCESS) {
4721 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
4722 			fisa_config->pdev_id);
4723 	} else {
4724 		qdf_nbuf_free(msg);
4725 		htt_htc_pkt_free(soc, pkt);
4726 	}
4727 
4728 	return status;
4729 }
4730 
4731 #ifdef WLAN_SUPPORT_PPEDS
4732 /**
4733  * dp_htt_rxdma_rxole_ppe_cfg_set() - Send RxOLE and RxDMA PPE config
 * @soc: Data path SoC handle
4735  * @cfg: RxDMA and RxOLE PPE config
4736  *
4737  * Return: Success when HTT message is sent, error on failure
4738  */
QDF_STATUS
dp_htt_rxdma_rxole_ppe_cfg_set(struct dp_soc *soc,
			       struct dp_htt_rxdma_rxole_ppe_config *cfg)
{
	struct htt_soc *htt_handle = soc->htt_handle;
	uint32_t len;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	QDF_STATUS status;
	uint8_t *htt_logger_bufp;
	struct dp_htt_htc_pkt *pkt;

	len = HTT_MSG_BUF_SIZE(
	      sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(
		msg, sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t))) {
		qdf_err("Failed to expand head for HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0,
	       sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));

	/* Rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	/* word 0: msg type plus every PPE override flag from the caller's
	 * config, packed into the single configuration word.
	 */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG);
	HTT_PPE_CFG_OVERRIDE_SET(*msg_word, cfg->override);
	HTT_PPE_CFG_REO_DEST_IND_SET(
			*msg_word, cfg->reo_destination_indication);
	HTT_PPE_CFG_MULTI_BUF_MSDU_OVERRIDE_EN_SET(
			*msg_word, cfg->multi_buffer_msdu_override_en);
	HTT_PPE_CFG_INTRA_BSS_OVERRIDE_EN_SET(
			*msg_word, cfg->intra_bss_override);
	HTT_PPE_CFG_DECAP_RAW_OVERRIDE_EN_SET(
			*msg_word, cfg->decap_raw_override);
	HTT_PPE_CFG_DECAP_NWIFI_OVERRIDE_EN_SET(
			*msg_word, cfg->decap_nwifi_override);
	HTT_PPE_CFG_IP_FRAG_OVERRIDE_EN_SET(
			*msg_word, cfg->ip_frag_override);

	pkt = htt_htc_pkt_alloc(htt_handle);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       htt_handle->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(htt_handle, pkt,
				     HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG,
				     htt_logger_bufp);

	/* On send failure, ownership of msg and pkt remains here; free both */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(htt_handle, pkt);
		return status;
	}

	dp_info("HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG sent");
	return status;
}
4835 #endif /* WLAN_SUPPORT_PPEDS */
4836 
4837 /**
4838  * dp_bk_pressure_stats_handler(): worker function to print back pressure
4839  *				   stats
4840  *
4841  * @context : argument to work function
4842  */
4843 static void dp_bk_pressure_stats_handler(void *context)
4844 {
4845 	struct dp_pdev *pdev = (struct dp_pdev *)context;
4846 	struct dp_soc_srngs_state *soc_srngs_state = NULL;
4847 	const char *ring_name;
4848 	int i;
4849 	struct dp_srng_ring_state *ring_state;
4850 	bool empty_flag;
4851 
4852 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4853 
4854 	/* Extract only first entry for printing in one work event */
4855 	if (pdev->bkp_stats.queue_depth &&
4856 	    !TAILQ_EMPTY(&pdev->bkp_stats.list)) {
4857 		soc_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
4858 		TAILQ_REMOVE(&pdev->bkp_stats.list, soc_srngs_state,
4859 			     list_elem);
4860 		pdev->bkp_stats.queue_depth--;
4861 	}
4862 
4863 	empty_flag = TAILQ_EMPTY(&pdev->bkp_stats.list);
4864 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4865 
4866 	if (soc_srngs_state) {
4867 		DP_PRINT_STATS("### BKP stats for seq_num %u START ###",
4868 			       soc_srngs_state->seq_num);
4869 		for (i = 0; i < soc_srngs_state->max_ring_id; i++) {
4870 			ring_state = &soc_srngs_state->ring_state[i];
4871 			ring_name = dp_srng_get_str_from_hal_ring_type
4872 						(ring_state->ring_type);
4873 			DP_PRINT_STATS("%s: SW:Head pointer = %d Tail Pointer = %d\n",
4874 				       ring_name,
4875 				       ring_state->sw_head,
4876 				       ring_state->sw_tail);
4877 
4878 			DP_PRINT_STATS("%s: HW:Head pointer = %d Tail Pointer = %d\n",
4879 				       ring_name,
4880 				       ring_state->hw_head,
4881 				       ring_state->hw_tail);
4882 		}
4883 
4884 		DP_PRINT_STATS("### BKP stats for seq_num %u COMPLETE ###",
4885 			       soc_srngs_state->seq_num);
4886 		qdf_mem_free(soc_srngs_state);
4887 	}
4888 	dp_print_napi_stats(pdev->soc);
4889 
4890 	/* Schedule work again if queue is not empty */
4891 	if (!empty_flag)
4892 		qdf_queue_work(0, pdev->bkp_stats.work_queue,
4893 			       &pdev->bkp_stats.work);
4894 }
4895 
4896 /*
4897  * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
4898  *				processing
4899  * @pdev: Datapath PDEV handle
4900  *
4901  */
4902 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev)
4903 {
4904 	struct dp_soc_srngs_state *ring_state, *ring_state_next;
4905 
4906 	if (!pdev->bkp_stats.work_queue)
4907 		return;
4908 
4909 	qdf_flush_workqueue(0, pdev->bkp_stats.work_queue);
4910 	qdf_destroy_workqueue(0, pdev->bkp_stats.work_queue);
4911 	qdf_flush_work(&pdev->bkp_stats.work);
4912 	qdf_disable_work(&pdev->bkp_stats.work);
4913 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
4914 	TAILQ_FOREACH_SAFE(ring_state, &pdev->bkp_stats.list,
4915 			   list_elem, ring_state_next) {
4916 		TAILQ_REMOVE(&pdev->bkp_stats.list, ring_state,
4917 			     list_elem);
4918 		qdf_mem_free(ring_state);
4919 	}
4920 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
4921 	qdf_spinlock_destroy(&pdev->bkp_stats.list_lock);
4922 }
4923 
4924 /*
4925  * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
4926  *				processing
4927  * @pdev: Datapath PDEV handle
4928  *
4929  * Return: QDF_STATUS_SUCCESS: Success
4930  *         QDF_STATUS_E_NOMEM: Error
4931  */
4932 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev)
4933 {
4934 	TAILQ_INIT(&pdev->bkp_stats.list);
4935 	pdev->bkp_stats.seq_num = 0;
4936 	pdev->bkp_stats.queue_depth = 0;
4937 
4938 	qdf_create_work(0, &pdev->bkp_stats.work,
4939 			dp_bk_pressure_stats_handler, pdev);
4940 
4941 	pdev->bkp_stats.work_queue =
4942 		qdf_alloc_unbound_workqueue("dp_bkp_work_queue");
4943 	if (!pdev->bkp_stats.work_queue)
4944 		goto fail;
4945 
4946 	qdf_spinlock_create(&pdev->bkp_stats.list_lock);
4947 	return QDF_STATUS_SUCCESS;
4948 
4949 fail:
4950 	dp_htt_alert("BKP stats attach failed");
4951 	qdf_flush_work(&pdev->bkp_stats.work);
4952 	qdf_disable_work(&pdev->bkp_stats.work);
4953 	return QDF_STATUS_E_FAILURE;
4954 }
4955 
4956 #ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_htt_umac_reset_send_setup_cmd() - Send the UMAC hang recovery
 *	prerequisite setup HTT message to the firmware
 * @soc: Data path SoC handle
 * @setup_params: MSI data and shared-memory address used for the recovery
 *	handshake
 *
 * Builds an HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP message
 * (T2H via MSI, H2T via polling) and hands it to HTC for transmission.
 *
 * Return: QDF_STATUS_SUCCESS when the HTT message was handed to HTC,
 *	   error status otherwise
 */
QDF_STATUS dp_htt_umac_reset_send_setup_cmd(
		struct dp_soc *soc,
		const struct dp_htt_umac_reset_setup_cmd_params *setup_params)
{
	struct htt_soc *htt_handle = soc->htt_handle;
	uint32_t len;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	QDF_STATUS status;
	uint8_t *htt_logger_bufp;
	struct dp_htt_htc_pkt *pkt;

	len = HTT_MSG_BUF_SIZE(
		HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES);

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(
		msg, HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES)) {
		dp_htt_err("Failed to expand head");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	/* Rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	qdf_mem_zero(msg_word,
		     HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES);

	/* Word 0: msg type plus T2H (MSI) and H2T (polling) signal methods */
	HTT_H2T_MSG_TYPE_SET(
		*msg_word,
		HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP);
	HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_T2H_MSG_METHOD_SET(
		*msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling);
	HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_H2T_MSG_METHOD_SET(
		*msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling);

	/* Word 1: MSI data the FW uses to signal the host */
	msg_word++;
	*msg_word = setup_params->msi_data;

	/* Word 2: size of the shared-memory handshake structure */
	msg_word++;
	*msg_word = sizeof(htt_umac_hang_recovery_msg_shmem_t);

	/* Words 3-4: shared-memory physical address, low then high */
	msg_word++;
	*msg_word = setup_params->shmem_addr_low;

	msg_word++;
	*msg_word = setup_params->shmem_addr_high;

	pkt = htt_htc_pkt_alloc(htt_handle);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       htt_handle->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(
			htt_handle, pkt,
			HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP,
			htt_logger_bufp);

	if (QDF_IS_STATUS_ERROR(status)) {
		/* on send failure both the nbuf and the HTC pkt are ours */
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(htt_handle, pkt);
		return status;
	}

	dp_info("HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP sent");
	return status;
}
5058 #endif
5059