xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision d94f0fb619d3da5ae22f9943f88d4634e2d28581)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <htt.h>
21 #include <hal_hw_headers.h>
22 #include <hal_api.h>
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "dp_htt.h"
30 #ifdef WIFI_MONITOR_SUPPORT
31 #include <dp_mon.h>
32 #endif
33 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
34 #include "cdp_txrx_cmn_struct.h"
35 
36 #ifdef FEATURE_PERPKT_INFO
37 #include "dp_ratetable.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef CONFIG_SAWF_DEF_QUEUES
41 #include <dp_sawf_htt.h>
42 #endif
43 
44 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
45 
46 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
47 
48 #define HTT_MSG_BUF_SIZE(msg_bytes) \
49 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
50 
51 #define HTT_PID_BIT_MASK 0x3
52 
53 #define DP_EXT_MSG_LENGTH 2048
54 #define HTT_HEADER_LEN 16
55 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
56 
57 #define HTT_SHIFT_UPPER_TIMESTAMP 32
58 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
59 #define HTT_BKP_STATS_MAX_QUEUE_DEPTH 16
60 
61 struct dp_htt_htc_pkt *
62 htt_htc_pkt_alloc(struct htt_soc *soc)
63 {
64 	struct dp_htt_htc_pkt_union *pkt = NULL;
65 
66 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
67 	if (soc->htt_htc_pkt_freelist) {
68 		pkt = soc->htt_htc_pkt_freelist;
69 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
70 	}
71 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
72 
73 	if (!pkt)
74 		pkt = qdf_mem_malloc(sizeof(*pkt));
75 
76 	if (!pkt)
77 		return NULL;
78 
79 	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
80 
81 	return &pkt->u.pkt; /* not actually a dereference */
82 }
83 
84 qdf_export_symbol(htt_htc_pkt_alloc);
85 
86 void
87 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
88 {
89 	struct dp_htt_htc_pkt_union *u_pkt =
90 		(struct dp_htt_htc_pkt_union *)pkt;
91 
92 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
93 	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
94 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
95 	soc->htt_htc_pkt_freelist = u_pkt;
96 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
97 }
98 
99 qdf_export_symbol(htt_htc_pkt_free);
100 
101 /*
102  * htt_htc_pkt_pool_free() - Free HTC packet pool
103  * @htt_soc:	HTT SOC handle
104  */
105 void
106 htt_htc_pkt_pool_free(struct htt_soc *soc)
107 {
108 	struct dp_htt_htc_pkt_union *pkt, *next;
109 	pkt = soc->htt_htc_pkt_freelist;
110 	while (pkt) {
111 		next = pkt->u.next;
112 		qdf_mem_free(pkt);
113 		pkt = next;
114 	}
115 	soc->htt_htc_pkt_freelist = NULL;
116 }
117 
118 
119 #ifndef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
120 
121 /*
122  * htt_htc_misc_pkt_list_trim() - trim misc list
123  * @htt_soc: HTT SOC handle
124  * @level: max no. of pkts in list
125  */
126 static void
127 htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
128 {
129 	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
130 	int i = 0;
131 	qdf_nbuf_t netbuf;
132 
133 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
134 	pkt = soc->htt_htc_pkt_misclist;
135 	while (pkt) {
136 		next = pkt->u.next;
137 		/* trim the out grown list*/
138 		if (++i > level) {
139 			netbuf =
140 				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
141 			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
142 			qdf_nbuf_free(netbuf);
143 			qdf_mem_free(pkt);
144 			pkt = NULL;
145 			if (prev)
146 				prev->u.next = NULL;
147 		}
148 		prev = pkt;
149 		pkt = next;
150 	}
151 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
152 }
153 
154 /*
155  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
156  * @htt_soc:	HTT SOC handle
157  * @dp_htt_htc_pkt: pkt to be added to list
158  */
159 void
160 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
161 {
162 	struct dp_htt_htc_pkt_union *u_pkt =
163 				(struct dp_htt_htc_pkt_union *)pkt;
164 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
165 							pkt->htc_pkt.Endpoint)
166 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
167 
168 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
169 	if (soc->htt_htc_pkt_misclist) {
170 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
171 		soc->htt_htc_pkt_misclist = u_pkt;
172 	} else {
173 		soc->htt_htc_pkt_misclist = u_pkt;
174 	}
175 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
176 
177 	/* only ce pipe size + tx_queue_depth could possibly be in use
178 	 * free older packets in the misclist
179 	 */
180 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
181 }
182 
183 qdf_export_symbol(htt_htc_misc_pkt_list_add);
184 #endif  /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
185 
186 /*
187  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
188  * @htt_soc:	HTT SOC handle
189  */
190 static void
191 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
192 {
193 	struct dp_htt_htc_pkt_union *pkt, *next;
194 	qdf_nbuf_t netbuf;
195 
196 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
197 	pkt = soc->htt_htc_pkt_misclist;
198 
199 	while (pkt) {
200 		next = pkt->u.next;
201 		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
202 		    HTC_PACKET_MAGIC_COOKIE) {
203 			pkt = next;
204 			soc->stats.skip_count++;
205 			continue;
206 		}
207 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
208 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
209 
210 		soc->stats.htc_pkt_free++;
211 		dp_htt_info("%pK: Pkt free count %d",
212 			    soc->dp_soc, soc->stats.htc_pkt_free);
213 
214 		qdf_nbuf_free(netbuf);
215 		qdf_mem_free(pkt);
216 		pkt = next;
217 	}
218 	soc->htt_htc_pkt_misclist = NULL;
219 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
220 	dp_info("HTC Packets, fail count = %d, skip count = %d",
221 		soc->stats.fail_count, soc->stats.skip_count);
222 }
223 
/*
 * htt_t2h_mac_addr_deswizzle() - Undo per-u32 byte swizzling of a MAC addr
 * @tgt_mac_addr:	MAC address bytes as carried in the target message
 * @buffer:		Output scratch buffer (used on big-endian hosts only)
 *
 * Return: pointer to the MAC address bytes in host order
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The target->host upload byte-swaps each u_int32_t element so
	 * that integer fields arrive correctly on an opposite-endian host.
	 * For a byte-array field such as the MAC address this swap
	 * scrambles the bytes, so reverse each 4-byte group here.
	 */
	int i;

	for (i = 0; i < 4; i++)
		buffer[i] = tgt_mac_addr[3 - i];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * Host and target endianness agree; the bytes in the message
	 * buffer are already in the right order.
	 */
	return tgt_mac_addr;
#endif
}
257 
/*
 * dp_htt_h2t_send_complete_free_netbuf() - Send-done callback that simply
 *	frees the netbuf which carried the H2T message
 * @soc:	SOC handle (unused here)
 * @status:	Completion status (unused; the buffer is freed regardless)
 * @netbuf:	HTT message buffer to free
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
270 
271 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
272 /*
273  * dp_htt_h2t_send_complete() - H2T completion handler
274  * @context:	Opaque context (HTT SOC handle)
275  * @htc_pkt:	HTC packet
276  */
277 static void
278 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
279 {
280 	struct htt_soc *soc =  (struct htt_soc *) context;
281 	struct dp_htt_htc_pkt *htt_pkt;
282 	qdf_nbuf_t netbuf;
283 
284 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
285 
286 	/* process (free or keep) the netbuf that held the message */
287 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
288 	/*
289 	 * adf sendcomplete is required for windows only
290 	 */
291 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
292 	/* free the htt_htc_pkt / HTC_PACKET object */
293 	qdf_nbuf_free(netbuf);
294 	htt_htc_pkt_free(soc, htt_pkt);
295 }
296 
297 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
298 
299 /*
300  *  * dp_htt_h2t_send_complete() - H2T completion handler
301  *   * @context:    Opaque context (HTT SOC handle)
302  *    * @htc_pkt:    HTC packet
303  *     */
304 static void
305 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
306 {
307 	void (*send_complete_part2)(
308 	     void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
309 	struct htt_soc *soc =  (struct htt_soc *) context;
310 	struct dp_htt_htc_pkt *htt_pkt;
311 	qdf_nbuf_t netbuf;
312 
313 	send_complete_part2 = htc_pkt->pPktContext;
314 
315 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
316 
317 	/* process (free or keep) the netbuf that held the message */
318 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
319 	/*
320 	 * adf sendcomplete is required for windows only
321 	*/
322 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
323 	if (send_complete_part2){
324 		send_complete_part2(
325 		    htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
326 	}
327 	/* free the htt_htc_pkt / HTC_PACKET object */
328 	htt_htc_pkt_free(soc, htt_pkt);
329 }
330 
331 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
332 
333 /*
334  * dp_htt_h2t_add_tcl_metadata_ver_v1() - Add tcl_metadata verion V1
335  * @htt_soc:	HTT SOC handle
336  * @msg:	Pointer to nbuf
337  *
338  * Return: 0 on success; error code on failure
339  */
340 static int dp_htt_h2t_add_tcl_metadata_ver_v1(struct htt_soc *soc,
341 					      qdf_nbuf_t *msg)
342 {
343 	uint32_t *msg_word;
344 
345 	*msg = qdf_nbuf_alloc(
346 		soc->osdev,
347 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
348 		/* reserve room for the HTC header */
349 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
350 	if (!*msg)
351 		return QDF_STATUS_E_NOMEM;
352 
353 	/*
354 	 * Set the length of the message.
355 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
356 	 * separately during the below call to qdf_nbuf_push_head.
357 	 * The contribution from the HTC header is added separately inside HTC.
358 	 */
359 	if (!qdf_nbuf_put_tail(*msg, HTT_VER_REQ_BYTES)) {
360 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
361 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
362 			  __func__);
363 		return QDF_STATUS_E_FAILURE;
364 	}
365 
366 	/* fill in the message contents */
367 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
368 
369 	/* rewind beyond alignment pad to get to the HTC header reserved area */
370 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
371 
372 	*msg_word = 0;
373 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
374 
375 	return QDF_STATUS_SUCCESS;
376 }
377 
378 #ifdef QCA_DP_TX_FW_METADATA_V2
379 /*
380  * dp_htt_h2t_add_tcl_metadata_ver_v2() - Add tcl_metadata verion V2
381  * @htt_soc:	HTT SOC handle
382  * @msg:	Pointer to nbuf
383  *
384  * Return: 0 on success; error code on failure
385  */
386 static int dp_htt_h2t_add_tcl_metadata_ver_v2(struct htt_soc *soc,
387 					      qdf_nbuf_t *msg)
388 {
389 	uint32_t *msg_word;
390 
391 	*msg = qdf_nbuf_alloc(
392 		soc->osdev,
393 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ),
394 		/* reserve room for the HTC header */
395 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
396 	if (!*msg)
397 		return QDF_STATUS_E_NOMEM;
398 
399 	/*
400 	 * Set the length of the message.
401 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
402 	 * separately during the below call to qdf_nbuf_push_head.
403 	 * The contribution from the HTC header is added separately inside HTC.
404 	 */
405 	if (!qdf_nbuf_put_tail(*msg,
406 			       HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ)) {
407 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
408 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
409 			  __func__);
410 		return QDF_STATUS_E_FAILURE;
411 	}
412 
413 	/* fill in the message contents */
414 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
415 
416 	/* rewind beyond alignment pad to get to the HTC header reserved area */
417 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
418 
419 	*msg_word = 0;
420 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
421 
422 	/* word 1 */
423 	msg_word++;
424 	*msg_word = 0;
425 	HTT_OPTION_TLV_TAG_SET(*msg_word, HTT_OPTION_TLV_TAG_TCL_METADATA_VER);
426 	HTT_OPTION_TLV_LENGTH_SET(*msg_word, HTT_TCL_METADATA_VER_SZ);
427 	HTT_OPTION_TLV_TCL_METADATA_VER_SET(*msg_word,
428 					    HTT_OPTION_TLV_TCL_METADATA_V2);
429 
430 	return QDF_STATUS_SUCCESS;
431 }
432 
433 /*
434  * dp_htt_h2t_add_tcl_metadata_ver() - Add tcl_metadata verion
435  * @htt_soc:	HTT SOC handle
436  * @msg:	Pointer to nbuf
437  *
438  * Return: 0 on success; error code on failure
439  */
440 static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
441 {
442 	/* Use tcl_metadata_v1 when NSS offload is enabled */
443 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->dp_soc->wlan_cfg_ctx))
444 		return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
445 	else
446 		return dp_htt_h2t_add_tcl_metadata_ver_v2(soc, msg);
447 }
448 #else
/* QCA_DP_TX_FW_METADATA_V2 not set: only the V1 tcl_metadata layout exists */
static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
{
	return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
}
453 #endif
454 
/*
 * htt_h2t_ver_req_msg() - Send HTT version request message to target
 * @soc:	HTT SOC handle
 *
 * Builds the VERSION_REQ payload, wraps it in an HTC packet and hands it
 * to HTC; on send failure the message nbuf and the packet wrapper are
 * released here.
 *
 * Return: QDF_STATUS_SUCCESS on success; error code on failure
 */
static int htt_h2t_ver_req_msg(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg = NULL;
	QDF_STATUS status;

	/* build the VERSION_REQ payload (V1 or V2 tcl_metadata) */
	status = dp_htt_h2t_add_tcl_metadata_ver(soc, &msg);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}
	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
		qdf_nbuf_len(msg), soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RTPM_PUT_RC);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
				     NULL);

	/* on send failure the buffers are still ours to release */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
494 
495 /*
496  * htt_srng_setup() - Send SRNG setup message to target
497  * @htt_soc:	HTT SOC handle
498  * @mac_id:	MAC Id
499  * @hal_srng:	Opaque HAL SRNG pointer
500  * @hal_ring_type:	SRNG ring type
501  *
502  * Return: 0 on success; error code on failure
503  */
504 int htt_srng_setup(struct htt_soc *soc, int mac_id,
505 		   hal_ring_handle_t hal_ring_hdl,
506 		   int hal_ring_type)
507 {
508 	struct dp_htt_htc_pkt *pkt;
509 	qdf_nbuf_t htt_msg;
510 	uint32_t *msg_word;
511 	struct hal_srng_params srng_params;
512 	qdf_dma_addr_t hp_addr, tp_addr;
513 	uint32_t ring_entry_size =
514 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
515 	int htt_ring_type, htt_ring_id;
516 	uint8_t *htt_logger_bufp;
517 	int target_pdev_id;
518 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
519 	QDF_STATUS status;
520 
521 	/* Sizes should be set in 4-byte words */
522 	ring_entry_size = ring_entry_size >> 2;
523 
524 	htt_msg = qdf_nbuf_alloc(soc->osdev,
525 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
526 		/* reserve room for the HTC header */
527 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
528 	if (!htt_msg) {
529 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
530 		goto fail0;
531 	}
532 
533 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
534 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
535 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
536 
537 	switch (hal_ring_type) {
538 	case RXDMA_BUF:
539 #ifdef QCA_HOST2FW_RXBUF_RING
540 		if (srng_params.ring_id ==
541 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
542 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
543 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
544 			htt_ring_type = HTT_SW_TO_SW_RING;
545 #ifdef IPA_OFFLOAD
546 		} else if (srng_params.ring_id ==
547 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 +
548 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
549 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
550 			htt_ring_type = HTT_SW_TO_SW_RING;
551 #endif
552 #else
553 		if (srng_params.ring_id ==
554 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
555 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
556 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
557 			htt_ring_type = HTT_SW_TO_HW_RING;
558 #endif
559 		} else if (srng_params.ring_id ==
560 #ifdef IPA_OFFLOAD
561 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
562 #else
563 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
564 #endif
565 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
566 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
567 			htt_ring_type = HTT_SW_TO_HW_RING;
568 		} else {
569 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
570 				   "%s: Ring %d currently not supported",
571 				   __func__, srng_params.ring_id);
572 			goto fail1;
573 		}
574 
575 		break;
576 	case RXDMA_MONITOR_BUF:
577 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
578 							 RXDMA_MONITOR_BUF);
579 		htt_ring_type = HTT_SW_TO_HW_RING;
580 		break;
581 	case RXDMA_MONITOR_STATUS:
582 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
583 		htt_ring_type = HTT_SW_TO_HW_RING;
584 		break;
585 	case RXDMA_MONITOR_DST:
586 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
587 							 RXDMA_MONITOR_DST);
588 		htt_ring_type = HTT_HW_TO_SW_RING;
589 		break;
590 	case RXDMA_MONITOR_DESC:
591 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
592 		htt_ring_type = HTT_SW_TO_HW_RING;
593 		break;
594 	case RXDMA_DST:
595 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
596 		htt_ring_type = HTT_HW_TO_SW_RING;
597 		break;
598 	case TX_MONITOR_BUF:
599 		htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
600 		htt_ring_type = HTT_SW_TO_HW_RING;
601 		break;
602 	case TX_MONITOR_DST:
603 		htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
604 		htt_ring_type = HTT_HW_TO_SW_RING;
605 		break;
606 
607 	default:
608 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
609 			"%s: Ring currently not supported", __func__);
610 			goto fail1;
611 	}
612 
613 	dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
614 		hal_ring_type, srng_params.ring_id, htt_ring_id,
615 		(uint64_t)hp_addr,
616 		(uint64_t)tp_addr);
617 	/*
618 	 * Set the length of the message.
619 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
620 	 * separately during the below call to qdf_nbuf_push_head.
621 	 * The contribution from the HTC header is added separately inside HTC.
622 	 */
623 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
624 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
625 			"%s: Failed to expand head for SRING_SETUP msg",
626 			__func__);
627 		return QDF_STATUS_E_FAILURE;
628 	}
629 
630 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
631 
632 	/* rewind beyond alignment pad to get to the HTC header reserved area */
633 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
634 
635 	/* word 0 */
636 	*msg_word = 0;
637 	htt_logger_bufp = (uint8_t *)msg_word;
638 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
639 	target_pdev_id =
640 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
641 
642 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
643 			(htt_ring_type == HTT_HW_TO_SW_RING))
644 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
645 	else
646 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
647 
648 	dp_info("mac_id %d", mac_id);
649 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
650 	/* TODO: Discuss with FW on changing this to unique ID and using
651 	 * htt_ring_type to send the type of ring
652 	 */
653 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
654 
655 	/* word 1 */
656 	msg_word++;
657 	*msg_word = 0;
658 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
659 		srng_params.ring_base_paddr & 0xffffffff);
660 
661 	/* word 2 */
662 	msg_word++;
663 	*msg_word = 0;
664 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
665 		(uint64_t)srng_params.ring_base_paddr >> 32);
666 
667 	/* word 3 */
668 	msg_word++;
669 	*msg_word = 0;
670 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
671 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
672 		(ring_entry_size * srng_params.num_entries));
673 	dp_info("entry_size %d", ring_entry_size);
674 	dp_info("num_entries %d", srng_params.num_entries);
675 	dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
676 	if (htt_ring_type == HTT_SW_TO_HW_RING)
677 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
678 						*msg_word, 1);
679 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
680 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
681 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
682 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
683 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
684 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
685 
686 	/* word 4 */
687 	msg_word++;
688 	*msg_word = 0;
689 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
690 		hp_addr & 0xffffffff);
691 
692 	/* word 5 */
693 	msg_word++;
694 	*msg_word = 0;
695 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
696 		(uint64_t)hp_addr >> 32);
697 
698 	/* word 6 */
699 	msg_word++;
700 	*msg_word = 0;
701 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
702 		tp_addr & 0xffffffff);
703 
704 	/* word 7 */
705 	msg_word++;
706 	*msg_word = 0;
707 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
708 		(uint64_t)tp_addr >> 32);
709 
710 	/* word 8 */
711 	msg_word++;
712 	*msg_word = 0;
713 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
714 		srng_params.msi_addr & 0xffffffff);
715 
716 	/* word 9 */
717 	msg_word++;
718 	*msg_word = 0;
719 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
720 		(uint64_t)(srng_params.msi_addr) >> 32);
721 
722 	/* word 10 */
723 	msg_word++;
724 	*msg_word = 0;
725 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
726 		qdf_cpu_to_le32(srng_params.msi_data));
727 
728 	/* word 11 */
729 	msg_word++;
730 	*msg_word = 0;
731 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
732 		srng_params.intr_batch_cntr_thres_entries *
733 		ring_entry_size);
734 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
735 		srng_params.intr_timer_thres_us >> 3);
736 
737 	/* word 12 */
738 	msg_word++;
739 	*msg_word = 0;
740 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
741 		/* TODO: Setting low threshold to 1/8th of ring size - see
742 		 * if this needs to be configurable
743 		 */
744 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
745 			srng_params.low_threshold);
746 	}
747 	/* "response_required" field should be set if a HTT response message is
748 	 * required after setting up the ring.
749 	 */
750 	pkt = htt_htc_pkt_alloc(soc);
751 	if (!pkt) {
752 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
753 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
754 		goto fail1;
755 	}
756 
757 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
758 
759 	SET_HTC_PACKET_INFO_TX(
760 		&pkt->htc_pkt,
761 		dp_htt_h2t_send_complete_free_netbuf,
762 		qdf_nbuf_data(htt_msg),
763 		qdf_nbuf_len(htt_msg),
764 		soc->htc_endpoint,
765 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
766 
767 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
768 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
769 				     htt_logger_bufp);
770 
771 	if (status != QDF_STATUS_SUCCESS) {
772 		qdf_nbuf_free(htt_msg);
773 		htt_htc_pkt_free(soc, pkt);
774 	}
775 
776 	return status;
777 
778 fail1:
779 	qdf_nbuf_free(htt_msg);
780 fail0:
781 	return QDF_STATUS_E_FAILURE;
782 }
783 
784 qdf_export_symbol(htt_srng_setup);
785 
786 #ifdef QCA_SUPPORT_FULL_MON
787 /**
788  * htt_h2t_full_mon_cfg() - Send full monitor configuarion msg to FW
789  *
790  * @htt_soc: HTT Soc handle
791  * @pdev_id: Radio id
792  * @dp_full_mon_config: enabled/disable configuration
793  *
794  * Return: Success when HTT message is sent, error on failure
795  */
796 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
797 			 uint8_t pdev_id,
798 			 enum dp_full_mon_config config)
799 {
800 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
801 	struct dp_htt_htc_pkt *pkt;
802 	qdf_nbuf_t htt_msg;
803 	uint32_t *msg_word;
804 	uint8_t *htt_logger_bufp;
805 
806 	htt_msg = qdf_nbuf_alloc(soc->osdev,
807 				 HTT_MSG_BUF_SIZE(
808 				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
809 				 /* reserve room for the HTC header */
810 				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
811 				 4,
812 				 TRUE);
813 	if (!htt_msg)
814 		return QDF_STATUS_E_FAILURE;
815 
816 	/*
817 	 * Set the length of the message.
818 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
819 	 * separately during the below call to qdf_nbuf_push_head.
820 	 * The contribution from the HTC header is added separately inside HTC.
821 	 */
822 	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
823 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
824 			  "%s: Failed to expand head for RX Ring Cfg msg",
825 			  __func__);
826 		goto fail1;
827 	}
828 
829 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
830 
831 	/* rewind beyond alignment pad to get to the HTC header reserved area */
832 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
833 
834 	/* word 0 */
835 	*msg_word = 0;
836 	htt_logger_bufp = (uint8_t *)msg_word;
837 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
838 	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
839 			*msg_word, DP_SW2HW_MACID(pdev_id));
840 
841 	msg_word++;
842 	*msg_word = 0;
843 	/* word 1 */
844 	if (config == DP_FULL_MON_ENABLE) {
845 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
846 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
847 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
848 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
849 	} else if (config == DP_FULL_MON_DISABLE) {
850 		/* As per MAC team's suggestion, While disbaling full monitor
851 		 * mode, Set 'en' bit to true in full monitor mode register.
852 		 */
853 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
854 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
855 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
856 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
857 	}
858 
859 	pkt = htt_htc_pkt_alloc(soc);
860 	if (!pkt) {
861 		qdf_err("HTC packet allocation failed");
862 		goto fail1;
863 	}
864 
865 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
866 
867 	SET_HTC_PACKET_INFO_TX(
868 		&pkt->htc_pkt,
869 		dp_htt_h2t_send_complete_free_netbuf,
870 		qdf_nbuf_data(htt_msg),
871 		qdf_nbuf_len(htt_msg),
872 		soc->htc_endpoint,
873 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
874 
875 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
876 	qdf_debug("config: %d", config);
877 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
878 			    htt_logger_bufp);
879 	return QDF_STATUS_SUCCESS;
880 fail1:
881 	qdf_nbuf_free(htt_msg);
882 	return QDF_STATUS_E_FAILURE;
883 }
884 
885 qdf_export_symbol(htt_h2t_full_mon_cfg);
886 #else
/* Stub when QCA_SUPPORT_FULL_MON is not compiled in: nothing to configure,
 * report success so callers proceed normally.
 */
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	return 0;
}
893 
894 qdf_export_symbol(htt_h2t_full_mon_cfg);
895 #endif
896 
897 #ifdef QCA_UNDECODED_METADATA_SUPPORT
/*
 * dp_mon_rx_enable_phy_errors() - Fill the PHY error filter fields
 *	(per the in-code markers: current word plus words 12-14) of the
 *	RX ring selection cfg message
 * @msg_word: cursor into the message buffer at the PHY error filter word
 * @htt_tlv_filter: filter settings; PHY error fields applied only when
 *	phy_err_filter_valid is set, otherwise words are left zeroed
 *
 * NOTE(review): msg_word is passed by value, so the increments here do not
 * advance the caller's cursor - confirm callers account for words 12-14
 * themselves after this returns.
 */
static inline void
dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
	if (htt_tlv_filter->phy_err_filter_valid) {
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_SET
			(*msg_word, htt_tlv_filter->fp_phy_err);
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_SRC_SET
			(*msg_word, htt_tlv_filter->fp_phy_err_buf_src);
		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_DEST_SET
			(*msg_word, htt_tlv_filter->fp_phy_err_buf_dest);

		/* word 12*/
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_SET
			(*msg_word, htt_tlv_filter->phy_err_mask);

		/* word 13*/
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_CONT_SET
			(*msg_word, htt_tlv_filter->phy_err_mask_cont);

		/* word 14*/
		msg_word++;
		*msg_word = 0;
	} else {
		/* word 14*/
		msg_word += 3;
		*msg_word = 0;
	}
}
931 #else
932 static inline void
933 dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
934 			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
935 {
936 	/* word 14*/
937 	msg_word += 3;
938 	*msg_word = 0;
939 }
940 #endif
941 
942 /*
943  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
944  * config message to target
945  * @htt_soc:	HTT SOC handle
946  * @pdev_id:	WIN- PDEV Id, MCL- mac id
947  * @hal_srng:	Opaque HAL SRNG pointer
948  * @hal_ring_type:	SRNG ring type
949  * @ring_buf_size:	SRNG buffer size
950  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
951  * Return: 0 on success; error code on failure
952  */
953 int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
954 			hal_ring_handle_t hal_ring_hdl,
955 			int hal_ring_type, int ring_buf_size,
956 			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
957 {
958 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
959 	struct dp_htt_htc_pkt *pkt;
960 	qdf_nbuf_t htt_msg;
961 	uint32_t *msg_word;
962 	uint32_t *msg_word_data;
963 	struct hal_srng_params srng_params;
964 	uint32_t htt_ring_type, htt_ring_id;
965 	uint32_t tlv_filter;
966 	uint8_t *htt_logger_bufp;
967 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
968 	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
969 	int target_pdev_id;
970 	QDF_STATUS status;
971 
972 	htt_msg = qdf_nbuf_alloc(soc->osdev,
973 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
974 	/* reserve room for the HTC header */
975 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
976 	if (!htt_msg) {
977 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
978 		goto fail0;
979 	}
980 
981 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
982 
983 	switch (hal_ring_type) {
984 	case RXDMA_BUF:
985 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
986 		htt_ring_type = HTT_SW_TO_HW_RING;
987 		break;
988 	case RXDMA_MONITOR_BUF:
989 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
990 							 RXDMA_MONITOR_BUF);
991 		htt_ring_type = HTT_SW_TO_HW_RING;
992 		break;
993 	case RXDMA_MONITOR_STATUS:
994 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
995 		htt_ring_type = HTT_SW_TO_HW_RING;
996 		break;
997 	case RXDMA_MONITOR_DST:
998 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
999 							 RXDMA_MONITOR_DST);
1000 		htt_ring_type = HTT_HW_TO_SW_RING;
1001 		break;
1002 	case RXDMA_MONITOR_DESC:
1003 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
1004 		htt_ring_type = HTT_SW_TO_HW_RING;
1005 		break;
1006 	case RXDMA_DST:
1007 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
1008 		htt_ring_type = HTT_HW_TO_SW_RING;
1009 		break;
1010 
1011 	default:
1012 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1013 			"%s: Ring currently not supported", __func__);
1014 		goto fail1;
1015 	}
1016 
1017 	dp_info("ring_type %d ring_id %d htt_ring_id %d",
1018 		hal_ring_type, srng_params.ring_id, htt_ring_id);
1019 
1020 	/*
1021 	 * Set the length of the message.
1022 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1023 	 * separately during the below call to qdf_nbuf_push_head.
1024 	 * The contribution from the HTC header is added separately inside HTC.
1025 	 */
1026 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
1027 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1028 			"%s: Failed to expand head for RX Ring Cfg msg",
1029 			__func__);
1030 		goto fail1; /* failure */
1031 	}
1032 
1033 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1034 
1035 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1036 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1037 
1038 	/* word 0 */
1039 	htt_logger_bufp = (uint8_t *)msg_word;
1040 	*msg_word = 0;
1041 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
1042 
1043 	if (htt_tlv_filter->rx_mon_global_en)
1044 		*msg_word  |= (1 << RXMON_GLOBAL_EN_SHIFT);
1045 
1046 	/*
1047 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
1048 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
1049 	 */
1050 	target_pdev_id =
1051 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);
1052 
1053 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
1054 			htt_ring_type == HTT_SW_TO_HW_RING)
1055 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
1056 						      target_pdev_id);
1057 
1058 	/* TODO: Discuss with FW on changing this to unique ID and using
1059 	 * htt_ring_type to send the type of ring
1060 	 */
1061 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
1062 
1063 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
1064 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
1065 
1066 	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
1067 						htt_tlv_filter->offset_valid);
1068 
1069 	if (mon_drop_th > 0)
1070 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1071 								   1);
1072 	else
1073 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1074 								   0);
1075 
1076 	/* word 1 */
1077 	msg_word++;
1078 	*msg_word = 0;
1079 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
1080 		ring_buf_size);
1081 
1082 	dp_mon_rx_packet_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
1083 	dp_rx_mon_enable(soc->dp_soc, msg_word, htt_tlv_filter);
1084 
1085 	/* word 2 */
1086 	msg_word++;
1087 	*msg_word = 0;
1088 
1089 	if (htt_tlv_filter->enable_fp) {
1090 		/* TYPE: MGMT */
1091 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1092 			FP, MGMT, 0000,
1093 			(htt_tlv_filter->fp_mgmt_filter &
1094 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1095 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1096 			FP, MGMT, 0001,
1097 			(htt_tlv_filter->fp_mgmt_filter &
1098 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1099 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1100 			FP, MGMT, 0010,
1101 			(htt_tlv_filter->fp_mgmt_filter &
1102 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1103 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1104 			FP, MGMT, 0011,
1105 			(htt_tlv_filter->fp_mgmt_filter &
1106 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1107 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1108 			FP, MGMT, 0100,
1109 			(htt_tlv_filter->fp_mgmt_filter &
1110 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1111 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1112 			FP, MGMT, 0101,
1113 			(htt_tlv_filter->fp_mgmt_filter &
1114 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1115 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1116 			FP, MGMT, 0110,
1117 			(htt_tlv_filter->fp_mgmt_filter &
1118 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1119 		/* reserved */
1120 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
1121 			MGMT, 0111,
1122 			(htt_tlv_filter->fp_mgmt_filter &
1123 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1124 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1125 			FP, MGMT, 1000,
1126 			(htt_tlv_filter->fp_mgmt_filter &
1127 			FILTER_MGMT_BEACON) ? 1 : 0);
1128 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1129 			FP, MGMT, 1001,
1130 			(htt_tlv_filter->fp_mgmt_filter &
1131 			FILTER_MGMT_ATIM) ? 1 : 0);
1132 	}
1133 
1134 	if (htt_tlv_filter->enable_md) {
1135 			/* TYPE: MGMT */
1136 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1137 			MD, MGMT, 0000,
1138 			(htt_tlv_filter->md_mgmt_filter &
1139 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1140 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1141 			MD, MGMT, 0001,
1142 			(htt_tlv_filter->md_mgmt_filter &
1143 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1144 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1145 			MD, MGMT, 0010,
1146 			(htt_tlv_filter->md_mgmt_filter &
1147 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1148 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1149 			MD, MGMT, 0011,
1150 			(htt_tlv_filter->md_mgmt_filter &
1151 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1152 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1153 			MD, MGMT, 0100,
1154 			(htt_tlv_filter->md_mgmt_filter &
1155 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1156 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1157 			MD, MGMT, 0101,
1158 			(htt_tlv_filter->md_mgmt_filter &
1159 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1160 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1161 			MD, MGMT, 0110,
1162 			(htt_tlv_filter->md_mgmt_filter &
1163 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1164 		/* reserved */
1165 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1166 			MGMT, 0111,
1167 			(htt_tlv_filter->md_mgmt_filter &
1168 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1169 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1170 			MD, MGMT, 1000,
1171 			(htt_tlv_filter->md_mgmt_filter &
1172 			FILTER_MGMT_BEACON) ? 1 : 0);
1173 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1174 			MD, MGMT, 1001,
1175 			(htt_tlv_filter->md_mgmt_filter &
1176 			FILTER_MGMT_ATIM) ? 1 : 0);
1177 	}
1178 
1179 	if (htt_tlv_filter->enable_mo) {
1180 		/* TYPE: MGMT */
1181 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1182 			MO, MGMT, 0000,
1183 			(htt_tlv_filter->mo_mgmt_filter &
1184 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1185 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1186 			MO, MGMT, 0001,
1187 			(htt_tlv_filter->mo_mgmt_filter &
1188 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1189 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1190 			MO, MGMT, 0010,
1191 			(htt_tlv_filter->mo_mgmt_filter &
1192 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1193 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1194 			MO, MGMT, 0011,
1195 			(htt_tlv_filter->mo_mgmt_filter &
1196 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1197 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1198 			MO, MGMT, 0100,
1199 			(htt_tlv_filter->mo_mgmt_filter &
1200 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1201 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1202 			MO, MGMT, 0101,
1203 			(htt_tlv_filter->mo_mgmt_filter &
1204 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1205 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1206 			MO, MGMT, 0110,
1207 			(htt_tlv_filter->mo_mgmt_filter &
1208 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1209 		/* reserved */
1210 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1211 			MGMT, 0111,
1212 			(htt_tlv_filter->mo_mgmt_filter &
1213 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1214 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1215 			MO, MGMT, 1000,
1216 			(htt_tlv_filter->mo_mgmt_filter &
1217 			FILTER_MGMT_BEACON) ? 1 : 0);
1218 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1219 			MO, MGMT, 1001,
1220 			(htt_tlv_filter->mo_mgmt_filter &
1221 			FILTER_MGMT_ATIM) ? 1 : 0);
1222 	}
1223 
1224 	/* word 3 */
1225 	msg_word++;
1226 	*msg_word = 0;
1227 
1228 	if (htt_tlv_filter->enable_fp) {
1229 		/* TYPE: MGMT */
1230 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1231 			FP, MGMT, 1010,
1232 			(htt_tlv_filter->fp_mgmt_filter &
1233 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1234 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1235 			FP, MGMT, 1011,
1236 			(htt_tlv_filter->fp_mgmt_filter &
1237 			FILTER_MGMT_AUTH) ? 1 : 0);
1238 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1239 			FP, MGMT, 1100,
1240 			(htt_tlv_filter->fp_mgmt_filter &
1241 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1242 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1243 			FP, MGMT, 1101,
1244 			(htt_tlv_filter->fp_mgmt_filter &
1245 			FILTER_MGMT_ACTION) ? 1 : 0);
1246 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1247 			FP, MGMT, 1110,
1248 			(htt_tlv_filter->fp_mgmt_filter &
1249 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1250 		/* reserved*/
1251 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1252 			MGMT, 1111,
1253 			(htt_tlv_filter->fp_mgmt_filter &
1254 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1255 	}
1256 
1257 	if (htt_tlv_filter->enable_md) {
1258 			/* TYPE: MGMT */
1259 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1260 			MD, MGMT, 1010,
1261 			(htt_tlv_filter->md_mgmt_filter &
1262 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1263 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1264 			MD, MGMT, 1011,
1265 			(htt_tlv_filter->md_mgmt_filter &
1266 			FILTER_MGMT_AUTH) ? 1 : 0);
1267 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1268 			MD, MGMT, 1100,
1269 			(htt_tlv_filter->md_mgmt_filter &
1270 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1271 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1272 			MD, MGMT, 1101,
1273 			(htt_tlv_filter->md_mgmt_filter &
1274 			FILTER_MGMT_ACTION) ? 1 : 0);
1275 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1276 			MD, MGMT, 1110,
1277 			(htt_tlv_filter->md_mgmt_filter &
1278 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1279 	}
1280 
1281 	if (htt_tlv_filter->enable_mo) {
1282 		/* TYPE: MGMT */
1283 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1284 			MO, MGMT, 1010,
1285 			(htt_tlv_filter->mo_mgmt_filter &
1286 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1287 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1288 			MO, MGMT, 1011,
1289 			(htt_tlv_filter->mo_mgmt_filter &
1290 			FILTER_MGMT_AUTH) ? 1 : 0);
1291 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1292 			MO, MGMT, 1100,
1293 			(htt_tlv_filter->mo_mgmt_filter &
1294 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1295 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1296 			MO, MGMT, 1101,
1297 			(htt_tlv_filter->mo_mgmt_filter &
1298 			FILTER_MGMT_ACTION) ? 1 : 0);
1299 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1300 			MO, MGMT, 1110,
1301 			(htt_tlv_filter->mo_mgmt_filter &
1302 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1303 		/* reserved*/
1304 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1305 			MGMT, 1111,
1306 			(htt_tlv_filter->mo_mgmt_filter &
1307 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1308 	}
1309 
1310 	/* word 4 */
1311 	msg_word++;
1312 	*msg_word = 0;
1313 
1314 	if (htt_tlv_filter->enable_fp) {
1315 		/* TYPE: CTRL */
1316 		/* reserved */
1317 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1318 			CTRL, 0000,
1319 			(htt_tlv_filter->fp_ctrl_filter &
1320 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1321 		/* reserved */
1322 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1323 			CTRL, 0001,
1324 			(htt_tlv_filter->fp_ctrl_filter &
1325 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1326 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1327 			CTRL, 0010,
1328 			(htt_tlv_filter->fp_ctrl_filter &
1329 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1330 		/* reserved */
1331 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1332 			CTRL, 0011,
1333 			(htt_tlv_filter->fp_ctrl_filter &
1334 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1335 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1336 			CTRL, 0100,
1337 			(htt_tlv_filter->fp_ctrl_filter &
1338 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1339 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1340 			CTRL, 0101,
1341 			(htt_tlv_filter->fp_ctrl_filter &
1342 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1343 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1344 			CTRL, 0110,
1345 			(htt_tlv_filter->fp_ctrl_filter &
1346 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1347 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1348 			CTRL, 0111,
1349 			(htt_tlv_filter->fp_ctrl_filter &
1350 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1351 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1352 			CTRL, 1000,
1353 			(htt_tlv_filter->fp_ctrl_filter &
1354 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1355 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1356 			CTRL, 1001,
1357 			(htt_tlv_filter->fp_ctrl_filter &
1358 			FILTER_CTRL_BA) ? 1 : 0);
1359 	}
1360 
1361 	if (htt_tlv_filter->enable_md) {
1362 		/* TYPE: CTRL */
1363 		/* reserved */
1364 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1365 			CTRL, 0000,
1366 			(htt_tlv_filter->md_ctrl_filter &
1367 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1368 		/* reserved */
1369 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1370 			CTRL, 0001,
1371 			(htt_tlv_filter->md_ctrl_filter &
1372 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1373 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1374 			CTRL, 0010,
1375 			(htt_tlv_filter->md_ctrl_filter &
1376 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1377 		/* reserved */
1378 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1379 			CTRL, 0011,
1380 			(htt_tlv_filter->md_ctrl_filter &
1381 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1382 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1383 			CTRL, 0100,
1384 			(htt_tlv_filter->md_ctrl_filter &
1385 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1386 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1387 			CTRL, 0101,
1388 			(htt_tlv_filter->md_ctrl_filter &
1389 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1390 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1391 			CTRL, 0110,
1392 			(htt_tlv_filter->md_ctrl_filter &
1393 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1394 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1395 			CTRL, 0111,
1396 			(htt_tlv_filter->md_ctrl_filter &
1397 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1398 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1399 			CTRL, 1000,
1400 			(htt_tlv_filter->md_ctrl_filter &
1401 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1402 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1403 			CTRL, 1001,
1404 			(htt_tlv_filter->md_ctrl_filter &
1405 			FILTER_CTRL_BA) ? 1 : 0);
1406 	}
1407 
1408 	if (htt_tlv_filter->enable_mo) {
1409 		/* TYPE: CTRL */
1410 		/* reserved */
1411 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1412 			CTRL, 0000,
1413 			(htt_tlv_filter->mo_ctrl_filter &
1414 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1415 		/* reserved */
1416 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1417 			CTRL, 0001,
1418 			(htt_tlv_filter->mo_ctrl_filter &
1419 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1420 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1421 			CTRL, 0010,
1422 			(htt_tlv_filter->mo_ctrl_filter &
1423 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1424 		/* reserved */
1425 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1426 			CTRL, 0011,
1427 			(htt_tlv_filter->mo_ctrl_filter &
1428 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1429 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1430 			CTRL, 0100,
1431 			(htt_tlv_filter->mo_ctrl_filter &
1432 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1433 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1434 			CTRL, 0101,
1435 			(htt_tlv_filter->mo_ctrl_filter &
1436 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1437 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1438 			CTRL, 0110,
1439 			(htt_tlv_filter->mo_ctrl_filter &
1440 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1441 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1442 			CTRL, 0111,
1443 			(htt_tlv_filter->mo_ctrl_filter &
1444 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1445 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1446 			CTRL, 1000,
1447 			(htt_tlv_filter->mo_ctrl_filter &
1448 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1449 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1450 			CTRL, 1001,
1451 			(htt_tlv_filter->mo_ctrl_filter &
1452 			FILTER_CTRL_BA) ? 1 : 0);
1453 	}
1454 
1455 	/* word 5 */
1456 	msg_word++;
1457 	*msg_word = 0;
1458 	if (htt_tlv_filter->enable_fp) {
1459 		/* TYPE: CTRL */
1460 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1461 			CTRL, 1010,
1462 			(htt_tlv_filter->fp_ctrl_filter &
1463 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1464 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1465 			CTRL, 1011,
1466 			(htt_tlv_filter->fp_ctrl_filter &
1467 			FILTER_CTRL_RTS) ? 1 : 0);
1468 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1469 			CTRL, 1100,
1470 			(htt_tlv_filter->fp_ctrl_filter &
1471 			FILTER_CTRL_CTS) ? 1 : 0);
1472 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1473 			CTRL, 1101,
1474 			(htt_tlv_filter->fp_ctrl_filter &
1475 			FILTER_CTRL_ACK) ? 1 : 0);
1476 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1477 			CTRL, 1110,
1478 			(htt_tlv_filter->fp_ctrl_filter &
1479 			FILTER_CTRL_CFEND) ? 1 : 0);
1480 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1481 			CTRL, 1111,
1482 			(htt_tlv_filter->fp_ctrl_filter &
1483 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1484 		/* TYPE: DATA */
1485 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1486 			DATA, MCAST,
1487 			(htt_tlv_filter->fp_data_filter &
1488 			FILTER_DATA_MCAST) ? 1 : 0);
1489 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1490 			DATA, UCAST,
1491 			(htt_tlv_filter->fp_data_filter &
1492 			FILTER_DATA_UCAST) ? 1 : 0);
1493 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1494 			DATA, NULL,
1495 			(htt_tlv_filter->fp_data_filter &
1496 			FILTER_DATA_NULL) ? 1 : 0);
1497 	}
1498 
1499 	if (htt_tlv_filter->enable_md) {
1500 		/* TYPE: CTRL */
1501 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1502 			CTRL, 1010,
1503 			(htt_tlv_filter->md_ctrl_filter &
1504 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1505 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1506 			CTRL, 1011,
1507 			(htt_tlv_filter->md_ctrl_filter &
1508 			FILTER_CTRL_RTS) ? 1 : 0);
1509 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1510 			CTRL, 1100,
1511 			(htt_tlv_filter->md_ctrl_filter &
1512 			FILTER_CTRL_CTS) ? 1 : 0);
1513 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1514 			CTRL, 1101,
1515 			(htt_tlv_filter->md_ctrl_filter &
1516 			FILTER_CTRL_ACK) ? 1 : 0);
1517 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1518 			CTRL, 1110,
1519 			(htt_tlv_filter->md_ctrl_filter &
1520 			FILTER_CTRL_CFEND) ? 1 : 0);
1521 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1522 			CTRL, 1111,
1523 			(htt_tlv_filter->md_ctrl_filter &
1524 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1525 		/* TYPE: DATA */
1526 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1527 			DATA, MCAST,
1528 			(htt_tlv_filter->md_data_filter &
1529 			FILTER_DATA_MCAST) ? 1 : 0);
1530 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1531 			DATA, UCAST,
1532 			(htt_tlv_filter->md_data_filter &
1533 			FILTER_DATA_UCAST) ? 1 : 0);
1534 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1535 			DATA, NULL,
1536 			(htt_tlv_filter->md_data_filter &
1537 			FILTER_DATA_NULL) ? 1 : 0);
1538 	}
1539 
1540 	if (htt_tlv_filter->enable_mo) {
1541 		/* TYPE: CTRL */
1542 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1543 			CTRL, 1010,
1544 			(htt_tlv_filter->mo_ctrl_filter &
1545 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1546 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1547 			CTRL, 1011,
1548 			(htt_tlv_filter->mo_ctrl_filter &
1549 			FILTER_CTRL_RTS) ? 1 : 0);
1550 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1551 			CTRL, 1100,
1552 			(htt_tlv_filter->mo_ctrl_filter &
1553 			FILTER_CTRL_CTS) ? 1 : 0);
1554 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1555 			CTRL, 1101,
1556 			(htt_tlv_filter->mo_ctrl_filter &
1557 			FILTER_CTRL_ACK) ? 1 : 0);
1558 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1559 			CTRL, 1110,
1560 			(htt_tlv_filter->mo_ctrl_filter &
1561 			FILTER_CTRL_CFEND) ? 1 : 0);
1562 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1563 			CTRL, 1111,
1564 			(htt_tlv_filter->mo_ctrl_filter &
1565 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1566 		/* TYPE: DATA */
1567 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1568 			DATA, MCAST,
1569 			(htt_tlv_filter->mo_data_filter &
1570 			FILTER_DATA_MCAST) ? 1 : 0);
1571 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1572 			DATA, UCAST,
1573 			(htt_tlv_filter->mo_data_filter &
1574 			FILTER_DATA_UCAST) ? 1 : 0);
1575 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1576 			DATA, NULL,
1577 			(htt_tlv_filter->mo_data_filter &
1578 			FILTER_DATA_NULL) ? 1 : 0);
1579 	}
1580 
1581 	/* word 6 */
1582 	msg_word++;
1583 	*msg_word = 0;
1584 	tlv_filter = 0;
1585 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1586 		htt_tlv_filter->mpdu_start);
1587 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1588 		htt_tlv_filter->msdu_start);
1589 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1590 		htt_tlv_filter->packet);
1591 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1592 		htt_tlv_filter->msdu_end);
1593 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1594 		htt_tlv_filter->mpdu_end);
1595 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1596 		htt_tlv_filter->packet_header);
1597 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1598 		htt_tlv_filter->attention);
1599 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1600 		htt_tlv_filter->ppdu_start);
1601 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1602 		htt_tlv_filter->ppdu_end);
1603 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1604 		htt_tlv_filter->ppdu_end_user_stats);
1605 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1606 		PPDU_END_USER_STATS_EXT,
1607 		htt_tlv_filter->ppdu_end_user_stats_ext);
1608 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1609 		htt_tlv_filter->ppdu_end_status_done);
1610 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1611 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1612 		 htt_tlv_filter->header_per_msdu);
1613 
1614 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1615 
1616 	msg_word_data = (uint32_t *)qdf_nbuf_data(htt_msg);
1617 	dp_info("config_data: [0x%x][0x%x][0x%x][0x%x][0x%x][0x%x][0x%x]",
1618 		msg_word_data[0], msg_word_data[1], msg_word_data[2],
1619 		msg_word_data[3], msg_word_data[4], msg_word_data[5],
1620 		msg_word_data[6]);
1621 
1622 	/* word 7 */
1623 	msg_word++;
1624 	*msg_word = 0;
1625 	if (htt_tlv_filter->offset_valid) {
1626 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1627 					htt_tlv_filter->rx_packet_offset);
1628 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1629 					htt_tlv_filter->rx_header_offset);
1630 
1631 		/* word 8 */
1632 		msg_word++;
1633 		*msg_word = 0;
1634 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1635 					htt_tlv_filter->rx_mpdu_end_offset);
1636 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1637 					htt_tlv_filter->rx_mpdu_start_offset);
1638 
1639 		/* word 9 */
1640 		msg_word++;
1641 		*msg_word = 0;
1642 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1643 					htt_tlv_filter->rx_msdu_end_offset);
1644 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1645 					htt_tlv_filter->rx_msdu_start_offset);
1646 
1647 		/* word 10 */
1648 		msg_word++;
1649 		*msg_word = 0;
1650 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1651 					htt_tlv_filter->rx_attn_offset);
1652 
1653 		/* word 11 */
1654 		msg_word++;
1655 		*msg_word = 0;
1656 	} else {
1657 		/* word 11 */
1658 		msg_word += 4;
1659 		*msg_word = 0;
1660 	}
1661 
1662 	if (mon_drop_th > 0)
1663 		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1664 								mon_drop_th);
1665 	dp_mon_rx_enable_mpdu_logging(soc->dp_soc, msg_word, htt_tlv_filter);
1666 
1667 	dp_mon_rx_enable_phy_errors(msg_word, htt_tlv_filter);
1668 
1669 	dp_mon_rx_wmask_subscribe(soc->dp_soc, msg_word, htt_tlv_filter);
1670 
1671 	/* "response_required" field should be set if a HTT response message is
1672 	 * required after setting up the ring.
1673 	 */
1674 	pkt = htt_htc_pkt_alloc(soc);
1675 	if (!pkt) {
1676 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
1677 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
1678 		goto fail1;
1679 	}
1680 
1681 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1682 
1683 	SET_HTC_PACKET_INFO_TX(
1684 		&pkt->htc_pkt,
1685 		dp_htt_h2t_send_complete_free_netbuf,
1686 		qdf_nbuf_data(htt_msg),
1687 		qdf_nbuf_len(htt_msg),
1688 		soc->htc_endpoint,
1689 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1690 
1691 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1692 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
1693 				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1694 				     htt_logger_bufp);
1695 
1696 	if (status != QDF_STATUS_SUCCESS) {
1697 		qdf_nbuf_free(htt_msg);
1698 		htt_htc_pkt_free(soc, pkt);
1699 	}
1700 
1701 	return status;
1702 
1703 fail1:
1704 	qdf_nbuf_free(htt_msg);
1705 fail0:
1706 	return QDF_STATUS_E_FAILURE;
1707 }
1708 
1709 qdf_export_symbol(htt_h2t_rx_ring_cfg);
1710 
1711 #if defined(HTT_STATS_ENABLE)
1712 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1713 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1714 
1715 {
1716 	uint32_t pdev_id;
1717 	uint32_t *msg_word = NULL;
1718 	uint32_t msg_remain_len = 0;
1719 
1720 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1721 
1722 	/*COOKIE MSB*/
1723 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1724 
1725 	/* stats message length + 16 size of HTT header*/
1726 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1727 				(uint32_t)DP_EXT_MSG_LENGTH);
1728 
1729 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1730 			msg_word,  msg_remain_len,
1731 			WDI_NO_VAL, pdev_id);
1732 
1733 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1734 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1735 	}
1736 	/* Need to be freed here as WDI handler will
1737 	 * make a copy of pkt to send data to application
1738 	 */
1739 	qdf_nbuf_free(htt_msg);
1740 	return QDF_STATUS_SUCCESS;
1741 }
1742 #else
1743 static inline QDF_STATUS
1744 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1745 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
1746 {
1747 	return QDF_STATUS_E_NOSUPPORT;
1748 }
1749 #endif
1750 
#ifdef HTT_STATS_DEBUGFS_SUPPORT
/**
 * dp_htt_stats_dbgfs_send_msg() - Push one HTT stats segment to debugfs
 * @pdev: dp pdev handle
 * @msg_word: pointer to the start of the HTT T2H stats message
 * @msg_len: length of the HTT stats payload
 *
 * Hands the TLV payload (starting at the 5th message word) to the
 * registered debugfs processing callback, then signals the debugfs
 * completion event once the TLV "done" bit is seen in the 4th word.
 *
 * Return: none
 */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
	struct htt_dbgfs_cfg dbgfs_cfg;
	int done = 0;

	/* send 5th word of HTT msg to upper layer */
	dbgfs_cfg.msg_word = (msg_word + 4);
	dbgfs_cfg.m = pdev->dbgfs_cfg->m;

	/* stats message length + HTT header size, capped at one segment */
	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);

	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
							     (msg_len - HTT_HEADER_LEN));

	/* Get TLV Done bit from 4th msg word */
	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
	if (done) {
		/* wake the waiter blocked on the debugfs stats request */
		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
			dp_htt_err("%pK: Failed to set event for debugfs htt stats"
				   , pdev->soc);
	}
}
#else
/* stub when HTT stats debugfs support is compiled out */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
}
#endif /* HTT_STATS_DEBUGFS_SUPPORT */
1792 
1793 #ifdef WLAN_SYSFS_DP_STATS
1794 /* dp_htt_stats_sysfs_update_config() - Function to send htt data to upper layer.
1795  * @pdev: dp pdev handle
1796  *
1797  * This function sets the process id and printing mode within the sysfs config
1798  * struct. which enables DP_PRINT statements within this process to write to the
1799  * console buffer provided by the user space.
1800  *
1801  * Return: None
1802  */
1803 static inline void
1804 dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
1805 {
1806 	struct dp_soc *soc = pdev->soc;
1807 
1808 	if (!soc) {
1809 		dp_htt_err("soc is null");
1810 		return;
1811 	}
1812 
1813 	if (!soc->sysfs_config) {
1814 		dp_htt_err("soc->sysfs_config is NULL");
1815 		return;
1816 	}
1817 
1818 	/* set sysfs config parameters */
1819 	soc->sysfs_config->process_id = qdf_get_current_pid();
1820 	soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
1821 }
1822 
1823 /*
1824  * dp_htt_stats_sysfs_set_event() - Set sysfs stats event.
1825  * @soc: soc handle.
1826  * @msg_word: Pointer to htt msg word.
1827  *
1828  * @return: void
1829  */
1830 static inline void
1831 dp_htt_stats_sysfs_set_event(struct dp_soc *soc, uint32_t *msg_word)
1832 {
1833 	int done = 0;
1834 
1835 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1836 	if (done) {
1837 		if (qdf_event_set(&soc->sysfs_config->sysfs_txrx_fw_request_done))
1838 			dp_htt_err("%pK:event compl Fail to set event ",
1839 				   soc);
1840 	}
1841 }
1842 #else /* WLAN_SYSFS_DP_STATS */
1843 static inline void
1844 dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
1845 {
1846 }
1847 
1848 static inline void
1849 dp_htt_stats_sysfs_set_event(struct dp_soc *dp_soc, uint32_t *msg_word)
1850 {
1851 }
1852 #endif /* WLAN_SYSFS_DP_STATS */
1853 
1854 /**
1855  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1856  * @htt_stats: htt stats info
1857  *
1858  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1859  * contains sub messages which are identified by a TLV header.
1860  * In this function we will process the stream of T2H messages and read all the
1861  * TLV contained in the message.
1862  *
1863  * THe following cases have been taken care of
1864  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1865  *		In this case the buffer will contain multiple tlvs.
1866  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1867  *		Only one tlv will be contained in the HTT message and this tag
1868  *		will extend onto the next buffer.
1869  * Case 3: When the buffer is the continuation of the previous message
1870  * Case 4: tlv length is 0. which will indicate the end of message
1871  *
1872  * return: void
1873  */
1874 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
1875 					struct dp_soc *soc)
1876 {
1877 	htt_tlv_tag_t tlv_type = 0xff;
1878 	qdf_nbuf_t htt_msg = NULL;
1879 	uint32_t *msg_word;
1880 	uint8_t *tlv_buf_head = NULL;
1881 	uint8_t *tlv_buf_tail = NULL;
1882 	uint32_t msg_remain_len = 0;
1883 	uint32_t tlv_remain_len = 0;
1884 	uint32_t *tlv_start;
1885 	int cookie_val = 0;
1886 	int cookie_msb = 0;
1887 	int pdev_id;
1888 	bool copy_stats = false;
1889 	struct dp_pdev *pdev;
1890 
1891 	/* Process node in the HTT message queue */
1892 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1893 		!= NULL) {
1894 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1895 		cookie_val = *(msg_word + 1);
1896 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
1897 					*(msg_word +
1898 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
1899 
1900 		if (cookie_val) {
1901 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
1902 					== QDF_STATUS_SUCCESS) {
1903 				continue;
1904 			}
1905 		}
1906 
1907 		cookie_msb = *(msg_word + 2);
1908 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1909 		pdev = soc->pdev_list[pdev_id];
1910 
1911 		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
1912 			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
1913 						    htt_stats->msg_len);
1914 			qdf_nbuf_free(htt_msg);
1915 			continue;
1916 		}
1917 
1918 		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
1919 			dp_htt_stats_sysfs_update_config(pdev);
1920 
1921 		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
1922 			copy_stats = true;
1923 
1924 		/* read 5th word */
1925 		msg_word = msg_word + 4;
1926 		msg_remain_len = qdf_min(htt_stats->msg_len,
1927 				(uint32_t) DP_EXT_MSG_LENGTH);
1928 		/* Keep processing the node till node length is 0 */
1929 		while (msg_remain_len) {
1930 			/*
1931 			 * if message is not a continuation of previous message
1932 			 * read the tlv type and tlv length
1933 			 */
1934 			if (!tlv_buf_head) {
1935 				tlv_type = HTT_STATS_TLV_TAG_GET(
1936 						*msg_word);
1937 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
1938 						*msg_word);
1939 			}
1940 
1941 			if (tlv_remain_len == 0) {
1942 				msg_remain_len = 0;
1943 
1944 				if (tlv_buf_head) {
1945 					qdf_mem_free(tlv_buf_head);
1946 					tlv_buf_head = NULL;
1947 					tlv_buf_tail = NULL;
1948 				}
1949 
1950 				goto error;
1951 			}
1952 
1953 			if (!tlv_buf_head)
1954 				tlv_remain_len += HTT_TLV_HDR_LEN;
1955 
1956 			if ((tlv_remain_len <= msg_remain_len)) {
1957 				/* Case 3 */
1958 				if (tlv_buf_head) {
1959 					qdf_mem_copy(tlv_buf_tail,
1960 							(uint8_t *)msg_word,
1961 							tlv_remain_len);
1962 					tlv_start = (uint32_t *)tlv_buf_head;
1963 				} else {
1964 					/* Case 1 */
1965 					tlv_start = msg_word;
1966 				}
1967 
1968 				if (copy_stats)
1969 					dp_htt_stats_copy_tag(pdev,
1970 							      tlv_type,
1971 							      tlv_start);
1972 				else
1973 					dp_htt_stats_print_tag(pdev,
1974 							       tlv_type,
1975 							       tlv_start);
1976 
1977 				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
1978 				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
1979 					dp_peer_update_inactive_time(pdev,
1980 								     tlv_type,
1981 								     tlv_start);
1982 
1983 				msg_remain_len -= tlv_remain_len;
1984 
1985 				msg_word = (uint32_t *)
1986 					(((uint8_t *)msg_word) +
1987 					tlv_remain_len);
1988 
1989 				tlv_remain_len = 0;
1990 
1991 				if (tlv_buf_head) {
1992 					qdf_mem_free(tlv_buf_head);
1993 					tlv_buf_head = NULL;
1994 					tlv_buf_tail = NULL;
1995 				}
1996 
1997 			} else { /* tlv_remain_len > msg_remain_len */
1998 				/* Case 2 & 3 */
1999 				if (!tlv_buf_head) {
2000 					tlv_buf_head = qdf_mem_malloc(
2001 							tlv_remain_len);
2002 
2003 					if (!tlv_buf_head) {
2004 						QDF_TRACE(QDF_MODULE_ID_TXRX,
2005 								QDF_TRACE_LEVEL_ERROR,
2006 								"Alloc failed");
2007 						goto error;
2008 					}
2009 
2010 					tlv_buf_tail = tlv_buf_head;
2011 				}
2012 
2013 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
2014 						msg_remain_len);
2015 				tlv_remain_len -= msg_remain_len;
2016 				tlv_buf_tail += msg_remain_len;
2017 			}
2018 		}
2019 
2020 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
2021 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
2022 		}
2023 
2024 		/* indicate event completion in case the event is done */
2025 		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
2026 			dp_htt_stats_sysfs_set_event(soc, msg_word);
2027 
2028 		qdf_nbuf_free(htt_msg);
2029 	}
2030 	return;
2031 
2032 error:
2033 	qdf_nbuf_free(htt_msg);
2034 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
2035 			!= NULL)
2036 		qdf_nbuf_free(htt_msg);
2037 }
2038 
2039 void htt_t2h_stats_handler(void *context)
2040 {
2041 	struct dp_soc *soc = (struct dp_soc *)context;
2042 	struct htt_stats_context htt_stats;
2043 	uint32_t *msg_word;
2044 	qdf_nbuf_t htt_msg = NULL;
2045 	uint8_t done;
2046 	uint32_t rem_stats;
2047 
2048 	if (!soc) {
2049 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2050 			  "soc is NULL");
2051 		return;
2052 	}
2053 
2054 	if (!qdf_atomic_read(&soc->cmn_init_done)) {
2055 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2056 			  "soc: 0x%pK, init_done: %d", soc,
2057 			  qdf_atomic_read(&soc->cmn_init_done));
2058 		return;
2059 	}
2060 
2061 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
2062 	qdf_nbuf_queue_init(&htt_stats.msg);
2063 
2064 	/* pull one completed stats from soc->htt_stats_msg and process */
2065 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2066 	if (!soc->htt_stats.num_stats) {
2067 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
2068 		return;
2069 	}
2070 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
2071 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
2072 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
2073 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2074 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
2075 		/*
2076 		 * Done bit signifies that this is the last T2H buffer in the
2077 		 * stream of HTT EXT STATS message
2078 		 */
2079 		if (done)
2080 			break;
2081 	}
2082 	rem_stats = --soc->htt_stats.num_stats;
2083 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2084 
2085 	/* If there are more stats to process, schedule stats work again.
2086 	 * Scheduling prior to processing ht_stats to queue with early
2087 	 * index
2088 	 */
2089 	if (rem_stats)
2090 		qdf_sched_work(0, &soc->htt_stats.work);
2091 
2092 	dp_process_htt_stat_msg(&htt_stats, soc);
2093 }
2094 
2095 /**
2096  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2097  * @soc: DP SOC handle
2098  * @htt_t2h_msg: HTT message nbuf
2099  *
2100  * return:void
2101  */
2102 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2103 					    qdf_nbuf_t htt_t2h_msg)
2104 {
2105 	uint8_t done;
2106 	qdf_nbuf_t msg_copy;
2107 	uint32_t *msg_word;
2108 
2109 	msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
2110 	msg_word = msg_word + 3;
2111 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2112 
2113 	/*
2114 	 * HTT EXT stats response comes as stream of TLVs which span over
2115 	 * multiple T2H messages.
2116 	 * The first message will carry length of the response.
2117 	 * For rest of the messages length will be zero.
2118 	 *
2119 	 * Clone the T2H message buffer and store it in a list to process
2120 	 * it later.
2121 	 *
2122 	 * The original T2H message buffers gets freed in the T2H HTT event
2123 	 * handler
2124 	 */
2125 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2126 
2127 	if (!msg_copy) {
2128 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2129 			  "T2H messge clone failed for HTT EXT STATS");
2130 		goto error;
2131 	}
2132 
2133 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2134 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2135 	/*
2136 	 * Done bit signifies that this is the last T2H buffer in the stream of
2137 	 * HTT EXT STATS message
2138 	 */
2139 	if (done) {
2140 		soc->htt_stats.num_stats++;
2141 		qdf_sched_work(0, &soc->htt_stats.work);
2142 	}
2143 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2144 
2145 	return;
2146 
2147 error:
2148 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2149 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2150 			!= NULL) {
2151 		qdf_nbuf_free(msg_copy);
2152 	}
2153 	soc->htt_stats.num_stats = 0;
2154 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2155 	return;
2156 }
2157 
2158 /*
2159  * htt_soc_attach_target() - SOC level HTT setup
2160  * @htt_soc:	HTT SOC handle
2161  *
2162  * Return: 0 on success; error code on failure
2163  */
2164 int htt_soc_attach_target(struct htt_soc *htt_soc)
2165 {
2166 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
2167 
2168 	return htt_h2t_ver_req_msg(soc);
2169 }
2170 
/* Store the HTC handle used for all H2T message transmission */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}
2175 
/* Return the HTC handle previously stored via htt_set_htc_handle() */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
2180 
2181 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
2182 {
2183 	int i;
2184 	int j;
2185 	int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
2186 	struct htt_soc *htt_soc = NULL;
2187 
2188 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
2189 	if (!htt_soc) {
2190 		dp_err("HTT attach failed");
2191 		return NULL;
2192 	}
2193 
2194 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2195 		htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
2196 		if (!htt_soc->pdevid_tt[i].umac_ttt)
2197 			break;
2198 		qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
2199 		htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
2200 		if (!htt_soc->pdevid_tt[i].lmac_ttt) {
2201 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
2202 			break;
2203 		}
2204 		qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
2205 	}
2206 	if (i != MAX_PDEV_CNT) {
2207 		for (j = 0; j < i; j++) {
2208 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt);
2209 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt);
2210 		}
2211 		qdf_mem_free(htt_soc);
2212 		return NULL;
2213 	}
2214 
2215 	htt_soc->dp_soc = soc;
2216 	htt_soc->htc_soc = htc_handle;
2217 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
2218 
2219 	return htt_soc;
2220 }
2221 
2222 #if defined(WDI_EVENT_ENABLE) && \
2223 	!defined(REMOVE_PKT_LOG)
2224 /*
2225  * dp_pktlog_msg_handler() - Pktlog msg handler
2226  * @htt_soc:	 HTT SOC handle
2227  * @msg_word:    Pointer to payload
2228  *
2229  * Return: None
2230  */
2231 static void
2232 dp_pktlog_msg_handler(struct htt_soc *soc,
2233 		      uint32_t *msg_word)
2234 {
2235 	uint8_t pdev_id;
2236 	uint8_t target_pdev_id;
2237 	uint32_t *pl_hdr;
2238 
2239 	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2240 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2241 							 target_pdev_id);
2242 	pl_hdr = (msg_word + 1);
2243 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2244 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2245 		pdev_id);
2246 }
2247 #else
/* No-op stub: pktlog disabled (REMOVE_PKT_LOG or no WDI_EVENT_ENABLE) */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
2253 #endif
2254 
2255 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
2256 /*
2257  * dp_vdev_txrx_hw_stats_handler - Handle vdev stats received from FW
2258  * @soc - htt soc handle
2259  * @ msg_word - buffer containing stats
2260  *
2261  * Return: void
2262  */
2263 static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
2264 					  uint32_t *msg_word)
2265 {
2266 	struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
2267 	uint8_t pdev_id;
2268 	uint8_t vdev_id;
2269 	uint8_t target_pdev_id;
2270 	uint16_t payload_size;
2271 	struct dp_pdev *pdev;
2272 	struct dp_vdev *vdev;
2273 	uint8_t *tlv_buf;
2274 	uint32_t *tlv_buf_temp;
2275 	uint32_t *tag_buf;
2276 	htt_tlv_tag_t tlv_type;
2277 	uint16_t tlv_length;
2278 	uint64_t pkt_count = 0;
2279 	uint64_t byte_count = 0;
2280 	uint64_t soc_drop_cnt = 0;
2281 	struct cdp_pkt_info tx_comp = { 0 };
2282 	struct cdp_pkt_info tx_failed =  { 0 };
2283 
2284 	target_pdev_id =
2285 		HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PDEV_ID_GET(*msg_word);
2286 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(dpsoc,
2287 							 target_pdev_id);
2288 
2289 	if (pdev_id >= MAX_PDEV_CNT)
2290 		return;
2291 
2292 	pdev = dpsoc->pdev_list[pdev_id];
2293 	if (!pdev) {
2294 		dp_err("PDEV is NULL for pdev_id:%d", pdev_id);
2295 		return;
2296 	}
2297 
2298 	payload_size =
2299 	HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PAYLOAD_SIZE_GET(*msg_word);
2300 
2301 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2302 			   (void *)msg_word, payload_size + 16);
2303 
2304 	/* Adjust msg_word to point to the first TLV in buffer */
2305 	msg_word = msg_word + 4;
2306 
2307 	/* Parse the received buffer till payload size reaches 0 */
2308 	while (payload_size > 0) {
2309 		tlv_buf = (uint8_t *)msg_word;
2310 		tlv_buf_temp = msg_word;
2311 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
2312 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
2313 
2314 		/* Add header size to tlv length*/
2315 		tlv_length += 4;
2316 
2317 		switch (tlv_type) {
2318 		case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
2319 		{
2320 			tag_buf = tlv_buf_temp +
2321 					HTT_VDEV_STATS_GET_INDEX(SOC_DROP_CNT);
2322 			soc_drop_cnt = HTT_VDEV_GET_STATS_U64(tag_buf);
2323 			DP_STATS_UPD(dpsoc, tx.tqm_drop_no_peer, soc_drop_cnt);
2324 			break;
2325 		}
2326 		case HTT_STATS_VDEV_TXRX_STATS_HW_STATS_TAG:
2327 		{
2328 			tag_buf = tlv_buf_temp +
2329 					HTT_VDEV_STATS_GET_INDEX(VDEV_ID);
2330 			vdev_id = (uint8_t)(*tag_buf);
2331 			vdev = dp_vdev_get_ref_by_id(dpsoc, vdev_id,
2332 						     DP_MOD_ID_HTT);
2333 
2334 			if (!vdev)
2335 				goto invalid_vdev;
2336 
2337 			/* Extract received packet count from buffer */
2338 			tag_buf = tlv_buf_temp +
2339 					HTT_VDEV_STATS_GET_INDEX(RX_PKT_CNT);
2340 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2341 			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.num, pkt_count);
2342 
2343 			/* Extract received packet byte count from buffer */
2344 			tag_buf = tlv_buf_temp +
2345 					HTT_VDEV_STATS_GET_INDEX(RX_BYTE_CNT);
2346 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2347 			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.bytes, byte_count);
2348 
2349 			/* Extract tx success packet count from buffer */
2350 			tag_buf = tlv_buf_temp +
2351 				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_PKT_CNT);
2352 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2353 			tx_comp.num = pkt_count;
2354 
2355 			/* Extract tx success packet byte count from buffer */
2356 			tag_buf = tlv_buf_temp +
2357 				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_BYTE_CNT);
2358 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2359 			tx_comp.bytes = byte_count;
2360 
2361 			/* Extract tx retry packet count from buffer */
2362 			tag_buf = tlv_buf_temp +
2363 				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_PKT_CNT);
2364 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2365 			tx_comp.num += pkt_count;
2366 			tx_failed.num = pkt_count;
2367 
2368 			/* Extract tx retry packet byte count from buffer */
2369 			tag_buf = tlv_buf_temp +
2370 				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_BYTE_CNT);
2371 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2372 			tx_comp.bytes += byte_count;
2373 			tx_failed.bytes = byte_count;
2374 
2375 			/* Extract tx drop packet count from buffer */
2376 			tag_buf = tlv_buf_temp +
2377 				HTT_VDEV_STATS_GET_INDEX(TX_DROP_PKT_CNT);
2378 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2379 			tx_comp.num += pkt_count;
2380 			tx_failed.num += pkt_count;
2381 
2382 			/* Extract tx drop packet byte count from buffer */
2383 			tag_buf = tlv_buf_temp +
2384 				HTT_VDEV_STATS_GET_INDEX(TX_DROP_BYTE_CNT);
2385 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2386 			tx_comp.bytes += byte_count;
2387 			tx_failed.bytes += byte_count;
2388 
2389 			/* Extract tx age-out packet count from buffer */
2390 			tag_buf = tlv_buf_temp +
2391 				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_PKT_CNT);
2392 			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2393 			tx_comp.num += pkt_count;
2394 			tx_failed.num += pkt_count;
2395 
2396 			/* Extract tx age-out packet byte count from buffer */
2397 			tag_buf = tlv_buf_temp +
2398 				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_BYTE_CNT);
2399 			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
2400 			tx_comp.bytes += byte_count;
2401 			tx_failed.bytes += byte_count;
2402 
2403 			DP_STATS_UPD(vdev, tx.comp_pkt.num, tx_comp.num);
2404 			DP_STATS_UPD(vdev, tx.comp_pkt.bytes, tx_comp.bytes);
2405 
2406 			DP_STATS_UPD(vdev, tx.tx_failed, tx_failed.num);
2407 
2408 			dp_vdev_unref_delete(dpsoc, vdev, DP_MOD_ID_HTT);
2409 			break;
2410 		}
2411 		default:
2412 			qdf_assert(0);
2413 		}
2414 invalid_vdev:
2415 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2416 		payload_size -= tlv_length;
2417 	}
2418 }
2419 #else
/* No-op stub: QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT not compiled in */
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{}
2423 #endif
2424 
2425 #ifdef CONFIG_SAWF_DEF_QUEUES
/* Forward SAWF default-queues map report conf to the SAWF HTT layer */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_def_queues_map_report_conf(soc, msg_word, htt_t2h_msg);
}
2432 #else
/* No-op stub: CONFIG_SAWF_DEF_QUEUES not compiled in */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{}
2437 #endif
2438 
2439 #ifdef CONFIG_SAWF
2440 /*
2441  * dp_sawf_msduq_map() - Msdu queue creation information received
2442  * from target
2443  * @soc: soc handle.
2444  * @msg_word: Pointer to htt msg word.
2445  * @htt_t2h_msg: HTT message nbuf
2446  *
2447  * @return: void
2448  */
2449 static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
2450 			      qdf_nbuf_t htt_t2h_msg)
2451 {
2452 	dp_htt_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
2453 }
2454 #else
/* No-op stub: CONFIG_SAWF not compiled in */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{}
2458 #endif
2459 
2460 /*
2461  * time_allow_print() - time allow print
2462  * @htt_ring_tt:	ringi_id array of timestamps
2463  * @ring_id:		ring_id (index)
2464  *
2465  * Return: 1 for successfully saving timestamp in array
2466  *	and 0 for timestamp falling within 2 seconds after last one
2467  */
2468 static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
2469 {
2470 	unsigned long tstamp;
2471 	unsigned long delta;
2472 
2473 	tstamp = qdf_get_system_timestamp();
2474 
2475 	if (!htt_ring_tt)
2476 		return 0; //unable to print backpressure messages
2477 
2478 	if (htt_ring_tt[ring_id] == -1) {
2479 		htt_ring_tt[ring_id] = tstamp;
2480 		return 1;
2481 	}
2482 	delta = tstamp - htt_ring_tt[ring_id];
2483 	if (delta >= 2000) {
2484 		htt_ring_tt[ring_id] = tstamp;
2485 		return 1;
2486 	}
2487 
2488 	return 0;
2489 }
2490 
/* Emit the two-line backpressure alert for one ring; callers are expected
 * to rate-limit via time_allow_print() before calling
 */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       struct dp_pdev *pdev, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time, char *ring_stype)
{
	dp_alert("seq_num %u msg_type: %d pdev_id: %d ring_type: %s ",
		 pdev->bkp_stats.seq_num, msg_type, pdev->pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
}
2501 
2502 /**
2503  * dp_get_srng_ring_state_from_hal(): Get hal level ring stats
2504  * @soc: DP_SOC handle
2505  * @srng: DP_SRNG handle
2506  * @ring_type: srng src/dst ring
2507  *
2508  * Return: void
2509  */
2510 static QDF_STATUS
2511 dp_get_srng_ring_state_from_hal(struct dp_soc *soc,
2512 				struct dp_pdev *pdev,
2513 				struct dp_srng *srng,
2514 				enum hal_ring_type ring_type,
2515 				struct dp_srng_ring_state *state)
2516 {
2517 	struct hal_soc *hal_soc;
2518 
2519 	if (!soc || !srng || !srng->hal_srng || !state)
2520 		return QDF_STATUS_E_INVAL;
2521 
2522 	hal_soc = (struct hal_soc *)soc->hal_soc;
2523 
2524 	hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &state->sw_tail,
2525 			&state->sw_head);
2526 
2527 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &state->hw_head,
2528 			&state->hw_tail, ring_type);
2529 
2530 	state->ring_type = ring_type;
2531 
2532 	return QDF_STATUS_SUCCESS;
2533 }
2534 
2535 #ifdef QCA_MONITOR_PKT_SUPPORT
2536 static void
2537 dp_queue_mon_ring_stats(struct dp_pdev *pdev,
2538 			int lmac_id, uint32_t *num_srng,
2539 			struct dp_soc_srngs_state *soc_srngs_state)
2540 {
2541 	QDF_STATUS status;
2542 
2543 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
2544 		status = dp_get_srng_ring_state_from_hal
2545 			(pdev->soc, pdev,
2546 			 &pdev->soc->rxdma_mon_buf_ring[lmac_id],
2547 			 RXDMA_MONITOR_BUF,
2548 			 &soc_srngs_state->ring_state[*num_srng]);
2549 
2550 		if (status == QDF_STATUS_SUCCESS)
2551 			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
2552 
2553 		status = dp_get_srng_ring_state_from_hal
2554 			(pdev->soc, pdev,
2555 			 &pdev->soc->rxdma_mon_dst_ring[lmac_id],
2556 			 RXDMA_MONITOR_DST,
2557 			 &soc_srngs_state->ring_state[*num_srng]);
2558 
2559 		if (status == QDF_STATUS_SUCCESS)
2560 			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
2561 
2562 		status = dp_get_srng_ring_state_from_hal
2563 			(pdev->soc, pdev,
2564 			 &pdev->soc->rxdma_mon_desc_ring[lmac_id],
2565 			 RXDMA_MONITOR_DESC,
2566 			 &soc_srngs_state->ring_state[*num_srng]);
2567 
2568 		if (status == QDF_STATUS_SUCCESS)
2569 			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
2570 	}
2571 }
2572 #else
/* No-op stub: QCA_MONITOR_PKT_SUPPORT not compiled in */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
}
2579 #endif
2580 
2581 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
/* Capture the TCL command/credit ring state (ring present only when
 * WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG is not set)
 */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_cmd_credit_ring,
					       TCL_CMD_CREDIT, ring_state);
}
2590 #else
/* Ring compiled out: report success so the caller's slot index is
 * simply not advanced (note: ring_state stays unfilled; callers only
 * advance on SUCCESS, so the slot is reused)
 */
static inline QDF_STATUS
dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
					struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
2597 #endif
2598 
2599 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
/* Capture the TCL status ring state (ring present only when
 * WLAN_DP_DISABLE_TCL_STATUS_SRNG is not set)
 */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
					       &pdev->soc->tcl_status_ring,
					       TCL_STATUS, ring_state);
}
2608 #else
/* Ring compiled out: report success without filling ring_state; the
 * caller advances its slot index on SUCCESS, so the unfilled slot is
 * overwritten by the next capture
 */
static inline QDF_STATUS
dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
				      struct dp_srng_ring_state *ring_state)
{
	return QDF_STATUS_SUCCESS;
}
2615 #endif
2616 
2617 /**
2618  * dp_queue_srng_ring_stats(): Print pdev hal level ring stats
2619  * @pdev: DP_pdev handle
2620  *
2621  * Return: void
2622  */
2623 static void dp_queue_ring_stats(struct dp_pdev *pdev)
2624 {
2625 	uint32_t i;
2626 	int mac_id;
2627 	int lmac_id;
2628 	uint32_t j = 0;
2629 	struct dp_soc *soc = pdev->soc;
2630 	struct dp_soc_srngs_state * soc_srngs_state = NULL;
2631 	struct dp_soc_srngs_state *drop_srngs_state = NULL;
2632 	QDF_STATUS status;
2633 
2634 	soc_srngs_state = qdf_mem_malloc(sizeof(struct dp_soc_srngs_state));
2635 	if (!soc_srngs_state) {
2636 		dp_htt_alert("Memory alloc failed for back pressure event");
2637 		return;
2638 	}
2639 
2640 	status = dp_get_srng_ring_state_from_hal
2641 				(pdev->soc, pdev,
2642 				 &pdev->soc->reo_exception_ring,
2643 				 REO_EXCEPTION,
2644 				 &soc_srngs_state->ring_state[j]);
2645 
2646 	if (status == QDF_STATUS_SUCCESS)
2647 		qdf_assert_always(++j < DP_MAX_SRNGS);
2648 
2649 	status = dp_get_srng_ring_state_from_hal
2650 				(pdev->soc, pdev,
2651 				 &pdev->soc->reo_reinject_ring,
2652 				 REO_REINJECT,
2653 				 &soc_srngs_state->ring_state[j]);
2654 
2655 	if (status == QDF_STATUS_SUCCESS)
2656 		qdf_assert_always(++j < DP_MAX_SRNGS);
2657 
2658 	status = dp_get_srng_ring_state_from_hal
2659 				(pdev->soc, pdev,
2660 				 &pdev->soc->reo_cmd_ring,
2661 				 REO_CMD,
2662 				 &soc_srngs_state->ring_state[j]);
2663 
2664 	if (status == QDF_STATUS_SUCCESS)
2665 		qdf_assert_always(++j < DP_MAX_SRNGS);
2666 
2667 	status = dp_get_srng_ring_state_from_hal
2668 				(pdev->soc, pdev,
2669 				 &pdev->soc->reo_status_ring,
2670 				 REO_STATUS,
2671 				 &soc_srngs_state->ring_state[j]);
2672 
2673 	if (status == QDF_STATUS_SUCCESS)
2674 		qdf_assert_always(++j < DP_MAX_SRNGS);
2675 
2676 	status = dp_get_srng_ring_state_from_hal
2677 				(pdev->soc, pdev,
2678 				 &pdev->soc->rx_rel_ring,
2679 				 WBM2SW_RELEASE,
2680 				 &soc_srngs_state->ring_state[j]);
2681 
2682 	if (status == QDF_STATUS_SUCCESS)
2683 		qdf_assert_always(++j < DP_MAX_SRNGS);
2684 
2685 	status = dp_get_tcl_cmd_cred_ring_state_from_hal
2686 				(pdev, &soc_srngs_state->ring_state[j]);
2687 	if (status == QDF_STATUS_SUCCESS)
2688 		qdf_assert_always(++j < DP_MAX_SRNGS);
2689 
2690 	status = dp_get_tcl_status_ring_state_from_hal
2691 				(pdev, &soc_srngs_state->ring_state[j]);
2692 	if (status == QDF_STATUS_SUCCESS)
2693 		qdf_assert_always(++j < DP_MAX_SRNGS);
2694 
2695 	status = dp_get_srng_ring_state_from_hal
2696 				(pdev->soc, pdev,
2697 				 &pdev->soc->wbm_desc_rel_ring,
2698 				 SW2WBM_RELEASE,
2699 				 &soc_srngs_state->ring_state[j]);
2700 
2701 	if (status == QDF_STATUS_SUCCESS)
2702 		qdf_assert_always(++j < DP_MAX_SRNGS);
2703 
2704 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
2705 		status = dp_get_srng_ring_state_from_hal
2706 				(pdev->soc, pdev,
2707 				 &pdev->soc->reo_dest_ring[i],
2708 				 REO_DST,
2709 				 &soc_srngs_state->ring_state[j]);
2710 
2711 		if (status == QDF_STATUS_SUCCESS)
2712 			qdf_assert_always(++j < DP_MAX_SRNGS);
2713 	}
2714 
2715 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
2716 		status = dp_get_srng_ring_state_from_hal
2717 				(pdev->soc, pdev,
2718 				 &pdev->soc->tcl_data_ring[i],
2719 				 TCL_DATA,
2720 				 &soc_srngs_state->ring_state[j]);
2721 
2722 		if (status == QDF_STATUS_SUCCESS)
2723 			qdf_assert_always(++j < DP_MAX_SRNGS);
2724 	}
2725 
2726 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
2727 		status = dp_get_srng_ring_state_from_hal
2728 				(pdev->soc, pdev,
2729 				 &pdev->soc->tx_comp_ring[i],
2730 				 WBM2SW_RELEASE,
2731 				 &soc_srngs_state->ring_state[j]);
2732 
2733 		if (status == QDF_STATUS_SUCCESS)
2734 			qdf_assert_always(++j < DP_MAX_SRNGS);
2735 	}
2736 
2737 	lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id);
2738 	status = dp_get_srng_ring_state_from_hal
2739 				(pdev->soc, pdev,
2740 				 &pdev->soc->rx_refill_buf_ring
2741 				 [lmac_id],
2742 				 RXDMA_BUF,
2743 				 &soc_srngs_state->ring_state[j]);
2744 
2745 	if (status == QDF_STATUS_SUCCESS)
2746 		qdf_assert_always(++j < DP_MAX_SRNGS);
2747 
2748 	status = dp_get_srng_ring_state_from_hal
2749 				(pdev->soc, pdev,
2750 				 &pdev->rx_refill_buf_ring2,
2751 				 RXDMA_BUF,
2752 				 &soc_srngs_state->ring_state[j]);
2753 
2754 	if (status == QDF_STATUS_SUCCESS)
2755 		qdf_assert_always(++j < DP_MAX_SRNGS);
2756 
2757 
2758 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2759 		dp_get_srng_ring_state_from_hal
2760 				(pdev->soc, pdev,
2761 				 &pdev->rx_mac_buf_ring[i],
2762 				 RXDMA_BUF,
2763 				 &soc_srngs_state->ring_state[j]);
2764 
2765 		if (status == QDF_STATUS_SUCCESS)
2766 			qdf_assert_always(++j < DP_MAX_SRNGS);
2767 	}
2768 
2769 	for (mac_id = 0;
2770 	     mac_id  < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
2771 	     mac_id++) {
2772 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2773 						     mac_id, pdev->pdev_id);
2774 
2775 		dp_queue_mon_ring_stats(pdev, lmac_id, &j,
2776 					soc_srngs_state);
2777 
2778 		status = dp_get_srng_ring_state_from_hal
2779 			(pdev->soc, pdev,
2780 			 &pdev->soc->rxdma_mon_status_ring[lmac_id],
2781 			 RXDMA_MONITOR_STATUS,
2782 			 &soc_srngs_state->ring_state[j]);
2783 
2784 		if (status == QDF_STATUS_SUCCESS)
2785 			qdf_assert_always(++j < DP_MAX_SRNGS);
2786 	}
2787 
2788 	for (i = 0; i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
2789 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
2790 						     i, pdev->pdev_id);
2791 
2792 		status = dp_get_srng_ring_state_from_hal
2793 				(pdev->soc, pdev,
2794 				 &pdev->soc->rxdma_err_dst_ring
2795 				 [lmac_id],
2796 				 RXDMA_DST,
2797 				 &soc_srngs_state->ring_state[j]);
2798 
2799 		if (status == QDF_STATUS_SUCCESS)
2800 			qdf_assert_always(++j < DP_MAX_SRNGS);
2801 	}
2802 	soc_srngs_state->max_ring_id = j;
2803 
2804 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
2805 
2806 	soc_srngs_state->seq_num = pdev->bkp_stats.seq_num;
2807 
2808 	if (pdev->bkp_stats.queue_depth >= HTT_BKP_STATS_MAX_QUEUE_DEPTH) {
2809 		drop_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
2810 		qdf_assert_always(drop_srngs_state);
2811 		TAILQ_REMOVE(&pdev->bkp_stats.list, drop_srngs_state,
2812 			     list_elem);
2813 		qdf_mem_free(drop_srngs_state);
2814 		pdev->bkp_stats.queue_depth--;
2815 	}
2816 
2817 	pdev->bkp_stats.queue_depth++;
2818 	TAILQ_INSERT_TAIL(&pdev->bkp_stats.list, soc_srngs_state,
2819 			  list_elem);
2820 	pdev->bkp_stats.seq_num++;
2821 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
2822 
2823 	qdf_queue_work(0, pdev->bkp_stats.work_queue,
2824 		       &pdev->bkp_stats.work);
2825 }
2826 
2827 /*
2828  * dp_htt_bkp_event_alert() - htt backpressure event alert
2829  * @msg_word:	htt packet context
2830  * @htt_soc:	HTT SOC handle
2831  *
2832  * Return: after attempting to print stats
2833  */
2834 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
2835 {
2836 	u_int8_t ring_type;
2837 	u_int8_t pdev_id;
2838 	uint8_t target_pdev_id;
2839 	u_int8_t ring_id;
2840 	u_int16_t hp_idx;
2841 	u_int16_t tp_idx;
2842 	u_int32_t bkp_time;
2843 	enum htt_t2h_msg_type msg_type;
2844 	struct dp_soc *dpsoc;
2845 	struct dp_pdev *pdev;
2846 	struct dp_htt_timestamp *radio_tt;
2847 
2848 	if (!soc)
2849 		return;
2850 
2851 	dpsoc = (struct dp_soc *)soc->dp_soc;
2852 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2853 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
2854 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
2855 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2856 							 target_pdev_id);
2857 	if (pdev_id >= MAX_PDEV_CNT) {
2858 		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
2859 		return;
2860 	}
2861 
2862 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
2863 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
2864 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
2865 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
2866 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
2867 	radio_tt = &soc->pdevid_tt[pdev_id];
2868 
2869 	switch (ring_type) {
2870 	case HTT_SW_RING_TYPE_UMAC:
2871 		if (!time_allow_print(radio_tt->umac_ttt, ring_id))
2872 			return;
2873 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2874 				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
2875 	break;
2876 	case HTT_SW_RING_TYPE_LMAC:
2877 		if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
2878 			return;
2879 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2880 				   bkp_time, "HTT_SW_RING_TYPE_LMAC");
2881 	break;
2882 	default:
2883 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
2884 				   bkp_time, "UNKNOWN");
2885 	break;
2886 	}
2887 
2888 	dp_queue_ring_stats(pdev);
2889 }
2890 
2891 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
2892 /*
2893  * dp_offload_ind_handler() - offload msg handler
2894  * @htt_soc: HTT SOC handle
2895  * @msg_word: Pointer to payload
2896  *
2897  * Return: None
2898  */
2899 static void
2900 dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
2901 {
2902 	u_int8_t pdev_id;
2903 	u_int8_t target_pdev_id;
2904 
2905 	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
2906 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2907 							 target_pdev_id);
2908 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA, soc->dp_soc,
2909 			     msg_word, HTT_INVALID_VDEV, WDI_NO_VAL,
2910 			     pdev_id);
2911 }
2912 #else
/* Stub: packet capture v2 is not compiled in; offload indications are
 * silently dropped.
 */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
}
2917 #endif
2918 
2919 #ifdef WLAN_FEATURE_11BE_MLO
2920 static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
2921 					uint32_t *msg_word)
2922 {
2923 	uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
2924 	uint8_t *mlo_peer_mac_addr;
2925 	uint16_t mlo_peer_id;
2926 	uint8_t num_links;
2927 	struct dp_mlo_flow_override_info mlo_flow_info[DP_MLO_FLOW_INFO_MAX];
2928 
2929 	mlo_peer_id = HTT_RX_MLO_PEER_MAP_MLO_PEER_ID_GET(*msg_word);
2930 	num_links =
2931 		HTT_RX_MLO_PEER_MAP_NUM_LOGICAL_LINKS_GET(*msg_word);
2932 	mlo_peer_mac_addr =
2933 	htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
2934 				   &mac_addr_deswizzle_buf[0]);
2935 
2936 	mlo_flow_info[0].ast_idx =
2937 		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
2938 	mlo_flow_info[0].ast_idx_valid =
2939 		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
2940 	mlo_flow_info[0].chip_id =
2941 		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
2942 	mlo_flow_info[0].tidmask =
2943 		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
2944 	mlo_flow_info[0].cache_set_num =
2945 	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));
2946 
2947 	mlo_flow_info[1].ast_idx =
2948 		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
2949 	mlo_flow_info[1].ast_idx_valid =
2950 		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
2951 	mlo_flow_info[1].chip_id =
2952 		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
2953 	mlo_flow_info[1].tidmask =
2954 		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
2955 	mlo_flow_info[1].cache_set_num =
2956 	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));
2957 
2958 	mlo_flow_info[2].ast_idx =
2959 		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
2960 	mlo_flow_info[2].ast_idx_valid =
2961 		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
2962 	mlo_flow_info[2].chip_id =
2963 		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
2964 	mlo_flow_info[2].tidmask =
2965 		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
2966 	mlo_flow_info[2].cache_set_num =
2967 	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));
2968 
2969 	dp_rx_mlo_peer_map_handler(soc->dp_soc, mlo_peer_id,
2970 				   mlo_peer_mac_addr,
2971 				   mlo_flow_info);
2972 }
2973 
2974 static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
2975 					  uint32_t *msg_word)
2976 {
2977 	uint16_t mlo_peer_id;
2978 
2979 	mlo_peer_id = HTT_RX_MLO_PEER_UNMAP_MLO_PEER_ID_GET(*msg_word);
2980 	dp_rx_mlo_peer_unmap_handler(soc->dp_soc, mlo_peer_id);
2981 }
2982 
/*
 * dp_rx_mlo_timestamp_ind_handler() - Handle an MLO timestamp offset
 * indication: publish it via WDI and cache the snapshot in the pdev.
 * @soc: DP SOC handle
 * @msg_word: Pointer to the T2H message payload
 *
 * Return: None
 */
static void
dp_rx_mlo_timestamp_ind_handler(struct dp_soc *soc,
				uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint8_t target_pdev_id;
	struct dp_pdev *pdev;

	if (!soc)
		return;

	/* map the firmware (target) pdev id to the host pdev id */
	target_pdev_id = HTT_T2H_MLO_TIMESTAMP_OFFSET_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT) {
		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
		return;
	}

	pdev = (struct dp_pdev *)soc->pdev_list[pdev_id];

	if (!pdev) {
		dp_err("Invalid pdev");
		return;
	}
	/* let WDI subscribers see the raw indication first */
	dp_wdi_event_handler(WDI_EVENT_MLO_TSTMP, soc,
			     msg_word, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);

	/*
	 * Snapshot the whole timestamp/offset record under the htt_stats
	 * lock so readers never observe a partially updated record.
	 */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	pdev->timestamp.msg_type =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MSG_TYPE_GET(*msg_word);
	pdev->timestamp.pdev_id = pdev_id;
	pdev->timestamp.chip_id =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_CHIP_ID_GET(*msg_word);
	pdev->timestamp.mac_clk_freq =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MAC_CLK_FREQ_MHZ_GET(*msg_word);
	/* words 1-5 are raw 32-bit values, no bitfield extraction needed */
	pdev->timestamp.sync_tstmp_lo_us = *(msg_word + 1);
	pdev->timestamp.sync_tstmp_hi_us = *(msg_word + 2);
	pdev->timestamp.mlo_offset_lo_us = *(msg_word + 3);
	pdev->timestamp.mlo_offset_hi_us = *(msg_word + 4);
	pdev->timestamp.mlo_offset_clks  = *(msg_word + 5);
	pdev->timestamp.mlo_comp_us =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_US_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_clks =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_CLKS_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_timer =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_PERIOD_US_GET(
							*(msg_word + 7));
	qdf_spin_unlock_bh(&soc->htt_stats.lock);
}
3037 #else
/* Stub for non-MLO builds: receiving an MLO peer map here indicates a
 * host/firmware feature mismatch, so assert.
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	qdf_assert_always(0);
}
3043 
/* Stub for non-MLO builds: an MLO peer unmap must never arrive, assert */
static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
					 uint32_t *msg_word)
{
	qdf_assert_always(0);
}
3049 
/* Stub for non-MLO builds: an MLO timestamp indication must never arrive */
static void
dp_rx_mlo_timestamp_ind_handler(void *soc_handle,
				uint32_t *msg_word)
{
	qdf_assert_always(0);
}
3056 #endif
3057 
3058 /*
3059  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
3060  * @context:	Opaque context (HTT SOC handle)
3061  * @pkt:	HTC packet
3062  */
static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct htt_soc *soc = (struct htt_soc *) context;
	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	u_int32_t *msg_word;
	enum htt_t2h_msg_type msg_type;
	/* most handlers leave the nbuf to be freed here; the PPDU stats
	 * handler may take ownership and clear this flag
	 */
	bool free_buf = true;

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		/* cancelled packets are expected on teardown; anything else
		 * counts as an HTC error
		 */
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			soc->stats.htc_err_cnt++;

		qdf_nbuf_free(htt_t2h_msg);
		return;
	}

	/* TODO: Check if we should pop the HTC/HTT header alignment padding */

	/* dispatch on the message type carried in word 0 */
	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
	htt_event_record(soc->htt_logger_handle,
			 msg_type, (uint8_t *)msg_word);
	switch (msg_type) {
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
	{
		dp_htt_bkp_event_alert(msg_word, soc);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *peer_mac_addr;
			u_int16_t peer_id;
			u_int16_t hw_peer_id;
			u_int8_t vdev_id;
			u_int8_t is_wds;
			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;

			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
			hw_peer_id =
				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
				(u_int8_t *) (msg_word+1),
				&mac_addr_deswizzle_buf[0]);
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_INFO,
				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
				peer_id, vdev_id);

			/*
			 * check if peer already exists for this peer_id, if so
			 * this peer map event is in response for a wds peer add
			 * wmi command sent during wds source port learning.
			 * in this case just add the ast entry to the existing
			 * peer ast_list.
			 */
			/* NOTE(review): peer_id indexes peer_id_to_obj_map
			 * with no explicit bounds check here -- presumably the
			 * firmware guarantees a valid range; confirm.
			 */
			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
					       vdev_id, peer_mac_addr, 0,
					       is_wds);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
		{
			u_int16_t peer_id;
			u_int8_t vdev_id;
			/* no MAC address carried in a v1 unmap */
			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);

			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
						 vdev_id, mac_addr, 0,
						 DP_PEER_WDS_COUNT_INVALID);
			break;
		}
	case HTT_T2H_MSG_TYPE_SEC_IND:
		{
			u_int16_t peer_id;
			enum cdp_sec_type sec_type;
			int is_unicast;

			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
			/* point to the first part of the Michael key */
			msg_word++;
			dp_rx_sec_ind_handler(
				soc->dp_soc, peer_id, sec_type, is_unicast,
				msg_word, msg_word + 2);
			break;
		}

	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		{
			/* handler returns false when it keeps the nbuf */
			free_buf =
				dp_monitor_ppdu_stats_ind_handler(soc,
								  msg_word,
								  htt_t2h_msg);
			break;
		}

	case HTT_T2H_MSG_TYPE_PKTLOG:
		{
			dp_pktlog_msg_handler(soc, msg_word);
			break;
		}

	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		{
			/*
			 * HTC maintains runtime pm count for H2T messages that
			 * have a response msg from FW. This count ensures that
			 * in the case FW does not sent out the response or host
			 * did not process this indication runtime_put happens
			 * properly in the cleanup path.
			 */
			if (htc_dec_return_runtime_cnt(soc->htc_soc) >= 0)
				htc_pm_runtime_put(soc->htc_soc);
			else
				soc->stats.htt_ver_req_put_skip++;
			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				"target uses HTT version %d.%d; host uses %d.%d",
				soc->tgt_ver.major, soc->tgt_ver.minor,
				HTT_CURRENT_VERSION_MAJOR,
				HTT_CURRENT_VERSION_MINOR);
			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_WARN,
					"*** Incompatible host/target HTT versions!");
			}
			/* abort if the target is incompatible with the host */
			qdf_assert(soc->tgt_ver.major ==
				HTT_CURRENT_VERSION_MAJOR);
			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_INFO_LOW,
					"*** Warning: host/target HTT versions"
					" are different, though compatible!");
			}
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint8_t win_sz;
			uint16_t status;
			struct dp_peer *peer;

			/*
			 * Update REO Queue Desc with new values
			 */
			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
			peer = dp_peer_get_ref_by_id(soc->dp_soc, peer_id,
						     DP_MOD_ID_HTT);

			/*
			 * Window size needs to be incremented by 1
			 * since fw needs to represent a value of 256
			 * using just 8 bits
			 */
			if (peer) {
				status = dp_addba_requestprocess_wifi3(
					(struct cdp_soc_t *)soc->dp_soc,
					peer->mac_addr.raw, peer->vdev->vdev_id,
					0, tid, 0, win_sz + 1, 0xffff);

				/*
				 * If PEER_LOCK_REF_PROTECT enbled dec ref
				 * which is inc by dp_peer_get_ref_by_id
				 */
				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_INFO,
					FL("PeerID %d BAW %d TID %d stat %d"),
					peer_id, win_sz, tid, status);

			} else {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					FL("Peer not found peer id %d"),
					peer_id);
			}
			break;
		}
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		{
			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *peer_mac_addr;
			u_int16_t peer_id;
			u_int16_t hw_peer_id;
			u_int8_t vdev_id;
			bool is_wds;
			u_int16_t ast_hash;
			struct dp_ast_flow_override_info ast_flow_info;

			/* NOTE(review): qdf_mem_set takes (ptr, num_bytes,
			 * value); with num_bytes == 0 this zeroes nothing.
			 * Harmless since every field is assigned below, but
			 * the argument order looks swapped -- confirm.
			 */
			qdf_mem_set(&ast_flow_info, 0,
					    sizeof(struct dp_ast_flow_override_info));

			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
			hw_peer_id =
			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
			peer_mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
			is_wds =
			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
			ast_hash =
			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
			/*
			 * Update 4 ast_index per peer, ast valid mask
			 * and TID flow valid mask.
			 * AST valid mask is 3 bit field corresponds to
			 * ast_index[3:1]. ast_index 0 is always valid.
			 */
			ast_flow_info.ast_valid_mask =
			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
			/* ast_idx[0] is the primary AST (== hw_peer_id);
			 * all four flow masks live in word 4, the extra
			 * indexes in words 4/5/6
			 */
			ast_flow_info.ast_idx[0] = hw_peer_id;
			ast_flow_info.ast_flow_mask[0] =
			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
			ast_flow_info.ast_idx[1] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
			ast_flow_info.ast_flow_mask[1] =
			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
			ast_flow_info.ast_idx[2] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
			ast_flow_info.ast_flow_mask[2] =
			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
			ast_flow_info.ast_idx[3] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
			ast_flow_info.ast_flow_mask[3] =
			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
			/*
			 * TID valid mask is applicable only
			 * for HI and LOW priority flows.
			 * tid_valid_mas is 8 bit field corresponds
			 * to TID[7:0]
			 */
			ast_flow_info.tid_valid_low_pri_mask =
			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
			ast_flow_info.tid_valid_hi_pri_mask =
			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
				  peer_id, vdev_id);

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  "ast_idx[0] %d ast_idx[1] %d ast_idx[2] %d ast_idx[3] %d n",
				  ast_flow_info.ast_idx[0],
				  ast_flow_info.ast_idx[1],
				  ast_flow_info.ast_idx[2],
				  ast_flow_info.ast_idx[3]);

			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
					       hw_peer_id, vdev_id,
					       peer_mac_addr, ast_hash,
					       is_wds);

			/*
			 * Update ast indexes for flow override support
			 * Applicable only for non wds peers
			 */
			if (!soc->dp_soc->ast_offload_support)
				dp_peer_ast_index_flow_queue_map_create(
						soc->dp_soc, is_wds,
						peer_id, peer_mac_addr,
						&ast_flow_info);

			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *mac_addr;
			u_int16_t peer_id;
			u_int8_t vdev_id;
			u_int8_t is_wds;
			u_int32_t free_wds_count;

			peer_id =
			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
			mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
			is_wds =
			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
			/* number of WDS AST entries FW freed for this peer */
			free_wds_count =
			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
				  peer_id, vdev_id);

			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
						 vdev_id, mac_addr,
						 is_wds, free_wds_count);
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint8_t win_sz;
			QDF_STATUS status;

			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_DELBA_TID_GET(*msg_word);
			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);

			status = dp_rx_delba_ind_handler(
				soc->dp_soc,
				peer_id, tid, win_sz);

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
				  peer_id, win_sz, tid, status);
			break;
		}
	case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
		{
			uint16_t num_entries;
			uint32_t cmem_ba_lo;
			uint32_t cmem_ba_hi;

			/* words 1/2 carry the 64-bit CMEM base address */
			num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
			cmem_ba_lo = *(msg_word + 1);
			cmem_ba_hi = *(msg_word + 2);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
				  num_entries, cmem_ba_lo, cmem_ba_hi);

			dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
						     cmem_ba_lo, cmem_ba_hi);
			break;
		}
	case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
		{
			dp_offload_ind_handler(soc, msg_word);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_MAP_V3:
	{
		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
		u_int8_t *peer_mac_addr;
		u_int16_t peer_id;
		u_int16_t hw_peer_id;
		u_int8_t vdev_id;
		uint8_t is_wds;
		u_int16_t ast_hash = 0;

		peer_id = HTT_RX_PEER_MAP_V3_SW_PEER_ID_GET(*msg_word);
		vdev_id = HTT_RX_PEER_MAP_V3_VDEV_ID_GET(*msg_word);
		peer_mac_addr =
		htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
					   &mac_addr_deswizzle_buf[0]);
		hw_peer_id = HTT_RX_PEER_MAP_V3_HW_PEER_ID_GET(*(msg_word + 3));
		ast_hash = HTT_RX_PEER_MAP_V3_CACHE_SET_NUM_GET(*(msg_word + 3));
		is_wds = HTT_RX_PEER_MAP_V3_NEXT_HOP_GET(*(msg_word + 4));

		dp_htt_info("HTT_T2H_MSG_TYPE_PEER_MAP_V3 msg for peer id %d vdev id %d n",
			    peer_id, vdev_id);

		dp_rx_peer_map_handler(soc->dp_soc, peer_id,
				       hw_peer_id, vdev_id,
				       peer_mac_addr, ast_hash,
				       is_wds);

		break;
	}
	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP:
	{
		dp_htt_mlo_peer_map_handler(soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_UNMAP:
	{
		dp_htt_mlo_peer_unmap_handler(soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
	{
		dp_rx_mlo_timestamp_ind_handler(soc->dp_soc, msg_word);
		break;
	}
	case HTT_T2H_MSG_TYPE_VDEVS_TXRX_STATS_PERIODIC_IND:
	{
		dp_vdev_txrx_hw_stats_handler(soc, msg_word);
		break;
	}
	case HTT_T2H_SAWF_DEF_QUEUES_MAP_REPORT_CONF:
	{
		dp_sawf_def_queues_update_map_report_conf(soc, msg_word,
							  htt_t2h_msg);
		break;
	}
	case HTT_T2H_SAWF_MSDUQ_INFO_IND:
	{
		dp_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
		break;
	}

	default:
		break;
	};

	/* Free the indication buffer */
	if (free_buf)
		qdf_nbuf_free(htt_t2h_msg);
}
3492 
3493 /*
3494  * dp_htt_h2t_full() - Send full handler (called from HTC)
3495  * @context:	Opaque context (HTT SOC handle)
3496  * @pkt:	HTC packet
3497  *
3498  * Return: enum htc_send_full_action
3499  */
static enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	/* never drop H2T messages on queue-full; let HTC keep queueing */
	return HTC_SEND_FULL_KEEP;
}
3505 
3506 /*
3507  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3508  * @context:	Opaque context (HTT SOC handle)
3509  * @nbuf:	nbuf containing T2H message
3510  * @pipe_id:	HIF pipe ID
3511  *
3512  * Return: QDF_STATUS
3513  *
3514  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3515  * will be used for packet log and other high-priority HTT messages. Proper
3516  * HTC connection to be added later once required FW changes are available
3517  */
3518 static QDF_STATUS
3519 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3520 {
3521 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3522 	HTC_PACKET htc_pkt;
3523 
3524 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3525 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3526 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3527 	htc_pkt.pPktContext = (void *)nbuf;
3528 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3529 
3530 	return rc;
3531 }
3532 
3533 /*
3534  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3535  * @htt_soc:	HTT SOC handle
3536  *
3537  * Return: QDF_STATUS
3538  */
3539 static QDF_STATUS
3540 htt_htc_soc_attach(struct htt_soc *soc)
3541 {
3542 	struct htc_service_connect_req connect;
3543 	struct htc_service_connect_resp response;
3544 	QDF_STATUS status;
3545 	struct dp_soc *dpsoc = soc->dp_soc;
3546 
3547 	qdf_mem_zero(&connect, sizeof(connect));
3548 	qdf_mem_zero(&response, sizeof(response));
3549 
3550 	connect.pMetaData = NULL;
3551 	connect.MetaDataLength = 0;
3552 	connect.EpCallbacks.pContext = soc;
3553 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3554 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3555 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3556 
3557 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3558 	connect.EpCallbacks.EpRecvRefill = NULL;
3559 
3560 	/* N/A, fill is done by HIF */
3561 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3562 
3563 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3564 	/*
3565 	 * Specify how deep to let a queue get before htc_send_pkt will
3566 	 * call the EpSendFull function due to excessive send queue depth.
3567 	 */
3568 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3569 
3570 	/* disable flow control for HTT data message service */
3571 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3572 
3573 	/* connect to control service */
3574 	connect.service_id = HTT_DATA_MSG_SVC;
3575 
3576 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3577 
3578 	if (status != QDF_STATUS_SUCCESS)
3579 		return status;
3580 
3581 	soc->htc_endpoint = response.Endpoint;
3582 
3583 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3584 
3585 	htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc);
3586 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3587 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3588 
3589 	return QDF_STATUS_SUCCESS; /* success */
3590 }
3591 
3592 /*
3593  * htt_soc_initialize() - SOC level HTT initialization
3594  * @htt_soc: Opaque htt SOC handle
3595  * @ctrl_psoc: Opaque ctrl SOC handle
3596  * @htc_soc: SOC level HTC handle
3597  * @hal_soc: Opaque HAL SOC handle
3598  * @osdev: QDF device
3599  *
3600  * Return: HTT handle on success; NULL on failure
3601  */
3602 void *
3603 htt_soc_initialize(struct htt_soc *htt_soc,
3604 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
3605 		   HTC_HANDLE htc_soc,
3606 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
3607 {
3608 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3609 
3610 	soc->osdev = osdev;
3611 	soc->ctrl_psoc = ctrl_psoc;
3612 	soc->htc_soc = htc_soc;
3613 	soc->hal_soc = hal_soc_hdl;
3614 
3615 	if (htt_htc_soc_attach(soc))
3616 		goto fail2;
3617 
3618 	return soc;
3619 
3620 fail2:
3621 	return NULL;
3622 }
3623 
/* Release the HTC-facing resources of an HTT SOC: event logger, misc
 * packet pool, and the preallocated HTC packet pool.
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
3630 
3631 /*
3632  * htt_soc_htc_prealloc() - HTC memory prealloc
3633  * @htt_soc: SOC level HTT handle
3634  *
3635  * Return: QDF_STATUS_SUCCESS on Success or
3636  * QDF_STATUS_E_NOMEM on allocation failure
3637  */
3638 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
3639 {
3640 	int i;
3641 
3642 	soc->htt_htc_pkt_freelist = NULL;
3643 	/* pre-allocate some HTC_PACKET objects */
3644 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3645 		struct dp_htt_htc_pkt_union *pkt;
3646 		pkt = qdf_mem_malloc(sizeof(*pkt));
3647 		if (!pkt)
3648 			return QDF_STATUS_E_NOMEM;
3649 
3650 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3651 	}
3652 	return QDF_STATUS_SUCCESS;
3653 }
3654 
3655 /*
3656  * htt_soc_detach() - Free SOC level HTT handle
3657  * @htt_hdl: HTT SOC handle
3658  */
3659 void htt_soc_detach(struct htt_soc *htt_hdl)
3660 {
3661 	int i;
3662 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
3663 
3664 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3665 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
3666 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
3667 	}
3668 
3669 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
3670 	qdf_mem_free(htt_handle);
3671 
3672 }
3673 
3674 /**
3675  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
3676  * @pdev: DP PDEV handle
3677  * @stats_type_upload_mask: stats type requested by user
3678  * @config_param_0: extra configuration parameters
3679  * @config_param_1: extra configuration parameters
3680  * @config_param_2: extra configuration parameters
3681  * @config_param_3: extra configuration parameters
3682  * @mac_id: mac number
3683  *
3684  * return: QDF STATUS
3685  */
QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint32_t config_param_0,
		uint32_t config_param_1, uint32_t config_param_2,
		uint32_t config_param_3, int cookie_val, int cookie_msb,
		uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask = 0;
	uint8_t *htt_logger_bufp;
	int mac_for_pdev;
	int target_pdev_id;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);

	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
	target_pdev_id =
	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);

	/* request stats for this target pdev only */
	pdev_mask = 1 << target_pdev_id;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand head for HTT_EXT_STATS");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* NOTE(review): the "config_param_4" label in this log actually
	 * prints config_param_3 -- the text is misleading but left as-is.
	 */
	dp_htt_tx_stats_info("%pK: cookie <-> %d\n config_param_0 %u\n"
			     "config_param_1 %u\n config_param_2 %u\n"
			     "config_param_4 %u\n -------------",
			     pdev->soc, cookie_val,
			     config_param_0,
			     config_param_1, config_param_2, config_param_3);

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	/* word 0: msg type, pdev mask, stats type mask */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);

	/* word 2 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);

	/* word 3 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);

	/* word 4 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);

	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);

	/* word 5 */
	/* NOTE(review): word 5 is skipped without being zero-initialized;
	 * presumably the firmware ignores it for this request -- confirm.
	 */
	msg_word++;

	/* word 6 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);

	/* word 7 */
	msg_word++;
	*msg_word = 0;
	/* Currently Using last 2 bits for pdev_id
	 * For future reference, reserving 3 bits in cookie_msb for pdev_id
	 */
	cookie_msb = (cookie_msb | pdev->pdev_id);
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for FW response msg not guaranteed */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
				     htt_logger_bufp);

	/* on send failure both the nbuf and the HTC packet must be released */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
3816 
3817 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
3818 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK 0xFFFFFFFF
3819 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK 0xFFFFFFFF00000000
3820 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT 32
3821 
/**
 * dp_h2t_hw_vdev_stats_config_send() - Send HW vdev TxRx stats config to FW
 * @dpsoc: Datapath SoC handle
 * @pdev_id: host pdev id, or INVALID_PDEV_ID to target pdev 0
 * @enable: enable/disable HW vdev stats collection
 * @reset: request reset of the stats selected by @reset_bitmask
 * @reset_bitmask: 64-bit bitmask of vdev stats to reset
 *
 * Return: QDF_STATUS_SUCCESS on send, QDF_STATUS_E_NOMEM/E_FAILURE otherwise
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	struct htt_soc *soc = dpsoc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;
	int duration;
	uint32_t bitmask;
	int target_pdev_id;

	/* Room for the HTT payload plus HTC header and alignment padding */
	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_vdevs_txrx_stats_cfg)),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate "
		"HTT_H2T_HW_VDEV_TXRX_STATS_CFG_MSG_SZ msg buffer", dpsoc);
		return QDF_STATUS_E_NOMEM;
	}

	if (pdev_id != INVALID_PDEV_ID)
		target_pdev_id = DP_SW2HW_MACID(pdev_id);
	else
		target_pdev_id = 0;

	/* Periodic stats upload interval comes from the ini/cfg layer */
	duration =
	wlan_cfg_get_vdev_stats_hw_offload_timer(dpsoc->wlan_cfg_ctx);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_vdevs_txrx_stats_cfg))) {
		dp_htt_err("%pK: Failed to expand head for HTT_HW_VDEV_STATS"
			   , dpsoc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;

	/* Word 0: msg type, target pdev, enable flag, interval, reset flag */
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG);
	HTT_RX_VDEVS_TXRX_STATS_PDEV_ID_SET(*msg_word, target_pdev_id);

	HTT_RX_VDEVS_TXRX_STATS_ENABLE_SET(*msg_word, enable);

	/* NOTE(review): interval is sent as duration >> 3 — the FW field
	 * presumably uses units of 8x the configured timer; confirm in htt.h
	 */
	HTT_RX_VDEVS_TXRX_STATS_PERIODIC_INTERVAL_SET(*msg_word,
						      (duration >> 3));

	HTT_RX_VDEVS_TXRX_STATS_RESET_STATS_BITS_SET(*msg_word, reset);

	/* Word 1: lower 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask = (reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK);
	*msg_word = bitmask;

	/* Word 2: upper 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask =
		((reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK) >>
		 HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT);
	*msg_word = bitmask;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer",
			   dpsoc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG,
				     htt_logger_bufp);

	/* On send failure the completion callback won't run; free here */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
3927 #else
/* Stub used when QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT is not compiled in */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	/* HW vdev stats offload unsupported in this build; succeed silently */
	return QDF_STATUS_SUCCESS;
}
3934 #endif
3935 
3936 /**
 * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration
3938  * HTT message to pass to FW
3939  * @pdev: DP PDEV handle
3940  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
3941  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
3942  *
3943  * tuple_mask[1:0]:
3944  *   00 - Do not report 3 tuple hash value
3945  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
3946  *   01 - Report 3 tuple hash value in flow_id_toeplitz
3947  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
3948  *
3949  * return: QDF STATUS
3950  */
3951 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
3952 				     uint32_t tuple_mask, uint8_t mac_id)
3953 {
3954 	struct htt_soc *soc = pdev->soc->htt_handle;
3955 	struct dp_htt_htc_pkt *pkt;
3956 	qdf_nbuf_t msg;
3957 	uint32_t *msg_word;
3958 	uint8_t *htt_logger_bufp;
3959 	int mac_for_pdev;
3960 	int target_pdev_id;
3961 
3962 	msg = qdf_nbuf_alloc(
3963 			soc->osdev,
3964 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
3965 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3966 
3967 	if (!msg)
3968 		return QDF_STATUS_E_NOMEM;
3969 
3970 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3971 	target_pdev_id =
3972 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
3973 
3974 	/*
3975 	 * Set the length of the message.
3976 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3977 	 * separately during the below call to qdf_nbuf_push_head.
3978 	 * The contribution from the HTC header is added separately inside HTC.
3979 	 */
3980 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
3981 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3982 			  "Failed to expand head for HTT_3TUPLE_CONFIG");
3983 		qdf_nbuf_free(msg);
3984 		return QDF_STATUS_E_FAILURE;
3985 	}
3986 
3987 	dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------",
3988 		    pdev->soc, tuple_mask, target_pdev_id);
3989 
3990 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
3991 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3992 	htt_logger_bufp = (uint8_t *)msg_word;
3993 
3994 	*msg_word = 0;
3995 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
3996 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
3997 
3998 	msg_word++;
3999 	*msg_word = 0;
4000 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4001 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4002 
4003 	pkt = htt_htc_pkt_alloc(soc);
4004 	if (!pkt) {
4005 		qdf_nbuf_free(msg);
4006 		return QDF_STATUS_E_NOMEM;
4007 	}
4008 
4009 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4010 
4011 	SET_HTC_PACKET_INFO_TX(
4012 			&pkt->htc_pkt,
4013 			dp_htt_h2t_send_complete_free_netbuf,
4014 			qdf_nbuf_data(msg),
4015 			qdf_nbuf_len(msg),
4016 			soc->htc_endpoint,
4017 			/* tag for no FW response msg */
4018 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4019 
4020 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4021 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
4022 			    htt_logger_bufp);
4023 
4024 	return QDF_STATUS_SUCCESS;
4025 }
4026 
/* This workaround will be reverted once a proper definition of
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is available in htt.h
 */
4030 #if defined(WDI_EVENT_ENABLE)
4031 /**
4032  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
4033  * @pdev: DP PDEV handle
4034  * @stats_type_upload_mask: stats type requested by user
4035  * @mac_id: Mac id number
4036  *
4037  * return: QDF STATUS
4038  */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;
	QDF_STATUS status;

	/* Room for the one-word HTT payload plus HTC header and padding */
	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer"
			   , pdev->soc);
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
								mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS"
			   , pdev->soc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* Single-word payload: msg type, pdev mask, requested TLV bitmask */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
			stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer", pdev->soc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for no FW response msg */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	/* msg_word still points at word 0, so it doubles as the logger buf */
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
				     (uint8_t *)msg_word);

	/* On send failure the completion callback won't run; free here */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4120 
4121 qdf_export_symbol(dp_h2t_cfg_stats_msg_send);
4122 #endif
4123 
4124 void
4125 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
4126 			     uint32_t *tag_buf)
4127 {
4128 	struct dp_peer *peer = NULL;
4129 	switch (tag_type) {
4130 	case HTT_STATS_PEER_DETAILS_TAG:
4131 	{
4132 		htt_peer_details_tlv *dp_stats_buf =
4133 			(htt_peer_details_tlv *)tag_buf;
4134 
4135 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
4136 	}
4137 	break;
4138 	case HTT_STATS_PEER_STATS_CMN_TAG:
4139 	{
4140 		htt_peer_stats_cmn_tlv *dp_stats_buf =
4141 			(htt_peer_stats_cmn_tlv *)tag_buf;
4142 
4143 		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
4144 					     DP_MOD_ID_HTT);
4145 
4146 		if (peer && !peer->bss_peer) {
4147 			peer->stats.tx.inactive_time =
4148 				dp_stats_buf->inactive_time;
4149 			qdf_event_set(&pdev->fw_peer_stats_event);
4150 		}
4151 		if (peer)
4152 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4153 	}
4154 	break;
4155 	default:
4156 		qdf_err("Invalid tag_type");
4157 	}
4158 }
4159 
4160 /**
4161  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
4162  * @pdev: DP pdev handle
4163  * @fse_setup_info: FST setup parameters
4164  *
4165  * Return: Success when HTT message is sent, error on failure
4166  */
4167 QDF_STATUS
4168 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
4169 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
4170 {
4171 	struct htt_soc *soc = pdev->soc->htt_handle;
4172 	struct dp_htt_htc_pkt *pkt;
4173 	qdf_nbuf_t msg;
4174 	u_int32_t *msg_word;
4175 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4176 	uint8_t *htt_logger_bufp;
4177 	u_int32_t *key;
4178 	QDF_STATUS status;
4179 
4180 	msg = qdf_nbuf_alloc(
4181 		soc->osdev,
4182 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4183 		/* reserve room for the HTC header */
4184 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4185 
4186 	if (!msg)
4187 		return QDF_STATUS_E_NOMEM;
4188 
4189 	/*
4190 	 * Set the length of the message.
4191 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4192 	 * separately during the below call to qdf_nbuf_push_head.
4193 	 * The contribution from the HTC header is added separately inside HTC.
4194 	 */
4195 	if (!qdf_nbuf_put_tail(msg,
4196 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4197 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
4198 		return QDF_STATUS_E_FAILURE;
4199 	}
4200 
4201 	/* fill in the message contents */
4202 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4203 
4204 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4205 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4206 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4207 	htt_logger_bufp = (uint8_t *)msg_word;
4208 
4209 	*msg_word = 0;
4210 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4211 
4212 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4213 
4214 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4215 
4216 	msg_word++;
4217 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4218 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4219 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4220 					     fse_setup_info->ip_da_sa_prefix);
4221 
4222 	msg_word++;
4223 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4224 					  fse_setup_info->base_addr_lo);
4225 	msg_word++;
4226 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4227 					  fse_setup_info->base_addr_hi);
4228 
4229 	key = (u_int32_t *)fse_setup_info->hash_key;
4230 	fse_setup->toeplitz31_0 = *key++;
4231 	fse_setup->toeplitz63_32 = *key++;
4232 	fse_setup->toeplitz95_64 = *key++;
4233 	fse_setup->toeplitz127_96 = *key++;
4234 	fse_setup->toeplitz159_128 = *key++;
4235 	fse_setup->toeplitz191_160 = *key++;
4236 	fse_setup->toeplitz223_192 = *key++;
4237 	fse_setup->toeplitz255_224 = *key++;
4238 	fse_setup->toeplitz287_256 = *key++;
4239 	fse_setup->toeplitz314_288 = *key;
4240 
4241 	msg_word++;
4242 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4243 	msg_word++;
4244 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4245 	msg_word++;
4246 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4247 	msg_word++;
4248 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4249 	msg_word++;
4250 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4251 	msg_word++;
4252 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4253 	msg_word++;
4254 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4255 	msg_word++;
4256 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4257 	msg_word++;
4258 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4259 	msg_word++;
4260 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4261 					  fse_setup->toeplitz314_288);
4262 
4263 	pkt = htt_htc_pkt_alloc(soc);
4264 	if (!pkt) {
4265 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4266 		qdf_assert(0);
4267 		qdf_nbuf_free(msg);
4268 		return QDF_STATUS_E_RESOURCES; /* failure */
4269 	}
4270 
4271 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4272 
4273 	SET_HTC_PACKET_INFO_TX(
4274 		&pkt->htc_pkt,
4275 		dp_htt_h2t_send_complete_free_netbuf,
4276 		qdf_nbuf_data(msg),
4277 		qdf_nbuf_len(msg),
4278 		soc->htc_endpoint,
4279 		/* tag for no FW response msg */
4280 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4281 
4282 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4283 
4284 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4285 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4286 				     htt_logger_bufp);
4287 
4288 	if (status == QDF_STATUS_SUCCESS) {
4289 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4290 			fse_setup_info->pdev_id);
4291 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4292 				   (void *)fse_setup_info->hash_key,
4293 				   fse_setup_info->hash_key_len);
4294 	} else {
4295 		qdf_nbuf_free(msg);
4296 		htt_htc_pkt_free(soc, pkt);
4297 	}
4298 
4299 	return status;
4300 }
4301 
4302 /**
4303  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
4304  * add/del a flow in HW
4305  * @pdev: DP pdev handle
4306  * @fse_op_info: Flow entry parameters
4307  *
4308  * Return: Success when HTT message is sent, error on failure
4309  */
QDF_STATUS
dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);

	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;

	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
	msg_word++;
	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
		/* Per-entry invalidate: identify the flow by its full
		 * 5-tuple (src/dst IPv6-sized addresses, ports, protocol),
		 * with addresses converted to network byte order.
		 */
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(
		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
		msg_word++;
		HTT_RX_FSE_SOURCEPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.src_port);
		HTT_RX_FSE_DESTPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.dest_port);
		msg_word++;
		HTT_RX_FSE_L4_PROTO_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
	}

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(msg),
		qdf_nbuf_len(msg),
		soc->htc_endpoint,
		/* tag for no FW response msg */
		HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
				     htt_logger_bufp);

	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
			fse_op_info->pdev_id);
	} else {
		/* Send failed: completion callback won't run, free here */
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4450 
4451 /**
4452  * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
4453  * @pdev: DP pdev handle
 * @fisa_config: FISA configuration parameters
4455  *
4456  * Return: Success when HTT message is sent, error on failure
4457  */
4458 QDF_STATUS
4459 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
4460 		      struct dp_htt_rx_fisa_cfg *fisa_config)
4461 {
4462 	struct htt_soc *soc = pdev->soc->htt_handle;
4463 	struct dp_htt_htc_pkt *pkt;
4464 	qdf_nbuf_t msg;
4465 	u_int32_t *msg_word;
4466 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
4467 	uint8_t *htt_logger_bufp;
4468 	uint32_t len;
4469 	QDF_STATUS status;
4470 
4471 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
4472 
4473 	msg = qdf_nbuf_alloc(soc->osdev,
4474 			     len,
4475 			     /* reserve room for the HTC header */
4476 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
4477 			     4,
4478 			     TRUE);
4479 	if (!msg)
4480 		return QDF_STATUS_E_NOMEM;
4481 
4482 	/*
4483 	 * Set the length of the message.
4484 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4485 	 * separately during the below call to qdf_nbuf_push_head.
4486 	 * The contribution from the HTC header is added separately inside HTC.
4487 	 */
4488 	if (!qdf_nbuf_put_tail(msg,
4489 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
4490 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4491 		qdf_nbuf_free(msg);
4492 		return QDF_STATUS_E_FAILURE;
4493 	}
4494 
4495 	/* fill in the message contents */
4496 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4497 
4498 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
4499 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4500 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4501 	htt_logger_bufp = (uint8_t *)msg_word;
4502 
4503 	*msg_word = 0;
4504 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
4505 
4506 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
4507 
4508 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);
4509 
4510 	msg_word++;
4511 	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
4512 	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);
4513 
4514 	msg_word++;
4515 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
4516 
4517 	pkt = htt_htc_pkt_alloc(soc);
4518 	if (!pkt) {
4519 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4520 		qdf_assert(0);
4521 		qdf_nbuf_free(msg);
4522 		return QDF_STATUS_E_RESOURCES; /* failure */
4523 	}
4524 
4525 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4526 
4527 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4528 			       dp_htt_h2t_send_complete_free_netbuf,
4529 			       qdf_nbuf_data(msg),
4530 			       qdf_nbuf_len(msg),
4531 			       soc->htc_endpoint,
4532 			       /* tag for no FW response msg */
4533 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
4534 
4535 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4536 
4537 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
4538 				     htt_logger_bufp);
4539 
4540 	if (status == QDF_STATUS_SUCCESS) {
4541 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
4542 			fisa_config->pdev_id);
4543 	} else {
4544 		qdf_nbuf_free(msg);
4545 		htt_htc_pkt_free(soc, pkt);
4546 	}
4547 
4548 	return status;
4549 }
4550 
4551 #ifdef WLAN_SUPPORT_PPEDS
4552 /**
4553  * dp_htt_rxdma_rxole_ppe_cfg_set() - Send RxOLE and RxDMA PPE config
 * @soc: Data path SoC handle
4555  * @cfg: RxDMA and RxOLE PPE config
4556  *
4557  * Return: Success when HTT message is sent, error on failure
4558  */
QDF_STATUS
dp_htt_rxdma_rxole_ppe_cfg_set(struct dp_soc *soc,
			       struct dp_htt_rxdma_rxole_ppe_config *cfg)
{
	struct htt_soc *htt_handle = soc->htt_handle;
	uint32_t len;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	QDF_STATUS status;
	uint8_t *htt_logger_bufp;
	struct dp_htt_htc_pkt *pkt;

	len = HTT_MSG_BUF_SIZE(
	      sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(
		msg, sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t))) {
		qdf_err("Failed to expand head for HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0,
	       sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));

	/* Rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	/* Single-word payload: msg type plus the PPE override flags */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG);
	HTT_PPE_CFG_OVERRIDE_SET(*msg_word, cfg->override);
	HTT_PPE_CFG_REO_DEST_IND_SET(
			*msg_word, cfg->reo_destination_indication);
	HTT_PPE_CFG_MULTI_BUF_MSDU_OVERRIDE_EN_SET(
			*msg_word, cfg->multi_buffer_msdu_override_en);
	HTT_PPE_CFG_INTRA_BSS_OVERRIDE_EN_SET(
			*msg_word, cfg->intra_bss_override);
	HTT_PPE_CFG_DECAP_RAW_OVERRIDE_EN_SET(
			*msg_word, cfg->decap_raw_override);
	HTT_PPE_CFG_DECAP_NWIFI_OVERRIDE_EN_SET(
			*msg_word, cfg->decap_nwifi_override);
	HTT_PPE_CFG_IP_FRAG_OVERRIDE_EN_SET(
			*msg_word, cfg->ip_frag_override);

	pkt = htt_htc_pkt_alloc(htt_handle);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       htt_handle->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(htt_handle, pkt,
				     HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG,
				     htt_logger_bufp);

	/* Send failed: completion callback won't run, free here */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(htt_handle, pkt);
		return status;
	}

	dp_info("HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG sent");
	return status;
}
4655 #endif /* WLAN_SUPPORT_PPEDS */
4656 
4657 /**
4658  * dp_bk_pressure_stats_handler(): worker function to print back pressure
4659  *				   stats
4660  *
4661  * @context : argument to work function
4662  */
static void dp_bk_pressure_stats_handler(void *context)
{
	struct dp_pdev *pdev = (struct dp_pdev *)context;
	struct dp_soc_srngs_state *soc_srngs_state = NULL;
	const char *ring_name;
	int i;
	struct dp_srng_ring_state *ring_state;
	bool empty_flag;

	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);

	/* Extract only first entry for printing in one work event */
	if (pdev->bkp_stats.queue_depth &&
	    !TAILQ_EMPTY(&pdev->bkp_stats.list)) {
		soc_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
		TAILQ_REMOVE(&pdev->bkp_stats.list, soc_srngs_state,
			     list_elem);
		pdev->bkp_stats.queue_depth--;
	}

	/* Sample emptiness under the lock; used below to decide re-schedule */
	empty_flag = TAILQ_EMPTY(&pdev->bkp_stats.list);
	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);

	if (soc_srngs_state) {
		DP_PRINT_STATS("### BKP stats for seq_num %u START ###",
			       soc_srngs_state->seq_num);
		/* Dump SW and HW head/tail pointers for every captured ring */
		for (i = 0; i < soc_srngs_state->max_ring_id; i++) {
			ring_state = &soc_srngs_state->ring_state[i];
			ring_name = dp_srng_get_str_from_hal_ring_type
						(ring_state->ring_type);
			DP_PRINT_STATS("%s: SW:Head pointer = %d Tail Pointer = %d\n",
				       ring_name,
				       ring_state->sw_head,
				       ring_state->sw_tail);

			DP_PRINT_STATS("%s: HW:Head pointer = %d Tail Pointer = %d\n",
				       ring_name,
				       ring_state->hw_head,
				       ring_state->hw_tail);
		}

		DP_PRINT_STATS("### BKP stats for seq_num %u COMPLETE ###",
			       soc_srngs_state->seq_num);
		/* Entry was dequeued above; this work item owns and frees it */
		qdf_mem_free(soc_srngs_state);
	}
	dp_print_napi_stats(pdev->soc);

	/* Schedule work again if queue is not empty */
	if (!empty_flag)
		qdf_queue_work(0, pdev->bkp_stats.work_queue,
			       &pdev->bkp_stats.work);
}
4715 
4716 /*
4717  * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
4718  *				processing
4719  * @pdev: Datapath PDEV handle
4720  *
4721  */
void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev)
{
	struct dp_soc_srngs_state *ring_state, *ring_state_next;

	/* Nothing to tear down if attach never completed */
	if (!pdev->bkp_stats.work_queue)
		return;

	/* Drain and destroy the workqueue before touching the list, so no
	 * work item can race with the teardown below
	 */
	qdf_flush_workqueue(0, pdev->bkp_stats.work_queue);
	qdf_destroy_workqueue(0, pdev->bkp_stats.work_queue);
	qdf_flush_work(&pdev->bkp_stats.work);
	qdf_disable_work(&pdev->bkp_stats.work);
	/* Free any queued-but-unprinted snapshots */
	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
	TAILQ_FOREACH_SAFE(ring_state, &pdev->bkp_stats.list,
			   list_elem, ring_state_next) {
		TAILQ_REMOVE(&pdev->bkp_stats.list, ring_state,
			     list_elem);
		qdf_mem_free(ring_state);
	}
	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
	qdf_spinlock_destroy(&pdev->bkp_stats.list_lock);
}
4743 
4744 /*
4745  * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
4746  *				processing
4747  * @pdev: Datapath PDEV handle
4748  *
4749  * Return: QDF_STATUS_SUCCESS: Success
4750  *         QDF_STATUS_E_NOMEM: Error
4751  */
4752 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev)
4753 {
4754 	TAILQ_INIT(&pdev->bkp_stats.list);
4755 	pdev->bkp_stats.seq_num = 0;
4756 	pdev->bkp_stats.queue_depth = 0;
4757 
4758 	qdf_create_work(0, &pdev->bkp_stats.work,
4759 			dp_bk_pressure_stats_handler, pdev);
4760 
4761 	pdev->bkp_stats.work_queue =
4762 		qdf_alloc_unbound_workqueue("dp_bkp_work_queue");
4763 	if (!pdev->bkp_stats.work_queue)
4764 		goto fail;
4765 
4766 	qdf_spinlock_create(&pdev->bkp_stats.list_lock);
4767 	return QDF_STATUS_SUCCESS;
4768 
4769 fail:
4770 	dp_htt_alert("BKP stats attach failed");
4771 	qdf_flush_work(&pdev->bkp_stats.work);
4772 	qdf_disable_work(&pdev->bkp_stats.work);
4773 	return QDF_STATUS_E_FAILURE;
4774 }
4775