xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 5db38f17138c409346f0ba0d72e13640f35d04b5)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <htt.h>
21 #include <hal_hw_headers.h>
22 #include <hal_api.h>
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_ipa.h"
27 #include "dp_rx.h"
28 #include "htt_stats.h"
29 #include "htt_ppdu_stats.h"
30 #include "dp_htt.h"
31 #ifdef WIFI_MONITOR_SUPPORT
32 #include <dp_mon.h>
33 #endif
34 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
35 #include "cdp_txrx_cmn_struct.h"
36 #ifdef IPA_OPT_WIFI_DP
37 #include "cdp_txrx_ipa.h"
38 #endif
39 #ifdef FEATURE_PERPKT_INFO
40 #include "dp_ratetable.h"
41 #endif
42 #include <qdf_module.h>
43 #ifdef CONFIG_SAWF_DEF_QUEUES
44 #include <dp_sawf_htt.h>
45 #endif
46 
47 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
48 
49 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
50 
51 #define HTT_MSG_BUF_SIZE(msg_bytes) \
52 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
53 
54 #define HTT_PID_BIT_MASK 0x3
55 
56 #define DP_EXT_MSG_LENGTH 2048
57 #define HTT_HEADER_LEN 16
58 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
59 
60 #define HTT_SHIFT_UPPER_TIMESTAMP 32
61 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
62 #define HTT_BKP_STATS_MAX_QUEUE_DEPTH 16
63 
64 struct dp_htt_htc_pkt *
65 htt_htc_pkt_alloc(struct htt_soc *soc)
66 {
67 	struct dp_htt_htc_pkt_union *pkt = NULL;
68 
69 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
70 	if (soc->htt_htc_pkt_freelist) {
71 		pkt = soc->htt_htc_pkt_freelist;
72 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
73 	}
74 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
75 
76 	if (!pkt)
77 		pkt = qdf_mem_malloc(sizeof(*pkt));
78 
79 	if (!pkt)
80 		return NULL;
81 
82 	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
83 
84 	return &pkt->u.pkt; /* not actually a dereference */
85 }
86 
87 qdf_export_symbol(htt_htc_pkt_alloc);
88 
89 void
90 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
91 {
92 	struct dp_htt_htc_pkt_union *u_pkt =
93 		(struct dp_htt_htc_pkt_union *)pkt;
94 
95 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
96 	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
97 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
98 	soc->htt_htc_pkt_freelist = u_pkt;
99 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
100 }
101 
102 qdf_export_symbol(htt_htc_pkt_free);
103 
104 void
105 htt_htc_pkt_pool_free(struct htt_soc *soc)
106 {
107 	struct dp_htt_htc_pkt_union *pkt, *next;
108 	pkt = soc->htt_htc_pkt_freelist;
109 	while (pkt) {
110 		next = pkt->u.next;
111 		qdf_mem_free(pkt);
112 		pkt = next;
113 	}
114 	soc->htt_htc_pkt_freelist = NULL;
115 }
116 
117 
118 #ifndef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
119 
120 /**
121  * htt_htc_misc_pkt_list_trim() - trim misc list
122  * @soc: HTT SOC handle
123  * @level: max no. of pkts in list
124  */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;	/* 1-based position of the current node in the list */
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			/* Beyond the cap: unmap and release both the nbuf
			 * that carried the HTT message and its wrapper.
			 */
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			/* NULL here makes 'prev' stay pinned at the last
			 * kept node, so the sever below happens exactly once
			 * (at the first trimmed node) and is skipped for the
			 * rest of the freed tail.
			 */
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
	/* NOTE(review): if level == 0 every node is freed but
	 * soc->htt_htc_pkt_misclist is not reset — presumably callers always
	 * pass level > 0 (queue depth + misclist size); confirm at call sites.
	 */
}
152 
153 void
154 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
155 {
156 	struct dp_htt_htc_pkt_union *u_pkt =
157 				(struct dp_htt_htc_pkt_union *)pkt;
158 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
159 							pkt->htc_pkt.Endpoint)
160 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
161 
162 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
163 	if (soc->htt_htc_pkt_misclist) {
164 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
165 		soc->htt_htc_pkt_misclist = u_pkt;
166 	} else {
167 		soc->htt_htc_pkt_misclist = u_pkt;
168 	}
169 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
170 
171 	/* only ce pipe size + tx_queue_depth could possibly be in use
172 	 * free older packets in the misclist
173 	 */
174 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
175 }
176 
177 qdf_export_symbol(htt_htc_misc_pkt_list_add);
178 #endif  /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
179 
180 /**
181  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
182  * @soc:	HTT SOC handle
183  */
184 static void
185 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
186 {
187 	struct dp_htt_htc_pkt_union *pkt, *next;
188 	qdf_nbuf_t netbuf;
189 
190 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
191 	pkt = soc->htt_htc_pkt_misclist;
192 
193 	while (pkt) {
194 		next = pkt->u.next;
195 		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
196 		    HTC_PACKET_MAGIC_COOKIE) {
197 			pkt = next;
198 			soc->stats.skip_count++;
199 			continue;
200 		}
201 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
202 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
203 
204 		soc->stats.htc_pkt_free++;
205 		dp_htt_info("%pK: Pkt free count %d",
206 			    soc->dp_soc, soc->stats.htc_pkt_free);
207 
208 		qdf_nbuf_free(netbuf);
209 		qdf_mem_free(pkt);
210 		pkt = next;
211 	}
212 	soc->htt_htc_pkt_misclist = NULL;
213 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
214 	dp_info("HTC Packets, fail count = %d, skip count = %d",
215 		soc->stats.fail_count, soc->stats.skip_count);
216 }
217 
218 /**
219  * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
220  * @tgt_mac_addr:	Target MAC
221  * @buffer:		Output buffer
222  */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness, so the
	 * target->host upload byte-reverses each u_int32_t of the message.
	 * For byte-array fields like the MAC address that swizzle must be
	 * undone: restore each byte from the mirrored position within its
	 * own 4-byte word.
	 */
	int i;

	for (i = 0; i < 6; i++)
		buffer[i] = tgt_mac_addr[(i & ~0x3) | (3 - (i & 0x3))];
	return buffer;
#else
	/*
	 * Host and target endianness match - the MAC address can be used
	 * directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
251 
252 /**
253  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
254  * @soc:	SOC handle
255  * @status:	Completion status
256  * @netbuf:	HTT buffer
257  */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	/* Unconditionally release the message buffer; @soc and @status are
	 * ignored — this callback is used when no part-2 completion exists.
	 */
	qdf_nbuf_free(netbuf);
}
264 
265 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
266 void
267 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
268 {
269 	struct htt_soc *soc =  (struct htt_soc *) context;
270 	struct dp_htt_htc_pkt *htt_pkt;
271 	qdf_nbuf_t netbuf;
272 
273 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
274 
275 	/* process (free or keep) the netbuf that held the message */
276 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
277 	/*
278 	 * adf sendcomplete is required for windows only
279 	 */
280 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
281 	/* free the htt_htc_pkt / HTC_PACKET object */
282 	qdf_nbuf_free(netbuf);
283 	htt_htc_pkt_free(soc, htt_pkt);
284 }
285 
286 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
287 
288 void
289 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
290 {
291 	void (*send_complete_part2)(
292 	     void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
293 	struct htt_soc *soc =  (struct htt_soc *) context;
294 	struct dp_htt_htc_pkt *htt_pkt;
295 	qdf_nbuf_t netbuf;
296 
297 	send_complete_part2 = htc_pkt->pPktContext;
298 
299 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
300 
301 	/* process (free or keep) the netbuf that held the message */
302 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
303 	/*
304 	 * adf sendcomplete is required for windows only
305 	 */
306 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
307 	if (send_complete_part2){
308 		send_complete_part2(
309 		    htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
310 	}
311 	/* free the htt_htc_pkt / HTC_PACKET object */
312 	htt_htc_pkt_free(soc, htt_pkt);
313 }
314 
315 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
316 
317 /**
318  * dp_htt_h2t_add_tcl_metadata_ver_v1() - Add tcl_metadata version V1
319  * @soc:	HTT SOC handle
320  * @msg:	Pointer to nbuf
321  *
322  * Return: 0 on success; error code on failure
323  */
324 static int dp_htt_h2t_add_tcl_metadata_ver_v1(struct htt_soc *soc,
325 					      qdf_nbuf_t *msg)
326 {
327 	uint32_t *msg_word;
328 
329 	*msg = qdf_nbuf_alloc(
330 		soc->osdev,
331 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
332 		/* reserve room for the HTC header */
333 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
334 	if (!*msg)
335 		return QDF_STATUS_E_NOMEM;
336 
337 	/*
338 	 * Set the length of the message.
339 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
340 	 * separately during the below call to qdf_nbuf_push_head.
341 	 * The contribution from the HTC header is added separately inside HTC.
342 	 */
343 	if (!qdf_nbuf_put_tail(*msg, HTT_VER_REQ_BYTES)) {
344 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
345 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
346 			  __func__);
347 		return QDF_STATUS_E_FAILURE;
348 	}
349 
350 	/* fill in the message contents */
351 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
352 
353 	/* rewind beyond alignment pad to get to the HTC header reserved area */
354 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
355 
356 	*msg_word = 0;
357 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
358 
359 	return QDF_STATUS_SUCCESS;
360 }
361 
362 #ifdef QCA_DP_TX_FW_METADATA_V2
363 /**
364  * dp_htt_h2t_add_tcl_metadata_ver_v2() - Add tcl_metadata version V2
365  * @soc:	HTT SOC handle
366  * @msg:	Pointer to nbuf
367  *
368  * Return: 0 on success; error code on failure
369  */
370 static int dp_htt_h2t_add_tcl_metadata_ver_v2(struct htt_soc *soc,
371 					      qdf_nbuf_t *msg)
372 {
373 	uint32_t *msg_word;
374 
375 	*msg = qdf_nbuf_alloc(
376 		soc->osdev,
377 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ),
378 		/* reserve room for the HTC header */
379 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
380 	if (!*msg)
381 		return QDF_STATUS_E_NOMEM;
382 
383 	/*
384 	 * Set the length of the message.
385 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
386 	 * separately during the below call to qdf_nbuf_push_head.
387 	 * The contribution from the HTC header is added separately inside HTC.
388 	 */
389 	if (!qdf_nbuf_put_tail(*msg,
390 			       HTT_VER_REQ_BYTES + HTT_TCL_METADATA_VER_SZ)) {
391 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
392 			  "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
393 			  __func__);
394 		return QDF_STATUS_E_FAILURE;
395 	}
396 
397 	/* fill in the message contents */
398 	msg_word = (u_int32_t *)qdf_nbuf_data(*msg);
399 
400 	/* rewind beyond alignment pad to get to the HTC header reserved area */
401 	qdf_nbuf_push_head(*msg, HTC_HDR_ALIGNMENT_PADDING);
402 
403 	*msg_word = 0;
404 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
405 
406 	/* word 1 */
407 	msg_word++;
408 	*msg_word = 0;
409 	HTT_OPTION_TLV_TAG_SET(*msg_word, HTT_OPTION_TLV_TAG_TCL_METADATA_VER);
410 	HTT_OPTION_TLV_LENGTH_SET(*msg_word, HTT_TCL_METADATA_VER_SZ);
411 	HTT_OPTION_TLV_TCL_METADATA_VER_SET(*msg_word,
412 					    HTT_OPTION_TLV_TCL_METADATA_V21);
413 
414 	return QDF_STATUS_SUCCESS;
415 }
416 
417 /**
418  * dp_htt_h2t_add_tcl_metadata_ver() - Add tcl_metadata version
419  * @soc:	HTT SOC handle
420  * @msg:	Pointer to nbuf
421  *
422  * Return: 0 on success; error code on failure
423  */
424 static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
425 {
426 	/* Use tcl_metadata_v1 when NSS offload is enabled */
427 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->dp_soc->wlan_cfg_ctx) ||
428 	    soc->dp_soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
429 		return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
430 	else
431 		return dp_htt_h2t_add_tcl_metadata_ver_v2(soc, msg);
432 }
433 #else
static int dp_htt_h2t_add_tcl_metadata_ver(struct htt_soc *soc, qdf_nbuf_t *msg)
{
	/* TCL metadata v2 support not compiled in (QCA_DP_TX_FW_METADATA_V2
	 * undefined): always build the plain v1 version-request message.
	 */
	return dp_htt_h2t_add_tcl_metadata_ver_v1(soc, msg);
}
438 #endif
439 
440 /**
441  * htt_h2t_ver_req_msg() - Send HTT version request message to target
442  * @soc:	HTT SOC handle
443  *
444  * Return: 0 on success; error code on failure
445  */
446 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
447 {
448 	struct dp_htt_htc_pkt *pkt;
449 	qdf_nbuf_t msg = NULL;
450 	QDF_STATUS status;
451 
452 	status = dp_htt_h2t_add_tcl_metadata_ver(soc, &msg);
453 	if (status != QDF_STATUS_SUCCESS)
454 		return status;
455 
456 	pkt = htt_htc_pkt_alloc(soc);
457 	if (!pkt) {
458 		qdf_nbuf_free(msg);
459 		return QDF_STATUS_E_FAILURE;
460 	}
461 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
462 
463 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
464 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
465 		qdf_nbuf_len(msg), soc->htc_endpoint,
466 		HTC_TX_PACKET_TAG_RTPM_PUT_RC);
467 
468 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
469 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
470 				     NULL);
471 
472 	if (status != QDF_STATUS_SUCCESS) {
473 		qdf_nbuf_free(msg);
474 		htt_htc_pkt_free(soc, pkt);
475 	}
476 
477 	return status;
478 }
479 
480 #ifdef IPA_OPT_WIFI_DP
/**
 * htt_h2t_rx_cce_super_rule_setup() - send RX CCE super-rule filter setup
 * @soc: HTT SOC handle
 * @param: pointer to a struct wifi_dp_flt_setup describing the filters
 *
 * Builds and sends an HTT_H2T_MSG_TYPE_RX_CCE_SUPER_RULE_SETUP message.
 * All RX_CCE_SUPER_RULE_SETUP_NUM slots are serialized; unused slots are
 * sent with their valid bit clear.
 *
 * Return: QDF_STATUS_SUCCESS on send, error code otherwise
 */
QDF_STATUS htt_h2t_rx_cce_super_rule_setup(struct htt_soc *soc, void *param)
{
	struct wifi_dp_flt_setup *flt_params =
			(struct wifi_dp_flt_setup *)param;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	uint16_t ver = 0;
	uint8_t i, valid = 0;
	uint8_t num_filters = flt_params->num_filters;
	uint8_t pdev_id = flt_params->pdev_id;
	uint8_t op = flt_params->op;
	/* Ethertypes in network byte order, compared against l3_type */
	uint16_t ipv4 = qdf_ntohs(QDF_NBUF_TRAC_IPV4_ETH_TYPE);
	uint16_t ipv6 = qdf_ntohs(QDF_NBUF_TRAC_IPV6_ETH_TYPE);
	QDF_STATUS status;

	if (num_filters > RX_CCE_SUPER_RULE_SETUP_NUM) {
		dp_htt_err("Wrong filter count %d", num_filters);
		return QDF_STATUS_FILT_REQ_ERROR;
	}

	msg = qdf_nbuf_alloc(soc->osdev,
			     HTT_MSG_BUF_SIZE(HTT_RX_CCE_SUPER_RULE_SETUP_SZ),
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			     true);
	if (!msg) {
		dp_htt_err("Fail to allocate SUPER_RULE_SETUP msg ");
		return QDF_STATUS_E_FAILURE;
	}

	qdf_nbuf_put_tail(msg, HTT_RX_CCE_SUPER_RULE_SETUP_SZ);
	msg_word = (uint32_t *)qdf_nbuf_data(msg);
	memset(msg_word, 0, HTT_RX_CCE_SUPER_RULE_SETUP_SZ);

	/* rewind beyond alignment pad to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	/* word 0: msg type, pdev id, operation */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word,
			     HTT_H2T_MSG_TYPE_RX_CCE_SUPER_RULE_SETUP);
	HTT_RX_CCE_SUPER_RULE_SETUP_PDEV_ID_SET(*msg_word, pdev_id);
	HTT_RX_CCE_SUPER_RULE_SETUP_OPERATION_SET(*msg_word, op);

	/* Set cce_super_rule_params */
	for (i = 0; i < RX_CCE_SUPER_RULE_SETUP_NUM; i++) {
		valid = flt_params->flt_addr_params[i].valid;
		ver = flt_params->flt_addr_params[i].l3_type;
		msg_word++;

		/* Source address: IPv4 and IPv6 share the same field span */
		if (ver == ipv4) {
			HTT_RX_CCE_SUPER_RULE_SETUP_IPV4_ADDR_ARRAY_SET(
				msg_word,
				flt_params->flt_addr_params[i].src_ipv4_addr);
		} else if (ver == ipv6) {
			HTT_RX_CCE_SUPER_RULE_SETUP_IPV6_ADDR_ARRAY_SET(
				msg_word,
				flt_params->flt_addr_params[i].src_ipv6_addr);
		} else {
			dp_htt_debug("Filter %d not in use.", i);
		}

		/* move uint32_t *msg_word by IPV6 addr size */
		msg_word += (QDF_IPV6_ADDR_SIZE / 4);

		/* Destination address */
		if (ver == ipv4) {
			HTT_RX_CCE_SUPER_RULE_SETUP_IPV4_ADDR_ARRAY_SET(
				msg_word,
				flt_params->flt_addr_params[i].dst_ipv4_addr);
		} else if (ver == ipv6) {
			HTT_RX_CCE_SUPER_RULE_SETUP_IPV6_ADDR_ARRAY_SET(
				msg_word,
				flt_params->flt_addr_params[i].dst_ipv6_addr);
		} else {
			dp_htt_debug("Filter %d not in use.", i);
		}

		/* move uint32_t *msg_word by IPV6 addr size */
		msg_word += (QDF_IPV6_ADDR_SIZE / 4);
		/* L3/L4 type plus the per-filter valid flag */
		HTT_RX_CCE_SUPER_RULE_SETUP_L3_TYPE_SET(*msg_word, ver);
		HTT_RX_CCE_SUPER_RULE_SETUP_L4_TYPE_SET(
					*msg_word,
					flt_params->flt_addr_params[i].l4_type);
		HTT_RX_CCE_SUPER_RULE_SETUP_IS_VALID_SET(*msg_word, valid);
		msg_word++;
		/* L4 port pair */
		HTT_RX_CCE_SUPER_RULE_SETUP_L4_SRC_PORT_SET(
				*msg_word,
				flt_params->flt_addr_params[i].src_port);
		HTT_RX_CCE_SUPER_RULE_SETUP_L4_DST_PORT_SET(
				*msg_word,
				flt_params->flt_addr_params[i].dst_port);

		dp_info("opt_dp:: pdev: %u ver %u, flt_num %u, op %u",
			pdev_id, ver, i, op);
		dp_info("valid %u", valid);
	}

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /*not used during send-done callback */
	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_CCE_SUPER_RULE_SETUP,
				     htt_logger_bufp);

	/* on send failure the nbuf and wrapper are still owned here */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}
	return status;
}
604 #endif /* IPA_OPT_WIFI_DP */
605 
606 int htt_srng_setup(struct htt_soc *soc, int mac_id,
607 		   hal_ring_handle_t hal_ring_hdl,
608 		   int hal_ring_type)
609 {
610 	struct dp_htt_htc_pkt *pkt;
611 	qdf_nbuf_t htt_msg;
612 	uint32_t *msg_word;
613 	struct hal_srng_params srng_params;
614 	qdf_dma_addr_t hp_addr, tp_addr;
615 	uint32_t ring_entry_size =
616 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
617 	int htt_ring_type, htt_ring_id;
618 	uint8_t *htt_logger_bufp;
619 	int target_pdev_id;
620 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
621 	QDF_STATUS status;
622 
623 	/* Sizes should be set in 4-byte words */
624 	ring_entry_size = ring_entry_size >> 2;
625 
626 	htt_msg = qdf_nbuf_alloc(soc->osdev,
627 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
628 		/* reserve room for the HTC header */
629 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
630 	if (!htt_msg) {
631 		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
632 		goto fail0;
633 	}
634 
635 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
636 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
637 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
638 
639 	switch (hal_ring_type) {
640 	case RXDMA_BUF:
641 #ifdef QCA_HOST2FW_RXBUF_RING
642 		if (srng_params.ring_id ==
643 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
644 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
645 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
646 			htt_ring_type = HTT_SW_TO_SW_RING;
647 #ifdef IPA_OFFLOAD
648 		} else if (srng_params.ring_id ==
649 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
650 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
651 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
652 			htt_ring_type = HTT_SW_TO_SW_RING;
653 #ifdef IPA_WDI3_VLAN_SUPPORT
654 		} else if (srng_params.ring_id ==
655 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 +
656 		    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
657 			htt_ring_id = HTT_HOST3_TO_FW_RXBUF_RING;
658 			htt_ring_type = HTT_SW_TO_SW_RING;
659 #endif
660 #endif
661 #else
662 		if (srng_params.ring_id ==
663 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
664 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
665 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
666 			htt_ring_type = HTT_SW_TO_HW_RING;
667 #endif
668 		} else if (srng_params.ring_id ==
669 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
670 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
671 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
672 			htt_ring_type = HTT_SW_TO_HW_RING;
673 #ifdef FEATURE_DIRECT_LINK
674 		} else if (srng_params.ring_id ==
675 			   (HAL_SRNG_WMAC1_RX_DIRECT_LINK_SW_REFILL_RING +
676 			    (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
677 			htt_ring_id = HTT_LPASS_TO_FW_RXBUF_RING;
678 			htt_ring_type = HTT_SW_TO_SW_RING;
679 #endif
680 		} else {
681 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
682 				   "%s: Ring %d currently not supported",
683 				   __func__, srng_params.ring_id);
684 			goto fail1;
685 		}
686 
687 		break;
688 	case RXDMA_MONITOR_BUF:
689 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
690 							 RXDMA_MONITOR_BUF);
691 		htt_ring_type = HTT_SW_TO_HW_RING;
692 		break;
693 	case RXDMA_MONITOR_STATUS:
694 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
695 		htt_ring_type = HTT_SW_TO_HW_RING;
696 		break;
697 	case RXDMA_MONITOR_DST:
698 		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
699 							 RXDMA_MONITOR_DST);
700 		htt_ring_type = HTT_HW_TO_SW_RING;
701 		break;
702 	case RXDMA_MONITOR_DESC:
703 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
704 		htt_ring_type = HTT_SW_TO_HW_RING;
705 		break;
706 	case RXDMA_DST:
707 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
708 		htt_ring_type = HTT_HW_TO_SW_RING;
709 		break;
710 	case TX_MONITOR_BUF:
711 		htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
712 		htt_ring_type = HTT_SW_TO_HW_RING;
713 		break;
714 	case TX_MONITOR_DST:
715 		htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
716 		htt_ring_type = HTT_HW_TO_SW_RING;
717 		break;
718 
719 	default:
720 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
721 			"%s: Ring currently not supported", __func__);
722 			goto fail1;
723 	}
724 
725 	dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
726 		hal_ring_type, srng_params.ring_id, htt_ring_id,
727 		(uint64_t)hp_addr,
728 		(uint64_t)tp_addr);
729 	/*
730 	 * Set the length of the message.
731 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
732 	 * separately during the below call to qdf_nbuf_push_head.
733 	 * The contribution from the HTC header is added separately inside HTC.
734 	 */
735 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
736 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
737 			"%s: Failed to expand head for SRING_SETUP msg",
738 			__func__);
739 		return QDF_STATUS_E_FAILURE;
740 	}
741 
742 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
743 
744 	/* rewind beyond alignment pad to get to the HTC header reserved area */
745 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
746 
747 	/* word 0 */
748 	*msg_word = 0;
749 	htt_logger_bufp = (uint8_t *)msg_word;
750 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
751 	target_pdev_id =
752 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
753 
754 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
755 			(htt_ring_type == HTT_HW_TO_SW_RING))
756 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
757 	else
758 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
759 
760 	dp_info("mac_id %d", mac_id);
761 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
762 	/* TODO: Discuss with FW on changing this to unique ID and using
763 	 * htt_ring_type to send the type of ring
764 	 */
765 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
766 
767 	/* word 1 */
768 	msg_word++;
769 	*msg_word = 0;
770 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
771 		srng_params.ring_base_paddr & 0xffffffff);
772 
773 	/* word 2 */
774 	msg_word++;
775 	*msg_word = 0;
776 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
777 		(uint64_t)srng_params.ring_base_paddr >> 32);
778 
779 	/* word 3 */
780 	msg_word++;
781 	*msg_word = 0;
782 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
783 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
784 		(ring_entry_size * srng_params.num_entries));
785 	dp_info("entry_size %d", ring_entry_size);
786 	dp_info("num_entries %d", srng_params.num_entries);
787 	dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
788 	if (htt_ring_type == HTT_SW_TO_HW_RING)
789 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
790 						*msg_word, 1);
791 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
792 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
793 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
794 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
795 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
796 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
797 
798 	/* word 4 */
799 	msg_word++;
800 	*msg_word = 0;
801 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
802 		hp_addr & 0xffffffff);
803 
804 	/* word 5 */
805 	msg_word++;
806 	*msg_word = 0;
807 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
808 		(uint64_t)hp_addr >> 32);
809 
810 	/* word 6 */
811 	msg_word++;
812 	*msg_word = 0;
813 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
814 		tp_addr & 0xffffffff);
815 
816 	/* word 7 */
817 	msg_word++;
818 	*msg_word = 0;
819 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
820 		(uint64_t)tp_addr >> 32);
821 
822 	/* word 8 */
823 	msg_word++;
824 	*msg_word = 0;
825 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
826 		srng_params.msi_addr & 0xffffffff);
827 
828 	/* word 9 */
829 	msg_word++;
830 	*msg_word = 0;
831 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
832 		(uint64_t)(srng_params.msi_addr) >> 32);
833 
834 	/* word 10 */
835 	msg_word++;
836 	*msg_word = 0;
837 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
838 		qdf_cpu_to_le32(srng_params.msi_data));
839 
840 	/* word 11 */
841 	msg_word++;
842 	*msg_word = 0;
843 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
844 		srng_params.intr_batch_cntr_thres_entries *
845 		ring_entry_size);
846 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
847 		srng_params.intr_timer_thres_us >> 3);
848 
849 	/* word 12 */
850 	msg_word++;
851 	*msg_word = 0;
852 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
853 		/* TODO: Setting low threshold to 1/8th of ring size - see
854 		 * if this needs to be configurable
855 		 */
856 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
857 			srng_params.low_threshold);
858 	}
859 	/* "response_required" field should be set if a HTT response message is
860 	 * required after setting up the ring.
861 	 */
862 	pkt = htt_htc_pkt_alloc(soc);
863 	if (!pkt) {
864 		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
865 		       hal_ring_type, srng_params.ring_id, htt_ring_id);
866 		goto fail1;
867 	}
868 
869 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
870 
871 	SET_HTC_PACKET_INFO_TX(
872 		&pkt->htc_pkt,
873 		dp_htt_h2t_send_complete_free_netbuf,
874 		qdf_nbuf_data(htt_msg),
875 		qdf_nbuf_len(htt_msg),
876 		soc->htc_endpoint,
877 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
878 
879 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
880 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
881 				     htt_logger_bufp);
882 
883 	if (status != QDF_STATUS_SUCCESS) {
884 		qdf_nbuf_free(htt_msg);
885 		htt_htc_pkt_free(soc, pkt);
886 	}
887 
888 	return status;
889 
890 fail1:
891 	qdf_nbuf_free(htt_msg);
892 fail0:
893 	return QDF_STATUS_E_FAILURE;
894 }
895 
896 qdf_export_symbol(htt_srng_setup);
897 
898 #ifdef QCA_SUPPORT_FULL_MON
899 /**
900  * htt_h2t_full_mon_cfg() - Send full monitor configuration msg to FW
901  *
902  * @htt_soc: HTT Soc handle
903  * @pdev_id: Radio id
904  * @config: enabled/disable configuration
905  *
906  * Return: Success when HTT message is sent, error on failure
907  */
908 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
909 			 uint8_t pdev_id,
910 			 enum dp_full_mon_config config)
911 {
912 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
913 	struct dp_htt_htc_pkt *pkt;
914 	qdf_nbuf_t htt_msg;
915 	uint32_t *msg_word;
916 	uint8_t *htt_logger_bufp;
917 
918 	htt_msg = qdf_nbuf_alloc(soc->osdev,
919 				 HTT_MSG_BUF_SIZE(
920 				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
921 				 /* reserve room for the HTC header */
922 				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
923 				 4,
924 				 TRUE);
925 	if (!htt_msg)
926 		return QDF_STATUS_E_FAILURE;
927 
928 	/*
929 	 * Set the length of the message.
930 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
931 	 * separately during the below call to qdf_nbuf_push_head.
932 	 * The contribution from the HTC header is added separately inside HTC.
933 	 */
934 	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
935 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
936 			  "%s: Failed to expand head for RX Ring Cfg msg",
937 			  __func__);
938 		goto fail1;
939 	}
940 
941 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
942 
943 	/* rewind beyond alignment pad to get to the HTC header reserved area */
944 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
945 
946 	/* word 0 */
947 	*msg_word = 0;
948 	htt_logger_bufp = (uint8_t *)msg_word;
949 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
950 	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
951 			*msg_word, DP_SW2HW_MACID(pdev_id));
952 
953 	msg_word++;
954 	*msg_word = 0;
955 	/* word 1 */
956 	if (config == DP_FULL_MON_ENABLE) {
957 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
958 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
959 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
960 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
961 	} else if (config == DP_FULL_MON_DISABLE) {
962 		/* As per MAC team's suggestion, While disabling full monitor
963 		 * mode, Set 'en' bit to true in full monitor mode register.
964 		 */
965 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
966 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
967 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
968 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
969 	}
970 
971 	pkt = htt_htc_pkt_alloc(soc);
972 	if (!pkt) {
973 		qdf_err("HTC packet allocation failed");
974 		goto fail1;
975 	}
976 
977 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
978 
979 	SET_HTC_PACKET_INFO_TX(
980 		&pkt->htc_pkt,
981 		dp_htt_h2t_send_complete_free_netbuf,
982 		qdf_nbuf_data(htt_msg),
983 		qdf_nbuf_len(htt_msg),
984 		soc->htc_endpoint,
985 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
986 
987 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
988 	qdf_debug("config: %d", config);
989 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
990 			    htt_logger_bufp);
991 	return QDF_STATUS_SUCCESS;
992 fail1:
993 	qdf_nbuf_free(htt_msg);
994 	return QDF_STATUS_E_FAILURE;
995 }
996 
997 qdf_export_symbol(htt_h2t_full_mon_cfg);
998 #else
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	/* Full monitor mode support is compiled out; report success so
	 * callers need no special casing for this configuration.
	 */
	return 0;
}
1005 
1006 qdf_export_symbol(htt_h2t_full_mon_cfg);
1007 #endif
1008 
1009 #ifdef QCA_UNDECODED_METADATA_SUPPORT
1010 static inline void
1011 dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
1012 			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
1013 {
1014 	if (htt_tlv_filter->phy_err_filter_valid) {
1015 		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_SET
1016 			(*msg_word, htt_tlv_filter->fp_phy_err);
1017 		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_SRC_SET
1018 			(*msg_word, htt_tlv_filter->fp_phy_err_buf_src);
1019 		HTT_RX_RING_SELECTION_CFG_FP_PHY_ERR_BUF_DEST_SET
1020 			(*msg_word, htt_tlv_filter->fp_phy_err_buf_dest);
1021 
1022 		/* word 12*/
1023 		msg_word++;
1024 		*msg_word = 0;
1025 		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_SET
1026 			(*msg_word, htt_tlv_filter->phy_err_mask);
1027 
1028 		/* word 13*/
1029 		msg_word++;
1030 		*msg_word = 0;
1031 		HTT_RX_RING_SELECTION_CFG_PHY_ERR_MASK_CONT_SET
1032 			(*msg_word, htt_tlv_filter->phy_err_mask_cont);
1033 	}
1034 }
1035 #else
static inline void
dp_mon_rx_enable_phy_errors(uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
	/* No-op: QCA_UNDECODED_METADATA_SUPPORT is not enabled */
}
1041 #endif
1042 
/**
 * htt_h2t_rx_ring_cfg() - Build and send HTT RX_RING_SELECTION_CFG to FW
 * @htt_soc: opaque HTT SOC handle
 * @pdev_id: host pdev id (0 based; converted to target id before sending)
 * @hal_ring_hdl: SRNG handle of the ring being configured
 * @hal_ring_type: HAL ring type (RXDMA_BUF, RXDMA_MONITOR_*, RXDMA_DST)
 * @ring_buf_size: size of the buffers posted to the ring
 * @htt_tlv_filter: per-mode (FP/MD/MO) packet-type and TLV filter config
 *
 * Constructs the multi-word HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message
 * word by word in an nbuf and hands it to HTC. The message layout is
 * position-dependent: msg_word is advanced to match the firmware-defined
 * word offsets, so statement order here must not be changed.
 * On send failure the nbuf and the HTC packet are freed here.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 * (returned through an int, matching the existing prototype)
 */
int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
			hal_ring_handle_t hal_ring_hdl,
			int hal_ring_type, int ring_buf_size,
			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t htt_msg;
	uint32_t *msg_word;
	uint32_t *msg_word_data;
	struct hal_srng_params srng_params;
	uint32_t htt_ring_type, htt_ring_id;
	uint32_t tlv_filter;
	uint8_t *htt_logger_bufp;
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
	int target_pdev_id;
	QDF_STATUS status;

	htt_msg = qdf_nbuf_alloc(soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
	/* reserve room for the HTC header */
	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!htt_msg) {
		dp_err("htt_msg alloc failed ring type %d", hal_ring_type);
		goto fail0;
	}

	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);

	/* Map the HAL ring type to the HTT ring id and transfer direction */
	switch (hal_ring_type) {
	case RXDMA_BUF:
		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_BUF:
		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
							 RXDMA_MONITOR_BUF);
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_STATUS:
		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_DST:
		htt_ring_id = dp_htt_get_mon_htt_ring_id(soc->dp_soc,
							 RXDMA_MONITOR_DST);
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case RXDMA_MONITOR_DESC:
		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_DST:
		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Ring currently not supported", __func__);
		goto fail1;
	}

	dp_info("ring_type %d ring_id %d htt_ring_id %d",
		hal_ring_type, srng_params.ring_id, htt_ring_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Failed to expand head for RX Ring Cfg msg",
			__func__);
		goto fail1; /* failure */
	}

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0 */
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);

	/* applicable only for post Li */
	dp_rx_mon_enable(soc->dp_soc, msg_word, htt_tlv_filter);

	/*
	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
	 */
	target_pdev_id =
	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);

	if (htt_ring_type == HTT_SW_TO_SW_RING ||
			htt_ring_type == HTT_SW_TO_HW_RING ||
			htt_ring_type == HTT_HW_TO_SW_RING)
		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
						      target_pdev_id);

	/* TODO: Discuss with FW on changing this to unique ID and using
	 * htt_ring_type to send the type of ring
	 */
	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);

	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));

	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
						htt_tlv_filter->offset_valid);

	if (mon_drop_th > 0)
		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
								   1);
	else
		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
								   0);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
		ring_buf_size);

	dp_mon_rx_packet_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
	dp_mon_rx_hdr_length_set(soc->dp_soc, msg_word, htt_tlv_filter);
	dp_mon_rx_mac_filter_set(soc->dp_soc, msg_word, htt_tlv_filter);

	/* word 2: per-subtype enables for MGMT subtypes 0b0000-0b1001,
	 * separately for FP (filter pass), MD (monitor direct) and
	 * MO (monitor other) modes
	 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0000,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0001,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0010,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0011,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0100,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0101,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0110,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
			MGMT, 0111,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 1000,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 1001,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
			/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0000,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0001,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0010,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0011,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0100,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0101,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0110,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
			MGMT, 0111,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 1000,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 1001,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0000,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0001,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0010,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0011,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0100,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0101,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0110,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
			MGMT, 0111,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 1000,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 1001,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	/* word 3: remaining MGMT subtypes (0b1010-0b1111) */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1010,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1011,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1100,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1101,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1110,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* reserved*/
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
			MGMT, 1111,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_RESERVED_15) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
			/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1010,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1011,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1100,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1101,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1110,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1010,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1011,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1100,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1101,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1110,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* reserved*/
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
			MGMT, 1111,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_RESERVED_15) ? 1 : 0);
	}

	/* word 4: CTRL subtypes 0b0000-0b1001 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0000,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0001,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0010,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0011,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0100,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0101,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0110,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0111,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 1000,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 1001,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0000,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0001,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0010,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0011,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0100,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0101,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0110,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0111,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 1000,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 1001,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0000,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0001,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0010,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0011,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0100,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0101,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0110,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0111,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 1000,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 1001,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	/* word 5: CTRL subtypes 0b1010-0b1111 plus DATA mcast/ucast/null */
	msg_word++;
	*msg_word = 0;
	if (htt_tlv_filter->enable_fp) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1010,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1011,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1100,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1101,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1110,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1111,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, MCAST,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, UCAST,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, NULL,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1010,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1011,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1100,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1101,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1110,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1111,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, MCAST,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, UCAST,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, NULL,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1010,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1011,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1100,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1101,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1110,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1111,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, MCAST,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, UCAST,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, NULL,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	/* word 6: per-TLV subscription bitmap */
	msg_word++;
	*msg_word = 0;
	tlv_filter = 0;
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
		htt_tlv_filter->mpdu_start);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
		htt_tlv_filter->msdu_start);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
		htt_tlv_filter->packet);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
		htt_tlv_filter->msdu_end);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
		htt_tlv_filter->mpdu_end);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
		htt_tlv_filter->packet_header);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
		htt_tlv_filter->attention);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
		htt_tlv_filter->ppdu_start);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
		htt_tlv_filter->ppdu_end);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
		htt_tlv_filter->ppdu_end_user_stats);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
		PPDU_END_USER_STATS_EXT,
		htt_tlv_filter->ppdu_end_user_stats_ext);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
		htt_tlv_filter->ppdu_end_status_done);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START_USER_INFO,
		htt_tlv_filter->ppdu_start_user_info);
	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
		 htt_tlv_filter->header_per_msdu);

	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);

	msg_word_data = (uint32_t *)qdf_nbuf_data(htt_msg);
	dp_info("config_data: [0x%x][0x%x][0x%x][0x%x][0x%x][0x%x][0x%x]",
		msg_word_data[0], msg_word_data[1], msg_word_data[2],
		msg_word_data[3], msg_word_data[4], msg_word_data[5],
		msg_word_data[6]);

	/* word 7 */
	msg_word++;
	*msg_word = 0;
	if (htt_tlv_filter->offset_valid) {
		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_packet_offset);
		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_header_offset);

		/* word 8 */
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_mpdu_end_offset);
		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_mpdu_start_offset);

		/* word 9 */
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_msdu_end_offset);
		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_msdu_start_offset);

		/* word 10 */
		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_attn_offset);

		/* word 11 */
		msg_word++;
		*msg_word = 0;
	} else {
		/* word 11: offsets not valid, so jump straight over the
		 * (unwritten) offset words 7-10 to word 11
		 */
		msg_word += 4;
		*msg_word = 0;
	}

	soc->dp_soc->arch_ops.dp_rx_word_mask_subscribe(
						soc->dp_soc,
						msg_word,
						(void *)htt_tlv_filter);

	dp_mon_rx_wmask_subscribe(soc->dp_soc, msg_word, htt_tlv_filter);

	if (mon_drop_th > 0)
		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
				mon_drop_th);

	dp_mon_rx_enable_mpdu_logging(soc->dp_soc, msg_word, htt_tlv_filter);

	dp_mon_rx_enable_phy_errors(msg_word, htt_tlv_filter);

	/* word 14: words 12/13 were populated (if at all) inside
	 * dp_mon_rx_enable_phy_errors() through its own pointer copy,
	 * so skip over them here
	 */
	msg_word += 3;

	/* word 15*/
	msg_word++;

	/* word 16*/
	msg_word++;
	*msg_word = 0;

	dp_mon_rx_enable_pkt_tlv_offset(soc->dp_soc, msg_word, htt_tlv_filter);

	/* word 20 and 21*/
	msg_word += 4;
	*msg_word = 0;

	dp_mon_rx_enable_fpmo(soc->dp_soc, msg_word, htt_tlv_filter);

	/* "response_required" field should be set if a HTT response message is
	 * required after setting up the ring.
	 */
	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_err("pkt alloc failed, ring_type %d ring_id %d htt_ring_id %d",
		       hal_ring_type, srng_params.ring_id, htt_ring_id);
		goto fail1;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(htt_msg),
		qdf_nbuf_len(htt_msg),
		soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(htt_msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;

fail1:
	qdf_nbuf_free(htt_msg);
fail0:
	return QDF_STATUS_E_FAILURE;
}
1826 
1827 qdf_export_symbol(htt_h2t_rx_ring_cfg);
1828 
1829 #if defined(HTT_STATS_ENABLE)
1830 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1831 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1832 
1833 {
1834 	uint32_t pdev_id;
1835 	uint32_t *msg_word = NULL;
1836 	uint32_t msg_remain_len = 0;
1837 
1838 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1839 
1840 	/*COOKIE MSB*/
1841 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1842 
1843 	/* stats message length + 16 size of HTT header*/
1844 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1845 				(uint32_t)DP_EXT_MSG_LENGTH);
1846 
1847 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1848 			msg_word,  msg_remain_len,
1849 			WDI_NO_VAL, pdev_id);
1850 
1851 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1852 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1853 	}
1854 	/* Need to be freed here as WDI handler will
1855 	 * make a copy of pkt to send data to application
1856 	 */
1857 	qdf_nbuf_free(htt_msg);
1858 	return QDF_STATUS_SUCCESS;
1859 }
1860 #else
/* Stub used when HTT_STATS_ENABLE is not defined: stats responses are
 * not delivered to upper layers; caller falls back to local processing.
 */
static inline QDF_STATUS
dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	return QDF_STATUS_E_NOSUPPORT;
}
1867 #endif
1868 
1869 #ifdef HTT_STATS_DEBUGFS_SUPPORT
/* dp_htt_stats_dbgfs_send_msg() - Function to send htt data to upper layer.
 * @pdev: dp pdev handle
 * @msg_word: HTT msg
 * @msg_len: Length of HTT msg sent
 *
 * Return: none
 */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
	struct htt_dbgfs_cfg dbgfs_cfg;
	int done = 0;

	/* send 5th word of HTT msg to upper layer */
	dbgfs_cfg.msg_word = (msg_word + 4);
	dbgfs_cfg.m = pdev->dbgfs_cfg->m;

	/* stats message length + 16 size of HTT header*/
	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);

	/* hand the TLV payload (header excluded) to the dbgfs consumer */
	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
							     (msg_len - HTT_HEADER_LEN));

	/* Get TLV Done bit from 4th msg word */
	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
	if (done) {
		/* last buffer of the stream: wake the waiting dbgfs reader */
		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
			dp_htt_err("%pK: Failed to set event for debugfs htt stats"
				   , pdev->soc);
	}
}
1903 #else
/* No-op stub when HTT_STATS_DEBUGFS_SUPPORT is not compiled in. */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
}
1909 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
1910 
1911 #ifdef WLAN_SYSFS_DP_STATS
/* dp_htt_stats_sysfs_update_config() - Record the requesting process in the
 * sysfs stats config.
 * @pdev: dp pdev handle
 *
 * This function sets the process id and printing mode within the sysfs config
 * struct, which enables DP_PRINT statements within this process to write to the
 * console buffer provided by the user space.
 *
 * Return: None
 */
1921 static inline void
1922 dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
1923 {
1924 	struct dp_soc *soc = pdev->soc;
1925 
1926 	if (!soc) {
1927 		dp_htt_err("soc is null");
1928 		return;
1929 	}
1930 
1931 	if (!soc->sysfs_config) {
1932 		dp_htt_err("soc->sysfs_config is NULL");
1933 		return;
1934 	}
1935 
1936 	/* set sysfs config parameters */
1937 	soc->sysfs_config->process_id = qdf_get_current_pid();
1938 	soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
1939 }
1940 
1941 /**
1942  * dp_htt_stats_sysfs_set_event() - Set sysfs stats event.
1943  * @soc: soc handle.
1944  * @msg_word: Pointer to htt msg word.
1945  *
1946  * Return: void
1947  */
1948 static inline void
1949 dp_htt_stats_sysfs_set_event(struct dp_soc *soc, uint32_t *msg_word)
1950 {
1951 	int done = 0;
1952 
1953 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1954 	if (done) {
1955 		if (qdf_event_set(&soc->sysfs_config->sysfs_txrx_fw_request_done))
1956 			dp_htt_err("%pK:event compl Fail to set event ",
1957 				   soc);
1958 	}
1959 }
1960 #else /* WLAN_SYSFS_DP_STATS */
/* No-op stub when WLAN_SYSFS_DP_STATS is not compiled in. */
static inline void
dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
{
}
1965 
/* No-op stub when WLAN_SYSFS_DP_STATS is not compiled in. */
static inline void
dp_htt_stats_sysfs_set_event(struct dp_soc *dp_soc, uint32_t *msg_word)
{
}
1970 #endif /* WLAN_SYSFS_DP_STATS */
1971 
1972 /* dp_htt_set_pdev_obss_stats() - Function to set pdev obss stats.
1973  * @pdev: dp pdev handle
1974  * @tag_type: HTT TLV tag type
1975  * @tag_buf: TLV buffer pointer
1976  *
1977  * Return: None
1978  */
1979 static inline void
1980 dp_htt_set_pdev_obss_stats(struct dp_pdev *pdev, uint32_t tag_type,
1981 			   uint32_t *tag_buf)
1982 {
1983 	if (tag_type != HTT_STATS_PDEV_OBSS_PD_TAG) {
1984 		dp_err("Tag mismatch");
1985 		return;
1986 	}
1987 	qdf_mem_copy(&pdev->stats.htt_tx_pdev_stats.obss_pd_stats_tlv,
1988 		     tag_buf, sizeof(struct cdp_pdev_obss_pd_stats_tlv));
1989 	qdf_event_set(&pdev->fw_obss_stats_event);
1990 }
1991 
1992 /**
1993  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1994  * @htt_stats: htt stats info
1995  * @soc: dp_soc
1996  *
1997  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1998  * contains sub messages which are identified by a TLV header.
1999  * In this function we will process the stream of T2H messages and read all the
2000  * TLV contained in the message.
2001  *
2002  * The following cases have been taken care of
2003  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
2004  *		In this case the buffer will contain multiple tlvs.
2005  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
2006  *		Only one tlv will be contained in the HTT message and this tag
2007  *		will extend onto the next buffer.
2008  * Case 3: When the buffer is the continuation of the previous message
2009  * Case 4: tlv length is 0. which will indicate the end of message
2010  *
2011  * Return: void
2012  */
/* Walk every TLV in the queued stream of HTT EXT STATS T2H buffers,
 * reassembling TLVs that straddle buffer boundaries into a temporary
 * heap buffer before handing them to the copy/print/obss handlers.
 */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	uint8_t *tlv_buf_head = NULL;	/* scratch buffer for a split TLV */
	uint8_t *tlv_buf_tail = NULL;	/* write cursor into tlv_buf_head */
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val = 0;
	int cookie_msb = 0;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		/* non-zero cookie LSB: forward to WDI listeners instead of
		 * parsing the TLVs locally
		 */
		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
						    htt_stats->msg_len);
			qdf_nbuf_free(htt_msg);
			continue;
		}

		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
			dp_htt_stats_sysfs_update_config(pdev);

		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
			copy_stats = true;

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero TLV length terminates the message */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* a freshly-read TLV length excludes its header */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3 */
				if (tlv_buf_head) {
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev,
							      tlv_type,
							      tlv_start);
				else
					dp_htt_stats_print_tag(pdev,
							       tlv_type,
							       tlv_start);

				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
					dp_peer_update_inactive_time(pdev,
								     tlv_type,
								     tlv_start);

				if (cookie_msb & DBG_STATS_COOKIE_HTT_OBSS)
					dp_htt_set_pdev_obss_stats(pdev,
								   tlv_type,
								   tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				/* split-TLV scratch buffer fully consumed */
				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				/* stash the partial TLV; the remainder comes
				 * in the next buffer of the stream
				 */
				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		/* indicate event completion in case the event is done */
		if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
			dp_htt_stats_sysfs_set_event(soc, msg_word);

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* drop the current buffer and the rest of the queued stream */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
2182 
/* Deferred-work handler: dequeue the buffers of one completed HTT EXT
 * stats stream from the soc-level queue (under the stats lock) and
 * process them outside the lock.
 */
void htt_t2h_stats_handler(void *context)
{
	struct dp_soc *soc = (struct dp_soc *)context;
	struct htt_stats_context htt_stats;
	uint32_t *msg_word;
	qdf_nbuf_t htt_msg = NULL;
	uint8_t done;
	uint32_t rem_stats;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc is NULL");
		return;
	}

	/* do not touch queues before common init has completed */
	if (!qdf_atomic_read(&soc->cmn_init_done)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc: 0x%pK, init_done: %d", soc,
			  qdf_atomic_read(&soc->cmn_init_done));
		return;
	}

	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
	qdf_nbuf_queue_init(&htt_stats.msg);

	/* pull one completed stats from soc->htt_stats_msg and process */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	if (!soc->htt_stats.num_stats) {
		qdf_spin_unlock_bh(&soc->htt_stats.lock);
		return;
	}
	/* move buffers up to (and including) the DONE-marked one into the
	 * local queue; this is one complete stats stream
	 */
	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
		/*
		 * Done bit signifies that this is the last T2H buffer in the
		 * stream of HTT EXT STATS message
		 */
		if (done)
			break;
	}
	rem_stats = --soc->htt_stats.num_stats;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	/* If there are more stats to process, schedule stats work again.
	 * Scheduling prior to processing ht_stats to queue with early
	 * index
	 */
	if (rem_stats)
		qdf_sched_work(0, &soc->htt_stats.work);

	dp_process_htt_stat_msg(&htt_stats, soc);
}
2238 
2239 /**
2240  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2241  * @soc: DP SOC handle
2242  * @htt_t2h_msg: HTT message nbuf
2243  *
2244  * return:void
2245  */
static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
					    qdf_nbuf_t htt_t2h_msg)
{
	uint8_t done;
	qdf_nbuf_t msg_copy;
	uint32_t *msg_word;

	/* DONE bit is in the 4th word of the T2H message */
	msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
	msg_word = msg_word + 3;
	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);

	/*
	 * HTT EXT stats response comes as stream of TLVs which span over
	 * multiple T2H messages.
	 * The first message will carry length of the response.
	 * For rest of the messages length will be zero.
	 *
	 * Clone the T2H message buffer and store it in a list to process
	 * it later.
	 *
	 * The original T2H message buffers gets freed in the T2H HTT event
	 * handler
	 */
	msg_copy = qdf_nbuf_clone(htt_t2h_msg);

	if (!msg_copy) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "T2H message clone failed for HTT EXT STATS");
		goto error;
	}

	qdf_spin_lock_bh(&soc->htt_stats.lock);
	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
	/*
	 * Done bit signifies that this is the last T2H buffer in the stream of
	 * HTT EXT STATS message
	 */
	if (done) {
		/* a full stream is now queued; kick the deferred worker */
		soc->htt_stats.num_stats++;
		qdf_sched_work(0, &soc->htt_stats.work);
	}
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	return;

error:
	/* clone failure mid-stream: drop all partially-queued buffers so
	 * the worker never sees a truncated stream
	 */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
			!= NULL) {
		qdf_nbuf_free(msg_copy);
	}
	soc->htt_stats.num_stats = 0;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);
	return;
}
2301 
/* Kick off the H2T version-request handshake with the target firmware. */
int htt_soc_attach_target(struct htt_soc *htt_soc)
{
	return htt_h2t_ver_req_msg(htt_soc);
}
2308 
/* Store the HTC handle used for all H2T message transmission. */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}
2313 
/* Return the HTC handle previously stored on this HTT soc. */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
2318 
/* Allocate and initialize the HTT soc object, including the per-pdev
 * umac/lmac backpressure-tracking arrays. On any allocation failure all
 * partial allocations are released and NULL is returned.
 */
struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
{
	int i;
	int j;
	int umac_alloc_size = HTT_SW_UMAC_RING_IDX_MAX *
			      sizeof(struct bp_handler);
	int lmac_alloc_size = HTT_SW_LMAC_RING_IDX_MAX *
			      sizeof(struct bp_handler);
	struct htt_soc *htt_soc = NULL;

	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
	if (!htt_soc) {
		dp_err("HTT attach failed");
		return NULL;
	}

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		htt_soc->pdevid_tt[i].umac_path =
			qdf_mem_malloc(umac_alloc_size);
		if (!htt_soc->pdevid_tt[i].umac_path)
			break;
		/* -1 marks "no backpressure window started yet" */
		for (j = 0; j < HTT_SW_UMAC_RING_IDX_MAX; j++)
			htt_soc->pdevid_tt[i].umac_path[j].bp_start_tt = -1;
		htt_soc->pdevid_tt[i].lmac_path =
			qdf_mem_malloc(lmac_alloc_size);
		if (!htt_soc->pdevid_tt[i].lmac_path) {
			/* free the umac array of this same pdev before
			 * breaking; earlier pdevs are freed below
			 */
			qdf_mem_free(htt_soc->pdevid_tt[i].umac_path);
			break;
		}
		for (j = 0; j < HTT_SW_LMAC_RING_IDX_MAX ; j++)
			htt_soc->pdevid_tt[i].lmac_path[j].bp_start_tt = -1;
	}

	/* i < MAX_PDEV_CNT means the loop broke on an allocation failure */
	if (i != MAX_PDEV_CNT) {
		for (j = 0; j < i; j++) {
			qdf_mem_free(htt_soc->pdevid_tt[j].umac_path);
			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_path);
		}
		qdf_mem_free(htt_soc);
		return NULL;
	}

	htt_soc->dp_soc = soc;
	htt_soc->htc_soc = htc_handle;
	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);

	return htt_soc;
}
2367 
2368 #if defined(WDI_EVENT_ENABLE) && \
2369 	!defined(REMOVE_PKT_LOG)
2370 /**
2371  * dp_pktlog_msg_handler() - Pktlog msg handler
2372  * @soc:	 HTT SOC handle
2373  * @msg_word:    Pointer to payload
2374  *
2375  * Return: None
2376  */
2377 static void
2378 dp_pktlog_msg_handler(struct htt_soc *soc,
2379 		      uint32_t *msg_word)
2380 {
2381 	uint8_t pdev_id;
2382 	uint8_t target_pdev_id;
2383 	uint32_t *pl_hdr;
2384 
2385 	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2386 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
2387 							 target_pdev_id);
2388 	pl_hdr = (msg_word + 1);
2389 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2390 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2391 		pdev_id);
2392 }
2393 #else
/* No-op stub when WDI events or pktlog are compiled out. */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
2399 #endif
2400 
2401 #ifdef QCA_SUPPORT_PRIMARY_LINK_MIGRATE
/* Build and send the H2T PRIMARY_LINK_PEER_MIGRATE_RESP message that
 * acknowledges a primary-link peer migration to the target, carrying
 * the link/ML peer ids, optional source info and the migration status.
 * The nbuf and HTC packet are freed on send failure; on success their
 * ownership passes to HTC.
 */
QDF_STATUS
dp_h2t_ptqm_migration_msg_send(struct dp_soc *dp_soc, uint16_t vdev_id,
			       uint8_t pdev_id,
			       uint8_t chip_id, uint16_t peer_id,
			       uint16_t ml_peer_id, uint16_t src_info,
			       QDF_STATUS status)
{
	struct htt_soc *soc = dp_soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	uint8_t *htt_logger_bufp;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	bool src_info_valid = false;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(sizeof(htt_h2t_primary_link_peer_migrate_resp_t)),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);

	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, sizeof(htt_h2t_primary_link_peer_migrate_resp_t))
			      == NULL) {
		dp_htt_err("Failed to expand head for"
			   "HTT_H2T_MSG_TYPE_PRIMARY_LINK_PEER_MIGRATE_RESP");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(msg);
	memset(msg_word, 0, sizeof(htt_h2t_primary_link_peer_migrate_resp_t));

	/* reserve room for the HTC header; msg_word still addresses the
	 * start of the HTT payload
	 */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	/* word 0: msg type, pdev/chip/vdev ids */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word,
			     HTT_H2T_MSG_TYPE_PRIMARY_LINK_PEER_MIGRATE_RESP);
	HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_PDEV_ID_SET(*msg_word, pdev_id);
	HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_CHIP_ID_SET(*msg_word, chip_id);
	HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_VDEV_ID_SET(*msg_word, vdev_id);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_SW_LINK_PEER_ID_SET(*msg_word,
							      peer_id);
	HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_ML_PEER_ID_SET(*msg_word,
							 ml_peer_id);

	/* word 2 */
	msg_word++;
	*msg_word = 0;

	if (src_info != 0)
		src_info_valid = true;

	HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_SRC_INFO_VALID_SET(*msg_word,
							     src_info_valid);
	HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_SRC_INFO_SET(*msg_word,
						       src_info);
	HTT_H2T_PRIMARY_LINK_PEER_MIGRATE_STATUS_SET(*msg_word,
						     status);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL;

	/* macro to set packet parameters for TX */
	SET_HTC_PACKET_INFO_TX(
			&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg),
			qdf_nbuf_len(msg),
			soc->htc_endpoint,
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	ret = DP_HTT_SEND_HTC_PKT(
			soc, pkt,
			HTT_H2T_MSG_TYPE_PRIMARY_LINK_PEER_MIGRATE_RESP,
			htt_logger_bufp);

	if (ret != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return ret;
}
2505 #endif
2506 
2507 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
2508 /**
2509  * dp_vdev_txrx_hw_stats_handler - Handle vdev stats received from FW
2510  * @soc: htt soc handle
2511  * @msg_word: buffer containing stats
2512  *
2513  * Return: void
2514  */
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{
	struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
	uint8_t pdev_id;
	uint8_t vdev_id;
	uint8_t target_pdev_id;
	uint16_t payload_size;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	uint8_t *tlv_buf;
	uint32_t *tlv_buf_temp;
	uint32_t *tag_buf;
	htt_tlv_tag_t tlv_type;
	uint16_t tlv_length;
	uint64_t pkt_count = 0;
	uint64_t byte_count = 0;
	uint64_t soc_drop_cnt = 0;
	struct cdp_pkt_info tx_comp = { 0 };
	struct cdp_pkt_info tx_failed =  { 0 };

	target_pdev_id =
		HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(dpsoc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT)
		return;

	pdev = dpsoc->pdev_list[pdev_id];
	if (!pdev) {
		dp_err("PDEV is NULL for pdev_id:%d", pdev_id);
		return;
	}

	payload_size =
	HTT_T2H_VDEVS_TXRX_STATS_PERIODIC_IND_PAYLOAD_SIZE_GET(*msg_word);

	qdf_trace_hex_dump(QDF_MODULE_ID_DP_HTT, QDF_TRACE_LEVEL_INFO,
			   (void *)msg_word, payload_size + 16);

	/* Adjust msg_word to point to the first TLV in buffer */
	msg_word = msg_word + 4;

	/* Parse the received buffer till payload size reaches 0 */
	while (payload_size > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_buf_temp = msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);

		/* Add header size to tlv length*/
		tlv_length += 4;

		switch (tlv_type) {
		case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
		{
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(SOC_DROP_CNT);
			soc_drop_cnt = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(dpsoc, tx.tqm_drop_no_peer, soc_drop_cnt);
			break;
		}
		case HTT_STATS_VDEV_TXRX_STATS_HW_STATS_TAG:
		{
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(VDEV_ID);
			vdev_id = (uint8_t)(*tag_buf);
			vdev = dp_vdev_get_ref_by_id(dpsoc, vdev_id,
						     DP_MOD_ID_HTT);

			/* unknown vdev: skip this TLV, keep parsing */
			if (!vdev)
				goto invalid_vdev;

			/* Extract received packet count from buffer */
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(RX_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.num, pkt_count);

			/* Extract received packet byte count from buffer */
			tag_buf = tlv_buf_temp +
					HTT_VDEV_STATS_GET_INDEX(RX_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			DP_STATS_UPD(vdev, rx_i.reo_rcvd_pkt.bytes, byte_count);

			/* Extract tx success packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num = pkt_count;

			/* Extract tx success packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_SUCCESS_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes = byte_count;

			/* Extract tx retry packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num = pkt_count;

			/* Extract tx retry packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_RETRY_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes = byte_count;

			/* Extract tx drop packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_DROP_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num += pkt_count;

			/* Extract tx drop packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_DROP_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes += byte_count;

			/* Extract tx age-out packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;
			tx_failed.num += pkt_count;

			/* Extract tx age-out packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_AGE_OUT_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;
			tx_failed.bytes += byte_count;

			/* Extract tqm bypass packet count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_PKT_CNT);
			pkt_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.num += pkt_count;

			/* Extract tx bypass packet byte count from buffer */
			tag_buf = tlv_buf_temp +
				HTT_VDEV_STATS_GET_INDEX(TX_TQM_BYPASS_BYTE_CNT);
			byte_count = HTT_VDEV_GET_STATS_U64(tag_buf);
			tx_comp.bytes += byte_count;

			DP_STATS_UPD(vdev, tx.comp_pkt.num, tx_comp.num);
			DP_STATS_UPD(vdev, tx.comp_pkt.bytes, tx_comp.bytes);

			DP_STATS_UPD(vdev, tx.tx_failed, tx_failed.num);

			dp_vdev_unref_delete(dpsoc, vdev, DP_MOD_ID_HTT);
			break;
		}
		default:
			qdf_assert(0);
		}
/* label also reached after each TLV; advance to the next TLV */
invalid_vdev:
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		payload_size -= tlv_length;
	}
}
2683 #else
/* No-op stub when QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT is not enabled. */
static void dp_vdev_txrx_hw_stats_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{}
2687 #endif
2688 
2689 #ifdef CONFIG_SAWF_DEF_QUEUES
/* Thin wrapper delegating the SAWF default-queues map report to the
 * SAWF HTT module.
 */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{
	dp_htt_sawf_def_queues_map_report_conf(soc, msg_word, htt_t2h_msg);
}
2696 #else
/* No-op stub when CONFIG_SAWF_DEF_QUEUES is not enabled. */
static void dp_sawf_def_queues_update_map_report_conf(struct htt_soc *soc,
						      uint32_t *msg_word,
						      qdf_nbuf_t htt_t2h_msg)
{}
2701 #endif
2702 
2703 #ifdef CONFIG_SAWF
2704 /**
2705  * dp_sawf_msduq_map() - Msdu queue creation information received
2706  * from target
2707  * @soc: soc handle.
2708  * @msg_word: Pointer to htt msg word.
2709  * @htt_t2h_msg: HTT message nbuf
2710  *
2711  * Return: void
2712  */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{
	/* delegate to the SAWF HTT module */
	dp_htt_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
}
2718 
2719 /**
2720  * dp_sawf_dynamic_ast_update() - Dynamic AST index update for SAWF peer
2721  * from target
2722  * @soc: soc handle.
2723  * @msg_word: Pointer to htt msg word.
2724  * @htt_t2h_msg: HTT message nbuf
2725  *
2726  * Return: void
2727  */
static void dp_sawf_dynamic_ast_update(struct htt_soc *soc, uint32_t *msg_word,
				       qdf_nbuf_t htt_t2h_msg)
{
	/* delegate to the SAWF HTT module */
	dp_htt_sawf_dynamic_ast_update(soc, msg_word, htt_t2h_msg);
}
2733 
2734 /**
2735  * dp_sawf_mpdu_stats_handler() - HTT message handler for MPDU stats
2736  * @soc: soc handle.
2737  * @htt_t2h_msg: HTT message nbuf
2738  *
2739  * Return: void
2740  */
static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
				       qdf_nbuf_t htt_t2h_msg)
{
	/* delegate to the SAWF HTT module */
	dp_sawf_htt_mpdu_stats_handler(soc, htt_t2h_msg);
}
2746 #else
/* No-op stub when CONFIG_SAWF is not enabled. */
static void dp_sawf_msduq_map(struct htt_soc *soc, uint32_t *msg_word,
			      qdf_nbuf_t htt_t2h_msg)
{}
2750 
/* No-op stub when CONFIG_SAWF is not enabled. */
static void dp_sawf_mpdu_stats_handler(struct htt_soc *soc,
				       qdf_nbuf_t htt_t2h_msg)
{}
/* No-op stub when CONFIG_SAWF is not enabled. */
static void dp_sawf_dynamic_ast_update(struct htt_soc *soc, uint32_t *msg_word,
				       qdf_nbuf_t htt_t2h_msg)
{}
2757 #endif
2758 
2759 /**
2760  * time_allow_print() - time allow print
2761  * @htt_bp_handler:	backpressure handler
2762  * @ring_id:		ring_id (index)
2763  * @th_time:		threshold time
2764  *
2765  * Return: 1 for successfully saving timestamp in array
2766  *	and 0 for timestamp falling within 2 seconds after last one
2767  */
2768 static bool time_allow_print(struct bp_handler *htt_bp_handler,
2769 			     u_int8_t ring_id, u_int32_t th_time)
2770 {
2771 	unsigned long tstamp;
2772 	struct bp_handler *path = &htt_bp_handler[ring_id];
2773 
2774 	tstamp = qdf_get_system_timestamp();
2775 
2776 	if (!path)
2777 		return 0; //unable to print backpressure messages
2778 
2779 	if (path->bp_start_tt == -1) {
2780 		path->bp_start_tt = tstamp;
2781 		path->bp_duration = 0;
2782 		path->bp_last_tt = tstamp;
2783 		path->bp_counter = 1;
2784 		return 1;
2785 	}
2786 
2787 	path->bp_duration = tstamp - path->bp_start_tt;
2788 	path->bp_last_tt = tstamp;
2789 	path->bp_counter++;
2790 
2791 	if (path->bp_duration >= th_time) {
2792 		path->bp_start_tt = -1;
2793 		return 1;
2794 	}
2795 
2796 	return 0;
2797 }
2798 
/* Emit the rate-limited backpressure alert for one ring: identifies the
 * pdev/ring, the head/tail indices reported by FW, and the accumulated
 * backpressure history tracked in htt_bp_handler[ring_id].
 */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       struct dp_pdev *pdev, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time,
			       struct bp_handler *htt_bp_handler,
			       char *ring_stype)
{
	dp_alert("seq_num %u msg_type: %d pdev_id: %d ring_type: %s ",
		 pdev->bkp_stats.seq_num, msg_type, pdev->pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
	dp_alert("last_bp_event: %ld, total_bp_duration: %ld, bp_counter: %ld",
		 htt_bp_handler[ring_id].bp_last_tt,
		 htt_bp_handler[ring_id].bp_duration,
		 htt_bp_handler[ring_id].bp_counter);
}
2815 
2816 /**
2817  * dp_get_srng_ring_state_from_hal(): Get hal level ring stats
2818  * @soc: DP_SOC handle
2819  * @pdev: DP pdev handle
2820  * @srng: DP_SRNG handle
2821  * @ring_type: srng src/dst ring
2822  * @state: ring state
2823  * @pdev: pdev
2824  * @srng: DP_SRNG handle
2825  * @ring_type: srng src/dst ring
2826  * @state: ring_state
2827  *
2828  * Return: void
2829  */
2830 static QDF_STATUS
2831 dp_get_srng_ring_state_from_hal(struct dp_soc *soc,
2832 				struct dp_pdev *pdev,
2833 				struct dp_srng *srng,
2834 				enum hal_ring_type ring_type,
2835 				struct dp_srng_ring_state *state)
2836 {
2837 	struct hal_soc *hal_soc;
2838 
2839 	if (!soc || !srng || !srng->hal_srng || !state)
2840 		return QDF_STATUS_E_INVAL;
2841 
2842 	hal_soc = (struct hal_soc *)soc->hal_soc;
2843 
2844 	hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &state->sw_tail,
2845 			&state->sw_head);
2846 
2847 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &state->hw_head,
2848 			&state->hw_tail, ring_type);
2849 
2850 	state->ring_type = ring_type;
2851 
2852 	return QDF_STATUS_SUCCESS;
2853 }
2854 
2855 #ifdef QCA_MONITOR_PKT_SUPPORT
/* Append the ring state of the three monitor rings (buf/dst/desc) for
 * @lmac_id to soc_srngs_state, advancing *num_srng for each ring whose
 * state was captured. Only applies when rxdma1 (full monitor) is on.
 */
static void
dp_queue_mon_ring_stats(struct dp_pdev *pdev,
			int lmac_id, uint32_t *num_srng,
			struct dp_soc_srngs_state *soc_srngs_state)
{
	QDF_STATUS status;

	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_buf_ring[lmac_id],
			 RXDMA_MONITOR_BUF,
			 &soc_srngs_state->ring_state[*num_srng]);

		/* NOTE(review): the increment lives inside
		 * qdf_assert_always; it must always evaluate its argument
		 * for *num_srng to advance.
		 */
		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);

		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_dst_ring[lmac_id],
			 RXDMA_MONITOR_DST,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);

		status = dp_get_srng_ring_state_from_hal
			(pdev->soc, pdev,
			 &pdev->soc->rxdma_mon_desc_ring[lmac_id],
			 RXDMA_MONITOR_DESC,
			 &soc_srngs_state->ring_state[*num_srng]);

		if (status == QDF_STATUS_SUCCESS)
			qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
	}
}
2892 #else
2893 static void
2894 dp_queue_mon_ring_stats(struct dp_pdev *pdev,
2895 			int lmac_id, uint32_t *num_srng,
2896 			struct dp_soc_srngs_state *soc_srngs_state)
2897 {
2898 }
2899 #endif
2900 
2901 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
2902 static inline QDF_STATUS
2903 dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
2904 					struct dp_srng_ring_state *ring_state)
2905 {
2906 	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
2907 					       &pdev->soc->tcl_cmd_credit_ring,
2908 					       TCL_CMD_CREDIT, ring_state);
2909 }
2910 #else
2911 static inline QDF_STATUS
2912 dp_get_tcl_cmd_cred_ring_state_from_hal(struct dp_pdev *pdev,
2913 					struct dp_srng_ring_state *ring_state)
2914 {
2915 	return QDF_STATUS_SUCCESS;
2916 }
2917 #endif
2918 
2919 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
2920 static inline QDF_STATUS
2921 dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
2922 				      struct dp_srng_ring_state *ring_state)
2923 {
2924 	return dp_get_srng_ring_state_from_hal(pdev->soc, pdev,
2925 					       &pdev->soc->tcl_status_ring,
2926 					       TCL_STATUS, ring_state);
2927 }
2928 #else
2929 static inline QDF_STATUS
2930 dp_get_tcl_status_ring_state_from_hal(struct dp_pdev *pdev,
2931 				      struct dp_srng_ring_state *ring_state)
2932 {
2933 	return QDF_STATUS_SUCCESS;
2934 }
2935 #endif
2936 
2937 /**
2938  * dp_queue_ring_stats() - Print pdev hal level ring stats
2939  * dp_queue_ring_stats(): Print pdev hal level ring stats
2940  * @pdev: DP_pdev handle
2941  *
2942  * Return: void
2943  */
2944 static void dp_queue_ring_stats(struct dp_pdev *pdev)
2945 {
2946 	uint32_t i;
2947 	int mac_id;
2948 	int lmac_id;
2949 	uint32_t j = 0;
2950 	struct dp_soc *soc = pdev->soc;
2951 	struct dp_soc_srngs_state * soc_srngs_state = NULL;
2952 	struct dp_soc_srngs_state *drop_srngs_state = NULL;
2953 	QDF_STATUS status;
2954 
2955 	soc_srngs_state = qdf_mem_malloc(sizeof(struct dp_soc_srngs_state));
2956 	if (!soc_srngs_state) {
2957 		dp_htt_alert("Memory alloc failed for back pressure event");
2958 		return;
2959 	}
2960 
2961 	status = dp_get_srng_ring_state_from_hal
2962 				(pdev->soc, pdev,
2963 				 &pdev->soc->reo_exception_ring,
2964 				 REO_EXCEPTION,
2965 				 &soc_srngs_state->ring_state[j]);
2966 
2967 	if (status == QDF_STATUS_SUCCESS)
2968 		qdf_assert_always(++j < DP_MAX_SRNGS);
2969 
2970 	status = dp_get_srng_ring_state_from_hal
2971 				(pdev->soc, pdev,
2972 				 &pdev->soc->reo_reinject_ring,
2973 				 REO_REINJECT,
2974 				 &soc_srngs_state->ring_state[j]);
2975 
2976 	if (status == QDF_STATUS_SUCCESS)
2977 		qdf_assert_always(++j < DP_MAX_SRNGS);
2978 
2979 	status = dp_get_srng_ring_state_from_hal
2980 				(pdev->soc, pdev,
2981 				 &pdev->soc->reo_cmd_ring,
2982 				 REO_CMD,
2983 				 &soc_srngs_state->ring_state[j]);
2984 
2985 	if (status == QDF_STATUS_SUCCESS)
2986 		qdf_assert_always(++j < DP_MAX_SRNGS);
2987 
2988 	status = dp_get_srng_ring_state_from_hal
2989 				(pdev->soc, pdev,
2990 				 &pdev->soc->reo_status_ring,
2991 				 REO_STATUS,
2992 				 &soc_srngs_state->ring_state[j]);
2993 
2994 	if (status == QDF_STATUS_SUCCESS)
2995 		qdf_assert_always(++j < DP_MAX_SRNGS);
2996 
2997 	status = dp_get_srng_ring_state_from_hal
2998 				(pdev->soc, pdev,
2999 				 &pdev->soc->rx_rel_ring,
3000 				 WBM2SW_RELEASE,
3001 				 &soc_srngs_state->ring_state[j]);
3002 
3003 	if (status == QDF_STATUS_SUCCESS)
3004 		qdf_assert_always(++j < DP_MAX_SRNGS);
3005 
3006 	status = dp_get_tcl_cmd_cred_ring_state_from_hal
3007 				(pdev, &soc_srngs_state->ring_state[j]);
3008 	if (status == QDF_STATUS_SUCCESS)
3009 		qdf_assert_always(++j < DP_MAX_SRNGS);
3010 
3011 	status = dp_get_tcl_status_ring_state_from_hal
3012 				(pdev, &soc_srngs_state->ring_state[j]);
3013 	if (status == QDF_STATUS_SUCCESS)
3014 		qdf_assert_always(++j < DP_MAX_SRNGS);
3015 
3016 	status = dp_get_srng_ring_state_from_hal
3017 				(pdev->soc, pdev,
3018 				 &pdev->soc->wbm_desc_rel_ring,
3019 				 SW2WBM_RELEASE,
3020 				 &soc_srngs_state->ring_state[j]);
3021 
3022 	if (status == QDF_STATUS_SUCCESS)
3023 		qdf_assert_always(++j < DP_MAX_SRNGS);
3024 
3025 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
3026 		status = dp_get_srng_ring_state_from_hal
3027 				(pdev->soc, pdev,
3028 				 &pdev->soc->reo_dest_ring[i],
3029 				 REO_DST,
3030 				 &soc_srngs_state->ring_state[j]);
3031 
3032 		if (status == QDF_STATUS_SUCCESS)
3033 			qdf_assert_always(++j < DP_MAX_SRNGS);
3034 	}
3035 
3036 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
3037 		status = dp_get_srng_ring_state_from_hal
3038 				(pdev->soc, pdev,
3039 				 &pdev->soc->tcl_data_ring[i],
3040 				 TCL_DATA,
3041 				 &soc_srngs_state->ring_state[j]);
3042 
3043 		if (status == QDF_STATUS_SUCCESS)
3044 			qdf_assert_always(++j < DP_MAX_SRNGS);
3045 	}
3046 
3047 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
3048 		status = dp_get_srng_ring_state_from_hal
3049 				(pdev->soc, pdev,
3050 				 &pdev->soc->tx_comp_ring[i],
3051 				 WBM2SW_RELEASE,
3052 				 &soc_srngs_state->ring_state[j]);
3053 
3054 		if (status == QDF_STATUS_SUCCESS)
3055 			qdf_assert_always(++j < DP_MAX_SRNGS);
3056 	}
3057 
3058 	lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id);
3059 	status = dp_get_srng_ring_state_from_hal
3060 				(pdev->soc, pdev,
3061 				 &pdev->soc->rx_refill_buf_ring
3062 				 [lmac_id],
3063 				 RXDMA_BUF,
3064 				 &soc_srngs_state->ring_state[j]);
3065 
3066 	if (status == QDF_STATUS_SUCCESS)
3067 		qdf_assert_always(++j < DP_MAX_SRNGS);
3068 
3069 	status = dp_get_srng_ring_state_from_hal
3070 				(pdev->soc, pdev,
3071 				 &pdev->rx_refill_buf_ring2,
3072 				 RXDMA_BUF,
3073 				 &soc_srngs_state->ring_state[j]);
3074 
3075 	if (status == QDF_STATUS_SUCCESS)
3076 		qdf_assert_always(++j < DP_MAX_SRNGS);
3077 
3078 
3079 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
3080 		dp_get_srng_ring_state_from_hal
3081 				(pdev->soc, pdev,
3082 				 &pdev->rx_mac_buf_ring[i],
3083 				 RXDMA_BUF,
3084 				 &soc_srngs_state->ring_state[j]);
3085 
3086 		if (status == QDF_STATUS_SUCCESS)
3087 			qdf_assert_always(++j < DP_MAX_SRNGS);
3088 	}
3089 
3090 	for (mac_id = 0;
3091 	     mac_id  < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
3092 	     mac_id++) {
3093 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
3094 						     mac_id, pdev->pdev_id);
3095 
3096 		dp_queue_mon_ring_stats(pdev, lmac_id, &j,
3097 					soc_srngs_state);
3098 
3099 		status = dp_get_srng_ring_state_from_hal
3100 			(pdev->soc, pdev,
3101 			 &pdev->soc->rxdma_mon_status_ring[lmac_id],
3102 			 RXDMA_MONITOR_STATUS,
3103 			 &soc_srngs_state->ring_state[j]);
3104 
3105 		if (status == QDF_STATUS_SUCCESS)
3106 			qdf_assert_always(++j < DP_MAX_SRNGS);
3107 	}
3108 
3109 	for (i = 0; i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
3110 		lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
3111 						     i, pdev->pdev_id);
3112 
3113 		status = dp_get_srng_ring_state_from_hal
3114 				(pdev->soc, pdev,
3115 				 &pdev->soc->rxdma_err_dst_ring
3116 				 [lmac_id],
3117 				 RXDMA_DST,
3118 				 &soc_srngs_state->ring_state[j]);
3119 
3120 		if (status == QDF_STATUS_SUCCESS)
3121 			qdf_assert_always(++j < DP_MAX_SRNGS);
3122 	}
3123 	soc_srngs_state->max_ring_id = j;
3124 
3125 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
3126 
3127 	soc_srngs_state->seq_num = pdev->bkp_stats.seq_num;
3128 
3129 	if (pdev->bkp_stats.queue_depth >= HTT_BKP_STATS_MAX_QUEUE_DEPTH) {
3130 		drop_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
3131 		qdf_assert_always(drop_srngs_state);
3132 		TAILQ_REMOVE(&pdev->bkp_stats.list, drop_srngs_state,
3133 			     list_elem);
3134 		qdf_mem_free(drop_srngs_state);
3135 		pdev->bkp_stats.queue_depth--;
3136 	}
3137 
3138 	pdev->bkp_stats.queue_depth++;
3139 	TAILQ_INSERT_TAIL(&pdev->bkp_stats.list, soc_srngs_state,
3140 			  list_elem);
3141 	pdev->bkp_stats.seq_num++;
3142 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
3143 
3144 	qdf_queue_work(0, pdev->bkp_stats.work_queue,
3145 		       &pdev->bkp_stats.work);
3146 }
3147 
3148 /**
3149  * dp_htt_bkp_event_alert() - htt backpressure event alert
3150  * @msg_word:	htt packet context
3151  * @soc:	HTT SOC handle
3152  *
3153  * Return: after attempting to print stats
3154  */
3155 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
3156 {
3157 	u_int8_t ring_type;
3158 	u_int8_t pdev_id;
3159 	uint8_t target_pdev_id;
3160 	u_int8_t ring_id;
3161 	u_int16_t hp_idx;
3162 	u_int16_t tp_idx;
3163 	u_int32_t bkp_time;
3164 	u_int32_t th_time;
3165 	enum htt_t2h_msg_type msg_type;
3166 	struct dp_soc *dpsoc;
3167 	struct dp_pdev *pdev;
3168 	struct dp_htt_timestamp *radio_tt;
3169 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3170 
3171 
3172 	if (!soc)
3173 		return;
3174 
3175 	dpsoc = (struct dp_soc *)soc->dp_soc;
3176 	soc_cfg_ctx = dpsoc->wlan_cfg_ctx;
3177 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3178 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
3179 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
3180 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
3181 							 target_pdev_id);
3182 	if (pdev_id >= MAX_PDEV_CNT) {
3183 		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
3184 		return;
3185 	}
3186 
3187 	th_time = wlan_cfg_time_control_bp(soc_cfg_ctx);
3188 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
3189 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
3190 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
3191 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
3192 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
3193 	radio_tt = &soc->pdevid_tt[pdev_id];
3194 
3195 	switch (ring_type) {
3196 	case HTT_SW_RING_TYPE_UMAC:
3197 		if (!time_allow_print(radio_tt->umac_path, ring_id, th_time))
3198 			return;
3199 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
3200 				   bkp_time, radio_tt->umac_path,
3201 				   "HTT_SW_RING_TYPE_UMAC");
3202 	break;
3203 	case HTT_SW_RING_TYPE_LMAC:
3204 		if (!time_allow_print(radio_tt->lmac_path, ring_id, th_time))
3205 			return;
3206 		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
3207 				   bkp_time, radio_tt->lmac_path,
3208 				   "HTT_SW_RING_TYPE_LMAC");
3209 	break;
3210 	default:
3211 		dp_alert("Invalid ring type: %d", ring_type);
3212 	break;
3213 	}
3214 
3215 	dp_queue_ring_stats(pdev);
3216 }
3217 
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_offload_ind_handler() - offload msg handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Translates the target pdev id in the message to the host pdev id and
 * forwards the raw payload as a WDI packet-capture offload event.
 *
 * Return: None
 */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
	u_int8_t pdev_id;
	u_int8_t target_pdev_id;

	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA, soc->dp_soc,
			     msg_word, HTT_INVALID_VDEV, WDI_NO_VAL,
			     pdev_id);
}
#else
/* Stub when packet capture v2 is compiled out: indication is ignored */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
}
#endif
3245 
3246 #ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
/**
 * dp_update_mlo_ts_offset() - Assemble the 64-bit MLO timestamp offset
 *			       from its two 32-bit halves and push it up
 *			       through the cdp mlo ops
 * @soc: DP SOC handle
 * @ts_lo: lower 32 bits of the MLO timestamp offset
 * @ts_hi: upper 32 bits of the MLO timestamp offset
 */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{
	uint64_t mlo_offset;

	mlo_offset = ((uint64_t)(ts_hi) << 32 | ts_lo);
	soc->cdp_soc.ops->mlo_ops->mlo_update_mlo_ts_offset
		((struct cdp_soc_t *)soc, mlo_offset);
}

/**
 * dp_update_mlo_delta_tsf2() - Read the tsf2 offset for this pdev's
 *				lmac from hal and report it via the cdp
 *				mlo ops
 * @soc: DP SOC handle
 * @pdev: DP pdev handle
 */
static inline
void dp_update_mlo_delta_tsf2(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint64_t delta_tsf2 = 0;

	hal_get_tsf2_offset(soc->hal_soc, pdev->lmac_id, &delta_tsf2);
	soc->cdp_soc.ops->mlo_ops->mlo_update_delta_tsf2
		((struct cdp_soc_t *)soc, pdev->pdev_id, delta_tsf2);
}
#else
/* Single-chip builds: no cross-chip timestamp synchronization needed */
static inline void dp_update_mlo_ts_offset(struct dp_soc *soc,
					   uint32_t ts_lo, uint32_t ts_hi)
{}
static inline
void dp_update_mlo_delta_tsf2(struct dp_soc *soc, struct dp_pdev *pdev)
{}
#endif
/**
 * dp_htt_mlo_peer_map_handler() - Handle HTT MLO peer map indication
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Parses the fixed portion of the message (MLO peer id, logical link
 * count, deswizzled MAC address, AST/flow override info), then walks the
 * trailing TLV list to collect per-link chip id and vdev id, and finally
 * hands everything to dp_rx_mlo_peer_map_handler().
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
	uint8_t *mlo_peer_mac_addr;
	uint16_t mlo_peer_id;
	uint8_t num_links;
	struct dp_mlo_flow_override_info mlo_flow_info[DP_MLO_FLOW_INFO_MAX];
	struct dp_mlo_link_info mlo_link_info[DP_MAX_MLO_LINKS];
	MLO_PEER_MAP_TLV_TAG_ID tlv_type = 0xff;
	uint16_t tlv_len = 0;
	int i = 0;

	mlo_peer_id = HTT_RX_MLO_PEER_MAP_MLO_PEER_ID_GET(*msg_word);
	num_links =
		HTT_RX_MLO_PEER_MAP_NUM_LOGICAL_LINKS_GET(*msg_word);
	mlo_peer_mac_addr =
	htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
				   &mac_addr_deswizzle_buf[0]);

	/* NOTE(review): entries [0], [1] and [2] below all decode the same
	 * message word (msg_word + 3), so the three flow info entries end
	 * up identical — confirm against the HTT message layout whether
	 * entries 1 and 2 should read subsequent words instead.
	 */
	mlo_flow_info[0].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[0].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[0].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	mlo_flow_info[1].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[1].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[1].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	mlo_flow_info[2].ast_idx =
		HTT_RX_MLO_PEER_MAP_PRIMARY_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].ast_idx_valid =
		HTT_RX_MLO_PEER_MAP_AST_INDEX_VALID_FLAG_GET(*(msg_word + 3));
	mlo_flow_info[2].chip_id =
		HTT_RX_MLO_PEER_MAP_CHIP_ID_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].tidmask =
		HTT_RX_MLO_PEER_MAP_TIDMASK_AST_INDEX_GET(*(msg_word + 3));
	mlo_flow_info[2].cache_set_num =
	HTT_RX_MLO_PEER_MAP_CACHE_SET_NUM_AST_INDEX_GET(*(msg_word + 3));

	/* TLV section starts 8 words into the message */
	msg_word = msg_word + 8;
	while (msg_word && (i < DP_MAX_MLO_LINKS)) {
		/* Default to "invalid" so unparsed links are recognizable */
		mlo_link_info[i].peer_chip_id = 0xFF;
		mlo_link_info[i].vdev_id = 0xFF;

		tlv_type = HTT_RX_MLO_PEER_MAP_TLV_TAG_GET(*msg_word);
		tlv_len = HTT_RX_MLO_PEER_MAP_TLV_LENGTH_GET(*msg_word);

		/* A zero-length TLV would loop forever; stop parsing */
		if (tlv_len == 0) {
			dp_err("TLV Length is 0");
			break;
		}

		if (tlv_type == MLO_PEER_MAP_TLV_STRUCT_SOC_VDEV_PEER_IDS) {
			mlo_link_info[i].peer_chip_id =
				HTT_RX_MLO_PEER_MAP_CHIP_ID_GET(
							*(msg_word + 1));
			mlo_link_info[i].vdev_id =
				HTT_RX_MLO_PEER_MAP_VDEV_ID_GET(
							*(msg_word + 1));
		}
		/* Add header size to tlv length */
		tlv_len = tlv_len + HTT_TLV_HDR_LEN;
		msg_word = (uint32_t *)(((uint8_t *)msg_word) + tlv_len);
		i++;
	}

	dp_rx_mlo_peer_map_handler(soc->dp_soc, mlo_peer_id,
				   mlo_peer_mac_addr,
				   mlo_flow_info, mlo_link_info);
}
3359 
3360 #ifdef QCA_SUPPORT_PRIMARY_LINK_MIGRATE
3361 static void dp_htt_t2h_primary_link_migration(struct htt_soc *soc,
3362 					      uint32_t *msg_word)
3363 {
3364 	u_int16_t peer_id;
3365 	u_int16_t ml_peer_id;
3366 	u_int16_t vdev_id;
3367 	u_int8_t pdev_id;
3368 	u_int8_t chip_id;
3369 
3370 	vdev_id = HTT_T2H_PRIMARY_LINK_PEER_MIGRATE_VDEV_ID_GET(
3371 			*msg_word);
3372 	pdev_id = HTT_T2H_PRIMARY_LINK_PEER_MIGRATE_PDEV_ID_GET(
3373 			*msg_word);
3374 	chip_id = HTT_T2H_PRIMARY_LINK_PEER_MIGRATE_CHIP_ID_GET(
3375 			*msg_word);
3376 	ml_peer_id = HTT_T2H_PRIMARY_LINK_PEER_MIGRATE_ML_PEER_ID_GET(
3377 			*(msg_word + 1));
3378 	peer_id = HTT_T2H_PRIMARY_LINK_PEER_MIGRATE_SW_LINK_PEER_ID_GET(
3379 			*(msg_word + 1));
3380 
3381 	dp_htt_info("HTT_T2H_MSG_TYPE_PRIMARY_PEER_MIGRATE_IND msg"
3382 		    "for peer id %d vdev id %d", peer_id, vdev_id);
3383 
3384 	dp_htt_reo_migration(soc->dp_soc, peer_id, ml_peer_id,
3385 			vdev_id, pdev_id, chip_id);
3386 }
3387 #else
3388 static void dp_htt_t2h_primary_link_migration(struct htt_soc *soc,
3389 					      uint32_t *msg_word)
3390 {
3391 }
3392 #endif
3393 
3394 static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
3395 					  uint32_t *msg_word)
3396 {
3397 	uint16_t mlo_peer_id;
3398 
3399 	mlo_peer_id = HTT_RX_MLO_PEER_UNMAP_MLO_PEER_ID_GET(*msg_word);
3400 	dp_rx_mlo_peer_unmap_handler(soc->dp_soc, mlo_peer_id);
3401 }
3402 
/**
 * dp_rx_mlo_timestamp_ind_handler() - Handle MLO timestamp offset
 *				       indication from FW
 * @soc: DP SOC handle
 * @msg_word: Pointer to payload
 *
 * Validates the pdev resolved from the message, forwards the raw payload
 * as a WDI event, caches the per-pdev timestamp/offset fields under the
 * htt stats lock, and finally propagates the MLO offset and delta tsf2
 * to the MLO layer.
 */
static void
dp_rx_mlo_timestamp_ind_handler(struct dp_soc *soc,
				uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint8_t target_pdev_id;
	struct dp_pdev *pdev;

	if (!soc)
		return;

	target_pdev_id = HTT_T2H_MLO_TIMESTAMP_OFFSET_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc,
							 target_pdev_id);

	if (pdev_id >= MAX_PDEV_CNT) {
		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
		return;
	}

	pdev = (struct dp_pdev *)soc->pdev_list[pdev_id];

	if (!pdev) {
		dp_err("Invalid pdev");
		return;
	}
	dp_wdi_event_handler(WDI_EVENT_MLO_TSTMP, soc,
			     msg_word, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);

	/* Serialize updates to pdev->timestamp against readers */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	pdev->timestamp.msg_type =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MSG_TYPE_GET(*msg_word);
	pdev->timestamp.pdev_id = pdev_id;
	pdev->timestamp.chip_id =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_CHIP_ID_GET(*msg_word);
	pdev->timestamp.mac_clk_freq =
		HTT_T2H_MLO_TIMESTAMP_OFFSET_MAC_CLK_FREQ_MHZ_GET(*msg_word);
	/* Words 1..5 are full 32-bit fields; copied without masking */
	pdev->timestamp.sync_tstmp_lo_us = *(msg_word + 1);
	pdev->timestamp.sync_tstmp_hi_us = *(msg_word + 2);
	pdev->timestamp.mlo_offset_lo_us = *(msg_word + 3);
	pdev->timestamp.mlo_offset_hi_us = *(msg_word + 4);
	pdev->timestamp.mlo_offset_clks  = *(msg_word + 5);
	pdev->timestamp.mlo_comp_us =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_US_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_clks =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_CLKS_GET(
							*(msg_word + 6));
	pdev->timestamp.mlo_comp_timer =
	HTT_T2H_MLO_TIMESTAMP_OFFSET_MLO_TIMESTAMP_COMP_PERIOD_US_GET(
							*(msg_word + 7));

	dp_htt_debug("tsf_lo=%d tsf_hi=%d, mlo_ofst_lo=%d, mlo_ofst_hi=%d",
		     pdev->timestamp.sync_tstmp_lo_us,
		     pdev->timestamp.sync_tstmp_hi_us,
		     pdev->timestamp.mlo_offset_lo_us,
		     pdev->timestamp.mlo_offset_hi_us);

	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	dp_update_mlo_ts_offset(soc,
				pdev->timestamp.mlo_offset_lo_us,
				pdev->timestamp.mlo_offset_hi_us);

	dp_update_mlo_delta_tsf2(soc, pdev);
}
3470 #else
/* WLAN_FEATURE_11BE_MLO is compiled out: MLO peer map/unmap and MLO
 * timestamp indications must never arrive from FW in this configuration,
 * so their arrival is treated as fatal.
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
					 uint32_t *msg_word)
{
	qdf_assert_always(0);
}

static void
dp_rx_mlo_timestamp_ind_handler(void *soc_handle,
				uint32_t *msg_word)
{
	qdf_assert_always(0);
}

/* Primary link migration is silently ignored without MLO support */
static void dp_htt_t2h_primary_link_migration(struct htt_soc *soc,
					      uint32_t *msg_word)
{
}
3494 #endif
3495 
3496 /**
3497  * dp_htt_rx_addba_handler() - RX Addba HTT msg handler
3498  * @soc: DP Soc handler
3499  * @peer_id: ID of peer
3500  * @tid: TID number
3501  * @win_sz: BA window size
3502  *
3503  * Return: None
3504  */
3505 static void
3506 dp_htt_rx_addba_handler(struct dp_soc *soc, uint16_t peer_id,
3507 			uint8_t tid, uint16_t win_sz)
3508 {
3509 	uint16_t status;
3510 	struct dp_peer *peer;
3511 
3512 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3513 
3514 	if (!peer) {
3515 		dp_err("Peer not found peer id %d", peer_id);
3516 		return;
3517 	}
3518 
3519 	status = dp_addba_requestprocess_wifi3((struct cdp_soc_t *)soc,
3520 					       peer->mac_addr.raw,
3521 					       peer->vdev->vdev_id, 0,
3522 					       tid, 0, win_sz, 0xffff);
3523 
3524 	dp_addba_resp_tx_completion_wifi3(
3525 		(struct cdp_soc_t *)soc,
3526 		peer->mac_addr.raw, peer->vdev->vdev_id,
3527 		tid,
3528 		status);
3529 
3530 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3531 
3532 	dp_info("PeerID %d BAW %d TID %d stat %d",
3533 		peer_id, win_sz, tid, status);
3534 }
3535 
3536 /**
3537  * dp_htt_ppdu_id_fmt_handler() - PPDU ID Format handler
3538  * @soc: HTT SOC handle
3539  * @msg_word: Pointer to payload
3540  *
3541  * Return: None
3542  */
3543 static void
3544 dp_htt_ppdu_id_fmt_handler(struct dp_soc *soc, uint32_t *msg_word)
3545 {
3546 	uint8_t msg_type, valid, bits, offset;
3547 
3548 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3549 
3550 	msg_word += HTT_PPDU_ID_FMT_IND_LINK_ID_OFFSET;
3551 	valid = HTT_PPDU_ID_FMT_IND_VALID_GET_BITS31_16(*msg_word);
3552 	bits = HTT_PPDU_ID_FMT_IND_BITS_GET_BITS31_16(*msg_word);
3553 	offset = HTT_PPDU_ID_FMT_IND_OFFSET_GET_BITS31_16(*msg_word);
3554 
3555 	dp_info("link_id: valid %u bits %u offset %u", valid, bits, offset);
3556 
3557 	if (valid) {
3558 		soc->link_id_offset = offset;
3559 		soc->link_id_bits = bits;
3560 	}
3561 }
3562 
#ifdef IPA_OPT_WIFI_DP
/**
 * dp_ipa_rx_cce_super_rule_setup_done_handler() - Handle CCE super rule
 *						   setup done indication
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 *
 * Dispatches on the response type: filter reserve requests are tracked
 * with a failure counter and notified to IPA, while install/release
 * responses report the per-filter configuration results back to IPA.
 */
static void dp_ipa_rx_cce_super_rule_setup_done_handler(struct htt_soc *soc,
							uint32_t *msg_word)
{
	uint8_t pdev_id = 0;
	uint8_t resp_type = 0;
	uint8_t is_rules_enough = 0;
	uint8_t num_rules_avail = 0;
	int filter0_result = 0, filter1_result = 0;
	bool is_success = false;

	pdev_id = HTT_RX_CCE_SUPER_RULE_SETUP_DONE_PDEV_ID_GET(*msg_word);
	resp_type = HTT_RX_CCE_SUPER_RULE_SETUP_DONE_RESPONSE_TYPE_GET(
								*msg_word);
	dp_info("opt_dp:: cce_super_rule_rsp pdev_id: %d resp_type: %d",
		pdev_id, resp_type);

	switch (resp_type) {
	case HTT_RX_CCE_SUPER_RULE_SETUP_REQ_RESPONSE:
	{
		is_rules_enough =
			HTT_RX_CCE_SUPER_RULE_SETUP_DONE_IS_RULE_ENOUGH_GET(
								*msg_word);
		num_rules_avail =
			HTT_RX_CCE_SUPER_RULE_SETUP_DONE_AVAIL_RULE_NUM_GET(
								*msg_word);
		if (is_rules_enough == 1) {
			is_success = true;
			/* Successful reserve resets the failure streak */
			soc->stats.reserve_fail_cnt = 0;
		} else {
			is_success = false;
			soc->stats.reserve_fail_cnt++;
			if (soc->stats.reserve_fail_cnt >
					MAX_RESERVE_FAIL_ATTEMPT) {
				/*
				 * IPA will retry only after an hour by default
				 * after MAX_RESERVE_FAIL_ATTEMPT
				 */
				soc->stats.abort_count++;
				soc->stats.reserve_fail_cnt = 0;
				dp_info(
				  "opt_dp: Filter reserve failed max attempts");
			}
			dp_info("opt_dp:: Filter reserve failed. Rules avail %d",
				num_rules_avail);
		}
		dp_ipa_wdi_opt_dpath_notify_flt_rsvd(is_success);
		break;
	}
	case HTT_RX_CCE_SUPER_RULE_INSTALL_RESPONSE:
	{
		filter0_result =
			HTT_RX_CCE_SUPER_RULE_SETUP_DONE_CFG_RESULT_0_GET(
								     *msg_word);
		filter1_result =
			HTT_RX_CCE_SUPER_RULE_SETUP_DONE_CFG_RESULT_1_GET(
								     *msg_word);

		dp_ipa_wdi_opt_dpath_notify_flt_add_rem_cb(filter0_result,
							   filter1_result);
		break;
	}
	case HTT_RX_CCE_SUPER_RULE_RELEASE_RESPONSE:
	{
		filter0_result =
			HTT_RX_CCE_SUPER_RULE_SETUP_DONE_CFG_RESULT_0_GET(
								     *msg_word);
		filter1_result =
			HTT_RX_CCE_SUPER_RULE_SETUP_DONE_CFG_RESULT_1_GET(
								     *msg_word);

		dp_ipa_wdi_opt_dpath_notify_flt_rlsd(filter0_result,
						     filter1_result);
		break;
	}
	default:
		dp_info("opt_dp:: Wrong Super rule setup response");
	};

	dp_info("opt_dp:: cce super rule resp type: %d, is_rules_enough: %d",
		resp_type, is_rules_enough);
	dp_info("num_rules_avail: %d, rslt0: %d, rslt1: %d",
		num_rules_avail, filter0_result, filter1_result);
}
#else
/* Stub when IPA optional wifi datapath is compiled out */
static void dp_ipa_rx_cce_super_rule_setup_done_handler(struct htt_soc *soc,
							uint32_t *msg_word)
{
}
#endif
3653 
3654 void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
3655 {
3656 	struct htt_soc *soc = (struct htt_soc *) context;
3657 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
3658 	u_int32_t *msg_word;
3659 	enum htt_t2h_msg_type msg_type;
3660 	bool free_buf = true;
3661 
3662 	/* check for successful message reception */
3663 	if (pkt->Status != QDF_STATUS_SUCCESS) {
3664 		if (pkt->Status != QDF_STATUS_E_CANCELED)
3665 			soc->stats.htc_err_cnt++;
3666 
3667 		qdf_nbuf_free(htt_t2h_msg);
3668 		return;
3669 	}
3670 
3671 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
3672 
3673 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
3674 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3675 	htt_event_record(soc->htt_logger_handle,
3676 			 msg_type, (uint8_t *)msg_word);
3677 	switch (msg_type) {
3678 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
3679 	{
3680 		dp_htt_bkp_event_alert(msg_word, soc);
3681 		break;
3682 	}
3683 	case HTT_T2H_MSG_TYPE_PEER_MAP:
3684 		{
3685 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3686 			u_int8_t *peer_mac_addr;
3687 			u_int16_t peer_id;
3688 			u_int16_t hw_peer_id;
3689 			u_int8_t vdev_id;
3690 			u_int8_t is_wds;
3691 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
3692 
3693 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
3694 			hw_peer_id =
3695 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
3696 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
3697 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
3698 				(u_int8_t *) (msg_word+1),
3699 				&mac_addr_deswizzle_buf[0]);
3700 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3701 				QDF_TRACE_LEVEL_DEBUG,
3702 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3703 				peer_id, vdev_id);
3704 
3705 			/*
3706 			 * check if peer already exists for this peer_id, if so
3707 			 * this peer map event is in response for a wds peer add
3708 			 * wmi command sent during wds source port learning.
3709 			 * in this case just add the ast entry to the existing
3710 			 * peer ast_list.
3711 			 */
3712 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
3713 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
3714 					       vdev_id, peer_mac_addr, 0,
3715 					       is_wds);
3716 			break;
3717 		}
3718 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
3719 		{
3720 			u_int16_t peer_id;
3721 			u_int8_t vdev_id;
3722 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3723 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
3724 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
3725 
3726 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3727 						 vdev_id, mac_addr, 0,
3728 						 DP_PEER_WDS_COUNT_INVALID);
3729 			break;
3730 		}
3731 	case HTT_T2H_MSG_TYPE_SEC_IND:
3732 		{
3733 			u_int16_t peer_id;
3734 			enum cdp_sec_type sec_type;
3735 			int is_unicast;
3736 
3737 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
3738 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
3739 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
3740 			/* point to the first part of the Michael key */
3741 			msg_word++;
3742 			dp_rx_sec_ind_handler(
3743 				soc->dp_soc, peer_id, sec_type, is_unicast,
3744 				msg_word, msg_word + 2);
3745 			break;
3746 		}
3747 
3748 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
3749 		{
3750 			free_buf =
3751 				dp_monitor_ppdu_stats_ind_handler(soc,
3752 								  msg_word,
3753 								  htt_t2h_msg);
3754 			break;
3755 		}
3756 
3757 	case HTT_T2H_MSG_TYPE_PKTLOG:
3758 		{
3759 			dp_pktlog_msg_handler(soc, msg_word);
3760 			break;
3761 		}
3762 
3763 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3764 		{
3765 			/*
3766 			 * HTC maintains runtime pm count for H2T messages that
3767 			 * have a response msg from FW. This count ensures that
3768 			 * in the case FW does not sent out the response or host
3769 			 * did not process this indication runtime_put happens
3770 			 * properly in the cleanup path.
3771 			 */
3772 			if (htc_dec_return_htt_runtime_cnt(soc->htc_soc) >= 0)
3773 				htc_pm_runtime_put(soc->htc_soc);
3774 			else
3775 				soc->stats.htt_ver_req_put_skip++;
3776 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3777 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3778 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
3779 				"target uses HTT version %d.%d; host uses %d.%d",
3780 				soc->tgt_ver.major, soc->tgt_ver.minor,
3781 				HTT_CURRENT_VERSION_MAJOR,
3782 				HTT_CURRENT_VERSION_MINOR);
3783 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3784 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3785 					QDF_TRACE_LEVEL_WARN,
3786 					"*** Incompatible host/target HTT versions!");
3787 			}
3788 			/* abort if the target is incompatible with the host */
3789 			qdf_assert(soc->tgt_ver.major ==
3790 				HTT_CURRENT_VERSION_MAJOR);
3791 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3792 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3793 					QDF_TRACE_LEVEL_INFO_LOW,
3794 					"*** Warning: host/target HTT versions"
3795 					" are different, though compatible!");
3796 			}
3797 			break;
3798 		}
3799 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3800 		{
3801 			uint16_t peer_id;
3802 			uint8_t tid;
3803 			uint16_t win_sz;
3804 
3805 			/*
3806 			 * Update REO Queue Desc with new values
3807 			 */
3808 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3809 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3810 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3811 
3812 			/*
3813 			 * Window size needs to be incremented by 1
3814 			 * since fw needs to represent a value of 256
3815 			 * using just 8 bits
3816 			 */
3817 			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
3818 						tid, win_sz + 1);
3819 			break;
3820 		}
3821 	case HTT_T2H_MSG_TYPE_RX_ADDBA_EXTN:
3822 		{
3823 			uint16_t peer_id;
3824 			uint8_t tid;
3825 			uint16_t win_sz;
3826 
3827 			peer_id = HTT_RX_ADDBA_EXTN_PEER_ID_GET(*msg_word);
3828 			tid = HTT_RX_ADDBA_EXTN_TID_GET(*msg_word);
3829 
3830 			msg_word++;
3831 			win_sz = HTT_RX_ADDBA_EXTN_WIN_SIZE_GET(*msg_word);
3832 
3833 			dp_htt_rx_addba_handler(soc->dp_soc, peer_id,
3834 						tid, win_sz);
3835 			break;
3836 		}
3837 	case HTT_T2H_PPDU_ID_FMT_IND:
3838 		{
3839 			dp_htt_ppdu_id_fmt_handler(soc->dp_soc, msg_word);
3840 			break;
3841 		}
3842 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3843 		{
3844 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3845 			break;
3846 		}
3847 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
3848 		{
3849 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3850 			u_int8_t *peer_mac_addr;
3851 			u_int16_t peer_id;
3852 			u_int16_t hw_peer_id;
3853 			u_int8_t vdev_id;
3854 			bool is_wds;
3855 			u_int16_t ast_hash;
3856 			struct dp_ast_flow_override_info ast_flow_info = {0};
3857 
3858 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
3859 			hw_peer_id =
3860 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
3861 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
3862 			peer_mac_addr =
3863 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3864 						   &mac_addr_deswizzle_buf[0]);
3865 			is_wds =
3866 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
3867 			ast_hash =
3868 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
3869 			/*
3870 			 * Update 4 ast_index per peer, ast valid mask
3871 			 * and TID flow valid mask.
3872 			 * AST valid mask is 3 bit field corresponds to
3873 			 * ast_index[3:1]. ast_index 0 is always valid.
3874 			 */
3875 			ast_flow_info.ast_valid_mask =
3876 			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
3877 			ast_flow_info.ast_idx[0] = hw_peer_id;
3878 			ast_flow_info.ast_flow_mask[0] =
3879 			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
3880 			ast_flow_info.ast_idx[1] =
3881 			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
3882 			ast_flow_info.ast_flow_mask[1] =
3883 			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
3884 			ast_flow_info.ast_idx[2] =
3885 			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
3886 			ast_flow_info.ast_flow_mask[2] =
3887 			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
3888 			ast_flow_info.ast_idx[3] =
3889 			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
3890 			ast_flow_info.ast_flow_mask[3] =
3891 			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
3892 			/*
3893 			 * TID valid mask is applicable only
3894 			 * for HI and LOW priority flows.
3895 			 * tid_valid_mas is 8 bit field corresponds
3896 			 * to TID[7:0]
3897 			 */
3898 			ast_flow_info.tid_valid_low_pri_mask =
3899 			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
3900 			ast_flow_info.tid_valid_hi_pri_mask =
3901 			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));
3902 
3903 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3904 				  QDF_TRACE_LEVEL_DEBUG,
3905 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3906 				  peer_id, vdev_id);
3907 
3908 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3909 				  QDF_TRACE_LEVEL_INFO,
3910 				  "ast_idx[0] %d ast_idx[1] %d ast_idx[2] %d ast_idx[3] %d n",
3911 				  ast_flow_info.ast_idx[0],
3912 				  ast_flow_info.ast_idx[1],
3913 				  ast_flow_info.ast_idx[2],
3914 				  ast_flow_info.ast_idx[3]);
3915 
3916 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3917 					       hw_peer_id, vdev_id,
3918 					       peer_mac_addr, ast_hash,
3919 					       is_wds);
3920 
3921 			/*
3922 			 * Update ast indexes for flow override support
3923 			 * Applicable only for non wds peers
3924 			 */
3925 			if (!soc->dp_soc->ast_offload_support)
3926 				dp_peer_ast_index_flow_queue_map_create(
3927 						soc->dp_soc, is_wds,
3928 						peer_id, peer_mac_addr,
3929 						&ast_flow_info);
3930 
3931 			break;
3932 		}
3933 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
3934 		{
3935 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3936 			u_int8_t *mac_addr;
3937 			u_int16_t peer_id;
3938 			u_int8_t vdev_id;
3939 			u_int8_t is_wds;
3940 			u_int32_t free_wds_count;
3941 
3942 			peer_id =
3943 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
3944 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
3945 			mac_addr =
3946 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3947 						   &mac_addr_deswizzle_buf[0]);
3948 			is_wds =
3949 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
3950 			free_wds_count =
3951 			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));
3952 
3953 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3954 				  QDF_TRACE_LEVEL_INFO,
3955 				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
3956 				  peer_id, vdev_id);
3957 
3958 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3959 						 vdev_id, mac_addr,
3960 						 is_wds, free_wds_count);
3961 			break;
3962 		}
3963 	case HTT_T2H_MSG_TYPE_RX_DELBA:
3964 		{
3965 			uint16_t peer_id;
3966 			uint8_t tid;
3967 			uint8_t win_sz;
3968 			QDF_STATUS status;
3969 
3970 			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
3971 			tid = HTT_RX_DELBA_TID_GET(*msg_word);
3972 			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);
3973 
3974 			status = dp_rx_delba_ind_handler(
3975 				soc->dp_soc,
3976 				peer_id, tid, win_sz);
3977 
3978 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3979 				  QDF_TRACE_LEVEL_INFO,
3980 				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
3981 				  peer_id, win_sz, tid, status);
3982 			break;
3983 		}
3984 	case HTT_T2H_MSG_TYPE_RX_DELBA_EXTN:
3985 		{
3986 			uint16_t peer_id;
3987 			uint8_t tid;
3988 			uint16_t win_sz;
3989 			QDF_STATUS status;
3990 
3991 			peer_id = HTT_RX_DELBA_EXTN_PEER_ID_GET(*msg_word);
3992 			tid = HTT_RX_DELBA_EXTN_TID_GET(*msg_word);
3993 
3994 			msg_word++;
3995 			win_sz = HTT_RX_DELBA_EXTN_WIN_SIZE_GET(*msg_word);
3996 
3997 			status = dp_rx_delba_ind_handler(soc->dp_soc,
3998 							 peer_id, tid,
3999 							 win_sz);
4000 
4001 			dp_info("DELBA PeerID %d BAW %d TID %d stat %d",
4002 				peer_id, win_sz, tid, status);
4003 			break;
4004 		}
4005 	case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
4006 		{
4007 			uint16_t num_entries;
4008 			uint32_t cmem_ba_lo;
4009 			uint32_t cmem_ba_hi;
4010 
4011 			num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
4012 			cmem_ba_lo = *(msg_word + 1);
4013 			cmem_ba_hi = *(msg_word + 2);
4014 
4015 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
4016 				  FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
4017 				  num_entries, cmem_ba_lo, cmem_ba_hi);
4018 
4019 			dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
4020 						     cmem_ba_lo, cmem_ba_hi);
4021 			break;
4022 		}
4023 	case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
4024 		{
4025 			dp_offload_ind_handler(soc, msg_word);
4026 			break;
4027 		}
4028 	case HTT_T2H_MSG_TYPE_PEER_MAP_V3:
4029 	{
4030 		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4031 		u_int8_t *peer_mac_addr;
4032 		u_int16_t peer_id;
4033 		u_int16_t hw_peer_id;
4034 		u_int8_t vdev_id;
4035 		uint8_t is_wds;
4036 		u_int16_t ast_hash = 0;
4037 
4038 		peer_id = HTT_RX_PEER_MAP_V3_SW_PEER_ID_GET(*msg_word);
4039 		vdev_id = HTT_RX_PEER_MAP_V3_VDEV_ID_GET(*msg_word);
4040 		peer_mac_addr =
4041 		htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
4042 					   &mac_addr_deswizzle_buf[0]);
4043 		hw_peer_id = HTT_RX_PEER_MAP_V3_HW_PEER_ID_GET(*(msg_word + 3));
4044 		ast_hash = HTT_RX_PEER_MAP_V3_CACHE_SET_NUM_GET(*(msg_word + 3));
4045 		is_wds = HTT_RX_PEER_MAP_V3_NEXT_HOP_GET(*(msg_word + 4));
4046 
4047 		dp_htt_info("HTT_T2H_MSG_TYPE_PEER_MAP_V3 msg for peer id %d vdev id %d n",
4048 			    peer_id, vdev_id);
4049 
4050 		dp_rx_peer_map_handler(soc->dp_soc, peer_id,
4051 				       hw_peer_id, vdev_id,
4052 				       peer_mac_addr, ast_hash,
4053 				       is_wds);
4054 
4055 		break;
4056 	}
4057 	case HTT_T2H_MSG_TYPE_PRIMARY_LINK_PEER_MIGRATE_IND:
4058 	{
4059 		dp_htt_t2h_primary_link_migration(soc, msg_word);
4060 		break;
4061 	}
4062 	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP:
4063 	{
4064 		dp_htt_mlo_peer_map_handler(soc, msg_word);
4065 		break;
4066 	}
4067 	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_UNMAP:
4068 	{
4069 		dp_htt_mlo_peer_unmap_handler(soc, msg_word);
4070 		break;
4071 	}
4072 	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
4073 	{
4074 		dp_rx_mlo_timestamp_ind_handler(soc->dp_soc, msg_word);
4075 		break;
4076 	}
4077 	case HTT_T2H_MSG_TYPE_VDEVS_TXRX_STATS_PERIODIC_IND:
4078 	{
4079 		dp_vdev_txrx_hw_stats_handler(soc, msg_word);
4080 		break;
4081 	}
4082 	case HTT_T2H_SAWF_DEF_QUEUES_MAP_REPORT_CONF:
4083 	{
4084 		dp_sawf_def_queues_update_map_report_conf(soc, msg_word,
4085 							  htt_t2h_msg);
4086 		break;
4087 	}
4088 	case HTT_T2H_SAWF_MSDUQ_INFO_IND:
4089 	{
4090 		dp_sawf_msduq_map(soc, msg_word, htt_t2h_msg);
4091 		break;
4092 	}
4093 	case HTT_T2H_MSG_TYPE_PEER_AST_OVERRIDE_INDEX_IND:
4094 	{
4095 		dp_sawf_dynamic_ast_update(soc, msg_word, htt_t2h_msg);
4096 		break;
4097 	}
4098 	case HTT_T2H_MSG_TYPE_STREAMING_STATS_IND:
4099 	{
4100 		dp_sawf_mpdu_stats_handler(soc, htt_t2h_msg);
4101 		break;
4102 	}
4103 	case HTT_T2H_MSG_TYPE_RX_CCE_SUPER_RULE_SETUP_DONE:
4104 	{
4105 		dp_ipa_rx_cce_super_rule_setup_done_handler(soc, msg_word);
4106 		break;
4107 	}
4108 	default:
4109 		break;
4110 	};
4111 
4112 	/* Free the indication buffer */
4113 	if (free_buf)
4114 		qdf_nbuf_free(htt_t2h_msg);
4115 }
4116 
/**
 * dp_htt_h2t_full() - HTC EpSendFull callback for the HTT endpoint
 * @context: opaque callback context registered with HTC (unused here)
 * @pkt: the HTC packet that exceeded the send-queue depth limit
 *
 * Invoked by HTC when the HTT send queue passes MaxSendQueueDepth.
 *
 * Return: HTC_SEND_FULL_KEEP - keep the packet queued rather than drop it
 */
enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}
4122 
4123 QDF_STATUS
4124 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
4125 {
4126 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
4127 	HTC_PACKET htc_pkt;
4128 
4129 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
4130 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
4131 	htc_pkt.Status = QDF_STATUS_SUCCESS;
4132 	htc_pkt.pPktContext = (void *)nbuf;
4133 	dp_htt_t2h_msg_handler(context, &htc_pkt);
4134 
4135 	return rc;
4136 }
4137 
4138 /**
4139  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
4140  * @soc:	HTT SOC handle
4141  *
4142  * Return: QDF_STATUS
4143  */
4144 static QDF_STATUS
4145 htt_htc_soc_attach(struct htt_soc *soc)
4146 {
4147 	struct htc_service_connect_req connect;
4148 	struct htc_service_connect_resp response;
4149 	QDF_STATUS status;
4150 	struct dp_soc *dpsoc = soc->dp_soc;
4151 
4152 	qdf_mem_zero(&connect, sizeof(connect));
4153 	qdf_mem_zero(&response, sizeof(response));
4154 
4155 	connect.pMetaData = NULL;
4156 	connect.MetaDataLength = 0;
4157 	connect.EpCallbacks.pContext = soc;
4158 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
4159 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
4160 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
4161 
4162 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
4163 	connect.EpCallbacks.EpRecvRefill = NULL;
4164 
4165 	/* N/A, fill is done by HIF */
4166 	connect.EpCallbacks.RecvRefillWaterMark = 1;
4167 
4168 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
4169 	/*
4170 	 * Specify how deep to let a queue get before htc_send_pkt will
4171 	 * call the EpSendFull function due to excessive send queue depth.
4172 	 */
4173 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
4174 
4175 	/* disable flow control for HTT data message service */
4176 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
4177 
4178 	/* connect to control service */
4179 	connect.service_id = HTT_DATA_MSG_SVC;
4180 
4181 	status = htc_connect_service(soc->htc_soc, &connect, &response);
4182 
4183 	if (status != QDF_STATUS_SUCCESS)
4184 		return status;
4185 
4186 	soc->htc_endpoint = response.Endpoint;
4187 
4188 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
4189 
4190 	htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc);
4191 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
4192 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
4193 
4194 	return QDF_STATUS_SUCCESS; /* success */
4195 }
4196 
4197 void *
4198 htt_soc_initialize(struct htt_soc *htt_soc,
4199 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
4200 		   HTC_HANDLE htc_soc,
4201 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
4202 {
4203 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
4204 
4205 	soc->osdev = osdev;
4206 	soc->ctrl_psoc = ctrl_psoc;
4207 	soc->htc_soc = htc_soc;
4208 	soc->hal_soc = hal_soc_hdl;
4209 
4210 	if (htt_htc_soc_attach(soc))
4211 		goto fail2;
4212 
4213 	return soc;
4214 
4215 fail2:
4216 	return NULL;
4217 }
4218 
/**
 * htt_soc_htc_dealloc() - Release HTT<->HTC resources held by the handle
 * @htt_handle: HTT SOC handle
 *
 * Tears down interface logging first, then drains both HTC packet pools
 * (the misc packet list and the packet freelist).
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
4225 
4226 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
4227 {
4228 	int i;
4229 
4230 	soc->htt_htc_pkt_freelist = NULL;
4231 	/* pre-allocate some HTC_PACKET objects */
4232 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
4233 		struct dp_htt_htc_pkt_union *pkt;
4234 		pkt = qdf_mem_malloc(sizeof(*pkt));
4235 		if (!pkt)
4236 			return QDF_STATUS_E_NOMEM;
4237 
4238 		htt_htc_pkt_free(soc, &pkt->u.pkt);
4239 	}
4240 	return QDF_STATUS_SUCCESS;
4241 }
4242 
4243 void htt_soc_detach(struct htt_soc *htt_hdl)
4244 {
4245 	int i;
4246 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
4247 
4248 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4249 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_path);
4250 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_path);
4251 	}
4252 
4253 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
4254 	qdf_mem_free(htt_handle);
4255 
4256 }
4257 
4258 /**
4259  * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
4260  * @pdev: DP PDEV handle
4261  * @stats_type_upload_mask: stats type requested by user
4262  * @config_param_0: extra configuration parameters
4263  * @config_param_1: extra configuration parameters
4264  * @config_param_2: extra configuration parameters
4265  * @config_param_3: extra configuration parameters
4266  * @cookie_val: cookie value
4267  * @cookie_msb: msb of debug status cookie
4268  * @mac_id: mac number
4269  *
4270  * return: QDF STATUS
4271  */
4272 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
4273 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
4274 		uint32_t config_param_1, uint32_t config_param_2,
4275 		uint32_t config_param_3, int cookie_val, int cookie_msb,
4276 		uint8_t mac_id)
4277 {
4278 	struct htt_soc *soc = pdev->soc->htt_handle;
4279 	struct dp_htt_htc_pkt *pkt;
4280 	qdf_nbuf_t msg;
4281 	uint32_t *msg_word;
4282 	uint8_t pdev_mask = 0;
4283 	uint8_t *htt_logger_bufp;
4284 	int mac_for_pdev;
4285 	int target_pdev_id;
4286 	QDF_STATUS status;
4287 
4288 	msg = qdf_nbuf_alloc(
4289 			soc->osdev,
4290 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
4291 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4292 
4293 	if (!msg)
4294 		return QDF_STATUS_E_NOMEM;
4295 
4296 	/*TODO:Add support for SOC stats
4297 	 * Bit 0: SOC Stats
4298 	 * Bit 1: Pdev stats for pdev id 0
4299 	 * Bit 2: Pdev stats for pdev id 1
4300 	 * Bit 3: Pdev stats for pdev id 2
4301 	 */
4302 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4303 	target_pdev_id =
4304 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
4305 
4306 	pdev_mask = 1 << target_pdev_id;
4307 
4308 	/*
4309 	 * Set the length of the message.
4310 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4311 	 * separately during the below call to qdf_nbuf_push_head.
4312 	 * The contribution from the HTC header is added separately inside HTC.
4313 	 */
4314 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
4315 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4316 				"Failed to expand head for HTT_EXT_STATS");
4317 		qdf_nbuf_free(msg);
4318 		return QDF_STATUS_E_FAILURE;
4319 	}
4320 
4321 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
4322 
4323 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4324 	htt_logger_bufp = (uint8_t *)msg_word;
4325 	*msg_word = 0;
4326 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
4327 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
4328 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
4329 
4330 	/* word 1 */
4331 	msg_word++;
4332 	*msg_word = 0;
4333 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
4334 
4335 	/* word 2 */
4336 	msg_word++;
4337 	*msg_word = 0;
4338 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
4339 
4340 	/* word 3 */
4341 	msg_word++;
4342 	*msg_word = 0;
4343 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
4344 
4345 	/* word 4 */
4346 	msg_word++;
4347 	*msg_word = 0;
4348 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
4349 
4350 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
4351 
4352 	/* word 5 */
4353 	msg_word++;
4354 
4355 	/* word 6 */
4356 	msg_word++;
4357 	*msg_word = 0;
4358 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
4359 
4360 	/* word 7 */
4361 	msg_word++;
4362 	*msg_word = 0;
4363 	/* Currently Using last 2 bits for pdev_id
4364 	 * For future reference, reserving 3 bits in cookie_msb for pdev_id
4365 	 */
4366 	cookie_msb = (cookie_msb | pdev->pdev_id);
4367 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
4368 
4369 	pkt = htt_htc_pkt_alloc(soc);
4370 	if (!pkt) {
4371 		qdf_nbuf_free(msg);
4372 		return QDF_STATUS_E_NOMEM;
4373 	}
4374 
4375 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4376 
4377 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4378 			dp_htt_h2t_send_complete_free_netbuf,
4379 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4380 			soc->htc_endpoint,
4381 			/* tag for FW response msg not guaranteed */
4382 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4383 
4384 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4385 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
4386 				     htt_logger_bufp);
4387 
4388 	if (status != QDF_STATUS_SUCCESS) {
4389 		qdf_nbuf_free(msg);
4390 		htt_htc_pkt_free(soc, pkt);
4391 	}
4392 
4393 	return status;
4394 }
4395 
4396 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
4397 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK 0xFFFFFFFF
4398 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK 0xFFFFFFFF00000000
4399 #define HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT 32
4400 
/**
 * dp_h2t_hw_vdev_stats_config_send() - Send HW vdev txrx stats config to FW
 * @dpsoc: DP SOC handle
 * @pdev_id: host pdev id, or INVALID_PDEV_ID to target pdev 0
 * @enable: enable/disable the periodic HW vdev stats indication
 * @reset: request FW to reset the stats selected by @reset_bitmask
 * @reset_bitmask: 64-bit bitmask of vdev stats to reset, split across
 *                 two 32-bit message words
 *
 * Builds an HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG message and sends it
 * via HTC. On send failure the nbuf and HTC packet are freed.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	struct htt_soc *soc = dpsoc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;
	int duration;
	uint32_t bitmask;
	int target_pdev_id;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_vdevs_txrx_stats_cfg)),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate "
		"HTT_H2T_HW_VDEV_TXRX_STATS_CFG_MSG_SZ msg buffer", dpsoc);
		return QDF_STATUS_E_NOMEM;
	}

	/* map the host pdev id to the target's numbering */
	if (pdev_id != INVALID_PDEV_ID)
		target_pdev_id = DP_SW2HW_MACID(pdev_id);
	else
		target_pdev_id = 0;

	/* configured periodic indication interval from wlan cfg */
	duration =
	wlan_cfg_get_vdev_stats_hw_offload_timer(dpsoc->wlan_cfg_ctx);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_vdevs_txrx_stats_cfg))) {
		dp_htt_err("%pK: Failed to expand head for HTT_HW_VDEV_STATS"
			   , dpsoc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;

	/* word 0: msg type, target pdev, enable, interval, reset flag */
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG);
	HTT_RX_VDEVS_TXRX_STATS_PDEV_ID_SET(*msg_word, target_pdev_id);

	HTT_RX_VDEVS_TXRX_STATS_ENABLE_SET(*msg_word, enable);

	/* interval is duration scaled down by 8 for the HTT field —
	 * NOTE(review): units per HTT field definition, confirm in htt.h
	 */
	HTT_RX_VDEVS_TXRX_STATS_PERIODIC_INTERVAL_SET(*msg_word,
						      (duration >> 3));

	HTT_RX_VDEVS_TXRX_STATS_RESET_STATS_BITS_SET(*msg_word, reset);

	/* word 1: lower 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask = (reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_L32_MASK);
	*msg_word = bitmask;

	/* word 2: upper 32 bits of the reset bitmask */
	msg_word++;
	*msg_word = 0;
	bitmask =
		((reset_bitmask & HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_MASK) >>
		 HTT_VDEV_TXRX_STATS_RESET_BITMASK_U32_SHIFT);
	*msg_word = bitmask;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer",
			   dpsoc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_VDEVS_TXRX_STATS_CFG,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4506 #else
/* Stub when QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT is not compiled in:
 * nothing to configure, report success so callers proceed normally.
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask)
{
	return QDF_STATUS_SUCCESS;
}
4513 #endif
4514 
4515 /**
4516  * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration
4517  * HTT message to pass to FW
4518  * @pdev: DP PDEV handle
4519  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
4520  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
4521  * @mac_id: mac id
4522  *
4523  * tuple_mask[1:0]:
4524  *   00 - Do not report 3 tuple hash value
4525  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
4526  *   01 - Report 3 tuple hash value in flow_id_toeplitz
4527  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
4528  *
4529  * return: QDF STATUS
4530  */
4531 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
4532 				     uint32_t tuple_mask, uint8_t mac_id)
4533 {
4534 	struct htt_soc *soc = pdev->soc->htt_handle;
4535 	struct dp_htt_htc_pkt *pkt;
4536 	qdf_nbuf_t msg;
4537 	uint32_t *msg_word;
4538 	uint8_t *htt_logger_bufp;
4539 	int mac_for_pdev;
4540 	int target_pdev_id;
4541 
4542 	msg = qdf_nbuf_alloc(
4543 			soc->osdev,
4544 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
4545 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4546 
4547 	if (!msg)
4548 		return QDF_STATUS_E_NOMEM;
4549 
4550 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4551 	target_pdev_id =
4552 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
4553 
4554 	/*
4555 	 * Set the length of the message.
4556 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4557 	 * separately during the below call to qdf_nbuf_push_head.
4558 	 * The contribution from the HTC header is added separately inside HTC.
4559 	 */
4560 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
4561 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4562 			  "Failed to expand head for HTT_3TUPLE_CONFIG");
4563 		qdf_nbuf_free(msg);
4564 		return QDF_STATUS_E_FAILURE;
4565 	}
4566 
4567 	dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------",
4568 		    pdev->soc, tuple_mask, target_pdev_id);
4569 
4570 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
4571 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4572 	htt_logger_bufp = (uint8_t *)msg_word;
4573 
4574 	*msg_word = 0;
4575 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
4576 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
4577 
4578 	msg_word++;
4579 	*msg_word = 0;
4580 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4581 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4582 
4583 	pkt = htt_htc_pkt_alloc(soc);
4584 	if (!pkt) {
4585 		qdf_nbuf_free(msg);
4586 		return QDF_STATUS_E_NOMEM;
4587 	}
4588 
4589 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4590 
4591 	SET_HTC_PACKET_INFO_TX(
4592 			&pkt->htc_pkt,
4593 			dp_htt_h2t_send_complete_free_netbuf,
4594 			qdf_nbuf_data(msg),
4595 			qdf_nbuf_len(msg),
4596 			soc->htc_endpoint,
4597 			/* tag for no FW response msg */
4598 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4599 
4600 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4601 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
4602 			    htt_logger_bufp);
4603 
4604 	return QDF_STATUS_SUCCESS;
4605 }
4606 
/* This macro will be reverted once a proper HTT header definition for
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is added to htt.h.
 */
4610 #if defined(WDI_EVENT_ENABLE)
/**
 * dp_h2t_cfg_stats_msg_send() - Send PPDU stats config to FW
 * @pdev: DP PDEV handle
 * @stats_type_upload_mask: bitmask of PPDU stats TLV types requested
 * @mac_id: host mac id used to derive the target pdev mask
 *
 * Builds a single-word HTT_H2T_MSG_TYPE_PPDU_STATS_CFG message and
 * sends it via HTC. On send failure the nbuf and HTC packet are freed.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer"
			   , pdev->soc);
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
								mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS"
			   , pdev->soc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* word 0: msg type, target pdev mask and requested TLV bitmask */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
			stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer", pdev->soc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for no FW response msg */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	/* msg_word was captured before push_head, so it doubles as the
	 * logger buffer pointer here
	 */
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
				     (uint8_t *)msg_word);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
4692 
4693 qdf_export_symbol(dp_h2t_cfg_stats_msg_send);
4694 #endif
4695 
4696 void
4697 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
4698 			     uint32_t *tag_buf)
4699 {
4700 	struct dp_peer *peer = NULL;
4701 	switch (tag_type) {
4702 	case HTT_STATS_PEER_DETAILS_TAG:
4703 	{
4704 		htt_peer_details_tlv *dp_stats_buf =
4705 			(htt_peer_details_tlv *)tag_buf;
4706 
4707 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
4708 	}
4709 	break;
4710 	case HTT_STATS_PEER_STATS_CMN_TAG:
4711 	{
4712 		htt_peer_stats_cmn_tlv *dp_stats_buf =
4713 			(htt_peer_stats_cmn_tlv *)tag_buf;
4714 
4715 		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
4716 					     DP_MOD_ID_HTT);
4717 
4718 		if (peer && !peer->bss_peer) {
4719 			peer->stats.tx.inactive_time =
4720 				dp_stats_buf->inactive_time;
4721 			qdf_event_set(&pdev->fw_peer_stats_event);
4722 		}
4723 		if (peer)
4724 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4725 	}
4726 	break;
4727 	default:
4728 		qdf_err("Invalid tag_type: %u", tag_type);
4729 	}
4730 }
4731 
4732 QDF_STATUS
4733 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
4734 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
4735 {
4736 	struct htt_soc *soc = pdev->soc->htt_handle;
4737 	struct dp_htt_htc_pkt *pkt;
4738 	qdf_nbuf_t msg;
4739 	u_int32_t *msg_word;
4740 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4741 	uint8_t *htt_logger_bufp;
4742 	u_int32_t *key;
4743 	QDF_STATUS status;
4744 
4745 	msg = qdf_nbuf_alloc(
4746 		soc->osdev,
4747 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4748 		/* reserve room for the HTC header */
4749 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4750 
4751 	if (!msg)
4752 		return QDF_STATUS_E_NOMEM;
4753 
4754 	/*
4755 	 * Set the length of the message.
4756 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4757 	 * separately during the below call to qdf_nbuf_push_head.
4758 	 * The contribution from the HTC header is added separately inside HTC.
4759 	 */
4760 	if (!qdf_nbuf_put_tail(msg,
4761 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4762 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
4763 		return QDF_STATUS_E_FAILURE;
4764 	}
4765 
4766 	/* fill in the message contents */
4767 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4768 
4769 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4770 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4771 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4772 	htt_logger_bufp = (uint8_t *)msg_word;
4773 
4774 	*msg_word = 0;
4775 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4776 
4777 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4778 
4779 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4780 
4781 	msg_word++;
4782 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4783 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4784 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4785 					     fse_setup_info->ip_da_sa_prefix);
4786 
4787 	msg_word++;
4788 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4789 					  fse_setup_info->base_addr_lo);
4790 	msg_word++;
4791 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4792 					  fse_setup_info->base_addr_hi);
4793 
4794 	key = (u_int32_t *)fse_setup_info->hash_key;
4795 	fse_setup->toeplitz31_0 = *key++;
4796 	fse_setup->toeplitz63_32 = *key++;
4797 	fse_setup->toeplitz95_64 = *key++;
4798 	fse_setup->toeplitz127_96 = *key++;
4799 	fse_setup->toeplitz159_128 = *key++;
4800 	fse_setup->toeplitz191_160 = *key++;
4801 	fse_setup->toeplitz223_192 = *key++;
4802 	fse_setup->toeplitz255_224 = *key++;
4803 	fse_setup->toeplitz287_256 = *key++;
4804 	fse_setup->toeplitz314_288 = *key;
4805 
4806 	msg_word++;
4807 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4808 	msg_word++;
4809 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4810 	msg_word++;
4811 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4812 	msg_word++;
4813 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4814 	msg_word++;
4815 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4816 	msg_word++;
4817 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4818 	msg_word++;
4819 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4820 	msg_word++;
4821 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4822 	msg_word++;
4823 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4824 	msg_word++;
4825 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4826 					  fse_setup->toeplitz314_288);
4827 
4828 	pkt = htt_htc_pkt_alloc(soc);
4829 	if (!pkt) {
4830 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4831 		qdf_assert(0);
4832 		qdf_nbuf_free(msg);
4833 		return QDF_STATUS_E_RESOURCES; /* failure */
4834 	}
4835 
4836 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4837 
4838 	SET_HTC_PACKET_INFO_TX(
4839 		&pkt->htc_pkt,
4840 		dp_htt_h2t_send_complete_free_netbuf,
4841 		qdf_nbuf_data(msg),
4842 		qdf_nbuf_len(msg),
4843 		soc->htc_endpoint,
4844 		/* tag for no FW response msg */
4845 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4846 
4847 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4848 
4849 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4850 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4851 				     htt_logger_bufp);
4852 
4853 	if (status == QDF_STATUS_SUCCESS) {
4854 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4855 			fse_setup_info->pdev_id);
4856 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4857 				   (void *)fse_setup_info->hash_key,
4858 				   fse_setup_info->hash_key_len);
4859 	} else {
4860 		qdf_nbuf_free(msg);
4861 		htt_htc_pkt_free(soc, pkt);
4862 	}
4863 
4864 	return status;
4865 }
4866 
4867 QDF_STATUS
4868 dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
4869 			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
4870 {
4871 	struct htt_soc *soc = pdev->soc->htt_handle;
4872 	struct dp_htt_htc_pkt *pkt;
4873 	qdf_nbuf_t msg;
4874 	u_int32_t *msg_word;
4875 	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
4876 	uint8_t *htt_logger_bufp;
4877 	QDF_STATUS status;
4878 
4879 	msg = qdf_nbuf_alloc(
4880 		soc->osdev,
4881 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
4882 		/* reserve room for the HTC header */
4883 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4884 	if (!msg)
4885 		return QDF_STATUS_E_NOMEM;
4886 
4887 	/*
4888 	 * Set the length of the message.
4889 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4890 	 * separately during the below call to qdf_nbuf_push_head.
4891 	 * The contribution from the HTC header is added separately inside HTC.
4892 	 */
4893 	if (!qdf_nbuf_put_tail(msg,
4894 			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
4895 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4896 		qdf_nbuf_free(msg);
4897 		return QDF_STATUS_E_FAILURE;
4898 	}
4899 
4900 	/* fill in the message contents */
4901 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4902 
4903 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
4904 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4905 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4906 	htt_logger_bufp = (uint8_t *)msg_word;
4907 
4908 	*msg_word = 0;
4909 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
4910 
4911 	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
4912 
4913 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
4914 	msg_word++;
4915 	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
4916 	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
4917 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4918 					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
4919 		msg_word++;
4920 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4921 		*msg_word,
4922 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
4923 		msg_word++;
4924 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4925 		*msg_word,
4926 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
4927 		msg_word++;
4928 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4929 		*msg_word,
4930 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
4931 		msg_word++;
4932 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4933 		*msg_word,
4934 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
4935 		msg_word++;
4936 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4937 		*msg_word,
4938 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
4939 		msg_word++;
4940 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4941 		*msg_word,
4942 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
4943 		msg_word++;
4944 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4945 		*msg_word,
4946 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
4947 		msg_word++;
4948 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4949 		*msg_word,
4950 		qdf_htonl(
4951 		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
4952 		msg_word++;
4953 		HTT_RX_FSE_SOURCEPORT_SET(
4954 			*msg_word,
4955 			fse_op_info->rx_flow->flow_tuple_info.src_port);
4956 		HTT_RX_FSE_DESTPORT_SET(
4957 			*msg_word,
4958 			fse_op_info->rx_flow->flow_tuple_info.dest_port);
4959 		msg_word++;
4960 		HTT_RX_FSE_L4_PROTO_SET(
4961 			*msg_word,
4962 			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
4963 	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
4964 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4965 					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
4966 	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
4967 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
4968 	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
4969 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
4970 	}
4971 
4972 	pkt = htt_htc_pkt_alloc(soc);
4973 	if (!pkt) {
4974 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4975 		qdf_assert(0);
4976 		qdf_nbuf_free(msg);
4977 		return QDF_STATUS_E_RESOURCES; /* failure */
4978 	}
4979 
4980 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4981 
4982 	SET_HTC_PACKET_INFO_TX(
4983 		&pkt->htc_pkt,
4984 		dp_htt_h2t_send_complete_free_netbuf,
4985 		qdf_nbuf_data(msg),
4986 		qdf_nbuf_len(msg),
4987 		soc->htc_endpoint,
4988 		/* tag for no FW response msg */
4989 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
4990 
4991 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4992 
4993 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
4994 				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
4995 				     htt_logger_bufp);
4996 
4997 	if (status == QDF_STATUS_SUCCESS) {
4998 		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
4999 			fse_op_info->pdev_id);
5000 	} else {
5001 		qdf_nbuf_free(msg);
5002 		htt_htc_pkt_free(soc, pkt);
5003 	}
5004 
5005 	return status;
5006 }
5007 
5008 /**
5009  * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
5010  * @pdev: DP pdev handle
5011  * @fisa_config: Fisa config struct
5012  *
5013  * Return: Success when HTT message is sent, error on failure
5014  */
5015 QDF_STATUS
5016 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
5017 		      struct dp_htt_rx_fisa_cfg *fisa_config)
5018 {
5019 	struct htt_soc *soc = pdev->soc->htt_handle;
5020 	struct dp_htt_htc_pkt *pkt;
5021 	qdf_nbuf_t msg;
5022 	u_int32_t *msg_word;
5023 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
5024 	uint8_t *htt_logger_bufp;
5025 	uint32_t len;
5026 	QDF_STATUS status;
5027 
5028 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
5029 
5030 	msg = qdf_nbuf_alloc(soc->osdev,
5031 			     len,
5032 			     /* reserve room for the HTC header */
5033 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
5034 			     4,
5035 			     TRUE);
5036 	if (!msg)
5037 		return QDF_STATUS_E_NOMEM;
5038 
5039 	/*
5040 	 * Set the length of the message.
5041 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5042 	 * separately during the below call to qdf_nbuf_push_head.
5043 	 * The contribution from the HTC header is added separately inside HTC.
5044 	 */
5045 	if (!qdf_nbuf_put_tail(msg,
5046 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
5047 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
5048 		qdf_nbuf_free(msg);
5049 		return QDF_STATUS_E_FAILURE;
5050 	}
5051 
5052 	/* fill in the message contents */
5053 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5054 
5055 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
5056 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5057 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5058 	htt_logger_bufp = (uint8_t *)msg_word;
5059 
5060 	*msg_word = 0;
5061 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
5062 
5063 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
5064 
5065 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);
5066 
5067 	msg_word++;
5068 	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
5069 	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);
5070 
5071 	msg_word++;
5072 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
5073 
5074 	pkt = htt_htc_pkt_alloc(soc);
5075 	if (!pkt) {
5076 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
5077 		qdf_assert(0);
5078 		qdf_nbuf_free(msg);
5079 		return QDF_STATUS_E_RESOURCES; /* failure */
5080 	}
5081 
5082 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5083 
5084 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5085 			       dp_htt_h2t_send_complete_free_netbuf,
5086 			       qdf_nbuf_data(msg),
5087 			       qdf_nbuf_len(msg),
5088 			       soc->htc_endpoint,
5089 			       /* tag for no FW response msg */
5090 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
5091 
5092 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5093 
5094 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
5095 				     htt_logger_bufp);
5096 
5097 	if (status == QDF_STATUS_SUCCESS) {
5098 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
5099 			fisa_config->pdev_id);
5100 	} else {
5101 		qdf_nbuf_free(msg);
5102 		htt_htc_pkt_free(soc, pkt);
5103 	}
5104 
5105 	return status;
5106 }
5107 
5108 #ifdef WLAN_SUPPORT_PPEDS
5109 /**
5110  * dp_htt_rxdma_rxole_ppe_cfg_set() - Send RxOLE and RxDMA PPE config
5111  * @soc: Data path SoC handle
5112  * @cfg: RxDMA and RxOLE PPE config
5113  *
5114  * Return: Success when HTT message is sent, error on failure
5115  */
5116 QDF_STATUS
5117 dp_htt_rxdma_rxole_ppe_cfg_set(struct dp_soc *soc,
5118 			       struct dp_htt_rxdma_rxole_ppe_config *cfg)
5119 {
5120 	struct htt_soc *htt_handle = soc->htt_handle;
5121 	uint32_t len;
5122 	qdf_nbuf_t msg;
5123 	u_int32_t *msg_word;
5124 	QDF_STATUS status;
5125 	uint8_t *htt_logger_bufp;
5126 	struct dp_htt_htc_pkt *pkt;
5127 
5128 	len = HTT_MSG_BUF_SIZE(
5129 	      sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));
5130 
5131 	msg = qdf_nbuf_alloc(soc->osdev,
5132 			     len,
5133 			     /* reserve room for the HTC header */
5134 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
5135 			     4,
5136 			     TRUE);
5137 	if (!msg)
5138 		return QDF_STATUS_E_NOMEM;
5139 
5140 	/*
5141 	 * Set the length of the message.
5142 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5143 	 * separately during the below call to qdf_nbuf_push_head.
5144 	 * The contribution from the HTC header is added separately inside HTC.
5145 	 */
5146 	if (!qdf_nbuf_put_tail(
5147 		msg, sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t))) {
5148 		qdf_err("Failed to expand head for HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG msg");
5149 		qdf_nbuf_free(msg);
5150 		return QDF_STATUS_E_FAILURE;
5151 	}
5152 
5153 	/* fill in the message contents */
5154 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5155 
5156 	memset(msg_word, 0,
5157 	       sizeof(struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg_t));
5158 
5159 	/* Rewind beyond alignment pad to get to the HTC header reserved area */
5160 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5161 	htt_logger_bufp = (uint8_t *)msg_word;
5162 
5163 	*msg_word = 0;
5164 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG);
5165 	HTT_PPE_CFG_OVERRIDE_SET(*msg_word, cfg->override);
5166 	HTT_PPE_CFG_REO_DEST_IND_SET(
5167 			*msg_word, cfg->reo_destination_indication);
5168 	HTT_PPE_CFG_MULTI_BUF_MSDU_OVERRIDE_EN_SET(
5169 			*msg_word, cfg->multi_buffer_msdu_override_en);
5170 	HTT_PPE_CFG_INTRA_BSS_OVERRIDE_EN_SET(
5171 			*msg_word, cfg->intra_bss_override);
5172 	HTT_PPE_CFG_DECAP_RAW_OVERRIDE_EN_SET(
5173 			*msg_word, cfg->decap_raw_override);
5174 	HTT_PPE_CFG_DECAP_NWIFI_OVERRIDE_EN_SET(
5175 			*msg_word, cfg->decap_nwifi_override);
5176 	HTT_PPE_CFG_IP_FRAG_OVERRIDE_EN_SET(
5177 			*msg_word, cfg->ip_frag_override);
5178 
5179 	pkt = htt_htc_pkt_alloc(htt_handle);
5180 	if (!pkt) {
5181 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
5182 		qdf_assert(0);
5183 		qdf_nbuf_free(msg);
5184 		return QDF_STATUS_E_RESOURCES; /* failure */
5185 	}
5186 
5187 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5188 
5189 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5190 			       dp_htt_h2t_send_complete_free_netbuf,
5191 			       qdf_nbuf_data(msg),
5192 			       qdf_nbuf_len(msg),
5193 			       htt_handle->htc_endpoint,
5194 			       /* tag for no FW response msg */
5195 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
5196 
5197 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5198 
5199 	status = DP_HTT_SEND_HTC_PKT(htt_handle, pkt,
5200 				     HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG,
5201 				     htt_logger_bufp);
5202 
5203 	if (status != QDF_STATUS_SUCCESS) {
5204 		qdf_nbuf_free(msg);
5205 		htt_htc_pkt_free(htt_handle, pkt);
5206 		return status;
5207 	}
5208 
5209 	dp_info("HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG sent");
5210 	return status;
5211 }
5212 #endif /* WLAN_SUPPORT_PPEDS */
5213 
5214 /**
5215  * dp_bk_pressure_stats_handler(): worker function to print back pressure
5216  *				   stats
5217  *
5218  * @context : argument to work function
5219  */
5220 static void dp_bk_pressure_stats_handler(void *context)
5221 {
5222 	struct dp_pdev *pdev = (struct dp_pdev *)context;
5223 	struct dp_soc_srngs_state *soc_srngs_state = NULL;
5224 	const char *ring_name;
5225 	int i;
5226 	struct dp_srng_ring_state *ring_state;
5227 	bool empty_flag;
5228 
5229 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
5230 
5231 	/* Extract only first entry for printing in one work event */
5232 	if (pdev->bkp_stats.queue_depth &&
5233 	    !TAILQ_EMPTY(&pdev->bkp_stats.list)) {
5234 		soc_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
5235 		TAILQ_REMOVE(&pdev->bkp_stats.list, soc_srngs_state,
5236 			     list_elem);
5237 		pdev->bkp_stats.queue_depth--;
5238 	}
5239 
5240 	empty_flag = TAILQ_EMPTY(&pdev->bkp_stats.list);
5241 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
5242 
5243 	if (soc_srngs_state) {
5244 		DP_PRINT_STATS("### BKP stats for seq_num %u START ###",
5245 			       soc_srngs_state->seq_num);
5246 		for (i = 0; i < soc_srngs_state->max_ring_id; i++) {
5247 			ring_state = &soc_srngs_state->ring_state[i];
5248 			ring_name = dp_srng_get_str_from_hal_ring_type
5249 						(ring_state->ring_type);
5250 			DP_PRINT_STATS("%s: SW:Head pointer = %d Tail Pointer = %d\n",
5251 				       ring_name,
5252 				       ring_state->sw_head,
5253 				       ring_state->sw_tail);
5254 
5255 			DP_PRINT_STATS("%s: HW:Head pointer = %d Tail Pointer = %d\n",
5256 				       ring_name,
5257 				       ring_state->hw_head,
5258 				       ring_state->hw_tail);
5259 		}
5260 
5261 		DP_PRINT_STATS("### BKP stats for seq_num %u COMPLETE ###",
5262 			       soc_srngs_state->seq_num);
5263 		qdf_mem_free(soc_srngs_state);
5264 	}
5265 	dp_print_napi_stats(pdev->soc);
5266 
5267 	/* Schedule work again if queue is not empty */
5268 	if (!empty_flag)
5269 		qdf_queue_work(0, pdev->bkp_stats.work_queue,
5270 			       &pdev->bkp_stats.work);
5271 }
5272 
5273 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev)
5274 {
5275 	struct dp_soc_srngs_state *ring_state, *ring_state_next;
5276 
5277 	if (!pdev->bkp_stats.work_queue)
5278 		return;
5279 
5280 	qdf_flush_workqueue(0, pdev->bkp_stats.work_queue);
5281 	qdf_destroy_workqueue(0, pdev->bkp_stats.work_queue);
5282 	qdf_flush_work(&pdev->bkp_stats.work);
5283 	qdf_disable_work(&pdev->bkp_stats.work);
5284 	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
5285 	TAILQ_FOREACH_SAFE(ring_state, &pdev->bkp_stats.list,
5286 			   list_elem, ring_state_next) {
5287 		TAILQ_REMOVE(&pdev->bkp_stats.list, ring_state,
5288 			     list_elem);
5289 		qdf_mem_free(ring_state);
5290 	}
5291 	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
5292 	qdf_spinlock_destroy(&pdev->bkp_stats.list_lock);
5293 }
5294 
5295 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev)
5296 {
5297 	TAILQ_INIT(&pdev->bkp_stats.list);
5298 	pdev->bkp_stats.seq_num = 0;
5299 	pdev->bkp_stats.queue_depth = 0;
5300 
5301 	qdf_create_work(0, &pdev->bkp_stats.work,
5302 			dp_bk_pressure_stats_handler, pdev);
5303 
5304 	pdev->bkp_stats.work_queue =
5305 		qdf_alloc_unbound_workqueue("dp_bkp_work_queue");
5306 	if (!pdev->bkp_stats.work_queue)
5307 		goto fail;
5308 
5309 	qdf_spinlock_create(&pdev->bkp_stats.list_lock);
5310 	return QDF_STATUS_SUCCESS;
5311 
5312 fail:
5313 	dp_htt_alert("BKP stats attach failed");
5314 	qdf_flush_work(&pdev->bkp_stats.work);
5315 	qdf_disable_work(&pdev->bkp_stats.work);
5316 	return QDF_STATUS_E_FAILURE;
5317 }
5318 
5319 #ifdef DP_UMAC_HW_RESET_SUPPORT
5320 QDF_STATUS dp_htt_umac_reset_send_setup_cmd(
5321 		struct dp_soc *soc,
5322 		const struct dp_htt_umac_reset_setup_cmd_params *setup_params)
5323 {
5324 	struct htt_soc *htt_handle = soc->htt_handle;
5325 	uint32_t len;
5326 	qdf_nbuf_t msg;
5327 	u_int32_t *msg_word;
5328 	QDF_STATUS status;
5329 	uint8_t *htt_logger_bufp;
5330 	struct dp_htt_htc_pkt *pkt;
5331 
5332 	len = HTT_MSG_BUF_SIZE(
5333 		HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES);
5334 
5335 	msg = qdf_nbuf_alloc(soc->osdev,
5336 			     len,
5337 			     /* reserve room for the HTC header */
5338 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
5339 			     4,
5340 			     TRUE);
5341 	if (!msg)
5342 		return QDF_STATUS_E_NOMEM;
5343 
5344 	/*
5345 	 * Set the length of the message.
5346 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5347 	 * separately during the below call to qdf_nbuf_push_head.
5348 	 * The contribution from the HTC header is added separately inside HTC.
5349 	 */
5350 	if (!qdf_nbuf_put_tail(
5351 		msg, HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES)) {
5352 		dp_htt_err("Failed to expand head");
5353 		qdf_nbuf_free(msg);
5354 		return QDF_STATUS_E_FAILURE;
5355 	}
5356 
5357 	/* fill in the message contents */
5358 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
5359 
5360 	/* Rewind beyond alignment pad to get to the HTC header reserved area */
5361 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5362 	htt_logger_bufp = (uint8_t *)msg_word;
5363 
5364 	qdf_mem_zero(msg_word,
5365 		     HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_BYTES);
5366 
5367 	HTT_H2T_MSG_TYPE_SET(
5368 		*msg_word,
5369 		HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP);
5370 	HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_T2H_MSG_METHOD_SET(
5371 		*msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling);
5372 	HTT_H2T_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP_H2T_MSG_METHOD_SET(
5373 		*msg_word, htt_umac_hang_recovery_msg_t2h_msi_and_h2t_polling);
5374 
5375 	msg_word++;
5376 	*msg_word = setup_params->msi_data;
5377 
5378 	msg_word++;
5379 	*msg_word = sizeof(htt_umac_hang_recovery_msg_shmem_t);
5380 
5381 	msg_word++;
5382 	*msg_word = setup_params->shmem_addr_low;
5383 
5384 	msg_word++;
5385 	*msg_word = setup_params->shmem_addr_high;
5386 
5387 	pkt = htt_htc_pkt_alloc(htt_handle);
5388 	if (!pkt) {
5389 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
5390 		qdf_assert(0);
5391 		qdf_nbuf_free(msg);
5392 		return QDF_STATUS_E_NOMEM;
5393 	}
5394 
5395 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5396 
5397 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5398 			       dp_htt_h2t_send_complete_free_netbuf,
5399 			       qdf_nbuf_data(msg),
5400 			       qdf_nbuf_len(msg),
5401 			       htt_handle->htc_endpoint,
5402 			       /* tag for no FW response msg */
5403 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
5404 
5405 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5406 
5407 	status = DP_HTT_SEND_HTC_PKT(
5408 			htt_handle, pkt,
5409 			HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP,
5410 			htt_logger_bufp);
5411 
5412 	if (QDF_IS_STATUS_ERROR(status)) {
5413 		qdf_nbuf_free(msg);
5414 		htt_htc_pkt_free(htt_handle, pkt);
5415 		return status;
5416 	}
5417 
5418 	dp_info("HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP sent");
5419 	return status;
5420 }
5421 
5422 QDF_STATUS dp_htt_umac_reset_send_start_pre_reset_cmd(
5423 		struct dp_soc *soc, bool is_initiator, bool is_umac_hang)
5424 {
5425 	struct htt_soc *htt_handle = soc->htt_handle;
5426 	uint32_t len;
5427 	qdf_nbuf_t msg;
5428 	u_int32_t *msg_word;
5429 	QDF_STATUS status;
5430 	uint8_t *htt_logger_bufp;
5431 	struct dp_htt_htc_pkt *pkt;
5432 
5433 	len = HTT_MSG_BUF_SIZE(
5434 		HTT_H2T_UMAC_HANG_RECOVERY_START_PRE_RESET_BYTES);
5435 
5436 	msg = qdf_nbuf_alloc(soc->osdev,
5437 			     len,
5438 			     /* reserve room for the HTC header */
5439 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
5440 			     4,
5441 			     TRUE);
5442 	if (!msg)
5443 		return QDF_STATUS_E_NOMEM;
5444 
5445 	/*
5446 	 * Set the length of the message.
5447 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5448 	 * separately during the below call to qdf_nbuf_push_head.
5449 	 * The contribution from the HTC header is added separately inside HTC.
5450 	 */
5451 	if (!qdf_nbuf_put_tail(
5452 		msg, HTT_H2T_UMAC_HANG_RECOVERY_START_PRE_RESET_BYTES)) {
5453 		dp_htt_err("Failed to expand head");
5454 		qdf_nbuf_free(msg);
5455 		return QDF_STATUS_E_FAILURE;
5456 	}
5457 
5458 	/* fill in the message contents */
5459 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
5460 
5461 	/* Rewind beyond alignment pad to get to the HTC header reserved area */
5462 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5463 	htt_logger_bufp = (uint8_t *)msg_word;
5464 
5465 	qdf_mem_zero(msg_word,
5466 		     HTT_H2T_UMAC_HANG_RECOVERY_START_PRE_RESET_BYTES);
5467 
5468 	HTT_H2T_MSG_TYPE_SET(
5469 		*msg_word,
5470 		HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_SOC_START_PRE_RESET);
5471 
5472 	HTT_H2T_UMAC_HANG_RECOVERY_START_PRE_RESET_IS_INITIATOR_SET(
5473 		*msg_word, is_initiator);
5474 
5475 	HTT_H2T_UMAC_HANG_RECOVERY_START_PRE_RESET_IS_UMAC_HANG_SET(
5476 		*msg_word, is_umac_hang);
5477 
5478 	pkt = htt_htc_pkt_alloc(htt_handle);
5479 	if (!pkt) {
5480 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
5481 		qdf_assert(0);
5482 		qdf_nbuf_free(msg);
5483 		return QDF_STATUS_E_NOMEM;
5484 	}
5485 
5486 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5487 
5488 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5489 			       dp_htt_h2t_send_complete_free_netbuf,
5490 			       qdf_nbuf_data(msg),
5491 			       qdf_nbuf_len(msg),
5492 			       htt_handle->htc_endpoint,
5493 			       /* tag for no FW response msg */
5494 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
5495 
5496 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5497 
5498 	status = DP_HTT_SEND_HTC_PKT(
5499 			htt_handle, pkt,
5500 			HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_SOC_START_PRE_RESET,
5501 			htt_logger_bufp);
5502 
5503 	if (QDF_IS_STATUS_ERROR(status)) {
5504 		qdf_nbuf_free(msg);
5505 		htt_htc_pkt_free(htt_handle, pkt);
5506 		return status;
5507 	}
5508 
5509 	dp_info("HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_SOC_START_PRE_RESET sent");
5510 	return status;
5511 }
5512 #endif
5513