/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <htt.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include "dp_peer.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_rx_mon.h"
#include "htt_stats.h"
#include "htt_ppdu_stats.h"
#include "dp_htt.h"
#include "dp_rx.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include "cdp_txrx_cmn_struct.h"

#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE

#define HTT_HTC_PKT_POOL_INIT_SIZE 64

#define HTT_MSG_BUF_SIZE(msg_bytes) \
	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
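
/*
 * Illustration (not from the original source): for the HTT version request
 * sent below, assuming HTT_VER_REQ_BYTES is 4, HTT_MSG_BUF_SIZE(4) reserves
 * 4 + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING bytes, i.e. the HTT
 * payload plus the transport header and alignment pad that HTC prepends.
 */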

#define HTT_PID_BIT_MASK 0x3

#define DP_EXT_MSG_LENGTH 2048

#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16

/*
 * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
 * bitmap for sniffer mode
 * @bitmap: received bitmap
 *
 * Return: expected bitmap value; returns zero if the bitmap matches
 * neither the 64-bit nor the 256-bit Tx window tlv bitmap
 */
int
dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
{
	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;

	return 0;
}

#ifdef FEATURE_PERPKT_INFO
/*
 * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 *
 * For a Tx data frame, the delayed_ba flag may be set in
 * htt_ppdu_stats_user_common_tlv, which means the Block Ack (BA) arrives
 * only after we send a Block Ack Request (BAR). The successful MSDU count
 * is known only after the Block Ack, and populating peer stats requires
 * the successful MSDUs (data frames). So we hold the Tx data stats in
 * delayed_ba_ppdu_stats until the stats update.
 */
static inline void
dp_peer_copy_delay_stats(struct dp_peer *peer,
			 struct cdp_tx_completion_ppdu_user *ppdu)
{
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;

	if (peer->last_delayed_ba) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "BA not yet recv for prev delayed ppdu[%d]\n",
			  peer->last_delayed_ba_ppduid);
		vdev = peer->vdev;
		if (vdev) {
			pdev = vdev->pdev;
			pdev->stats.cdp_delayed_ba_not_recev++;
		}
	}

	peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
	peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
	peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
	peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
	peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
	peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
	peer->delayed_ba_ppdu_stats.preamble = ppdu->preamble;
	peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
	peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
	peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = ppdu->mpdu_tried_ucast;
	peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = ppdu->mpdu_tried_mcast;
	peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
	peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;

	peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
	peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
	peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;

	peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
	peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;

	peer->last_delayed_ba = true;
}

/*
 * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 *
 * The PPDU stats TLV for a Tx BAR frame includes the Block Ack info, but
 * the PPDU info from the BAR itself is not needed to populate peer stats.
 * We only need the successful MPDU and MSDU counts to update the previously
 * transmitted Tx data frame, so overwrite the BAR's ppdu stats with the
 * previously stored ppdu stats.
 */
static void
dp_peer_copy_stats_to_bar(struct dp_peer *peer,
			  struct cdp_tx_completion_ppdu_user *ppdu)
{
	ppdu->ltf_size = peer->delayed_ba_ppdu_stats.ltf_size;
	ppdu->stbc = peer->delayed_ba_ppdu_stats.stbc;
	ppdu->he_re = peer->delayed_ba_ppdu_stats.he_re;
	ppdu->txbf = peer->delayed_ba_ppdu_stats.txbf;
	ppdu->bw = peer->delayed_ba_ppdu_stats.bw;
	ppdu->nss = peer->delayed_ba_ppdu_stats.nss;
	ppdu->preamble = peer->delayed_ba_ppdu_stats.preamble;
	ppdu->gi = peer->delayed_ba_ppdu_stats.gi;
	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
	ppdu->ldpc = peer->delayed_ba_ppdu_stats.ldpc;
	ppdu->mpdu_tried_ucast = peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
	ppdu->mpdu_tried_mcast = peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
	ppdu->frame_ctrl = peer->delayed_ba_ppdu_stats.frame_ctrl;
	ppdu->qos_ctrl = peer->delayed_ba_ppdu_stats.qos_ctrl;

	ppdu->ru_start = peer->delayed_ba_ppdu_stats.ru_start;
	ppdu->ru_tones = peer->delayed_ba_ppdu_stats.ru_tones;
	ppdu->is_mcast = peer->delayed_ba_ppdu_stats.is_mcast;

	ppdu->user_pos = peer->delayed_ba_ppdu_stats.user_pos;
	ppdu->mu_group_id = peer->delayed_ba_ppdu_stats.mu_group_id;

	peer->last_delayed_ba = false;
}

/*
 * dp_tx_rate_stats_update() - Update rate per-peer statistics
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 */
static void
dp_tx_rate_stats_update(struct dp_peer *peer,
			struct cdp_tx_completion_ppdu_user *ppdu)
{
	uint32_t ratekbps = 0;
	uint64_t ppdu_tx_rate = 0;
	uint32_t rix;
	uint16_t ratecode = 0;

	if (!peer || !ppdu)
		return;

	ratekbps = dp_getrateindex(ppdu->gi,
				   ppdu->mcs,
				   ppdu->nss,
				   ppdu->preamble,
				   ppdu->bw,
				   &rix,
				   &ratecode);

	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);

	if (!ratekbps)
		return;

	/* Calculate goodput in the non-training period.
	 * In the training period, don't do anything, as the
	 * pending pkt is sent as goodput.
	 */
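	/*
	 * Worked example (assuming CDP_NUM_KB_IN_MB == 1000 and
	 * CDP_PERCENT_MACRO == 100, neither of which is defined in this
	 * file): ratekbps = 144000 with a PER of 10 gives
	 * sa_goodput = (144000 / 1000) * (100 - 10) = 12960,
	 * i.e. 129.6 Mbps scaled by 100.
	 */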
	if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
		ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
				(CDP_PERCENT_MACRO - ppdu->current_rate_per));
	}
	ppdu->rix = rix;
	ppdu->tx_ratekbps = ratekbps;
	ppdu->tx_ratecode = ratecode;
	peer->stats.tx.avg_tx_rate =
		dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);

	if (peer->vdev) {
		/*
		 * In STA mode:
		 *	We get ucast stats as BSS peer stats.
		 *
		 * In AP mode:
		 *	We get mcast stats as BSS peer stats.
		 *	We get ucast stats as assoc peer stats.
		 */
		if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
		} else {
			peer->vdev->stats.tx.last_tx_rate = ratekbps;
			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
		}
	}
}

/*
 * dp_tx_stats_update() - Update per-peer statistics
 * @pdev: Datapath pdev handle
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 * @ack_rssi: RSSI of last ack received
 *
 * Return: None
 */
static void
dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
		   struct cdp_tx_completion_ppdu_user *ppdu,
		   uint32_t ack_rssi)
{
	uint8_t preamble, mcs;
	uint16_t num_msdu;
	uint16_t num_mpdu;
	uint16_t mpdu_tried;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;
	num_mpdu = ppdu->mpdu_success;
	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;

	/* If the peer statistics are already processed as part of
	 * the per-MSDU completion handler, do not process them again in
	 * the per-PPDU indications.
	 */
	if (pdev->soc->process_tx_status)
		return;

	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
		return;

	if (ppdu->is_ppdu_cookie_valid)
		DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1);

	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
		if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "mu_group_id out of bound!!\n");
		else
			DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
				     (ppdu->user_pos + 1));
	}

	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
		DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
		DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
		switch (ppdu->ru_tones) {
		case RU_26:
			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_52:
			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_106:
			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_242:
			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_484:
			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_996:
			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		}
	}

	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
		     num_msdu);
	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
		     num_mpdu);
	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
		     mpdu_tried);

	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			num_msdu, (ppdu->success_bytes +
				ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	if (ppdu->tid < CDP_DATA_TID_MAX)
		DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
			     num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

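	/*
	 * The pairs of DP_STATS_INCC calls below bucket MSDUs per preamble
	 * and MCS: an in-range MCS increments its own mcs_count bucket,
	 * while an out-of-range MCS for that preamble is folded into the
	 * last bucket (MAX_MCS - 1), so a malformed value cannot index
	 * past the array.
	 */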
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
	DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));

	dp_peer_stats_notify(pdev, peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ppdu->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
#endif

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#else
static inline void
dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
					   void *data,
					   uint32_t ppdu_id,
					   uint32_t size)
{
}
#endif

/*
 * htt_htc_pkt_alloc() - Allocate HTC packet buffer
 * @htt_soc:	HTT SOC handle
 *
 * Return: Pointer to htc packet buffer
 */
static struct dp_htt_htc_pkt *
htt_htc_pkt_alloc(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt = NULL;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
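	/*
	 * The freelist is a singly linked list threaded through the packet
	 * objects themselves: dp_htt_htc_pkt_union overlays a 'next' pointer
	 * on the packet storage, so popping the head needs no extra
	 * allocation or bookkeeping.
	 */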
	if (soc->htt_htc_pkt_freelist) {
		pkt = soc->htt_htc_pkt_freelist;
		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);

	if (!pkt)
		pkt = qdf_mem_malloc(sizeof(*pkt));
	return &pkt->u.pkt; /* not actually a dereference */
}

/*
 * htt_htc_pkt_free() - Free HTC packet buffer
 * @htt_soc:	HTT SOC handle
 */
static void
htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
{
	struct dp_htt_htc_pkt_union *u_pkt =
		(struct dp_htt_htc_pkt_union *)pkt;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	u_pkt->u.next = soc->htt_htc_pkt_freelist;
	soc->htt_htc_pkt_freelist = u_pkt;
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}

/*
 * htt_htc_pkt_pool_free() - Free HTC packet pool
 * @htt_soc:	HTT SOC handle
 */
static void
htt_htc_pkt_pool_free(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt, *next;
	pkt = soc->htt_htc_pkt_freelist;
	while (pkt) {
		next = pkt->u.next;
		qdf_mem_free(pkt);
		pkt = next;
	}
	soc->htt_htc_pkt_freelist = NULL;
}

/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @htt_soc: HTT SOC handle
 * @level: max no. of pkts in list
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the overgrown list */
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}

/*
 * htt_htc_misc_pkt_list_add() - Add pkt to misc list
 * @htt_soc:	HTT SOC handle
 * @dp_htt_htc_pkt: pkt to be added to list
 */
static void
htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
{
	struct dp_htt_htc_pkt_union *u_pkt =
				(struct dp_htt_htc_pkt_union *)pkt;
	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
							pkt->htc_pkt.Endpoint)
				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	if (soc->htt_htc_pkt_misclist) {
		u_pkt->u.next = soc->htt_htc_pkt_misclist;
		soc->htt_htc_pkt_misclist = u_pkt;
	} else {
		soc->htt_htc_pkt_misclist = u_pkt;
		u_pkt->u.next = NULL; /* terminate the singly linked list */
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);

	/* only ce pipe size + tx_queue_depth could possibly be in use
	 * free older packets in the misclist
	 */
	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
}

/**
 * DP_HTT_SEND_HTC_PKT() - Send htt packet from host
 * @soc: HTT SOC handle
 * @pkt: pkt to be sent
 * @cmd: command to be recorded in dp htt logger
 * @buf: pointer to the buffer to be recorded for the above cmd
 *
 * Return: None
 */
static inline void DP_HTT_SEND_HTC_PKT(struct htt_soc *soc,
				       struct dp_htt_htc_pkt *pkt, uint8_t cmd,
				       uint8_t *buf)
{
	htt_command_record(soc->htt_logger_handle, cmd, buf);
	if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==
	    QDF_STATUS_SUCCESS)
		htt_htc_misc_pkt_list_add(soc, pkt);
}

/*
 * htt_htc_misc_pkt_pool_free() - free pkts in misc list
 * @htt_soc:	HTT SOC handle
 */
static void
htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt, *next;
	qdf_nbuf_t netbuf;

	pkt = soc->htt_htc_pkt_misclist;

	while (pkt) {
		next = pkt->u.next;
		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);

		soc->stats.htc_pkt_free++;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
			 "%s: Pkt free count %d",
			 __func__, soc->stats.htc_pkt_free);

		qdf_nbuf_free(netbuf);
		qdf_mem_free(pkt);
		pkt = next;
	}
	soc->htt_htc_pkt_misclist = NULL;
}

/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differs
 * @tgt_mac_addr:	Target MAC
 * @buffer:		Output buffer
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness.
	 * To make u_int32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each u_int32_t element of the
	 * message.
	 * For byte-array message fields like the MAC address, this
	 * upload swizzling puts the bytes in the wrong order, and needs
	 * to be undone.
	 */
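	/*
	 * Worked example: a target MAC aa:bb:cc:dd:ee:ff uploaded as two
	 * swizzled u_int32_t words arrives as tgt_mac_addr[] =
	 * {dd, cc, bb, aa, ?, ?, ff, ee}, so the reordering below
	 * reconstructs buffer[] = {aa, bb, cc, dd, ee, ff}.
	 */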
	buffer[0] = tgt_mac_addr[3];
	buffer[1] = tgt_mac_addr[2];
	buffer[2] = tgt_mac_addr[1];
	buffer[3] = tgt_mac_addr[0];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}

/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc:	SOC handle
 * @status:	Completion status
 * @netbuf:	HTT buffer
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}

/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context:	Opaque context (HTT SOC handle)
 * @htc_pkt:	HTC packet
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*send_complete_part2)(
		void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	send_complete_part2 = htc_pkt->pPktContext;

	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2) {
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}

/*
 * htt_h2t_ver_req_msg() - Send HTT version request message to target
 * @htt_soc:	HTT SOC handle
 *
 * Return: 0 on success; error code on failure
 */
static int htt_h2t_ver_req_msg(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
			__func__);
		qdf_nbuf_free(msg); /* free the nbuf instead of leaking it */
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *) qdf_nbuf_data(msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}
	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
		qdf_nbuf_len(msg), soc->htc_endpoint,
		1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ, NULL);
	return 0;
}

/*
 * htt_srng_setup() - Send SRNG setup message to target
 * @soc:		HTT SOC handle
 * @mac_id:		MAC Id
 * @hal_ring_hdl:	Opaque HAL SRNG handle
 * @hal_ring_type:	SRNG ring type
 *
 * Return: 0 on success; error code on failure
 */
int htt_srng_setup(struct htt_soc *soc, int mac_id,
		   hal_ring_handle_t hal_ring_hdl,
		   int hal_ring_type)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t htt_msg;
	uint32_t *msg_word;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr, tp_addr;
	uint32_t ring_entry_size =
		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
	int htt_ring_type, htt_ring_id;
	uint8_t *htt_logger_bufp;

	/* Sizes should be set in 4-byte words */
	ring_entry_size = ring_entry_size >> 2;
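	/* e.g. a 32-byte hardware ring entry is reported as 8 (32 >> 2)
	 * 4-byte words
	 */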

	htt_msg = qdf_nbuf_alloc(soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!htt_msg)
		goto fail0;

	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);

	switch (hal_ring_type) {
	case RXDMA_BUF:
#ifdef QCA_HOST2FW_RXBUF_RING
		if (srng_params.ring_id ==
		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
			htt_ring_type = HTT_SW_TO_SW_RING;
#ifdef IPA_OFFLOAD
		} else if (srng_params.ring_id ==
		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
			htt_ring_type = HTT_SW_TO_SW_RING;
#endif
#else
		if (srng_params.ring_id ==
			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			htt_ring_type = HTT_SW_TO_HW_RING;
#endif
		} else if (srng_params.ring_id ==
#ifdef IPA_OFFLOAD
			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
#else
			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
#endif
			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "%s: Ring %d currently not supported",
				  __func__, srng_params.ring_id);
			goto fail1;
		}

		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
			hal_ring_type, srng_params.ring_id, htt_ring_id,
			(uint64_t)hp_addr,
			(uint64_t)tp_addr);
		break;
	case RXDMA_MONITOR_BUF:
		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_STATUS:
		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_DST:
		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case RXDMA_MONITOR_DESC:
		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_DST:
		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Ring currently not supported", __func__);
		goto fail1;
	}

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Failed to expand head for SRING_SETUP msg",
			__func__);
		goto fail1; /* frees htt_msg instead of leaking it */
	}

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0 */
	*msg_word = 0;
	htt_logger_bufp = (uint8_t *)msg_word;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);

	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
			(htt_ring_type == HTT_HW_TO_SW_RING))
		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
			 DP_SW2HW_MACID(mac_id));
	else
		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);

	dp_info("%s: mac_id %d", __func__, mac_id);
	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
	/* TODO: Discuss with FW on changing this to unique ID and using
	 * htt_ring_type to send the type of ring
	 */
	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
		srng_params.ring_base_paddr & 0xffffffff);

	/* word 2 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
		(uint64_t)srng_params.ring_base_paddr >> 32);

	/* word 3 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
		(ring_entry_size * srng_params.num_entries));
	dp_info("%s: entry_size %d", __func__, ring_entry_size);
	dp_info("%s: num_entries %d", __func__, srng_params.num_entries);
	dp_info("%s: ring_size %d", __func__,
		(ring_entry_size * srng_params.num_entries));
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
						*msg_word, 1);
	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));

	/* word 4 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
		hp_addr & 0xffffffff);

	/* word 5 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
		(uint64_t)hp_addr >> 32);

	/* word 6 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
		tp_addr & 0xffffffff);

	/* word 7 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
		(uint64_t)tp_addr >> 32);

	/* word 8 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
		srng_params.msi_addr & 0xffffffff);

	/* word 9 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
		(uint64_t)(srng_params.msi_addr) >> 32);

	/* word 10 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
		srng_params.msi_data);

	/* word 11 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
		srng_params.intr_batch_cntr_thres_entries *
		ring_entry_size);
	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
		srng_params.intr_timer_thres_us >> 3);
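	/* the >> 3 divides intr_timer_thres_us by 8; presumably the target
	 * expects the interrupt timer threshold in 8 us units (an
	 * assumption, not confirmed in this file)
	 */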

	/* word 12 */
	msg_word++;
	*msg_word = 0;
	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
		/* TODO: Setting low threshold to 1/8th of ring size - see
		 * if this needs to be configurable
		 */
		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
			srng_params.low_threshold);
	}
	/* "response_required" field should be set if a HTT response message is
	 * required after setting up the ring.
	 */
	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt)
		goto fail1;

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(htt_msg),
		qdf_nbuf_len(htt_msg),
		soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
			    htt_logger_bufp);

	return QDF_STATUS_SUCCESS;

fail1:
	qdf_nbuf_free(htt_msg);
fail0:
	return QDF_STATUS_E_FAILURE;
}

/*
 * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
 * config message to target
 * @htt_soc:		HTT SOC handle
 * @pdev_id:		PDEV Id
 * @hal_ring_hdl:	Opaque HAL SRNG handle
 * @hal_ring_type:	SRNG ring type
 * @ring_buf_size:	SRNG buffer size
 * @htt_tlv_filter:	Rx SRNG TLV and filter setting
 *
 * Return: 0 on success; error code on failure
 */
int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
			hal_ring_handle_t hal_ring_hdl,
			int hal_ring_type, int ring_buf_size,
			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t htt_msg;
	uint32_t *msg_word;
	struct hal_srng_params srng_params;
	uint32_t htt_ring_type, htt_ring_id;
	uint32_t tlv_filter;
	uint8_t *htt_logger_bufp;
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);

	htt_msg = qdf_nbuf_alloc(soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!htt_msg)
		goto fail0;

	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);

	switch (hal_ring_type) {
	case RXDMA_BUF:
		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_BUF:
		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_STATUS:
		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_DST:
		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case RXDMA_MONITOR_DESC:
		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_DST:
		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Ring currently not supported", __func__);
		goto fail1;
	}

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Failed to expand head for RX Ring Cfg msg",
			__func__);
		goto fail1; /* failure */
	}

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0 */
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);

	/*
	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1;
	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
	 */
	if (htt_ring_type == HTT_SW_TO_SW_RING ||
			htt_ring_type == HTT_SW_TO_HW_RING)
		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
						DP_SW2HW_MACID(pdev_id));
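	/*
	 * Illustration (assuming the usual +1 host-to-target mapping noted
	 * above): host pdev_id 0 would be sent as target MAC id 1 by
	 * DP_SW2HW_MACID.
	 */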

	/* TODO: Discuss with FW on changing this to unique ID and using
	 * htt_ring_type to send the type of ring
	 */
	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);

	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));

	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));

	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
						htt_tlv_filter->offset_valid);

	if (mon_drop_th > 0)
		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
								   1);
	else
		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
								   0);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
		ring_buf_size);

	/* word 2 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0000,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0001,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0010,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0011,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0100,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0101,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0110,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
			MGMT, 0111,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 1000,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 1001,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0000,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0001,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0010,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0011,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0100,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0101,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0110,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
			MGMT, 0111,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 1000,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 1001,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0000,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0001,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0010,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0011,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0100,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0101,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0110,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
			MGMT, 0111,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 1000,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 1001,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	/* word 3 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1010,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1011,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1100,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1101,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1110,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
			MGMT, 1111,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_RESERVED_15) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1010,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1011,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1100,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1101,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1110,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1010,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1011,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1100,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1101,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1110,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
			MGMT, 1111,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_RESERVED_15) ? 1 : 0);
	}

	/* word 4 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0000,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0001,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0010,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0011,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0100,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0101,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0110,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0111,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 1000,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 1001,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0000,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0001,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0010,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0011,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0100,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0101,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0110,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0111,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 1000,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 1001,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0000,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0001,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0010,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0011,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0100,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0101,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0110,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0111,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 1000,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 1001,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	/* word 5 */
	msg_word++;
	*msg_word = 0;
	if (htt_tlv_filter->enable_fp) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1010,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1011,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1100,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1101,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1110,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1111,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, MCAST,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, UCAST,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, NULL,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1010,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1011,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1100,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1101,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1110,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1111,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, MCAST,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, UCAST,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, NULL,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1010,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1011,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1100,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1101,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1110,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1111,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, MCAST,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, UCAST,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, NULL,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}
1610 
1611 	/* word 6 */
1612 	msg_word++;
1613 	*msg_word = 0;
1614 	tlv_filter = 0;
1615 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1616 		htt_tlv_filter->mpdu_start);
1617 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1618 		htt_tlv_filter->msdu_start);
1619 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1620 		htt_tlv_filter->packet);
1621 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1622 		htt_tlv_filter->msdu_end);
1623 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1624 		htt_tlv_filter->mpdu_end);
1625 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1626 		htt_tlv_filter->packet_header);
1627 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1628 		htt_tlv_filter->attention);
1629 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1630 		htt_tlv_filter->ppdu_start);
1631 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1632 		htt_tlv_filter->ppdu_end);
1633 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1634 		htt_tlv_filter->ppdu_end_user_stats);
1635 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1636 		PPDU_END_USER_STATS_EXT,
1637 		htt_tlv_filter->ppdu_end_user_stats_ext);
1638 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1639 		htt_tlv_filter->ppdu_end_status_done);
	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter */
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
		htt_tlv_filter->header_per_msdu);
1643 
1644 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1645 
1646 	msg_word++;
1647 	*msg_word = 0;
1648 	if (htt_tlv_filter->offset_valid) {
1649 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1650 					htt_tlv_filter->rx_packet_offset);
1651 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1652 					htt_tlv_filter->rx_header_offset);
1653 
1654 		msg_word++;
1655 		*msg_word = 0;
1656 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1657 					htt_tlv_filter->rx_mpdu_end_offset);
1658 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1659 					htt_tlv_filter->rx_mpdu_start_offset);
1660 
1661 		msg_word++;
1662 		*msg_word = 0;
1663 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1664 					htt_tlv_filter->rx_msdu_end_offset);
1665 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1666 					htt_tlv_filter->rx_msdu_start_offset);
1667 
1668 		msg_word++;
1669 		*msg_word = 0;
1670 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1671 					htt_tlv_filter->rx_attn_offset);
1672 		msg_word++;
1673 		*msg_word = 0;
1674 	} else {
1675 		msg_word += 4;
1676 		*msg_word = 0;
1677 	}
1678 
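	/* Program the rx drop threshold only when a positive value was
	 * requested by the caller
	 */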
1679 	if (mon_drop_th > 0)
1680 		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1681 								mon_drop_th);
1682 
	/* "response_required" field should be set if an HTT response message
	 * is required after setting up the ring.
	 */
1686 	pkt = htt_htc_pkt_alloc(soc);
1687 	if (!pkt)
1688 		goto fail1;
1689 
1690 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1691 
1692 	SET_HTC_PACKET_INFO_TX(
1693 		&pkt->htc_pkt,
1694 		dp_htt_h2t_send_complete_free_netbuf,
1695 		qdf_nbuf_data(htt_msg),
1696 		qdf_nbuf_len(htt_msg),
1697 		soc->htc_endpoint,
1698 		1); /* tag - not relevant here */
1699 
1700 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1701 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1702 			    htt_logger_bufp);
1703 	return QDF_STATUS_SUCCESS;
1704 
1705 fail1:
1706 	qdf_nbuf_free(htt_msg);
1707 fail0:
1708 	return QDF_STATUS_E_FAILURE;
1709 }
1710 
1711 #if defined(HTT_STATS_ENABLE)
1712 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1713 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1714 
1715 {
1716 	uint32_t pdev_id;
1717 	uint32_t *msg_word = NULL;
1718 	uint32_t msg_remain_len = 0;
1719 
1720 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1721 
	/* pdev_id is carried in the lower bits of the cookie MSB (word 2) */
1723 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1724 
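	/* Each T2H message carries at most DP_EXT_MSG_LENGTH bytes of the
	 * stats stream; longer responses span multiple T2H messages
	 */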
	/* stats message length + 16 bytes of HTT header */
1726 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1727 				(uint32_t)DP_EXT_MSG_LENGTH);
1728 
1729 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1730 			msg_word,  msg_remain_len,
1731 			WDI_NO_VAL, pdev_id);
1732 
1733 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1734 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1735 	}
	/* Needs to be freed here as the WDI handler makes a copy of the pkt
	 * to send the data to the application
	 */
1739 	qdf_nbuf_free(htt_msg);
1740 	return QDF_STATUS_SUCCESS;
1741 }
1742 #else
1743 static inline QDF_STATUS
1744 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1745 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
1746 {
1747 	return QDF_STATUS_E_NOSUPPORT;
1748 }
#endif

1750 /**
1751  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
 * @htt_stats: htt stats info
 * @soc: DP SOC handle
1753  *
1754  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1755  * contains sub messages which are identified by a TLV header.
1756  * In this function we will process the stream of T2H messages and read all the
1757  * TLV contained in the message.
1758  *
 * The following cases have been taken care of:
 * Case 1: When the tlv_remain_length <= msg_remain_length of the HTT MSG
 *		buffer. In this case the buffer will contain multiple tlvs.
 * Case 2: When the tlv_remain_length > msg_remain_length of the HTT MSG
 *		buffer. Only one tlv will be contained in the HTT message and
 *		this tag will extend onto the next buffer.
 * Case 3: When the buffer is the continuation of the previous message
 * Case 4: When the tlv length is 0, which indicates the end of the message
1767  *
1768  * return: void
1769  */
1770 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
1771 					struct dp_soc *soc)
1772 {
1773 	htt_tlv_tag_t tlv_type = 0xff;
1774 	qdf_nbuf_t htt_msg = NULL;
1775 	uint32_t *msg_word;
1776 	uint8_t *tlv_buf_head = NULL;
1777 	uint8_t *tlv_buf_tail = NULL;
1778 	uint32_t msg_remain_len = 0;
1779 	uint32_t tlv_remain_len = 0;
1780 	uint32_t *tlv_start;
1781 	int cookie_val;
1782 	int cookie_msb;
1783 	int pdev_id;
1784 	bool copy_stats = false;
1785 	struct dp_pdev *pdev;
1786 
1787 	/* Process node in the HTT message queue */
1788 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1789 		!= NULL) {
1790 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1791 		cookie_val = *(msg_word + 1);
1792 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
1793 					*(msg_word +
1794 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
1795 
1796 		if (cookie_val) {
1797 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
1798 					== QDF_STATUS_SUCCESS) {
1799 				continue;
1800 			}
1801 		}
1802 
1803 		cookie_msb = *(msg_word + 2);
1804 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1805 		pdev = soc->pdev_list[pdev_id];
1806 
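		/* Non-zero upper bits in the cookie MSB request that the
		 * stats be copied via dp_htt_stats_copy_tag() instead of
		 * being printed
		 */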
1807 		if (cookie_msb >> 2) {
1808 			copy_stats = true;
1809 		}
1810 
1811 		/* read 5th word */
1812 		msg_word = msg_word + 4;
1813 		msg_remain_len = qdf_min(htt_stats->msg_len,
1814 				(uint32_t) DP_EXT_MSG_LENGTH);
1815 		/* Keep processing the node till node length is 0 */
1816 		while (msg_remain_len) {
1817 			/*
1818 			 * if message is not a continuation of previous message
1819 			 * read the tlv type and tlv length
1820 			 */
1821 			if (!tlv_buf_head) {
1822 				tlv_type = HTT_STATS_TLV_TAG_GET(
1823 						*msg_word);
1824 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
1825 						*msg_word);
1826 			}
1827 
1828 			if (tlv_remain_len == 0) {
1829 				msg_remain_len = 0;
1830 
1831 				if (tlv_buf_head) {
1832 					qdf_mem_free(tlv_buf_head);
1833 					tlv_buf_head = NULL;
1834 					tlv_buf_tail = NULL;
1835 				}
1836 
1837 				goto error;
1838 			}
1839 
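			/* The TLV length field excludes the TLV header, so
			 * account for the header bytes when starting a fresh
			 * TLV
			 */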
1840 			if (!tlv_buf_head)
1841 				tlv_remain_len += HTT_TLV_HDR_LEN;
1842 
			if (tlv_remain_len <= msg_remain_len) {
1844 				/* Case 3 */
1845 				if (tlv_buf_head) {
1846 					qdf_mem_copy(tlv_buf_tail,
1847 							(uint8_t *)msg_word,
1848 							tlv_remain_len);
1849 					tlv_start = (uint32_t *)tlv_buf_head;
1850 				} else {
1851 					/* Case 1 */
1852 					tlv_start = msg_word;
1853 				}
1854 
1855 				if (copy_stats)
1856 					dp_htt_stats_copy_tag(pdev,
1857 							      tlv_type,
1858 							      tlv_start);
1859 				else
1860 					dp_htt_stats_print_tag(pdev,
1861 							       tlv_type,
1862 							       tlv_start);
1863 
1864 				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
1865 				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
1866 					dp_peer_update_inactive_time(pdev,
1867 								     tlv_type,
1868 								     tlv_start);
1869 
1870 				msg_remain_len -= tlv_remain_len;
1871 
1872 				msg_word = (uint32_t *)
1873 					(((uint8_t *)msg_word) +
1874 					tlv_remain_len);
1875 
1876 				tlv_remain_len = 0;
1877 
1878 				if (tlv_buf_head) {
1879 					qdf_mem_free(tlv_buf_head);
1880 					tlv_buf_head = NULL;
1881 					tlv_buf_tail = NULL;
1882 				}
1883 
1884 			} else { /* tlv_remain_len > msg_remain_len */
1885 				/* Case 2 & 3 */
1886 				if (!tlv_buf_head) {
1887 					tlv_buf_head = qdf_mem_malloc(
1888 							tlv_remain_len);
1889 
1890 					if (!tlv_buf_head) {
1891 						QDF_TRACE(QDF_MODULE_ID_TXRX,
1892 								QDF_TRACE_LEVEL_ERROR,
1893 								"Alloc failed");
1894 						goto error;
1895 					}
1896 
1897 					tlv_buf_tail = tlv_buf_head;
1898 				}
1899 
1900 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
1901 						msg_remain_len);
1902 				tlv_remain_len -= msg_remain_len;
1903 				tlv_buf_tail += msg_remain_len;
1904 			}
1905 		}
1906 
1907 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1908 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1909 		}
1910 
1911 		qdf_nbuf_free(htt_msg);
1912 	}
1913 	return;
1914 
1915 error:
1916 	qdf_nbuf_free(htt_msg);
1917 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1918 			!= NULL)
1919 		qdf_nbuf_free(htt_msg);
1920 }
1921 
1922 void htt_t2h_stats_handler(void *context)
1923 {
1924 	struct dp_soc *soc = (struct dp_soc *)context;
1925 	struct htt_stats_context htt_stats;
1926 	uint32_t *msg_word;
1927 	qdf_nbuf_t htt_msg = NULL;
1928 	uint8_t done;
1929 	uint32_t rem_stats;
1930 
1931 	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
1932 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1933 			"soc: 0x%pK, init_done: %d", soc,
1934 			qdf_atomic_read(&soc->cmn_init_done));
1935 		return;
1936 	}
1937 
1938 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1939 	qdf_nbuf_queue_init(&htt_stats.msg);
1940 
1941 	/* pull one completed stats from soc->htt_stats_msg and process */
1942 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1943 	if (!soc->htt_stats.num_stats) {
1944 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1945 		return;
1946 	}
1947 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1948 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1949 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1950 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1951 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1952 		/*
1953 		 * Done bit signifies that this is the last T2H buffer in the
1954 		 * stream of HTT EXT STATS message
1955 		 */
1956 		if (done)
1957 			break;
1958 	}
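	/* One complete stats response has been dequeued; decrement the
	 * outstanding count while still holding the lock
	 */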
1959 	rem_stats = --soc->htt_stats.num_stats;
1960 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1961 
	/* If there are more stats to process, schedule the stats work again.
	 * Scheduling before processing htt_stats queues the work with an
	 * earlier index
	 */
1966 	if (rem_stats)
1967 		qdf_sched_work(0, &soc->htt_stats.work);
1968 
1969 	dp_process_htt_stat_msg(&htt_stats, soc);
1970 }
1971 
1972 /*
1973  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1974  * if a new peer id arrives in a PPDU
 * @pdev: DP pdev handle
 * @peer_id: peer unique identifier
 * @ppdu_info: per ppdu tlv structure
 *
 * return: user index to be populated
1980  */
1981 #ifdef FEATURE_PERPKT_INFO
1982 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
1983 						uint16_t peer_id,
1984 						struct ppdu_info *ppdu_info)
1985 {
1986 	uint8_t user_index = 0;
1987 	struct cdp_tx_completion_ppdu *ppdu_desc;
1988 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1989 
1990 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1991 
1992 	while ((user_index + 1) <= ppdu_info->last_user) {
1993 		ppdu_user_desc = &ppdu_desc->user[user_index];
1994 		if (ppdu_user_desc->peer_id != peer_id) {
1995 			user_index++;
1996 			continue;
1997 		} else {
1998 			/* Max users possible is 8 so user array index should
1999 			 * not exceed 7
2000 			 */
2001 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
2002 			return user_index;
2003 		}
2004 	}
2005 
2006 	ppdu_info->last_user++;
2007 	/* Max users possible is 8 so last user should not exceed 8 */
2008 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
2009 	return ppdu_info->last_user - 1;
2010 }
2011 
2012 /*
2013  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
 * @pdev: DP pdev handle
2015  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
2016  * @ppdu_info: per ppdu tlv structure
2017  *
2018  * return:void
2019  */
2020 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
2021 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
2022 {
2023 	uint16_t frame_type;
2024 	uint16_t frame_ctrl;
2025 	uint16_t freq;
2026 	struct dp_soc *soc = NULL;
2027 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2028 
2029 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2030 
2031 	tag_buf += 2;
2032 	ppdu_info->sched_cmdid =
2033 		HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
2034 	ppdu_desc->num_users =
2035 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
2036 	tag_buf++;
2037 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
2038 
2039 	frame_ctrl = ppdu_desc->frame_ctrl;
2040 
2041 	switch (frame_type) {
2042 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
2043 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
		/*
		 * for management packets, the frame type comes as DATA_SU;
		 * need to check frame_ctrl before setting frame_type
		 */
2048 		if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
2049 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2050 		else
2051 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
2052 	break;
2053 	case HTT_STATS_FTYPE_SGEN_MU_BAR:
2054 	case HTT_STATS_FTYPE_SGEN_BAR:
2055 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
2056 	break;
2057 	default:
2058 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2059 	break;
2060 	}
2061 
2062 	tag_buf += 2;
2063 	ppdu_desc->tx_duration = *tag_buf;
2064 	tag_buf += 3;
2065 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
2066 
2067 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2068 					ppdu_desc->tx_duration;
	/* Ack timestamp is the same as the end timestamp */
2070 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
2071 
2072 	tag_buf++;
2073 
2074 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
	if (freq != ppdu_desc->channel) {
		soc = pdev->soc;
		ppdu_desc->channel = freq;
		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
			pdev->operating_channel =
				soc->cdp_soc.ol_ops->freq_to_channel(
						pdev->ctrl_pdev, freq);
	}
2082 
2083 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
2084 }
2085 
2086 /*
 * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
 * @pdev: DP pdev handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
2089  * @ppdu_info: per ppdu tlv structure
2090  *
2091  * return:void
2092  */
2093 static void dp_process_ppdu_stats_user_common_tlv(
2094 		struct dp_pdev *pdev, uint32_t *tag_buf,
2095 		struct ppdu_info *ppdu_info)
2096 {
2097 	uint16_t peer_id;
2098 	struct cdp_tx_completion_ppdu *ppdu_desc;
2099 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2100 	uint8_t curr_user_index = 0;
2101 
2102 	ppdu_desc =
2103 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2104 
2105 	tag_buf++;
2106 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2107 
2108 	curr_user_index =
2109 		dp_get_ppdu_info_user_index(pdev,
2110 					    peer_id, ppdu_info);
2111 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2112 
2113 	if (peer_id == DP_SCAN_PEER_ID) {
2114 		ppdu_desc->vdev_id =
2115 			HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
2116 	} else {
2117 		if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2118 			return;
2119 	}
2120 
2121 	ppdu_user_desc->peer_id = peer_id;
2122 
2123 	tag_buf++;
2124 
2125 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
2126 		ppdu_user_desc->delayed_ba = 1;
2127 		ppdu_desc->delayed_ba = 1;
2128 	}
2129 
2130 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
2131 		ppdu_user_desc->is_mcast = true;
2132 		ppdu_user_desc->mpdu_tried_mcast =
2133 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
2134 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
2135 	} else {
2136 		ppdu_user_desc->mpdu_tried_ucast =
2137 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
2138 	}
2139 
2140 	tag_buf++;
2141 
2142 	ppdu_user_desc->qos_ctrl =
2143 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
2144 	ppdu_user_desc->frame_ctrl =
2145 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
2146 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
2147 
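	/* When the BA is delayed, the mpdu success count is not yet known */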
2148 	if (ppdu_user_desc->delayed_ba)
2149 		ppdu_user_desc->mpdu_success = 0;
2150 
2151 	tag_buf += 3;
2152 
2153 	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
2154 		ppdu_user_desc->ppdu_cookie =
2155 			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
2156 		ppdu_user_desc->is_ppdu_cookie_valid = 1;
2157 	}
2158 }
2159 
2161 /**
2162  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
2163  * @pdev: DP pdev handle
2164  * @tag_buf: T2H message buffer carrying the user rate TLV
2165  * @ppdu_info: per ppdu tlv structure
2166  *
2167  * return:void
2168  */
2169 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
2170 		uint32_t *tag_buf,
2171 		struct ppdu_info *ppdu_info)
2172 {
2173 	uint16_t peer_id;
2174 	struct dp_peer *peer;
2175 	struct cdp_tx_completion_ppdu *ppdu_desc;
2176 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2177 	uint8_t curr_user_index = 0;
2178 	struct dp_vdev *vdev;
2179 
2180 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2181 
2182 	tag_buf++;
2183 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2184 
2185 	curr_user_index =
2186 		dp_get_ppdu_info_user_index(pdev,
2187 					    peer_id, ppdu_info);
2188 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2189 	if (peer_id == DP_SCAN_PEER_ID) {
2190 		vdev =
2191 		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
2192 							  ppdu_desc->vdev_id);
2193 		if (!vdev)
2194 			return;
2195 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
2196 			     QDF_MAC_ADDR_SIZE);
2197 	} else {
2198 		peer = dp_peer_find_by_id(pdev->soc, peer_id);
2199 		if (!peer)
2200 			return;
2201 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2202 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2203 		dp_peer_unref_del_find_by_id(peer);
2204 	}
2205 
2206 	ppdu_user_desc->peer_id = peer_id;
2207 
2208 	ppdu_user_desc->tid =
2209 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
2210 
2211 	tag_buf += 1;
2212 
2213 	ppdu_user_desc->user_pos =
2214 		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
2215 	ppdu_user_desc->mu_group_id =
2216 		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
2217 
2218 	tag_buf += 1;
2219 
2220 	ppdu_user_desc->ru_start =
2221 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
2222 	ppdu_user_desc->ru_tones =
2223 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
2224 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
2225 
2226 	tag_buf += 2;
2227 
2228 	ppdu_user_desc->ppdu_type =
2229 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
2230 
2231 	tag_buf++;
2232 	ppdu_user_desc->tx_rate = *tag_buf;
2233 
2234 	ppdu_user_desc->ltf_size =
2235 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
2236 	ppdu_user_desc->stbc =
2237 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
2238 	ppdu_user_desc->he_re =
2239 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
2240 	ppdu_user_desc->txbf =
2241 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
2242 	ppdu_user_desc->bw =
2243 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
2244 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
2245 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
2246 	ppdu_user_desc->preamble =
2247 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
2248 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
2249 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
2250 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
2251 }
2252 
2253 /*
2254  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
2255  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
 * @pdev: DP PDEV handle
2257  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2258  * @ppdu_info: per ppdu tlv structure
2259  *
2260  * return:void
2261  */
2262 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2263 		struct dp_pdev *pdev, uint32_t *tag_buf,
2264 		struct ppdu_info *ppdu_info)
2265 {
2266 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
2267 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
2268 
2269 	struct cdp_tx_completion_ppdu *ppdu_desc;
2270 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2271 	uint8_t curr_user_index = 0;
2272 	uint16_t peer_id;
2273 	uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
2274 
2275 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2276 
2277 	tag_buf++;
2278 
2279 	peer_id =
2280 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2281 
2282 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2283 		return;
2284 
2285 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2286 
2287 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2288 	ppdu_user_desc->peer_id = peer_id;
2289 
2290 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2291 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2292 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2293 
2294 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2295 						   (void *)ppdu_user_desc,
2296 						   ppdu_info->ppdu_id,
2297 						   size);
2298 }
2299 
2300 /*
2301  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
2302  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * @pdev: DP PDEV handle
2304  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2305  * @ppdu_info: per ppdu tlv structure
2306  *
2307  * return:void
2308  */
2309 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2310 		struct dp_pdev *pdev, uint32_t *tag_buf,
2311 		struct ppdu_info *ppdu_info)
2312 {
2313 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
2314 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
2315 
2316 	struct cdp_tx_completion_ppdu *ppdu_desc;
2317 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2318 	uint8_t curr_user_index = 0;
2319 	uint16_t peer_id;
2320 	uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
2321 
2322 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2323 
2324 	tag_buf++;
2325 
2326 	peer_id =
2327 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2328 
2329 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2330 		return;
2331 
2332 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2333 
2334 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2335 	ppdu_user_desc->peer_id = peer_id;
2336 
2337 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2338 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2339 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2340 
2341 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2342 						   (void *)ppdu_user_desc,
2343 						   ppdu_info->ppdu_id,
2344 						   size);
2345 }
2346 
2347 /*
2348  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
2349  * htt_ppdu_stats_user_cmpltn_common_tlv
 * @pdev: DP PDEV handle
2351  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
2352  * @ppdu_info: per ppdu tlv structure
2353  *
2354  * return:void
2355  */
2356 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
2357 		struct dp_pdev *pdev, uint32_t *tag_buf,
2358 		struct ppdu_info *ppdu_info)
2359 {
2360 	uint16_t peer_id;
2361 	struct cdp_tx_completion_ppdu *ppdu_desc;
2362 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2363 	uint8_t curr_user_index = 0;
2364 	uint8_t bw_iter;
2365 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
2366 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
2367 
2368 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2369 
2370 	tag_buf++;
2371 	peer_id =
2372 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
2373 
2374 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2375 		return;
2376 
2377 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2378 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2379 	ppdu_user_desc->peer_id = peer_id;
2380 	ppdu_desc->last_usr_index = curr_user_index;
2381 
2382 	ppdu_user_desc->completion_status =
2383 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
2384 				*tag_buf);
2385 
2386 	ppdu_user_desc->tid =
2387 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
2388 
2390 	tag_buf++;
2391 	if (qdf_likely(ppdu_user_desc->completion_status ==
2392 			HTT_PPDU_STATS_USER_STATUS_OK)) {
2393 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
2394 		ppdu_user_desc->ack_rssi_valid = 1;
2395 	} else {
2396 		ppdu_user_desc->ack_rssi_valid = 0;
2397 	}
2398 
2399 	tag_buf++;
2400 
2401 	ppdu_user_desc->mpdu_success =
2402 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
2403 
2404 	ppdu_user_desc->mpdu_failed =
2405 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
2406 						ppdu_user_desc->mpdu_success;
2407 
2408 	tag_buf++;
2409 
2410 	ppdu_user_desc->long_retries =
2411 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2412 
2413 	ppdu_user_desc->short_retries =
2414 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
2415 	ppdu_user_desc->retry_msdus =
2416 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
2417 
2418 	ppdu_user_desc->is_ampdu =
2419 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
2420 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
2421 
2422 	/*
2423 	 * increase successful mpdu counter from
2424 	 * htt_ppdu_stats_user_cmpltn_common_tlv
2425 	 */
2426 	ppdu_info->mpdu_compltn_common_tlv += ppdu_user_desc->mpdu_success;
2427 
	/*
	 * An MU BAR may send a request to n users, but we may receive an ack
	 * only from m users. To count the number of users that responded, we
	 * keep a separate bar_num_users counter per PPDU that gets
	 * incremented for every htt_ppdu_stats_user_cmpltn_common_tlv
	 */
2434 	ppdu_desc->bar_num_users++;
2435 
2436 	tag_buf++;
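	/* One RSSI word follows for each RSSI chain */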
2437 	for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
2438 		ppdu_user_desc->rssi_chain[bw_iter] =
2439 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
2440 		tag_buf++;
2441 	}
2442 
2443 	ppdu_user_desc->sa_tx_antenna =
2444 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);
2445 
2446 	tag_buf++;
2447 	ppdu_user_desc->sa_is_training =
2448 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
2449 	if (ppdu_user_desc->sa_is_training) {
2450 		ppdu_user_desc->sa_goodput =
2451 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
2452 	}
2453 
2454 	tag_buf++;
2455 	for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
2456 		ppdu_user_desc->sa_max_rates[bw_iter] =
2457 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
2458 	}
2459 
2460 	tag_buf += CDP_NUM_SA_BW;
2461 	ppdu_user_desc->current_rate_per =
2462 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
2463 }
2464 
2465 /*
2466  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2467  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
 * @pdev: DP PDEV handle
2469  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2470  * @ppdu_info: per ppdu tlv structure
2471  *
2472  * return:void
2473  */
2474 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2475 		struct dp_pdev *pdev, uint32_t *tag_buf,
2476 		struct ppdu_info *ppdu_info)
2477 {
2478 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2479 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2480 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2481 	struct cdp_tx_completion_ppdu *ppdu_desc;
2482 	uint8_t curr_user_index = 0;
2483 	uint16_t peer_id;
2484 
2485 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2486 
2487 	tag_buf++;
2488 
2489 	peer_id =
2490 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2491 
2492 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2493 		return;
2494 
2495 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2496 
2497 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2498 	ppdu_user_desc->peer_id = peer_id;
2499 
2500 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2501 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2502 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2503 }
2504 
2505 /*
2506  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2507  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
 * @pdev: DP PDEV handle
2509  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2510  * @ppdu_info: per ppdu tlv structure
2511  *
2512  * return:void
2513  */
2514 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2515 		struct dp_pdev *pdev, uint32_t *tag_buf,
2516 		struct ppdu_info *ppdu_info)
2517 {
2518 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2519 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2520 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2521 	struct cdp_tx_completion_ppdu *ppdu_desc;
2522 	uint8_t curr_user_index = 0;
2523 	uint16_t peer_id;
2524 
2525 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2526 
2527 	tag_buf++;
2528 
2529 	peer_id =
2530 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2531 
2532 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2533 		return;
2534 
2535 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2536 
2537 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2538 	ppdu_user_desc->peer_id = peer_id;
2539 
2540 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2541 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2542 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2543 }
2544 
2545 /*
2546  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2547  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @pdev: DP PDEV handle
2549  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2550  * @ppdu_info: per ppdu tlv structure
2551  *
2552  * return:void
2553  */
2554 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2555 		struct dp_pdev *pdev, uint32_t *tag_buf,
2556 		struct ppdu_info *ppdu_info)
2557 {
2558 	uint16_t peer_id;
2559 	struct cdp_tx_completion_ppdu *ppdu_desc;
2560 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2561 	uint8_t curr_user_index = 0;
2562 
2563 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2564 
2565 	tag_buf += 2;
2566 	peer_id =
2567 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2568 
2569 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2570 		return;
2571 
2572 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2573 
2574 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2575 	ppdu_user_desc->peer_id = peer_id;
2576 
2577 	tag_buf++;
2578 	ppdu_user_desc->tid =
2579 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(*tag_buf);
2580 	ppdu_user_desc->num_mpdu =
2581 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2582 
2583 	ppdu_user_desc->num_msdu =
2584 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2585 
2586 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2587 
2588 	tag_buf++;
2589 	ppdu_user_desc->start_seq =
2590 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
2591 			*tag_buf);
2592 
2593 	tag_buf++;
2594 	ppdu_user_desc->success_bytes = *tag_buf;
2595 
2596 	/* increase successful mpdu counter */
2597 	ppdu_info->mpdu_ack_ba_tlv += ppdu_user_desc->num_mpdu;
2598 }
2599 
2600 /*
2601  * dp_process_ppdu_stats_user_common_array_tlv: Process
2602  * htt_ppdu_stats_user_common_array_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_usr_common_array_tlv
2605  * @ppdu_info: per ppdu tlv structure
2606  *
2607  * return:void
2608  */
2609 static void dp_process_ppdu_stats_user_common_array_tlv(
2610 		struct dp_pdev *pdev, uint32_t *tag_buf,
2611 		struct ppdu_info *ppdu_info)
2612 {
2613 	uint32_t peer_id;
2614 	struct cdp_tx_completion_ppdu *ppdu_desc;
2615 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2616 	uint8_t curr_user_index = 0;
2617 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2618 
2619 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2620 
2621 	tag_buf++;
2622 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2623 	tag_buf += 3;
2624 	peer_id =
2625 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2626 
2627 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
2628 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2629 			"Invalid peer");
2630 		return;
2631 	}
2632 
2633 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2634 
2635 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2636 
2637 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2638 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2639 
2640 	tag_buf++;
2641 
2642 	ppdu_user_desc->success_msdus =
2643 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2644 	ppdu_user_desc->retry_bytes =
2645 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2646 	tag_buf++;
2647 	ppdu_user_desc->failed_msdus =
2648 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2649 }
2650 
2651 /*
 * dp_process_ppdu_stats_user_compltn_flush_tlv: Process
2653  * htt_ppdu_stats_flush_tlv
2654  * @pdev: DP PDEV handle
2655  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2656  *
2657  * return:void
2658  */
2659 static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2660 						uint32_t *tag_buf)
2661 {
2662 	uint32_t peer_id;
2663 	uint32_t drop_reason;
2664 	uint8_t tid;
2665 	uint32_t num_msdu;
2666 	struct dp_peer *peer;
2667 
2668 	tag_buf++;
2669 	drop_reason = *tag_buf;
2670 
2671 	tag_buf++;
2672 	num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2673 
2674 	tag_buf++;
2675 	peer_id =
2676 		HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2677 
2678 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2679 	if (!peer)
2680 		return;
2681 
2682 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2683 
2684 	if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2685 		DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2686 					num_msdu);
2687 	}
2688 
2689 	dp_peer_unref_del_find_by_id(peer);
2690 }
2691 
2692 #ifndef WLAN_TX_PKT_CAPTURE_ENH
2693 /*
 * dp_deliver_mgmt_frm: Deliver management frame to the upper layer
2695  * @pdev: DP PDEV handle
2696  * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2697  *
2698  * return: void
2699  */
2700 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
2701 {
2702 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2703 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2704 				     nbuf, HTT_INVALID_PEER,
2705 				     WDI_NO_VAL, pdev->pdev_id);
2706 	}
2707 }
2708 #endif
2709 
2710 /*
2711  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2712  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2713  * @pdev: DP PDEV handle
2714  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
 * @ppdu_id: PPDU id
2716  *
 * return: QDF_STATUS_SUCCESS if nbuf has to be freed in caller
2718  */
2719 static QDF_STATUS
2720 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2721 					      qdf_nbuf_t tag_buf,
2722 					      uint32_t ppdu_id)
2723 {
2724 	uint32_t *nbuf_ptr;
2725 	uint8_t trim_size;
2726 
2727 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
2728 	    (!pdev->bpr_enable) && (!pdev->tx_capture_enabled))
2729 		return QDF_STATUS_SUCCESS;
2730 
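	/* Compute the number of bytes that precede the management frame
	 * payload (HTT headers plus the reserved TLV header area) so that
	 * they can be pulled off the buffer
	 */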
2731 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
2732 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2733 		      qdf_nbuf_data(tag_buf));
2734 
2735 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2736 		return QDF_STATUS_SUCCESS;
2737 
2738 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2739 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
2740 
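	/* Prepend the ppdu_id so the consumer can correlate the frame with
	 * its PPDU stats
	 */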
2741 	nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(
2742 				tag_buf, sizeof(ppdu_id));
2743 	*nbuf_ptr = ppdu_id;
2744 
2745 	if (pdev->bpr_enable) {
2746 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2747 				     tag_buf, HTT_INVALID_PEER,
2748 				     WDI_NO_VAL, pdev->pdev_id);
2749 	}
2750 
2751 	dp_deliver_mgmt_frm(pdev, tag_buf);
2752 
2753 	return QDF_STATUS_E_ALREADY;
2754 }
2755 
2756 /**
 * dp_validate_fix_ppdu_tlv(): Function to validate the length of a PPDU TLV
 *
 * If the TLV length sent as part of the PPDU TLV is less than the expected
 * size, i.e., the size of the corresponding data structure, pad the remaining
 * bytes with zeros and continue processing the TLVs
2762  *
2763  * @pdev: DP pdev handle
2764  * @tag_buf: TLV buffer
2765  * @tlv_expected_size: Expected size of Tag
2766  * @tlv_len: TLV length received from FW
2767  *
2768  * Return: Pointer to updated TLV
2769  */
2770 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
2771 						 uint32_t *tag_buf,
2772 						 uint16_t tlv_expected_size,
2773 						 uint16_t tlv_len)
2774 {
2775 	uint32_t *tlv_desc = tag_buf;
2776 
2777 	qdf_assert_always(tlv_len != 0);
2778 
2779 	if (tlv_len < tlv_expected_size) {
2780 		qdf_mem_zero(pdev->ppdu_tlv_buf, tlv_expected_size);
2781 		qdf_mem_copy(pdev->ppdu_tlv_buf, tag_buf, tlv_len);
2782 		tlv_desc = pdev->ppdu_tlv_buf;
2783 	}
2784 
2785 	return tlv_desc;
2786 }
2787 
2788 /**
2789  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
2790  * @pdev: DP pdev handle
2791  * @tag_buf: TLV buffer
2792  * @tlv_len: length of tlv
2793  * @ppdu_info: per ppdu tlv structure
2794  *
2795  * return: void
2796  */
2797 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
2798 		uint32_t tlv_len, struct ppdu_info *ppdu_info)
2799 {
2800 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2801 	uint16_t tlv_expected_size;
2802 	uint32_t *tlv_desc;
2803 
2804 	switch (tlv_type) {
2805 	case HTT_PPDU_STATS_COMMON_TLV:
2806 		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
2807 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2808 						    tlv_expected_size, tlv_len);
2809 		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
2810 		break;
2811 	case HTT_PPDU_STATS_USR_COMMON_TLV:
2812 		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
2813 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2814 						    tlv_expected_size, tlv_len);
2815 		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
2816 						      ppdu_info);
2817 		break;
2818 	case HTT_PPDU_STATS_USR_RATE_TLV:
2819 		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
2820 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2821 						    tlv_expected_size, tlv_len);
2822 		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
2823 						    ppdu_info);
2824 		break;
2825 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
2826 		tlv_expected_size =
2827 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
2828 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2829 						    tlv_expected_size, tlv_len);
2830 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2831 				pdev, tlv_desc, ppdu_info);
2832 		break;
2833 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
2834 		tlv_expected_size =
2835 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
2836 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2837 						    tlv_expected_size, tlv_len);
2838 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2839 				pdev, tlv_desc, ppdu_info);
2840 		break;
2841 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
2842 		tlv_expected_size =
2843 			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
2844 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2845 						    tlv_expected_size, tlv_len);
2846 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
2847 				pdev, tlv_desc, ppdu_info);
2848 		break;
2849 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
2850 		tlv_expected_size =
2851 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
2852 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2853 						    tlv_expected_size, tlv_len);
2854 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2855 				pdev, tlv_desc, ppdu_info);
2856 		break;
2857 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
2858 		tlv_expected_size =
2859 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
2860 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2861 						    tlv_expected_size, tlv_len);
2862 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2863 				pdev, tlv_desc, ppdu_info);
2864 		break;
2865 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
2866 		tlv_expected_size =
2867 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
2868 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2869 						    tlv_expected_size, tlv_len);
2870 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2871 				pdev, tlv_desc, ppdu_info);
2872 		break;
2873 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
2874 		tlv_expected_size =
2875 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
2876 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2877 						    tlv_expected_size, tlv_len);
2878 		dp_process_ppdu_stats_user_common_array_tlv(
2879 				pdev, tlv_desc, ppdu_info);
2880 		break;
2881 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
2882 		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
2883 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2884 						    tlv_expected_size, tlv_len);
2885 		dp_process_ppdu_stats_user_compltn_flush_tlv(
2886 				pdev, tlv_desc);
2887 		break;
2888 	default:
2889 		break;
2890 	}
2891 }
2892 
2893 /**
2894  * dp_ppdu_desc_user_stats_update(): Function to update TX user stats
2895  * @pdev: DP pdev handle
2896  * @ppdu_info: per PPDU TLV descriptor
2897  *
2898  * return: void
2899  */
2900 void
2901 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
2902 			       struct ppdu_info *ppdu_info)
2903 {
2904 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2905 	struct dp_peer *peer = NULL;
2906 	uint32_t tlv_bitmap_expected;
2907 	uint32_t tlv_bitmap_default;
2908 	uint16_t i;
2909 	uint32_t num_users;
2910 
2911 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2912 		qdf_nbuf_data(ppdu_info->nbuf);
2913 
2914 	ppdu_desc->num_users = ppdu_info->last_user;
2915 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2916 
2917 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
2918 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2919 		if (ppdu_info->is_ampdu)
2920 			tlv_bitmap_expected =
2921 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
2922 					ppdu_info->tlv_bitmap);
2923 	}
2924 
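	/* Remember the expected bitmap so it can be restored after any
	 * per-user adjustment below
	 */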
2925 	tlv_bitmap_default = tlv_bitmap_expected;
2926 
2927 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
2928 		num_users = ppdu_desc->bar_num_users;
2929 		ppdu_desc->num_users = ppdu_desc->bar_num_users;
2930 	} else {
2931 		num_users = ppdu_desc->num_users;
2932 	}
2933 
2934 	for (i = 0; i < num_users; i++) {
2935 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
2936 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
2937 
2938 		peer = dp_peer_find_by_id(pdev->soc,
2939 					  ppdu_desc->user[i].peer_id);
2940 		/**
2941 		 * This check is to make sure peer is not deleted
2942 		 * after processing the TLVs.
2943 		 */
2944 		if (!peer)
2945 			continue;
2946 
2947 		ppdu_desc->user[i].cookie = (void *)peer->wlanstats_ctx;
2948 		if (ppdu_desc->user[i].completion_status !=
2949 		    HTT_PPDU_STATS_USER_STATUS_OK) {
2950 			tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
2951 			if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
2952 			     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID)) &&
2953 			      (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA)) {
2954 				DP_STATS_INC(peer, tx.retries,
2955 					     (ppdu_desc->user[i].long_retries +
2956 					      ppdu_desc->user[i].short_retries));
2957 				DP_STATS_INC(peer, tx.tx_failed,
2958 					     ppdu_desc->user[i].failed_msdus);
2959 			}
2960 		}
2961 
		/*
		 * Different frame types (DATA, BAR or CTRL) have different
		 * expected tlv bitmaps. Apart from the ACK_BA_STATUS TLV, we
		 * receive all other tlvs in order/sequentially from the fw.
		 * Since the ACK_BA_STATUS TLV comes from hardware, it is
		 * asynchronous, so we need to depend on some tlv to confirm
		 * that all tlvs have been received for a ppdu.
		 * So we depend on both HTT_PPDU_STATS_COMMON_TLV and
		 * ACK_BA_STATUS_TLV.
		 */
2972 		if (!(ppdu_info->tlv_bitmap &
2973 		      (1 << HTT_PPDU_STATS_COMMON_TLV)) ||
2974 		    !(ppdu_info->tlv_bitmap &
2975 		      (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))) {
2976 			dp_peer_unref_del_find_by_id(peer);
2977 			continue;
2978 		}
2979 
		/**
		 * Update tx stats for data frames having QoS as well as
		 * non-QoS data tid
		 */
2984 
2985 		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
2986 		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID)) &&
2987 		      (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {
2988 
2989 			dp_tx_stats_update(pdev, peer,
2990 					   &ppdu_desc->user[i],
2991 					   ppdu_desc->ack_rssi);
2992 			dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
2993 		}
2994 
2995 		dp_peer_unref_del_find_by_id(peer);
2996 		tlv_bitmap_expected = tlv_bitmap_default;
2997 	}
2998 }
2999 
3000 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3001 
3002 /**
3003  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
3004  * to upper layer
3005  * @pdev: DP pdev handle
3006  * @ppdu_info: per PPDU TLV descriptor
3007  *
3008  * return: void
3009  */
3010 static
3011 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
3012 			  struct ppdu_info *ppdu_info)
3013 {
3014 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3015 	qdf_nbuf_t nbuf;
3016 
3017 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3018 		qdf_nbuf_data(ppdu_info->nbuf);
3019 
3020 	dp_ppdu_desc_user_stats_update(pdev, ppdu_info);
3021 
3022 	/*
3023 	 * Remove from the list
3024 	 */
3025 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
3026 	nbuf = ppdu_info->nbuf;
3027 	pdev->list_depth--;
3028 	qdf_mem_free(ppdu_info);
3029 
3030 	qdf_assert_always(nbuf);
3031 
3032 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3033 		qdf_nbuf_data(nbuf);
3034 
3035 	/**
3036 	 * Deliver PPDU stats only for valid (acked) data frames if
3037 	 * sniffer mode is not enabled.
3038 	 * If sniffer mode is enabled, PPDU stats for all frames
3039 	 * including mgmt/control frames should be delivered to upper layer
3040 	 */
3041 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3042 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
3043 				nbuf, HTT_INVALID_PEER,
3044 				WDI_NO_VAL, pdev->pdev_id);
3045 	} else {
3046 		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
3047 				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
3048 
3049 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3050 					pdev->soc, nbuf, HTT_INVALID_PEER,
3051 					WDI_NO_VAL, pdev->pdev_id);
3052 		} else
3053 			qdf_nbuf_free(nbuf);
3054 	}
3055 	return;
3056 }
3057 
3058 #endif
3059 
3060 /**
3061  * dp_get_ppdu_desc(): Function to allocate new PPDU status
3062  * desc for new ppdu id
3063  * @pdev: DP pdev handle
3064  * @ppdu_id: PPDU unique identifier
3065  * @tlv_type: TLV type received
3066  *
3067  * return: ppdu_info per ppdu tlv structure
3068  */
3069 static
3070 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
3071 			uint8_t tlv_type)
3072 {
3073 	struct ppdu_info *ppdu_info = NULL;
3074 
3075 	/*
	 * Find whether a node for this ppdu_id already exists
3077 	 */
3078 	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {
3079 
3080 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
3081 			break;
3082 		}
3083 	}
3084 
3085 	if (ppdu_info) {
3086 		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
			/**
			 * If we get a tlv_type that has already been
			 * processed for this ppdu, it means we got a new
			 * ppdu with the same ppdu id; hence flush the older
			 * ppdu. For MU-MIMO and OFDMA, a PPDU carries
			 * multiple users with the same tlv types; the tlv
			 * bitmap is used to check whether the PPDU is SU or
			 * MU-MIMO/OFDMA
			 */
3095 			if (!(ppdu_info->tlv_bitmap &
3096 			    (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
3097 				return ppdu_info;
3098 
			/**
			 * Apart from the ACK BA STATUS TLV, all other tlvs
			 * arrive in order, so if the tlv type is not the ACK
			 * BA STATUS TLV we can deliver the ppdu_info
			 */
3104 			if (tlv_type ==
3105 			    HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)
3106 				return ppdu_info;
3107 
3108 			dp_ppdu_desc_deliver(pdev, ppdu_info);
3109 		} else {
3110 			return ppdu_info;
3111 		}
3112 	}
3113 
	/**
	 * Flush the head ppdu descriptor if the ppdu desc list reaches the
	 * max threshold
	 */
3118 	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
3119 		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
3120 		dp_ppdu_desc_deliver(pdev, ppdu_info);
3121 	}
3122 
3123 	/*
3124 	 * Allocate new ppdu_info node
3125 	 */
3126 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
3127 	if (!ppdu_info)
3128 		return NULL;
3129 
3130 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
3131 			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
3132 			TRUE);
3133 	if (!ppdu_info->nbuf) {
3134 		qdf_mem_free(ppdu_info);
3135 		return NULL;
3136 	}
3137 
3138 	ppdu_info->ppdu_desc =
3139 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3140 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
3141 			sizeof(struct cdp_tx_completion_ppdu));
3142 
3143 	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
3144 			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
3145 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3146 				"No tailroom for HTT PPDU");
3147 		qdf_nbuf_free(ppdu_info->nbuf);
3148 		ppdu_info->nbuf = NULL;
3149 		ppdu_info->last_user = 0;
3150 		qdf_mem_free(ppdu_info);
3151 		return NULL;
3152 	}
3153 
	/**
	 * No lock is needed because all PPDU TLVs are processed in the same
	 * context, and this list is updated in that same context
	 */
3158 	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
3159 			ppdu_info_list_elem);
3160 	pdev->list_depth++;
3161 	return ppdu_info;
3162 }
3163 
3164 /**
 * dp_htt_process_tlv(): Function to process each PPDU TLV
3166  * @pdev: DP pdev handle
3167  * @htt_t2h_msg: HTT target to host message
3168  *
3169  * return: ppdu_info per ppdu tlv structure
3170  */
3171 
3172 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
3173 		qdf_nbuf_t htt_t2h_msg)
3174 {
3175 	uint32_t length;
3176 	uint32_t ppdu_id;
3177 	uint8_t tlv_type;
3178 	uint32_t tlv_length, tlv_bitmap_expected;
3179 	uint8_t *tlv_buf;
3180 	struct ppdu_info *ppdu_info = NULL;
3181 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3182 	struct dp_peer *peer;
3183 	uint32_t i = 0;
3184 
3185 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
3186 
3187 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
3188 
3189 	msg_word = msg_word + 1;
3190 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
3192 
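	/* Skip past the remaining header words to the first TLV */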
3193 	msg_word = msg_word + 3;
3194 	while (length > 0) {
3195 		tlv_buf = (uint8_t *)msg_word;
3196 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
3197 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
3198 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
3199 			pdev->stats.ppdu_stats_counter[tlv_type]++;
3200 
3201 		if (tlv_length == 0)
3202 			break;
3203 
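		/* The length field excludes the TLV header; include it when
		 * stepping through the buffer
		 */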
3204 		tlv_length += HTT_TLV_HDR_LEN;
3205 
		/**
		 * Not allocating a separate ppdu descriptor for the MGMT
		 * Payload TLV, as it is sent as a separate WDI indication
		 * and doesn't contain any ppdu information
		 */
3211 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
3212 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
3213 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
3214 			pdev->mgmtctrl_frm_info.mgmt_buf_len =
3215 				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
3216 						(*(msg_word + 1));
3217 			msg_word =
3218 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
3219 			length -= (tlv_length);
3220 			continue;
3221 		}
3222 
3223 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
3224 		if (!ppdu_info)
3225 			return NULL;
3226 		ppdu_info->ppdu_id = ppdu_id;
3227 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
3228 
3229 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
3230 
3231 		/**
3232 		 * Increment pdev level tlv count to monitor
3233 		 * missing TLVs
3234 		 */
3235 		pdev->tlv_count++;
3236 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
3237 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
3238 		length -= (tlv_length);
3239 	}
3240 
3241 	if (!ppdu_info)
3242 		return NULL;
3243 
3244 	pdev->last_ppdu_id = ppdu_id;
3245 
3246 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
3247 
3248 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3249 		if (ppdu_info->is_ampdu)
3250 			tlv_bitmap_expected =
3251 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
3252 					ppdu_info->tlv_bitmap);
3253 	}
3254 
3255 	ppdu_desc = ppdu_info->ppdu_desc;
3256 
3257 	if (!ppdu_desc)
3258 		return NULL;
3259 
3260 	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
3261 	    HTT_PPDU_STATS_USER_STATUS_OK) {
3262 		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
3263 	}
3264 
3265 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
3266 	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV)) &&
3267 	    ppdu_desc->delayed_ba) {
3268 		for (i = 0; i < ppdu_desc->num_users; i++) {
3269 			uint32_t ppdu_id;
3270 
3271 			ppdu_id = ppdu_desc->ppdu_id;
3272 			peer = dp_peer_find_by_id(pdev->soc,
3273 						  ppdu_desc->user[i].peer_id);
3274 			/**
3275 			 * This check is to make sure peer is not deleted
3276 			 * after processing the TLVs.
3277 			 */
3278 			if (!peer)
3279 				continue;
3280 
3281 			/**
3282 			 * save delayed ba user info
3283 			 */
3284 			if (ppdu_desc->user[i].delayed_ba) {
3285 				dp_peer_copy_delay_stats(peer,
3286 							 &ppdu_desc->user[i]);
3287 				peer->last_delayed_ba_ppduid = ppdu_id;
3288 			}
3289 			dp_peer_unref_del_find_by_id(peer);
3290 		}
3291 	}
3292 
	/*
	 * when the frame type is BAR and STATS_COMMON_TLV is set,
	 * copy the stored peer delayed info to the BAR status
	 */
3297 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
3298 	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV))) {
3299 		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
3300 			peer = dp_peer_find_by_id(pdev->soc,
3301 						  ppdu_desc->user[i].peer_id);
3302 			/**
3303 			 * This check is to make sure peer is not deleted
3304 			 * after processing the TLVs.
3305 			 */
3306 			if (!peer)
3307 				continue;
3308 
3309 			if (peer->last_delayed_ba) {
3310 				dp_peer_copy_stats_to_bar(peer,
3311 							  &ppdu_desc->user[i]);
3312 			}
3313 			dp_peer_unref_del_find_by_id(peer);
3314 		}
3315 	}
3316 
	/*
	 * For frame types DATA and BAR, we update stats based on MSDUs.
	 * Successful msdu and mpdu counts are populated from the ACK BA
	 * STATUS TLV, which comes out of order. The successful mpdu count is
	 * also populated from the COMPLTN COMMON TLV, which comes in order.
	 * For every ppdu_info we store the successful mpdu count from both
	 * tlvs and compare them before delivering, to make sure we received
	 * the ACK BA STATUS TLV.
	 */
3325 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL) {
3326 		/*
3327 		 * successful mpdu counts from both TLVs must match
3328 		 */
3329 		if (ppdu_info->mpdu_compltn_common_tlv !=
3330 		    ppdu_info->mpdu_ack_ba_tlv)
3331 			return NULL;
3332 	}
3333 
3334 	/*
3335 	 * Once all the TLVs for a given PPDU have been processed,
3336 	 * return PPDU status to be delivered to higher layer
3337 	 */
3338 	if (ppdu_info->tlv_bitmap != 0 &&
3339 	    ppdu_info->tlv_bitmap == tlv_bitmap_expected)
3340 		return ppdu_info;
3341 
3342 	return NULL;
3343 }
3344 #endif /* FEATURE_PERPKT_INFO */
3345 
3346 /**
3347  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
3348  * @soc: DP SOC handle
3349  * @pdev_id: pdev id
3350  * @htt_t2h_msg: HTT message nbuf
3351  *
3352  * Return: True if buffer should be freed by caller
3353  */
3354 #if defined(WDI_EVENT_ENABLE)
3355 #ifdef FEATURE_PERPKT_INFO
3356 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
3357 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
3358 {
3359 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
3360 	struct ppdu_info *ppdu_info = NULL;
3361 	bool free_buf = true;
3362 
3363 	if (!pdev)
3364 		return true;
3365 
3366 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
3367 	    !pdev->mcopy_mode && !pdev->bpr_enable)
3368 		return free_buf;
3369 
3370 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
3371 
3372 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
3373 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
3374 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
3375 		    QDF_STATUS_SUCCESS)
3376 			free_buf = false;
3377 	}
3378 
3379 	if (ppdu_info)
3380 		dp_ppdu_desc_deliver(pdev, ppdu_info);
3381 
3382 	pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
3383 	pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
3384 	pdev->mgmtctrl_frm_info.ppdu_id = 0;
3385 
3386 	return free_buf;
3387 }
3388 #else
3389 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
3390 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
3391 {
3392 	return true;
3393 }
3394 #endif
3395 #endif
3396 
3397 /**
3398  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
3399  * @soc: DP SOC handle
3400  * @htt_t2h_msg: HTT message nbuf
3401  *
3402  * Return: none
3403  */
3404 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
3405 		qdf_nbuf_t htt_t2h_msg)
3406 {
3407 	uint8_t done;
3408 	qdf_nbuf_t msg_copy;
3409 	uint32_t *msg_word;
3410 
3411 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
3412 	msg_word = msg_word + 3;
3413 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
3414 
3415 	/*
3416 	 * HTT EXT stats response comes as stream of TLVs which span over
3417 	 * multiple T2H messages.
3418 	 * The first message will carry length of the response.
3419 	 * For rest of the messages length will be zero.
3420 	 *
3421 	 * Clone the T2H message buffer and store it in a list to process
3422 	 * it later.
3423 	 *
3424 	 * The original T2H message buffer gets freed in the T2H HTT event
3425 	 * handler
3426 	 */
3427 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
3428 
3429 	if (!msg_copy) {
3430 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3431 				"T2H message clone failed for HTT EXT STATS");
3432 		goto error;
3433 	}
3434 
3435 	qdf_spin_lock_bh(&soc->htt_stats.lock);
3436 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
3437 	/*
3438 	 * Done bit signifies that this is the last T2H buffer in the stream of
3439 	 * HTT EXT STATS message
3440 	 */
3441 	if (done) {
3442 		soc->htt_stats.num_stats++;
3443 		qdf_sched_work(0, &soc->htt_stats.work);
3444 	}
3445 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
3446 
3447 	return;
3448 
3449 error:
3450 	qdf_spin_lock_bh(&soc->htt_stats.lock);
3451 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
3452 			!= NULL) {
3453 		qdf_nbuf_free(msg_copy);
3454 	}
3455 	soc->htt_stats.num_stats = 0;
3456 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
3459 }
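
/*
 * Illustrative sketch (not part of the driver): once the DONE bit has
 * been observed, a deferred work handler is expected to drain the
 * clones queued above. The helper name below is hypothetical; a real
 * handler would additionally parse the TLV stream in each buffer.
 */
#if 0
static void dp_htt_stats_drain_example(struct htt_soc *soc)
{
	qdf_nbuf_t msg;

	qdf_spin_lock_bh(&soc->htt_stats.lock);
	while ((msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
		/* parse the TLVs in qdf_nbuf_data(msg) here */
		qdf_nbuf_free(msg);
	}
	soc->htt_stats.num_stats = 0;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);
}
#endif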
3460 
3461 /*
3462  * htt_soc_attach_target() - SOC level HTT setup
3463  * @htt_soc:	HTT SOC handle
3464  *
3465  * Return: 0 on success; error code on failure
3466  */
3467 int htt_soc_attach_target(struct htt_soc *htt_soc)
3468 {
3469 	return htt_h2t_ver_req_msg(htt_soc);
3472 }
3473 
3474 void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
3475 {
3476 	htt_soc->htc_soc = htc_soc;
3477 }
3478 
3479 HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
3480 {
3481 	return htt_soc->htc_soc;
3482 }
3483 
3484 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
3485 {
3486 	int i;
3487 	int j;
3488 	int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
3489 	struct htt_soc *htt_soc = NULL;
3490 
3491 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
3492 	if (!htt_soc) {
3493 		dp_err("HTT attach failed");
3494 		return NULL;
3495 	}
3496 
3497 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3498 		htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
3499 		if (!htt_soc->pdevid_tt[i].umac_ttt)
3500 			break;
3501 		qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
3502 		htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
3503 		if (!htt_soc->pdevid_tt[i].lmac_ttt) {
3504 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
3505 			break;
3506 		}
3507 		qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
3508 	}
3509 	if (i != MAX_PDEV_CNT) {
3510 		for (j = 0; j < i; j++) {
3511 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt);
3512 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt);
3513 		}
		qdf_mem_free(htt_soc);
3514 		return NULL;
3515 	}
3516 
3517 	htt_soc->dp_soc = soc;
3518 	htt_soc->htc_soc = htc_handle;
3519 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
3520 
3521 	return htt_soc;
3522 }
3523 
3524 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
3525 /*
3526  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
3527  * @soc:	 HTT SOC handle
3528  * @msg_word:    Pointer to payload
3529  * @htt_t2h_msg: HTT msg nbuf
3530  *
3531  * Return: True if buffer should be freed by caller.
3532  */
3533 static bool
3534 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
3535 				uint32_t *msg_word,
3536 				qdf_nbuf_t htt_t2h_msg)
3537 {
3538 	u_int8_t pdev_id;
3539 	bool free_buf;
3540 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
3541 	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
3542 	pdev_id = DP_HW2SW_MACID(pdev_id);
3543 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
3544 					      htt_t2h_msg);
3545 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
3546 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
3547 		pdev_id);
3548 	return free_buf;
3549 }
3550 #else
3551 static bool
3552 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
3553 				uint32_t *msg_word,
3554 				qdf_nbuf_t htt_t2h_msg)
3555 {
3556 	return true;
3557 }
3558 #endif
3559 
3560 #if defined(WDI_EVENT_ENABLE) && \
3561 	!defined(REMOVE_PKT_LOG)
3562 /*
3563  * dp_pktlog_msg_handler() - Pktlog msg handler
3564  * @soc:	 HTT SOC handle
3565  * @msg_word:    Pointer to payload
3566  *
3567  * Return: None
3568  */
3569 static void
3570 dp_pktlog_msg_handler(struct htt_soc *soc,
3571 		      uint32_t *msg_word)
3572 {
3573 	uint8_t pdev_id;
3574 	uint32_t *pl_hdr;
3575 
3576 	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
3577 	pdev_id = DP_HW2SW_MACID(pdev_id);
3578 	pl_hdr = (msg_word + 1);
3579 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
3580 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
3581 		pdev_id);
3582 }
3583 #else
3584 static void
3585 dp_pktlog_msg_handler(struct htt_soc *soc,
3586 		      uint32_t *msg_word)
3587 {
3588 }
3589 #endif
3590 
3591 /*
3592  * time_allow_print() - time allow print
3593  * @htt_ring_tt:	ring_id-indexed array of timestamps
3594  * @ring_id:		ring_id (index)
3595  *
3596  * Return: true if the timestamp was saved and printing is allowed,
3597  *	false if it falls within 2 seconds of the last print
3598  */
3599 static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
3600 {
3601 	unsigned long tstamp;
3602 	unsigned long delta;
3603 
3604 	tstamp = qdf_get_system_timestamp();
3605 
3606 	if (!htt_ring_tt)
3607 		return 0; /* unable to print backpressure messages */
3608 
3609 	if (htt_ring_tt[ring_id] == -1) {
3610 		htt_ring_tt[ring_id] = tstamp;
3611 		return 1;
3612 	}
3613 	delta = tstamp - htt_ring_tt[ring_id];
3614 	if (delta >= 2000) {
3615 		htt_ring_tt[ring_id] = tstamp;
3616 		return 1;
3617 	}
3618 
3619 	return 0;
3620 }
3621 
3622 static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
3623 			       u_int8_t pdev_id, u_int8_t ring_id,
3624 			       u_int16_t hp_idx, u_int16_t tp_idx,
3625 			       u_int32_t bkp_time, char *ring_stype)
3626 {
3627 	dp_alert("msg_type: %d pdev_id: %d ring_type: %s ",
3628 		 msg_type, pdev_id, ring_stype);
3629 	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
3630 		 ring_id, hp_idx, tp_idx, bkp_time);
3631 }
3632 
3633 /*
3634  * dp_htt_bkp_event_alert() - htt backpressure event alert
3635  * @msg_word:	pointer to htt packet payload
3636  * @soc:	HTT SOC handle
3637  *
3638  * Return: none
3639  */
3640 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
3641 {
3642 	u_int8_t ring_type;
3643 	u_int8_t pdev_id;
3644 	u_int8_t ring_id;
3645 	u_int16_t hp_idx;
3646 	u_int16_t tp_idx;
3647 	u_int32_t bkp_time;
3648 	enum htt_t2h_msg_type msg_type;
3649 	struct dp_soc *dpsoc;
3650 	struct dp_pdev *pdev;
3651 	struct dp_htt_timestamp *radio_tt;
3652 
3653 	if (!soc)
3654 		return;
3655 
3656 	dpsoc = (struct dp_soc *)soc->dp_soc;
3657 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3658 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
3659 	pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
3660 	pdev_id = DP_HW2SW_MACID(pdev_id);
3661 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
3662 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
3663 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
3664 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
3665 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
3666 	radio_tt = &soc->pdevid_tt[pdev_id];
3667 
3668 	switch (ring_type) {
3669 	case HTT_SW_RING_TYPE_UMAC:
3670 		if (!time_allow_print(radio_tt->umac_ttt, ring_id))
3671 			return;
3672 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
3673 				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
3674 	break;
3675 	case HTT_SW_RING_TYPE_LMAC:
3676 		if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
3677 			return;
3678 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
3679 				   bkp_time, "HTT_SW_RING_TYPE_LMAC");
3680 	break;
3681 	default:
3682 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
3683 				   bkp_time, "UNKNOWN");
3684 	break;
3685 	}
3686 
3687 	dp_print_ring_stats(pdev);
3688 	dp_print_napi_stats(pdev->soc);
3689 }
3690 
3691 /*
3692  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
3693  * @context:	Opaque context (HTT SOC handle)
3694  * @pkt:	HTC packet
3695  */
3696 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
3697 {
3698 	struct htt_soc *soc = (struct htt_soc *) context;
3699 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
3700 	u_int32_t *msg_word;
3701 	enum htt_t2h_msg_type msg_type;
3702 	bool free_buf = true;
3703 
3704 	/* check for successful message reception */
3705 	if (pkt->Status != QDF_STATUS_SUCCESS) {
3706 		if (pkt->Status != QDF_STATUS_E_CANCELED)
3707 			soc->stats.htc_err_cnt++;
3708 
3709 		qdf_nbuf_free(htt_t2h_msg);
3710 		return;
3711 	}
3712 
3713 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
3714 
3715 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
3716 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3717 	htt_event_record(soc->htt_logger_handle,
3718 			 msg_type, (uint8_t *)msg_word);
3719 	switch (msg_type) {
3720 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
3721 	{
3722 		dp_htt_bkp_event_alert(msg_word, soc);
3723 		break;
3724 	}
3725 	case HTT_T2H_MSG_TYPE_PEER_MAP:
3726 		{
3727 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3728 			u_int8_t *peer_mac_addr;
3729 			u_int16_t peer_id;
3730 			u_int16_t hw_peer_id;
3731 			u_int8_t vdev_id;
3732 			u_int8_t is_wds;
3733 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
3734 
3735 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
3736 			hw_peer_id =
3737 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
3738 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
3739 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
3740 				(u_int8_t *) (msg_word+1),
3741 				&mac_addr_deswizzle_buf[0]);
3742 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3743 				QDF_TRACE_LEVEL_INFO,
3744 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d",
3745 				peer_id, vdev_id);
3746 
3747 			/*
3748 			 * check if peer already exists for this peer_id, if so
3749 			 * this peer map event is in response for a wds peer add
3750 			 * wmi command sent during wds source port learning.
3751 			 * in this case just add the ast entry to the existing
3752 			 * peer ast_list.
3753 			 */
3754 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
3755 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
3756 					       vdev_id, peer_mac_addr, 0,
3757 					       is_wds);
3758 			break;
3759 		}
3760 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
3761 		{
3762 			u_int16_t peer_id;
3763 			u_int8_t vdev_id;
3764 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3765 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
3766 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
3767 
3768 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3769 						 vdev_id, mac_addr, 0);
3770 			break;
3771 		}
3772 	case HTT_T2H_MSG_TYPE_SEC_IND:
3773 		{
3774 			u_int16_t peer_id;
3775 			enum cdp_sec_type sec_type;
3776 			int is_unicast;
3777 
3778 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
3779 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
3780 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
3781 			/* point to the first part of the Michael key */
3782 			msg_word++;
3783 			dp_rx_sec_ind_handler(
3784 				soc->dp_soc, peer_id, sec_type, is_unicast,
3785 				msg_word, msg_word + 2);
3786 			break;
3787 		}
3788 
3789 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
3790 		{
3791 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
3792 							     htt_t2h_msg);
3793 			break;
3794 		}
3795 
3796 	case HTT_T2H_MSG_TYPE_PKTLOG:
3797 		{
3798 			dp_pktlog_msg_handler(soc, msg_word);
3799 			break;
3800 		}
3801 
3802 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3803 		{
3804 			htc_pm_runtime_put(soc->htc_soc);
3805 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3806 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3807 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3808 				"target uses HTT version %d.%d; host uses %d.%d",
3809 				soc->tgt_ver.major, soc->tgt_ver.minor,
3810 				HTT_CURRENT_VERSION_MAJOR,
3811 				HTT_CURRENT_VERSION_MINOR);
3812 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3813 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3814 					QDF_TRACE_LEVEL_ERROR,
3815 					"*** Incompatible host/target HTT versions!");
3816 			}
3817 			/* abort if the target is incompatible with the host */
3818 			qdf_assert(soc->tgt_ver.major ==
3819 				HTT_CURRENT_VERSION_MAJOR);
3820 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3821 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3822 					QDF_TRACE_LEVEL_WARN,
3823 					"*** Warning: host/target HTT versions"
3824 					" are different, though compatible!");
3825 			}
3826 			break;
3827 		}
3828 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3829 		{
3830 			uint16_t peer_id;
3831 			uint8_t tid;
3832 			uint8_t win_sz;
3833 			uint16_t status;
3834 			struct dp_peer *peer;
3835 
3836 			/*
3837 			 * Update REO Queue Desc with new values
3838 			 */
3839 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3840 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3841 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3842 			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);
3843 
3844 			/*
3845 			 * Window size needs to be incremented by 1
3846 			 * since fw needs to represent a value of 256
3847 			 * using just 8 bits
3848 			 */
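			/* e.g. fw reports win_sz = 255 to describe a 256-entry BA window */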
3849 			if (peer) {
3850 				status = dp_addba_requestprocess_wifi3(peer,
3851 						0, tid, 0, win_sz + 1, 0xffff);
3852 
3853 				/*
3854 				 * If PEER_LOCK_REF_PROTECT is enabled, drop the
3855 				 * reference taken by dp_peer_find_by_id
3856 				 */
3857 				dp_peer_unref_del_find_by_id(peer);
3858 
3859 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3860 					QDF_TRACE_LEVEL_INFO,
3861 					FL("PeerID %d BAW %d TID %d stat %d"),
3862 					peer_id, win_sz, tid, status);
3863 
3864 			} else {
3865 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3866 					QDF_TRACE_LEVEL_ERROR,
3867 					FL("Peer not found peer id %d"),
3868 					peer_id);
3869 			}
3870 			break;
3871 		}
3872 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3873 		{
3874 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3875 			break;
3876 		}
3877 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
3878 		{
3879 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3880 			u_int8_t *peer_mac_addr;
3881 			u_int16_t peer_id;
3882 			u_int16_t hw_peer_id;
3883 			u_int8_t vdev_id;
3884 			bool is_wds;
3885 			u_int16_t ast_hash;
3886 
3887 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
3888 			hw_peer_id =
3889 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
3890 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
3891 			peer_mac_addr =
3892 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3893 						   &mac_addr_deswizzle_buf[0]);
3894 			is_wds =
3895 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
3896 			ast_hash =
3897 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
3898 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3899 				  QDF_TRACE_LEVEL_INFO,
3900 				  "HTT_T2H_MSG_TYPE_PEER_MAP_V2 msg for peer id %d vdev id %d",
3901 				  peer_id, vdev_id);
3902 
3903 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3904 					       hw_peer_id, vdev_id,
3905 					       peer_mac_addr, ast_hash,
3906 					       is_wds);
3907 			break;
3908 		}
3909 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
3910 		{
3911 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3912 			u_int8_t *mac_addr;
3913 			u_int16_t peer_id;
3914 			u_int8_t vdev_id;
3915 			u_int8_t is_wds;
3916 
3917 			peer_id =
3918 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
3919 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
3920 			mac_addr =
3921 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3922 						   &mac_addr_deswizzle_buf[0]);
3923 			is_wds =
3924 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
3925 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3926 				  QDF_TRACE_LEVEL_INFO,
3927 				  "HTT_T2H_MSG_TYPE_PEER_UNMAP_V2 msg for peer id %d vdev id %d",
3928 				  peer_id, vdev_id);
3929 
3930 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3931 						 vdev_id, mac_addr,
3932 						 is_wds);
3933 			break;
3934 		}
3935 	default:
3936 		break;
3937 	}
3938 
3939 	/* Free the indication buffer */
3940 	if (free_buf)
3941 		qdf_nbuf_free(htt_t2h_msg);
3942 }
3943 
3944 /*
3945  * dp_htt_h2t_full() - Send full handler (called from HTC)
3946  * @context:	Opaque context (HTT SOC handle)
3947  * @pkt:	HTC packet
3948  *
3949  * Return: enum htc_send_full_action
3950  */
3951 static enum htc_send_full_action
3952 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
3953 {
3954 	return HTC_SEND_FULL_KEEP;
3955 }
3956 
3957 /*
3958  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3959  * @context:	Opaque context (HTT SOC handle)
3960  * @nbuf:	nbuf containing T2H message
3961  * @pipe_id:	HIF pipe ID
3962  *
3963  * Return: QDF_STATUS
3964  *
3965  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3966  * will be used for packet log and other high-priority HTT messages. Proper
3967  * HTC connection to be added later once required FW changes are available
3968  */
3969 static QDF_STATUS
3970 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3971 {
3972 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3973 	HTC_PACKET htc_pkt;
3974 
3975 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3976 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3977 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3978 	htc_pkt.pPktContext = (void *)nbuf;
3979 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3980 
3981 	return rc;
3982 }
3983 
3984 /*
3985  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3986  * @htt_soc:	HTT SOC handle
3987  *
3988  * Return: QDF_STATUS
3989  */
3990 static QDF_STATUS
3991 htt_htc_soc_attach(struct htt_soc *soc)
3992 {
3993 	struct htc_service_connect_req connect;
3994 	struct htc_service_connect_resp response;
3995 	QDF_STATUS status;
3996 	struct dp_soc *dpsoc = soc->dp_soc;
3997 
3998 	qdf_mem_zero(&connect, sizeof(connect));
3999 	qdf_mem_zero(&response, sizeof(response));
4000 
4001 	connect.pMetaData = NULL;
4002 	connect.MetaDataLength = 0;
4003 	connect.EpCallbacks.pContext = soc;
4004 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
4005 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
4006 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
4007 
4008 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
4009 	connect.EpCallbacks.EpRecvRefill = NULL;
4010 
4011 	/* N/A, fill is done by HIF */
4012 	connect.EpCallbacks.RecvRefillWaterMark = 1;
4013 
4014 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
4015 	/*
4016 	 * Specify how deep to let a queue get before htc_send_pkt will
4017 	 * call the EpSendFull function due to excessive send queue depth.
4018 	 */
4019 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
4020 
4021 	/* disable flow control for HTT data message service */
4022 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
4023 
4024 	/* connect to control service */
4025 	connect.service_id = HTT_DATA_MSG_SVC;
4026 
4027 	status = htc_connect_service(soc->htc_soc, &connect, &response);
4028 
4029 	if (status != QDF_STATUS_SUCCESS)
4030 		return status;
4031 
4032 	soc->htc_endpoint = response.Endpoint;
4033 
4034 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
4035 
4036 	htt_interface_logging_init(&soc->htt_logger_handle);
4037 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
4038 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
4039 
4040 	return QDF_STATUS_SUCCESS; /* success */
4041 }
4042 
4043 /*
4044  * htt_soc_initialize() - SOC level HTT initialization
4045  * @htt_soc: Opaque htt SOC handle
4046  * @ctrl_psoc: Opaque ctrl SOC handle
4047  * @htc_soc: SOC level HTC handle
4048  * @hal_soc_hdl: Opaque HAL SOC handle
4049  * @osdev: QDF device
4050  *
4051  * Return: HTT handle on success; NULL on failure
4052  */
4053 void *
4054 htt_soc_initialize(struct htt_soc *htt_soc,
4055 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
4056 		   HTC_HANDLE htc_soc,
4057 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
4058 {
4059 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
4060 
4061 	soc->osdev = osdev;
4062 	soc->ctrl_psoc = ctrl_psoc;
4063 	soc->htc_soc = htc_soc;
4064 	soc->hal_soc = hal_soc_hdl;
4065 
4066 	if (htt_htc_soc_attach(soc))
4067 		goto fail2;
4068 
4069 	return soc;
4070 
4071 fail2:
4072 	return NULL;
4073 }
4074 
4075 void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
4076 {
4077 	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
4078 	htt_htc_misc_pkt_pool_free(htt_handle);
4079 	htt_htc_pkt_pool_free(htt_handle);
4080 }
4081 
4082 /*
4083  * htt_soc_htc_prealloc() - HTC memory prealloc
4084  * @htt_soc: SOC level HTT handle
4085  * @soc: SOC level HTT handle
4086  * Return: QDF_STATUS_SUCCESS on Success or
4087  * QDF_STATUS_E_NOMEM on allocation failure
4088  */
4089 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
4090 {
4091 	int i;
4092 
4093 	soc->htt_htc_pkt_freelist = NULL;
4094 	/* pre-allocate some HTC_PACKET objects */
4095 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
4096 		struct dp_htt_htc_pkt_union *pkt;
4097 		pkt = qdf_mem_malloc(sizeof(*pkt));
4098 		if (!pkt)
4099 			return QDF_STATUS_E_NOMEM;
4100 
4101 		htt_htc_pkt_free(soc, &pkt->u.pkt);
4102 	}
4103 	return QDF_STATUS_SUCCESS;
4104 }
4105 
4106 /*
4107  * htt_soc_detach() - Free SOC level HTT handle
4108  * @htt_hdl: HTT SOC handle
4109  */
4110 void htt_soc_detach(struct htt_soc *htt_hdl)
4111 {
4112 	int i;
4113 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
4114 
4115 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4116 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
4117 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
4118 	}
4119 
4120 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
4121 	qdf_mem_free(htt_handle);
4123 }
4124 
4125 /**
4126  * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
4127  * @pdev: DP PDEV handle
4128  * @stats_type_upload_mask: stats type requested by user
4129  * @config_param_0: extra configuration parameters
4130  * @config_param_1: extra configuration parameters
4131  * @config_param_2: extra configuration parameters
4132  * @config_param_3: extra configuration parameters
 * @cookie_val: cookie used to match the stats response to this request
 * @cookie_msb: cookie MSBs; the low 2 bits carry the pdev id
4133  * @mac_id: mac number
4134  *
4135  * Return: QDF_STATUS
4136  */
4137 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
4138 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
4139 		uint32_t config_param_1, uint32_t config_param_2,
4140 		uint32_t config_param_3, int cookie_val, int cookie_msb,
4141 		uint8_t mac_id)
4142 {
4143 	struct htt_soc *soc = pdev->soc->htt_handle;
4144 	struct dp_htt_htc_pkt *pkt;
4145 	qdf_nbuf_t msg;
4146 	uint32_t *msg_word;
4147 	uint8_t pdev_mask = 0;
4148 	uint8_t *htt_logger_bufp;
4149 
4150 	msg = qdf_nbuf_alloc(
4151 			soc->osdev,
4152 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
4153 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4154 
4155 	if (!msg)
4156 		return QDF_STATUS_E_NOMEM;
4157 
4158 	/*TODO:Add support for SOC stats
4159 	 * Bit 0: SOC Stats
4160 	 * Bit 1: Pdev stats for pdev id 0
4161 	 * Bit 2: Pdev stats for pdev id 1
4162 	 * Bit 3: Pdev stats for pdev id 2
4163 	 */
4164 	mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4165 
4166 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
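	/*
	 * Note: assuming DP_SW2HW_MACID maps SW mac id N to HW mac id N + 1,
	 * SW mac id 0 yields pdev_mask = 0x2, i.e. bit 1 (pdev id 0) above.
	 */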
4167 	/*
4168 	 * Set the length of the message.
4169 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4170 	 * separately during the below call to qdf_nbuf_push_head.
4171 	 * The contribution from the HTC header is added separately inside HTC.
4172 	 */
4173 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
4174 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4175 				"Failed to expand head for HTT_EXT_STATS");
4176 		qdf_nbuf_free(msg);
4177 		return QDF_STATUS_E_FAILURE;
4178 	}
4179 
4180 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4181 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
4182 		"config_param_1 %u\n config_param_2 %u\n"
4183 		"config_param_3 %u\n -------------",
4184 		__func__, __LINE__, cookie_val, config_param_0,
4185 		config_param_1, config_param_2,	config_param_3);
4186 
4187 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
4188 
4189 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4190 	htt_logger_bufp = (uint8_t *)msg_word;
4191 	*msg_word = 0;
4192 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
4193 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
4194 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
4195 
4196 	/* word 1 */
4197 	msg_word++;
4198 	*msg_word = 0;
4199 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
4200 
4201 	/* word 2 */
4202 	msg_word++;
4203 	*msg_word = 0;
4204 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
4205 
4206 	/* word 3 */
4207 	msg_word++;
4208 	*msg_word = 0;
4209 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
4210 
4211 	/* word 4 */
4212 	msg_word++;
4213 	*msg_word = 0;
4214 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
4215 
4218 	/* word 5 */
4219 	msg_word++;
	*msg_word = 0;	/* word unused by this request; don't send stale data */

4221 	/* word 6 */
4222 	msg_word++;
4223 	*msg_word = 0;
4224 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
4225 
4226 	/* word 7 */
4227 	msg_word++;
4228 	*msg_word = 0;
4229 	/* Using last 2 bits for pdev_id */
4230 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
4231 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
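	/*
	 * On the response path the pdev can be recovered from the cookie,
	 * e.g. pdev_id = cookie_msb & HTT_PID_BIT_MASK.
	 */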
4232 
4233 	pkt = htt_htc_pkt_alloc(soc);
4234 	if (!pkt) {
4235 		qdf_nbuf_free(msg);
4236 		return QDF_STATUS_E_NOMEM;
4237 	}
4238 
4239 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4240 
4241 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4242 			dp_htt_h2t_send_complete_free_netbuf,
4243 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4244 			soc->htc_endpoint,
4245 			/* tag for FW response msg not guaranteed */
4246 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4247 
4248 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4249 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
4250 			    htt_logger_bufp);
4251 	return QDF_STATUS_SUCCESS;
4252 }
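
/*
 * Example (illustrative only): request pdev TX stats on mac 0 with no
 * extra config parameters and a zero cookie. HTT_DBG_EXT_STATS_PDEV_TX
 * is assumed to be one of the stats type ids from htt.h.
 *
 *	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PDEV_TX,
 *				  0, 0, 0, 0, 0, 0, 0);
 */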
4253 
4254 /* This will be reverted once a proper definition of
4255  * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is available in the HTT header (htt.h)
4256  */
4257 #if defined(WDI_EVENT_ENABLE)
4258 /**
4259  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
4260  * @pdev: DP PDEV handle
4261  * @stats_type_upload_mask: stats type requested by user
4262  * @mac_id: Mac id number
4263  *
4264  * Return: QDF_STATUS
4265  */
4266 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
4267 		uint32_t stats_type_upload_mask, uint8_t mac_id)
4268 {
4269 	struct htt_soc *soc = pdev->soc->htt_handle;
4270 	struct dp_htt_htc_pkt *pkt;
4271 	qdf_nbuf_t msg;
4272 	uint32_t *msg_word;
4273 	uint8_t pdev_mask;
4274 
4275 	msg = qdf_nbuf_alloc(
4276 			soc->osdev,
4277 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
4278 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
4279 
4280 	if (!msg) {
4281 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4282 		"Failed to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
4283 		qdf_assert(0);
4284 		return QDF_STATUS_E_NOMEM;
4285 	}
4286 
4287 	/*TODO:Add support for SOC stats
4288 	 * Bit 0: SOC Stats
4289 	 * Bit 1: Pdev stats for pdev id 0
4290 	 * Bit 2: Pdev stats for pdev id 1
4291 	 * Bit 3: Pdev stats for pdev id 2
4292 	 */
4293 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
4294 
4295 	/*
4296 	 * Set the length of the message.
4297 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4298 	 * separately during the below call to qdf_nbuf_push_head.
4299 	 * The contribution from the HTC header is added separately inside HTC.
4300 	 */
4301 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
4302 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4303 				"Failed to expand head for HTT_CFG_STATS");
4304 		qdf_nbuf_free(msg);
4305 		return QDF_STATUS_E_FAILURE;
4306 	}
4307 
4308 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
4309 
4310 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4311 	*msg_word = 0;
4312 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
4313 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
4314 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
4315 			stats_type_upload_mask);
4316 
4317 	pkt = htt_htc_pkt_alloc(soc);
4318 	if (!pkt) {
4319 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4320 				"Failed to allocate dp_htt_htc_pkt buffer");
4321 		qdf_assert(0);
4322 		qdf_nbuf_free(msg);
4323 		return QDF_STATUS_E_NOMEM;
4324 	}
4325 
4326 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4327 
4328 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4329 			dp_htt_h2t_send_complete_free_netbuf,
4330 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4331 			soc->htc_endpoint,
4332 			1); /* tag - not relevant here */
4333 
4334 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4335 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
4336 			    (uint8_t *)msg_word);
4337 	return QDF_STATUS_SUCCESS;
4338 }
4339 #endif
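
/*
 * Example (illustrative only): ask fw to stream the enhanced-stats TLV
 * set for mac 0. DP_PPDU_STATS_CFG_ENH_STATS is assumed to be one of
 * the TLV bitmask helpers from dp_htt.h.
 *
 *	dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, 0);
 */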
4340 
4341 void
4342 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
4343 			     uint32_t *tag_buf)
4344 {
4345 	switch (tag_type) {
4346 	case HTT_STATS_PEER_DETAILS_TAG:
4347 	{
4348 		htt_peer_details_tlv *dp_stats_buf =
4349 			(htt_peer_details_tlv *)tag_buf;
4350 
4351 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
4352 	}
4353 	break;
4354 	case HTT_STATS_PEER_STATS_CMN_TAG:
4355 	{
4356 		htt_peer_stats_cmn_tlv *dp_stats_buf =
4357 			(htt_peer_stats_cmn_tlv *)tag_buf;
4358 
4359 		struct dp_peer *peer = dp_peer_find_by_id(pdev->soc,
4360 						pdev->fw_stats_peer_id);
4361 
4362 		if (peer && !peer->bss_peer) {
4363 			peer->stats.tx.inactive_time =
4364 				dp_stats_buf->inactive_time;
4365 			qdf_event_set(&pdev->fw_peer_stats_event);
4366 		}
4367 		if (peer)
4368 			dp_peer_unref_del_find_by_id(peer);
4369 	}
4370 	break;
4371 	default:
4372 		qdf_err("Invalid tag_type");
4373 	}
4374 }
4375 
4376 /**
4377  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
4378  * @pdev: DP pdev handle
4379  * @fse_setup_info: FST setup parameters
4380  *
4381  * Return: Success when HTT message is sent, error on failure
4382  */
4383 QDF_STATUS
4384 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
4385 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
4386 {
4387 	struct htt_soc *soc = pdev->soc->htt_handle;
4388 	struct dp_htt_htc_pkt *pkt;
4389 	qdf_nbuf_t msg;
4390 	u_int32_t *msg_word;
4391 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4392 	uint8_t *htt_logger_bufp;
4393 	u_int32_t *key;
4394 
4395 	msg = qdf_nbuf_alloc(
4396 		soc->osdev,
4397 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4398 		/* reserve room for the HTC header */
4399 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4400 
4401 	if (!msg)
4402 		return QDF_STATUS_E_NOMEM;
4403 
4404 	/*
4405 	 * Set the length of the message.
4406 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4407 	 * separately during the below call to qdf_nbuf_push_head.
4408 	 * The contribution from the HTC header is added separately inside HTC.
4409 	 */
4410 	if (!qdf_nbuf_put_tail(msg,
4411 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4412 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
		qdf_nbuf_free(msg);
4413 		return QDF_STATUS_E_FAILURE;
4414 	}
4415 
4416 	/* fill in the message contents */
4417 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4418 
4419 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4420 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4421 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4422 	htt_logger_bufp = (uint8_t *)msg_word;
4423 
4424 	*msg_word = 0;
4425 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4426 
4427 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4428 
4429 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4430 
4431 	msg_word++;
4432 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4433 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4434 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4435 					     fse_setup_info->ip_da_sa_prefix);
4436 
4437 	msg_word++;
4438 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4439 					  fse_setup_info->base_addr_lo);
4440 	msg_word++;
4441 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4442 					  fse_setup_info->base_addr_hi);
4443 
4444 	key = (u_int32_t *)fse_setup_info->hash_key;
4445 	fse_setup->toeplitz31_0 = *key++;
4446 	fse_setup->toeplitz63_32 = *key++;
4447 	fse_setup->toeplitz95_64 = *key++;
4448 	fse_setup->toeplitz127_96 = *key++;
4449 	fse_setup->toeplitz159_128 = *key++;
4450 	fse_setup->toeplitz191_160 = *key++;
4451 	fse_setup->toeplitz223_192 = *key++;
4452 	fse_setup->toeplitz255_224 = *key++;
4453 	fse_setup->toeplitz287_256 = *key++;
4454 	fse_setup->toeplitz314_288 = *key;
4455 
4456 	msg_word++;
4457 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4458 	msg_word++;
4459 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4460 	msg_word++;
4461 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4462 	msg_word++;
4463 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4464 	msg_word++;
4465 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4466 	msg_word++;
4467 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4468 	msg_word++;
4469 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4470 	msg_word++;
4471 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4472 	msg_word++;
4473 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4474 	msg_word++;
4475 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4476 					  fse_setup->toeplitz314_288);
4477 
4478 	pkt = htt_htc_pkt_alloc(soc);
4479 	if (!pkt) {
4480 		qdf_err("Failed to allocate dp_htt_htc_pkt buffer");
4481 		qdf_assert(0);
4482 		qdf_nbuf_free(msg);
4483 		return QDF_STATUS_E_RESOURCES; /* failure */
4484 	}
4485 
4486 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4487 
4488 	SET_HTC_PACKET_INFO_TX(
4489 		&pkt->htc_pkt,
4490 		dp_htt_h2t_send_complete_free_netbuf,
4491 		qdf_nbuf_data(msg),
4492 		qdf_nbuf_len(msg),
4493 		soc->htc_endpoint,
4494 		1); /* tag - not relevant here */
4495 
4496 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4497 
4498 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4499 			    htt_logger_bufp);
4500 
4501 	qdf_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4502 		 fse_setup_info->pdev_id);
4503 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4504 			   (void *)fse_setup_info->hash_key,
4505 			   fse_setup_info->hash_key_len);
4506 
4507 	return QDF_STATUS_SUCCESS;
4508 }
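
/*
 * Minimal usage sketch (illustrative only; error handling elided).
 * dp_htt_rx_flow_fst_setup() consumes ten 32-bit key words
 * (toeplitz31_0..toeplitz314_288), so the caller must supply at least
 * 40 bytes of key material. 'fst_paddr' and the table dimensions below
 * are hypothetical.
 */
#if 0
static QDF_STATUS dp_htt_rx_fst_setup_example(struct dp_pdev *pdev,
					      qdf_dma_addr_t fst_paddr)
{
	static uint32_t hash_key[10];	/* 320-bit Toeplitz key material */
	struct dp_htt_rx_flow_fst_setup fse_setup = {0};

	fse_setup.pdev_id = pdev->pdev_id;
	fse_setup.max_entries = 1024;	/* example table size */
	fse_setup.max_search = 8;	/* example search depth */
	fse_setup.base_addr_lo = fst_paddr & 0xffffffff;
	fse_setup.base_addr_hi = fst_paddr >> 32;
	fse_setup.hash_key = (uint8_t *)hash_key;
	fse_setup.hash_key_len = sizeof(hash_key);

	return dp_htt_rx_flow_fst_setup(pdev, &fse_setup);
}
#endif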
4509 
4510 /**
4511  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
4512  * add/del a flow in HW
4513  * @pdev: DP pdev handle
4514  * @fse_op_info: Flow entry parameters
4515  *
4516  * Return: Success when HTT message is sent, error on failure
4517  */
4518 QDF_STATUS
4519 dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
4520 			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
4521 {
4522 	struct htt_soc *soc = pdev->soc->htt_handle;
4523 	struct dp_htt_htc_pkt *pkt;
4524 	qdf_nbuf_t msg;
4525 	u_int32_t *msg_word;
4526 	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
4527 	uint8_t *htt_logger_bufp;
4528 
4529 	msg = qdf_nbuf_alloc(
4530 		soc->osdev,
4531 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
4532 		/* reserve room for the HTC header */
4533 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4534 	if (!msg)
4535 		return QDF_STATUS_E_NOMEM;
4536 
4537 	/*
4538 	 * Set the length of the message.
4539 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4540 	 * separately during the below call to qdf_nbuf_push_head.
4541 	 * The contribution from the HTC header is added separately inside HTC.
4542 	 */
4543 	if (!qdf_nbuf_put_tail(msg,
4544 			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
4545 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
		qdf_nbuf_free(msg);
4546 		return QDF_STATUS_E_FAILURE;
4547 	}
4548 
4549 	/* fill in the message contents */
4550 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4551 
4552 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
4553 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4554 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4555 	htt_logger_bufp = (uint8_t *)msg_word;
4556 
4557 	*msg_word = 0;
4558 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
4559 
4560 	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
4561 
4562 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
4563 	msg_word++;
4564 	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
4565 	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
4566 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4567 					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
4568 		msg_word++;
4569 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4570 		*msg_word,
4571 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
4572 		msg_word++;
4573 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4574 		*msg_word,
4575 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
4576 		msg_word++;
4577 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4578 		*msg_word,
4579 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
4580 		msg_word++;
4581 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4582 		*msg_word,
4583 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
4584 		msg_word++;
4585 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4586 		*msg_word,
4587 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
4588 		msg_word++;
4589 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4590 		*msg_word,
4591 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
4592 		msg_word++;
4593 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4594 		*msg_word,
4595 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
4596 		msg_word++;
4597 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4598 		*msg_word,
4599 		qdf_htonl(
4600 		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
4601 		msg_word++;
4602 		HTT_RX_FSE_SOURCEPORT_SET(
4603 			*msg_word,
4604 			fse_op_info->rx_flow->flow_tuple_info.src_port);
4605 		HTT_RX_FSE_DESTPORT_SET(
4606 			*msg_word,
4607 			fse_op_info->rx_flow->flow_tuple_info.dest_port);
4608 		msg_word++;
4609 		HTT_RX_FSE_L4_PROTO_SET(
4610 			*msg_word,
4611 			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
4612 	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
4613 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4614 					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
4615 	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
4616 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
4617 	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
4618 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
4619 	}
4620 
4621 	pkt = htt_htc_pkt_alloc(soc);
4622 	if (!pkt) {
4623 		qdf_err("Failed to allocate dp_htt_htc_pkt buffer");
4624 		qdf_assert(0);
4625 		qdf_nbuf_free(msg);
4626 		return QDF_STATUS_E_RESOURCES; /* failure */
4627 	}
4628 
4629 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4630 
4631 	SET_HTC_PACKET_INFO_TX(
4632 		&pkt->htc_pkt,
4633 		dp_htt_h2t_send_complete_free_netbuf,
4634 		qdf_nbuf_data(msg),
4635 		qdf_nbuf_len(msg),
4636 		soc->htc_endpoint,
4637 		1); /* tag - not relevant here */
4638 
4639 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4640 
4641 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
4642 			    htt_logger_bufp);
4643 
4644 	qdf_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
4645 		 fse_op_info->pdev_id);
4646 
4647 	return QDF_STATUS_SUCCESS;
4648 }
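
/*
 * Minimal usage sketch (illustrative only): invalidate the whole FSE
 * cache for a pdev. A per-entry invalidate would instead set op_code to
 * DP_HTT_FST_CACHE_INVALIDATE_ENTRY and point rx_flow at the flow tuple.
 */
#if 0
static QDF_STATUS dp_htt_fse_cache_flush_example(struct dp_pdev *pdev)
{
	struct dp_htt_rx_flow_fst_operation fse_op = {0};

	fse_op.pdev_id = pdev->pdev_id;
	fse_op.op_code = DP_HTT_FST_CACHE_INVALIDATE_FULL;

	return dp_htt_rx_flow_fse_operation(pdev, &fse_op);
}
#endif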
4649