1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_rx_mon.h"
26 #include "htt_stats.h"
27 #include "htt_ppdu_stats.h"
28 #include "dp_htt.h"
29 #include "dp_rx.h"
30 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
31 #include "cdp_txrx_cmn_struct.h"
32 
33 #ifdef FEATURE_PERPKT_INFO
34 #include "dp_ratetable.h"
35 #endif
36 
37 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
38 
39 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
40 
41 #define HTT_MSG_BUF_SIZE(msg_bytes) \
42 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
43 
44 #define HTT_PID_BIT_MASK 0x3
45 
46 #define DP_EXT_MSG_LENGTH 2048
47 
48 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
49 
50 #define HTT_SHIFT_UPPER_TIMESTAMP 32
51 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
52 
53 /*
54  * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
55  * bitmap for sniffer mode
56  * @bitmap: received bitmap
57  *
58  * Return: the matching bitmap value, or zero if the received bitmap
59  * matches neither the 64 nor the 256 Tx-window TLV bitmap
60  */
61 int
62 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
63 {
64 	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
65 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
66 	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
67 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
68 
69 	return 0;
70 }
71 
72 #ifdef FEATURE_PERPKT_INFO
73 /*
74  * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
75  * @peer: Datapath peer handle
76  * @ppdu: PPDU Descriptor
77  *
78  * Return: None
79  *
80  * For a Tx data frame, the delayed-BA flag may be set in
81  * htt_ppdu_stats_user_common_tlv, which means the Block Ack (BA) arrives
82  * only after a Block Ack Request (BAR) is sent. The successful MSDU count
83  * is known only once the BA is received, and peer stats need that count.
84  * So the Tx data stats are held in delayed_ba until the stats update.
85  */
86 static inline void
87 dp_peer_copy_delay_stats(struct dp_peer *peer,
88 			 struct cdp_tx_completion_ppdu_user *ppdu)
89 {
90 	struct dp_pdev *pdev;
91 	struct dp_vdev *vdev;
92 
93 	if (peer->last_delayed_ba) {
94 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
95 			  "BA not yet recv for prev delayed ppdu[%d]\n",
96 			  peer->last_delayed_ba_ppduid);
97 		vdev = peer->vdev;
98 		if (vdev) {
99 			pdev = vdev->pdev;
100 			pdev->stats.cdp_delayed_ba_not_recev++;
101 		}
102 	}
103 
104 	peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
105 	peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
106 	peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
107 	peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
108 	peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
109 	peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
110 	peer->delayed_ba_ppdu_stats.preamble = ppdu->preamble;
111 	peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
112 	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
113 	peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
115 	peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = ppdu->mpdu_tried_ucast;
116 	peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = ppdu->mpdu_tried_mcast;
117 	peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
118 	peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
120 
121 	peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
122 	peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
123 	peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
124 
125 	peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
126 	peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
127 
128 	peer->last_delayed_ba = true;
129 }
130 
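/*
 * Delayed-BA flow (illustrative summary, derived from the two helpers
 * around it): the data-frame PPDU user descriptor is parked via
 * dp_peer_copy_delay_stats(); when the BAR/BA PPDU arrives, its user
 * descriptor is backfilled via dp_peer_copy_stats_to_bar() before the
 * normal per-peer stats update runs.
 */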
131 /*
132  * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
133  * @peer: Datapath peer handle
134  * @ppdu: PPDU Descriptor
135  *
136  * Return: None
137  *
138  * For a Tx BAR, the PPDU stats TLV carries the Block Ack info. The PPDU
139  * info from the BAR frame itself is not needed to populate peer stats,
140  * but the successful MPDU and MSDU counts are needed to update the
141  * previously transmitted Tx data frame. So overwrite the BAR's ppdu
142  * stats with the previously stored ppdu stats.
143  */
144 static void
145 dp_peer_copy_stats_to_bar(struct dp_peer *peer,
146 			  struct cdp_tx_completion_ppdu_user *ppdu)
147 {
148 	ppdu->ltf_size = peer->delayed_ba_ppdu_stats.ltf_size;
149 	ppdu->stbc = peer->delayed_ba_ppdu_stats.stbc;
150 	ppdu->he_re = peer->delayed_ba_ppdu_stats.he_re;
151 	ppdu->txbf = peer->delayed_ba_ppdu_stats.txbf;
152 	ppdu->bw = peer->delayed_ba_ppdu_stats.bw;
153 	ppdu->nss = peer->delayed_ba_ppdu_stats.nss;
154 	ppdu->preamble = peer->delayed_ba_ppdu_stats.preamble;
155 	ppdu->gi = peer->delayed_ba_ppdu_stats.gi;
156 	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
157 	ppdu->ldpc = peer->delayed_ba_ppdu_stats.ldpc;
159 	ppdu->mpdu_tried_ucast = peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
160 	ppdu->mpdu_tried_mcast = peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
161 	ppdu->frame_ctrl = peer->delayed_ba_ppdu_stats.frame_ctrl;
162 	ppdu->qos_ctrl = peer->delayed_ba_ppdu_stats.qos_ctrl;
164 
165 	ppdu->ru_start = peer->delayed_ba_ppdu_stats.ru_start;
166 	ppdu->ru_tones = peer->delayed_ba_ppdu_stats.ru_tones;
167 	ppdu->is_mcast = peer->delayed_ba_ppdu_stats.is_mcast;
168 
169 	ppdu->user_pos = peer->delayed_ba_ppdu_stats.user_pos;
170 	ppdu->mu_group_id = peer->delayed_ba_ppdu_stats.mu_group_id;
171 
172 	peer->last_delayed_ba = false;
173 }
174 
175 /*
176  * dp_tx_rate_stats_update() - Update rate per-peer statistics
177  * @peer: Datapath peer handle
178  * @ppdu: PPDU Descriptor
179  *
180  * Return: None
181  */
182 static void
183 dp_tx_rate_stats_update(struct dp_peer *peer,
184 			struct cdp_tx_completion_ppdu_user *ppdu)
185 {
186 	uint32_t ratekbps = 0;
187 	uint64_t ppdu_tx_rate = 0;
188 	uint32_t rix;
189 	uint16_t ratecode = 0;
190 
191 	if (!peer || !ppdu)
192 		return;
193 
194 	ratekbps = dp_getrateindex(ppdu->gi,
195 				   ppdu->mcs,
196 				   ppdu->nss,
197 				   ppdu->preamble,
198 				   ppdu->bw,
199 				   &rix,
200 				   &ratecode);
201 
202 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
203 
204 	if (!ratekbps)
205 		return;
206 
207 	/* Calculate goodput only in the non-training period.
208 	 * During the training period, do nothing, as the
209 	 * pending pkt is sent as goodput.
210 	 */
211 	if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
212 		ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
213 				(CDP_PERCENT_MACRO - ppdu->current_rate_per));
214 	}
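	/*
	 * Worked example (assuming CDP_NUM_KB_IN_MB == 1000 and
	 * CDP_PERCENT_MACRO == 100): ratekbps = 144000 (144 Mbps) and
	 * current_rate_per = 10 give sa_goodput = 144 * (100 - 10) = 12960,
	 * i.e. the goodput in Mbps scaled by the success percentage.
	 */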
215 	ppdu->rix = rix;
216 	ppdu->tx_ratekbps = ratekbps;
217 	ppdu->tx_ratecode = ratecode;
218 	peer->stats.tx.avg_tx_rate =
219 		dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
220 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
221 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
222 
223 	if (peer->vdev) {
224 		/*
225 		 * In STA mode:
226 		 *	We get ucast stats as BSS peer stats.
227 		 *
228 		 * In AP mode:
229 		 *	We get mcast stats as BSS peer stats.
230 		 *	We get ucast stats as assoc peer stats.
231 		 */
232 		if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
233 			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
234 			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
235 		} else {
236 			peer->vdev->stats.tx.last_tx_rate = ratekbps;
237 			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
238 		}
239 	}
240 }
241 
242 /*
243  * dp_tx_stats_update() - Update per-peer statistics
244  * @pdev: Datapath pdev handle
245  * @peer: Datapath peer handle
246  * @ppdu: PPDU Descriptor
247  * @ack_rssi: RSSI of last ack received
248  *
249  * Return: None
250  */
251 static void
252 dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
253 		   struct cdp_tx_completion_ppdu_user *ppdu,
254 		   uint32_t ack_rssi)
255 {
256 	uint8_t preamble, mcs;
257 	uint16_t num_msdu;
258 	uint16_t num_mpdu;
259 	uint16_t mpdu_tried;
260 	uint16_t mpdu_failed;
261 
262 	preamble = ppdu->preamble;
263 	mcs = ppdu->mcs;
264 	num_msdu = ppdu->num_msdu;
265 	num_mpdu = ppdu->mpdu_success;
266 	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
267 	mpdu_failed = mpdu_tried - num_mpdu;
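	/*
	 * e.g. (illustrative): 10 MPDUs tried (8 ucast + 2 mcast) with 7
	 * acked in the BA gives mpdu_failed = 3; those 3 are counted as
	 * retries below, since every failed MPDU will be retransmitted.
	 */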
268 
269 	/* If the peer statistics are already processed as part of the
270 	 * per-MSDU completion handler, do not process them again in the
271 	 * per-PPDU indications */
272 	if (pdev->soc->process_tx_status)
273 		return;
274 
275 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
276 		/*
277 		 * All failed mpdu will be retried, so incrementing
278 		 * retries mpdu based on mpdu failed. Even for
279 		 * ack failure i.e for long retries we get
280 		 * mpdu failed equal mpdu tried.
281 		 */
282 		DP_STATS_INC(peer, tx.retries, mpdu_failed);
283 		DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
284 		return;
285 	}
286 
287 	if (ppdu->is_ppdu_cookie_valid)
288 		DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1);
289 
290 	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
291 	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
292 		if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
293 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
294 				  "mu_group_id out of bounds!\n");
295 		else
296 			DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
297 				     (ppdu->user_pos + 1));
298 	}
299 
300 	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
301 	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
302 		DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
303 		DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
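		/* Bucket the per-RU-size counters: each RU tone count
		 * (RU_26 .. RU_996) maps to its slot in tx.ru_loc[].
		 */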
304 		switch (ppdu->ru_tones) {
305 		case RU_26:
306 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu,
307 				     num_msdu);
308 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu,
309 				     num_mpdu);
310 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried,
311 				     mpdu_tried);
312 		break;
313 		case RU_52:
314 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu,
315 				     num_msdu);
316 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu,
317 				     num_mpdu);
318 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried,
319 				     mpdu_tried);
320 		break;
321 		case RU_106:
322 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu,
323 				     num_msdu);
324 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu,
325 				     num_mpdu);
326 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried,
327 				     mpdu_tried);
328 		break;
329 		case RU_242:
330 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu,
331 				     num_msdu);
332 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu,
333 				     num_mpdu);
334 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried,
335 				     mpdu_tried);
336 		break;
337 		case RU_484:
338 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu,
339 				     num_msdu);
340 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu,
341 				     num_mpdu);
342 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried,
343 				     mpdu_tried);
344 		break;
345 		case RU_996:
346 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu,
347 				     num_msdu);
348 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu,
349 				     num_mpdu);
350 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried,
351 				     mpdu_tried);
352 		break;
353 		}
354 	}
355 
356 	/*
357 	 * All failed mpdu will be retried, so incrementing
358 	 * retries mpdu based on mpdu failed. Even for
359 	 * ack failure i.e for long retries we get
360 	 * mpdu failed equal mpdu tried.
361 	 */
362 	DP_STATS_INC(peer, tx.retries, mpdu_failed);
363 	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
364 
365 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
366 		     num_msdu);
367 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
368 		     num_mpdu);
369 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
370 		     mpdu_tried);
371 
372 	DP_STATS_INC_PKT(peer, tx.comp_pkt,
373 			num_msdu, (ppdu->success_bytes +
374 				ppdu->retry_bytes + ppdu->failed_bytes));
375 	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
376 	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
377 	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
378 	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
379 	if (ppdu->tid < CDP_DATA_TID_MAX)
380 		DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
381 			     num_msdu);
382 	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
383 	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
384 	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
385 		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);
386 
387 	DP_STATS_INCC(peer,
388 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
389 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
390 	DP_STATS_INCC(peer,
391 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
392 			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
393 	DP_STATS_INCC(peer,
394 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
395 			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
396 	DP_STATS_INCC(peer,
397 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
398 			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
399 	DP_STATS_INCC(peer,
400 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
401 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
402 	DP_STATS_INCC(peer,
403 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
404 			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
405 	DP_STATS_INCC(peer,
406 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
407 			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
408 	DP_STATS_INCC(peer,
409 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
410 			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
411 	DP_STATS_INCC(peer,
412 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
413 			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
414 	DP_STATS_INCC(peer,
415 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
416 			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
417 	DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
418 	DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
419 
420 	dp_peer_stats_notify(pdev, peer);
421 
422 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
423 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
424 			     &peer->stats, ppdu->peer_id,
425 			     UPDATE_PEER_STATS, pdev->pdev_id);
426 #endif
427 }
428 #endif
429 
430 #ifdef WLAN_TX_PKT_CAPTURE_ENH
431 #include "dp_tx_capture.h"
432 #else
433 static inline void
434 dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
435 					   void *data,
436 					   uint32_t ppdu_id,
437 					   uint32_t size)
438 {
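	/* no-op stub: Tx packet capture is disabled, so there is no failed
	 * bitmap to record; the stub keeps call sites unconditional.
	 */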
439 }
440 #endif
441 
442 /*
443  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
444  * @htt_soc:	HTT SOC handle
445  *
446  * Return: Pointer to htc packet buffer
447  */
448 static struct dp_htt_htc_pkt *
449 htt_htc_pkt_alloc(struct htt_soc *soc)
450 {
451 	struct dp_htt_htc_pkt_union *pkt = NULL;
452 
453 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
454 	if (soc->htt_htc_pkt_freelist) {
455 		pkt = soc->htt_htc_pkt_freelist;
456 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
457 	}
458 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
459 
460 	if (!pkt)
461 		pkt = qdf_mem_malloc(sizeof(*pkt));
	if (!pkt)
		return NULL; /* allocation failed; callers check for NULL */
462 	return &pkt->u.pkt; /* u.pkt is the first member: same address as pkt */
463 }
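/*
 * Design note: HTC packet wrappers are recycled through a mutex-protected
 * singly linked freelist (htt_htc_pkt_freelist), so steady-state HTT
 * messaging avoids a heap allocation per message; htt_htc_pkt_free()
 * pushes wrappers back onto the same list.
 */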
464 
465 /*
466  * htt_htc_pkt_free() - Free HTC packet buffer
467  * @htt_soc:	HTT SOC handle
468  */
469 static void
470 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
471 {
472 	struct dp_htt_htc_pkt_union *u_pkt =
473 		(struct dp_htt_htc_pkt_union *)pkt;
474 
475 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
476 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
477 	soc->htt_htc_pkt_freelist = u_pkt;
478 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
479 }
480 
481 /*
482  * htt_htc_pkt_pool_free() - Free HTC packet pool
483  * @htt_soc:	HTT SOC handle
484  */
485 static void
486 htt_htc_pkt_pool_free(struct htt_soc *soc)
487 {
488 	struct dp_htt_htc_pkt_union *pkt, *next;
489 	pkt = soc->htt_htc_pkt_freelist;
490 	while (pkt) {
491 		next = pkt->u.next;
492 		qdf_mem_free(pkt);
493 		pkt = next;
494 	}
495 	soc->htt_htc_pkt_freelist = NULL;
496 }
497 
498 /*
499  * htt_htc_misc_pkt_list_trim() - trim misc list
500  * @htt_soc: HTT SOC handle
501  * @level: max no. of pkts in list
502  */
503 static void
504 htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
505 {
506 	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
507 	int i = 0;
508 	qdf_nbuf_t netbuf;
509 
510 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
511 	pkt = soc->htt_htc_pkt_misclist;
512 	while (pkt) {
513 		next = pkt->u.next;
514 		/* trim the overgrown list */
515 		if (++i > level) {
516 			netbuf =
517 				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
518 			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
519 			qdf_nbuf_free(netbuf);
520 			qdf_mem_free(pkt);
521 			pkt = NULL;
522 			if (prev)
523 				prev->u.next = NULL;
524 		}
525 		prev = pkt;
526 		pkt = next;
527 	}
528 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
529 }
530 
531 /*
532  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
533  * @htt_soc:	HTT SOC handle
534  * @dp_htt_htc_pkt: pkt to be added to list
535  */
536 static void
537 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
538 {
539 	struct dp_htt_htc_pkt_union *u_pkt =
540 				(struct dp_htt_htc_pkt_union *)pkt;
541 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
542 							pkt->htc_pkt.Endpoint)
543 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
544 
545 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
546 	if (soc->htt_htc_pkt_misclist) {
547 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
548 		soc->htt_htc_pkt_misclist = u_pkt;
549 	} else {
550 		soc->htt_htc_pkt_misclist = u_pkt;
551 	}
552 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
553 
554 	/* Only (CE pipe size + tx_queue_depth) packets can possibly be in
555 	 * use at once; free any older packets in the misclist.
556 	 */
557 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
558 }
559 
560 /**
561  * DP_HTT_SEND_HTC_PKT() - Send htt packet from host
562  * @soc : HTT SOC handle
563  * @pkt: packet to be sent
564  * @cmd: command to be recorded in the dp htt logger
565  * @buf: pointer to the buffer to be recorded for the above cmd
566  *
567  * Return: None
568  */
569 static inline void DP_HTT_SEND_HTC_PKT(struct htt_soc *soc,
570 				       struct dp_htt_htc_pkt *pkt, uint8_t cmd,
571 				       uint8_t *buf)
572 {
573 	htt_command_record(soc->htt_logger_handle, cmd, buf);
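	/* If HTC accepts the packet, park it on the misclist; the list is
	 * trimmed back to (CE pipe size + tx queue depth) entries inside
	 * htt_htc_misc_pkt_list_add().
	 */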
574 	if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==
575 	    QDF_STATUS_SUCCESS)
576 		htt_htc_misc_pkt_list_add(soc, pkt);
577 }
578 
579 /*
580  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
581  * @htt_soc:	HTT SOC handle
582  */
583 static void
584 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
585 {
586 	struct dp_htt_htc_pkt_union *pkt, *next;
587 	qdf_nbuf_t netbuf;
588 
589 	pkt = soc->htt_htc_pkt_misclist;
590 
591 	while (pkt) {
592 		next = pkt->u.next;
593 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
594 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
595 
596 		soc->stats.htc_pkt_free++;
597 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
598 			 "%s: Pkt free count %d",
599 			 __func__, soc->stats.htc_pkt_free);
600 
601 		qdf_nbuf_free(netbuf);
602 		qdf_mem_free(pkt);
603 		pkt = next;
604 	}
605 	soc->htt_htc_pkt_misclist = NULL;
606 }
607 
608 /*
609  * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differs
610  * @tgt_mac_addr:	Target MAC
611  * @buffer:		Output buffer
612  */
613 static u_int8_t *
614 htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
615 {
616 #ifdef BIG_ENDIAN_HOST
617 	/*
618 	 * The host endianness is opposite of the target endianness.
619 	 * To make u_int32_t elements come out correctly, the target->host
620 	 * upload has swizzled the bytes in each u_int32_t element of the
621 	 * message.
622 	 * For byte-array message fields like the MAC address, this
623 	 * upload swizzling puts the bytes in the wrong order, and needs
624 	 * to be undone.
625 	 */
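	/*
	 * Illustrative example: target MAC 00:11:22:33:44:55 packed
	 * little-endian into two u32 words arrives on a big-endian host as
	 * bytes {33,22,11,00,--,--,55,44}; the mapping below restores
	 * {00,11,22,33,44,55}.
	 */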
626 	buffer[0] = tgt_mac_addr[3];
627 	buffer[1] = tgt_mac_addr[2];
628 	buffer[2] = tgt_mac_addr[1];
629 	buffer[3] = tgt_mac_addr[0];
630 	buffer[4] = tgt_mac_addr[7];
631 	buffer[5] = tgt_mac_addr[6];
632 	return buffer;
633 #else
634 	/*
635 	 * The host endianness matches the target endianness -
636 	 * we can use the mac addr directly from the message buffer.
637 	 */
638 	return tgt_mac_addr;
639 #endif
640 }
641 
642 /*
643  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
644  * @soc:	SOC handle
645  * @status:	Completion status
646  * @netbuf:	HTT buffer
647  */
648 static void
649 dp_htt_h2t_send_complete_free_netbuf(
650 	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
651 {
652 	qdf_nbuf_free(netbuf);
653 }
654 
655 /*
656  * dp_htt_h2t_send_complete() - H2T completion handler
657  * @context:	Opaque context (HTT SOC handle)
658  * @htc_pkt:	HTC packet
659  */
660 static void
661 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
662 {
663 	void (*send_complete_part2)(
664 		void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
665 	struct htt_soc *soc =  (struct htt_soc *) context;
666 	struct dp_htt_htc_pkt *htt_pkt;
667 	qdf_nbuf_t netbuf;
668 
669 	send_complete_part2 = htc_pkt->pPktContext;
670 
671 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
672 
673 	/* process (free or keep) the netbuf that held the message */
674 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
675 	/*
676 	 * adf send-complete is required on Windows only
677 	 */
678 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
679 	if (send_complete_part2) {
680 		send_complete_part2(
681 			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
682 	}
683 	/* free the htt_htc_pkt / HTC_PACKET object */
684 	htt_htc_pkt_free(soc, htt_pkt);
685 }
686 
687 /*
688  * htt_h2t_ver_req_msg() - Send HTT version request message to target
689  * @htt_soc:	HTT SOC handle
690  *
691  * Return: 0 on success; error code on failure
692  */
693 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
694 {
695 	struct dp_htt_htc_pkt *pkt;
696 	qdf_nbuf_t msg;
697 	uint32_t *msg_word;
698 
699 	msg = qdf_nbuf_alloc(
700 		soc->osdev,
701 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
702 		/* reserve room for the HTC header */
703 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
704 	if (!msg)
705 		return QDF_STATUS_E_NOMEM;
706 
707 	/*
708 	 * Set the length of the message.
709 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
710 	 * separately during the below call to qdf_nbuf_push_head.
711 	 * The contribution from the HTC header is added separately inside HTC.
712 	 */
713 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
714 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
715 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
716 			__func__);
		qdf_nbuf_free(msg); /* avoid leaking the message buffer */
717 		return QDF_STATUS_E_FAILURE;
718 	}
719 
720 	/* fill in the message contents */
721 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
722 
723 	/* rewind beyond alignment pad to get to the HTC header reserved area */
724 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
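	/*
	 * Buffer layout at this point (sketch): the nbuf data pointer now
	 * sits at the start of the HTC_HDR_ALIGNMENT_PADDING bytes just
	 * pushed, immediately followed by the 4-byte HTT message word that
	 * msg_word (captured before the push) still points at; HTC prepends
	 * its own header into the remaining headroom.
	 */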
725 
726 	*msg_word = 0;
727 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
728 
729 	pkt = htt_htc_pkt_alloc(soc);
730 	if (!pkt) {
731 		qdf_nbuf_free(msg);
732 		return QDF_STATUS_E_FAILURE;
733 	}
734 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
735 
736 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
737 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
738 		qdf_nbuf_len(msg), soc->htc_endpoint,
739 		1); /* tag - not relevant here */
740 
741 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
742 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ, NULL);
743 	return 0;
744 }
745 
746 /*
747  * htt_srng_setup() - Send SRNG setup message to target
748  * @htt_soc:	HTT SOC handle
749  * @mac_id:	MAC Id
750  * @hal_srng:	Opaque HAL SRNG pointer
751  * @hal_ring_type:	SRNG ring type
752  *
753  * Return: 0 on success; error code on failure
754  */
755 int htt_srng_setup(struct htt_soc *soc, int mac_id,
756 		   hal_ring_handle_t hal_ring_hdl,
757 		   int hal_ring_type)
758 {
759 	struct dp_htt_htc_pkt *pkt;
760 	qdf_nbuf_t htt_msg;
761 	uint32_t *msg_word;
762 	struct hal_srng_params srng_params;
763 	qdf_dma_addr_t hp_addr, tp_addr;
764 	uint32_t ring_entry_size =
765 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
766 	int htt_ring_type, htt_ring_id;
767 	uint8_t *htt_logger_bufp;
768 	int target_pdev_id;
769 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
770 
771 	/* Sizes should be set in 4-byte words */
772 	ring_entry_size = ring_entry_size >> 2;
773 
774 	htt_msg = qdf_nbuf_alloc(soc->osdev,
775 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
776 		/* reserve room for the HTC header */
777 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
778 	if (!htt_msg)
779 		goto fail0;
780 
781 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
782 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
783 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
784 
785 	switch (hal_ring_type) {
786 	case RXDMA_BUF:
787 #ifdef QCA_HOST2FW_RXBUF_RING
788 		if (srng_params.ring_id ==
789 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
790 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
791 			htt_ring_type = HTT_SW_TO_SW_RING;
792 #ifdef IPA_OFFLOAD
793 		} else if (srng_params.ring_id ==
794 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
795 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
796 			htt_ring_type = HTT_SW_TO_SW_RING;
797 #endif
798 #else
799 		if (srng_params.ring_id ==
800 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
801 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
802 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
803 			htt_ring_type = HTT_SW_TO_HW_RING;
804 #endif
805 		} else if (srng_params.ring_id ==
806 #ifdef IPA_OFFLOAD
807 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
808 #else
809 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
810 #endif
811 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
812 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
813 			htt_ring_type = HTT_SW_TO_HW_RING;
814 		} else {
815 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
816 				   "%s: Ring %d currently not supported",
817 				   __func__, srng_params.ring_id);
818 			goto fail1;
819 		}
820 
821 		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
822 			hal_ring_type, srng_params.ring_id, htt_ring_id,
823 			(uint64_t)hp_addr,
824 			(uint64_t)tp_addr);
825 		break;
826 	case RXDMA_MONITOR_BUF:
827 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
828 		htt_ring_type = HTT_SW_TO_HW_RING;
829 		break;
830 	case RXDMA_MONITOR_STATUS:
831 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
832 		htt_ring_type = HTT_SW_TO_HW_RING;
833 		break;
834 	case RXDMA_MONITOR_DST:
835 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
836 		htt_ring_type = HTT_HW_TO_SW_RING;
837 		break;
838 	case RXDMA_MONITOR_DESC:
839 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
840 		htt_ring_type = HTT_SW_TO_HW_RING;
841 		break;
842 	case RXDMA_DST:
843 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
844 		htt_ring_type = HTT_HW_TO_SW_RING;
845 		break;
846 
847 	default:
848 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
849 			"%s: Ring currently not supported", __func__);
850 		goto fail1;
851 	}
852 
853 	/*
854 	 * Set the length of the message.
855 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
856 	 * separately during the below call to qdf_nbuf_push_head.
857 	 * The contribution from the HTC header is added separately inside HTC.
858 	 */
859 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
860 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
861 			"%s: Failed to expand head for SRING_SETUP msg",
862 			__func__);
863 		goto fail1; /* free htt_msg instead of leaking it */
864 	}
865 
866 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
867 
868 	/* rewind beyond alignment pad to get to the HTC header reserved area */
869 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
870 
871 	/* word 0 */
872 	*msg_word = 0;
873 	htt_logger_bufp = (uint8_t *)msg_word;
874 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
875 	target_pdev_id =
876 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
877 
878 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
879 			(htt_ring_type == HTT_HW_TO_SW_RING))
880 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
881 	else
882 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
883 
884 	dp_info("%s: mac_id %d", __func__, mac_id);
885 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
886 	/* TODO: Discuss with FW on changing this to unique ID and using
887 	 * htt_ring_type to send the type of ring
888 	 */
889 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
890 
891 	/* word 1 */
892 	msg_word++;
893 	*msg_word = 0;
894 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
895 		srng_params.ring_base_paddr & 0xffffffff);
896 
897 	/* word 2 */
898 	msg_word++;
899 	*msg_word = 0;
900 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
901 		(uint64_t)srng_params.ring_base_paddr >> 32);
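	/* e.g. ring_base_paddr 0x1_2345_6780 splits into LO word 0x23456780
	 * (word 1) and HI word 0x1 (word 2).
	 */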
902 
903 	/* word 3 */
904 	msg_word++;
905 	*msg_word = 0;
906 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
907 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
908 		(ring_entry_size * srng_params.num_entries));
909 	dp_info("%s: entry_size %d", __func__, ring_entry_size);
910 	dp_info("%s: num_entries %d", __func__, srng_params.num_entries);
911 	dp_info("%s: ring_size %d", __func__,
912 		(ring_entry_size * srng_params.num_entries));
913 	if (htt_ring_type == HTT_SW_TO_HW_RING)
914 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
915 						*msg_word, 1);
916 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
917 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
918 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
919 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
920 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
921 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
922 
923 	/* word 4 */
924 	msg_word++;
925 	*msg_word = 0;
926 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
927 		hp_addr & 0xffffffff);
928 
929 	/* word 5 */
930 	msg_word++;
931 	*msg_word = 0;
932 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
933 		(uint64_t)hp_addr >> 32);
934 
935 	/* word 6 */
936 	msg_word++;
937 	*msg_word = 0;
938 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
939 		tp_addr & 0xffffffff);
940 
941 	/* word 7 */
942 	msg_word++;
943 	*msg_word = 0;
944 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
945 		(uint64_t)tp_addr >> 32);
946 
947 	/* word 8 */
948 	msg_word++;
949 	*msg_word = 0;
950 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
951 		srng_params.msi_addr & 0xffffffff);
952 
953 	/* word 9 */
954 	msg_word++;
955 	*msg_word = 0;
956 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
957 		(uint64_t)(srng_params.msi_addr) >> 32);
958 
959 	/* word 10 */
960 	msg_word++;
961 	*msg_word = 0;
962 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
963 		srng_params.msi_data);
964 
965 	/* word 11 */
966 	msg_word++;
967 	*msg_word = 0;
968 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
969 		srng_params.intr_batch_cntr_thres_entries *
970 		ring_entry_size);
971 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
972 		srng_params.intr_timer_thres_us >> 3);
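	/* The batch threshold is programmed in 4-byte-word units (entries *
	 * entry size, with entry size already converted to words above), and
	 * the timer threshold apparently in 8 us units, hence the >> 3.
	 */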
973 
974 	/* word 12 */
975 	msg_word++;
976 	*msg_word = 0;
977 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
978 		/* TODO: Setting low threshold to 1/8th of ring size - see
979 		 * if this needs to be configurable
980 		 */
981 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
982 			srng_params.low_threshold);
983 	}
984 	/* "response_required" field should be set if an HTT response message is
985 	 * required after setting up the ring.
986 	 */
987 	pkt = htt_htc_pkt_alloc(soc);
988 	if (!pkt)
989 		goto fail1;
990 
991 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
992 
993 	SET_HTC_PACKET_INFO_TX(
994 		&pkt->htc_pkt,
995 		dp_htt_h2t_send_complete_free_netbuf,
996 		qdf_nbuf_data(htt_msg),
997 		qdf_nbuf_len(htt_msg),
998 		soc->htc_endpoint,
999 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1000 
1001 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1002 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
1003 			    htt_logger_bufp);
1004 
1005 	return QDF_STATUS_SUCCESS;
1006 
1007 fail1:
1008 	qdf_nbuf_free(htt_msg);
1009 fail0:
1010 	return QDF_STATUS_E_FAILURE;
1011 }
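/*
 * Illustrative usage (hypothetical call site, mirroring how the DP layer
 * configures an RXDMA refill ring; the names below are assumptions, not
 * from this file):
 *
 *	status = htt_srng_setup(soc->htt_handle, mac_id,
 *				rx_refill_ring->hal_srng, RXDMA_BUF);
 *	if (status != QDF_STATUS_SUCCESS)
 *		return status;
 */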
1012 
1013 /*
1014  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
1015  * config message to target
1016  * @htt_soc:	HTT SOC handle
1017  * @pdev_id:	WIN- PDEV Id, MCL- mac id
1018  * @hal_srng:	Opaque HAL SRNG pointer
1019  * @hal_ring_type:	SRNG ring type
1020  * @ring_buf_size:	SRNG buffer size
1021  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
1022  * Return: 0 on success; error code on failure
1023  */
1024 int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
1025 			hal_ring_handle_t hal_ring_hdl,
1026 			int hal_ring_type, int ring_buf_size,
1027 			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
1028 {
1029 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
1030 	struct dp_htt_htc_pkt *pkt;
1031 	qdf_nbuf_t htt_msg;
1032 	uint32_t *msg_word;
1033 	struct hal_srng_params srng_params;
1034 	uint32_t htt_ring_type, htt_ring_id;
1035 	uint32_t tlv_filter;
1036 	uint8_t *htt_logger_bufp;
1037 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
1038 	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
1039 	int target_pdev_id;
1040 
1041 	htt_msg = qdf_nbuf_alloc(soc->osdev,
1042 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
1043 	/* reserve room for the HTC header */
1044 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
1045 	if (!htt_msg)
1046 		goto fail0;
1047 
1048 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
1049 
1050 	switch (hal_ring_type) {
1051 	case RXDMA_BUF:
1052 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
1053 		htt_ring_type = HTT_SW_TO_HW_RING;
1054 		break;
1055 	case RXDMA_MONITOR_BUF:
1056 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
1057 		htt_ring_type = HTT_SW_TO_HW_RING;
1058 		break;
1059 	case RXDMA_MONITOR_STATUS:
1060 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
1061 		htt_ring_type = HTT_SW_TO_HW_RING;
1062 		break;
1063 	case RXDMA_MONITOR_DST:
1064 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
1065 		htt_ring_type = HTT_HW_TO_SW_RING;
1066 		break;
1067 	case RXDMA_MONITOR_DESC:
1068 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
1069 		htt_ring_type = HTT_SW_TO_HW_RING;
1070 		break;
1071 	case RXDMA_DST:
1072 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
1073 		htt_ring_type = HTT_HW_TO_SW_RING;
1074 		break;
1075 
1076 	default:
1077 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1078 			"%s: Ring currently not supported", __func__);
1079 		goto fail1;
1080 	}
1081 
1082 	/*
1083 	 * Set the length of the message.
1084 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1085 	 * separately during the below call to qdf_nbuf_push_head.
1086 	 * The contribution from the HTC header is added separately inside HTC.
1087 	 */
1088 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
1089 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1090 			"%s: Failed to expand head for RX Ring Cfg msg",
1091 			__func__);
1092 		goto fail1; /* failure */
1093 	}
1094 
1095 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1096 
1097 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1098 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1099 
1100 	/* word 0 */
1101 	htt_logger_bufp = (uint8_t *)msg_word;
1102 	*msg_word = 0;
1103 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
1104 
1105 	/*
1106 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1;
1107 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this.
1108 	 */
1109 	target_pdev_id =
1110 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);
1111 
1112 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
1113 			htt_ring_type == HTT_SW_TO_HW_RING)
1114 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
1115 						      target_pdev_id);
1116 
1117 	/* TODO: Discuss with FW on changing this to unique ID and using
1118 	 * htt_ring_type to send the type of ring
1119 	 */
1120 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
1121 
1122 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
1123 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
1124 
1125 	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
1126 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
1127 
1128 	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
1129 						htt_tlv_filter->offset_valid);
1130 
1131 	if (mon_drop_th > 0)
1132 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1133 								   1);
1134 	else
1135 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1136 								   0);
1137 
1138 	/* word 1 */
1139 	msg_word++;
1140 	*msg_word = 0;
1141 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
1142 		ring_buf_size);
1143 
1144 	/* word 2 */
1145 	msg_word++;
1146 	*msg_word = 0;
1147 
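	/*
	 * Each htt_rx_ring_pkt_enable_subtype_set() call below enables one
	 * 802.11 frame subtype (the 4-bit binary literal) for a given filter
	 * mode: FP = filter pass, MD = monitor direct, MO = monitor other.
	 */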
1148 	if (htt_tlv_filter->enable_fp) {
1149 		/* TYPE: MGMT */
1150 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1151 			FP, MGMT, 0000,
1152 			(htt_tlv_filter->fp_mgmt_filter &
1153 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1154 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1155 			FP, MGMT, 0001,
1156 			(htt_tlv_filter->fp_mgmt_filter &
1157 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1158 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1159 			FP, MGMT, 0010,
1160 			(htt_tlv_filter->fp_mgmt_filter &
1161 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1162 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1163 			FP, MGMT, 0011,
1164 			(htt_tlv_filter->fp_mgmt_filter &
1165 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1166 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1167 			FP, MGMT, 0100,
1168 			(htt_tlv_filter->fp_mgmt_filter &
1169 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1170 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1171 			FP, MGMT, 0101,
1172 			(htt_tlv_filter->fp_mgmt_filter &
1173 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1174 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1175 			FP, MGMT, 0110,
1176 			(htt_tlv_filter->fp_mgmt_filter &
1177 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1178 		/* reserved */
1179 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
1180 			MGMT, 0111,
1181 			(htt_tlv_filter->fp_mgmt_filter &
1182 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1183 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1184 			FP, MGMT, 1000,
1185 			(htt_tlv_filter->fp_mgmt_filter &
1186 			FILTER_MGMT_BEACON) ? 1 : 0);
1187 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1188 			FP, MGMT, 1001,
1189 			(htt_tlv_filter->fp_mgmt_filter &
1190 			FILTER_MGMT_ATIM) ? 1 : 0);
1191 	}
1192 
1193 	if (htt_tlv_filter->enable_md) {
1194 			/* TYPE: MGMT */
1195 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1196 			MD, MGMT, 0000,
1197 			(htt_tlv_filter->md_mgmt_filter &
1198 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1199 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1200 			MD, MGMT, 0001,
1201 			(htt_tlv_filter->md_mgmt_filter &
1202 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1203 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1204 			MD, MGMT, 0010,
1205 			(htt_tlv_filter->md_mgmt_filter &
1206 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1207 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1208 			MD, MGMT, 0011,
1209 			(htt_tlv_filter->md_mgmt_filter &
1210 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1211 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1212 			MD, MGMT, 0100,
1213 			(htt_tlv_filter->md_mgmt_filter &
1214 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1215 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1216 			MD, MGMT, 0101,
1217 			(htt_tlv_filter->md_mgmt_filter &
1218 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1219 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1220 			MD, MGMT, 0110,
1221 			(htt_tlv_filter->md_mgmt_filter &
1222 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1223 		/* reserved */
1224 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1225 			MGMT, 0111,
1226 			(htt_tlv_filter->md_mgmt_filter &
1227 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1228 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1229 			MD, MGMT, 1000,
1230 			(htt_tlv_filter->md_mgmt_filter &
1231 			FILTER_MGMT_BEACON) ? 1 : 0);
1232 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1233 			MD, MGMT, 1001,
1234 			(htt_tlv_filter->md_mgmt_filter &
1235 			FILTER_MGMT_ATIM) ? 1 : 0);
1236 	}
1237 
1238 	if (htt_tlv_filter->enable_mo) {
1239 		/* TYPE: MGMT */
1240 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1241 			MO, MGMT, 0000,
1242 			(htt_tlv_filter->mo_mgmt_filter &
1243 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1244 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1245 			MO, MGMT, 0001,
1246 			(htt_tlv_filter->mo_mgmt_filter &
1247 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1248 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1249 			MO, MGMT, 0010,
1250 			(htt_tlv_filter->mo_mgmt_filter &
1251 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1252 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1253 			MO, MGMT, 0011,
1254 			(htt_tlv_filter->mo_mgmt_filter &
1255 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1256 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1257 			MO, MGMT, 0100,
1258 			(htt_tlv_filter->mo_mgmt_filter &
1259 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1260 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1261 			MO, MGMT, 0101,
1262 			(htt_tlv_filter->mo_mgmt_filter &
1263 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1264 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1265 			MO, MGMT, 0110,
1266 			(htt_tlv_filter->mo_mgmt_filter &
1267 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1268 		/* reserved */
1269 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1270 			MGMT, 0111,
1271 			(htt_tlv_filter->mo_mgmt_filter &
1272 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1273 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1274 			MO, MGMT, 1000,
1275 			(htt_tlv_filter->mo_mgmt_filter &
1276 			FILTER_MGMT_BEACON) ? 1 : 0);
1277 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1278 			MO, MGMT, 1001,
1279 			(htt_tlv_filter->mo_mgmt_filter &
1280 			FILTER_MGMT_ATIM) ? 1 : 0);
1281 	}
1282 
1283 	/* word 3 */
1284 	msg_word++;
1285 	*msg_word = 0;
1286 
1287 	if (htt_tlv_filter->enable_fp) {
1288 		/* TYPE: MGMT */
1289 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1290 			FP, MGMT, 1010,
1291 			(htt_tlv_filter->fp_mgmt_filter &
1292 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1293 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1294 			FP, MGMT, 1011,
1295 			(htt_tlv_filter->fp_mgmt_filter &
1296 			FILTER_MGMT_AUTH) ? 1 : 0);
1297 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1298 			FP, MGMT, 1100,
1299 			(htt_tlv_filter->fp_mgmt_filter &
1300 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1301 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1302 			FP, MGMT, 1101,
1303 			(htt_tlv_filter->fp_mgmt_filter &
1304 			FILTER_MGMT_ACTION) ? 1 : 0);
1305 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1306 			FP, MGMT, 1110,
1307 			(htt_tlv_filter->fp_mgmt_filter &
1308 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1309 		/* reserved*/
1310 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1311 			MGMT, 1111,
1312 			(htt_tlv_filter->fp_mgmt_filter &
1313 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1314 	}
1315 
1316 	if (htt_tlv_filter->enable_md) {
1317 			/* TYPE: MGMT */
1318 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1319 			MD, MGMT, 1010,
1320 			(htt_tlv_filter->md_mgmt_filter &
1321 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1322 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1323 			MD, MGMT, 1011,
1324 			(htt_tlv_filter->md_mgmt_filter &
1325 			FILTER_MGMT_AUTH) ? 1 : 0);
1326 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1327 			MD, MGMT, 1100,
1328 			(htt_tlv_filter->md_mgmt_filter &
1329 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1330 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1331 			MD, MGMT, 1101,
1332 			(htt_tlv_filter->md_mgmt_filter &
1333 			FILTER_MGMT_ACTION) ? 1 : 0);
1334 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1335 			MD, MGMT, 1110,
1336 			(htt_tlv_filter->md_mgmt_filter &
1337 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1338 	}
1339 
1340 	if (htt_tlv_filter->enable_mo) {
1341 		/* TYPE: MGMT */
1342 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1343 			MO, MGMT, 1010,
1344 			(htt_tlv_filter->mo_mgmt_filter &
1345 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1346 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1347 			MO, MGMT, 1011,
1348 			(htt_tlv_filter->mo_mgmt_filter &
1349 			FILTER_MGMT_AUTH) ? 1 : 0);
1350 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1351 			MO, MGMT, 1100,
1352 			(htt_tlv_filter->mo_mgmt_filter &
1353 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1354 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1355 			MO, MGMT, 1101,
1356 			(htt_tlv_filter->mo_mgmt_filter &
1357 			FILTER_MGMT_ACTION) ? 1 : 0);
1358 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1359 			MO, MGMT, 1110,
1360 			(htt_tlv_filter->mo_mgmt_filter &
1361 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1362 		/* reserved*/
1363 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1364 			MGMT, 1111,
1365 			(htt_tlv_filter->mo_mgmt_filter &
1366 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1367 	}
1368 
1369 	/* word 4 */
1370 	msg_word++;
1371 	*msg_word = 0;
1372 
1373 	if (htt_tlv_filter->enable_fp) {
1374 		/* TYPE: CTRL */
1375 		/* reserved */
1376 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1377 			CTRL, 0000,
1378 			(htt_tlv_filter->fp_ctrl_filter &
1379 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1380 		/* reserved */
1381 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1382 			CTRL, 0001,
1383 			(htt_tlv_filter->fp_ctrl_filter &
1384 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1385 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1386 			CTRL, 0010,
1387 			(htt_tlv_filter->fp_ctrl_filter &
1388 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1389 		/* reserved */
1390 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1391 			CTRL, 0011,
1392 			(htt_tlv_filter->fp_ctrl_filter &
1393 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1394 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1395 			CTRL, 0100,
1396 			(htt_tlv_filter->fp_ctrl_filter &
1397 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1398 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1399 			CTRL, 0101,
1400 			(htt_tlv_filter->fp_ctrl_filter &
1401 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1402 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1403 			CTRL, 0110,
1404 			(htt_tlv_filter->fp_ctrl_filter &
1405 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1406 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1407 			CTRL, 0111,
1408 			(htt_tlv_filter->fp_ctrl_filter &
1409 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1410 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1411 			CTRL, 1000,
1412 			(htt_tlv_filter->fp_ctrl_filter &
1413 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1414 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1415 			CTRL, 1001,
1416 			(htt_tlv_filter->fp_ctrl_filter &
1417 			FILTER_CTRL_BA) ? 1 : 0);
1418 	}
1419 
1420 	if (htt_tlv_filter->enable_md) {
1421 		/* TYPE: CTRL */
1422 		/* reserved */
1423 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1424 			CTRL, 0000,
1425 			(htt_tlv_filter->md_ctrl_filter &
1426 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1427 		/* reserved */
1428 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1429 			CTRL, 0001,
1430 			(htt_tlv_filter->md_ctrl_filter &
1431 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1432 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1433 			CTRL, 0010,
1434 			(htt_tlv_filter->md_ctrl_filter &
1435 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1436 		/* reserved */
1437 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1438 			CTRL, 0011,
1439 			(htt_tlv_filter->md_ctrl_filter &
1440 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1441 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1442 			CTRL, 0100,
1443 			(htt_tlv_filter->md_ctrl_filter &
1444 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1445 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1446 			CTRL, 0101,
1447 			(htt_tlv_filter->md_ctrl_filter &
1448 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1449 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1450 			CTRL, 0110,
1451 			(htt_tlv_filter->md_ctrl_filter &
1452 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1453 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1454 			CTRL, 0111,
1455 			(htt_tlv_filter->md_ctrl_filter &
1456 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1457 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1458 			CTRL, 1000,
1459 			(htt_tlv_filter->md_ctrl_filter &
1460 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1461 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1462 			CTRL, 1001,
1463 			(htt_tlv_filter->md_ctrl_filter &
1464 			FILTER_CTRL_BA) ? 1 : 0);
1465 	}
1466 
1467 	if (htt_tlv_filter->enable_mo) {
1468 		/* TYPE: CTRL */
1469 		/* reserved */
1470 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1471 			CTRL, 0000,
1472 			(htt_tlv_filter->mo_ctrl_filter &
1473 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1474 		/* reserved */
1475 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1476 			CTRL, 0001,
1477 			(htt_tlv_filter->mo_ctrl_filter &
1478 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1479 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1480 			CTRL, 0010,
1481 			(htt_tlv_filter->mo_ctrl_filter &
1482 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1483 		/* reserved */
1484 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1485 			CTRL, 0011,
1486 			(htt_tlv_filter->mo_ctrl_filter &
1487 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1488 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1489 			CTRL, 0100,
1490 			(htt_tlv_filter->mo_ctrl_filter &
1491 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1492 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1493 			CTRL, 0101,
1494 			(htt_tlv_filter->mo_ctrl_filter &
1495 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1496 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1497 			CTRL, 0110,
1498 			(htt_tlv_filter->mo_ctrl_filter &
1499 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1500 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1501 			CTRL, 0111,
1502 			(htt_tlv_filter->mo_ctrl_filter &
1503 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1504 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1505 			CTRL, 1000,
1506 			(htt_tlv_filter->mo_ctrl_filter &
1507 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1508 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1509 			CTRL, 1001,
1510 			(htt_tlv_filter->mo_ctrl_filter &
1511 			FILTER_CTRL_BA) ? 1 : 0);
1512 	}
1513 
1514 	/* word 5 */
1515 	msg_word++;
1516 	*msg_word = 0;
1517 	if (htt_tlv_filter->enable_fp) {
1518 		/* TYPE: CTRL */
1519 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1520 			CTRL, 1010,
1521 			(htt_tlv_filter->fp_ctrl_filter &
1522 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1523 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1524 			CTRL, 1011,
1525 			(htt_tlv_filter->fp_ctrl_filter &
1526 			FILTER_CTRL_RTS) ? 1 : 0);
1527 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1528 			CTRL, 1100,
1529 			(htt_tlv_filter->fp_ctrl_filter &
1530 			FILTER_CTRL_CTS) ? 1 : 0);
1531 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1532 			CTRL, 1101,
1533 			(htt_tlv_filter->fp_ctrl_filter &
1534 			FILTER_CTRL_ACK) ? 1 : 0);
1535 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1536 			CTRL, 1110,
1537 			(htt_tlv_filter->fp_ctrl_filter &
1538 			FILTER_CTRL_CFEND) ? 1 : 0);
1539 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1540 			CTRL, 1111,
1541 			(htt_tlv_filter->fp_ctrl_filter &
1542 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1543 		/* TYPE: DATA */
1544 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1545 			DATA, MCAST,
1546 			(htt_tlv_filter->fp_data_filter &
1547 			FILTER_DATA_MCAST) ? 1 : 0);
1548 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1549 			DATA, UCAST,
1550 			(htt_tlv_filter->fp_data_filter &
1551 			FILTER_DATA_UCAST) ? 1 : 0);
1552 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1553 			DATA, NULL,
1554 			(htt_tlv_filter->fp_data_filter &
1555 			FILTER_DATA_NULL) ? 1 : 0);
1556 	}
1557 
1558 	if (htt_tlv_filter->enable_md) {
1559 		/* TYPE: CTRL */
1560 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1561 			CTRL, 1010,
1562 			(htt_tlv_filter->md_ctrl_filter &
1563 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1564 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1565 			CTRL, 1011,
1566 			(htt_tlv_filter->md_ctrl_filter &
1567 			FILTER_CTRL_RTS) ? 1 : 0);
1568 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1569 			CTRL, 1100,
1570 			(htt_tlv_filter->md_ctrl_filter &
1571 			FILTER_CTRL_CTS) ? 1 : 0);
1572 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1573 			CTRL, 1101,
1574 			(htt_tlv_filter->md_ctrl_filter &
1575 			FILTER_CTRL_ACK) ? 1 : 0);
1576 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1577 			CTRL, 1110,
1578 			(htt_tlv_filter->md_ctrl_filter &
1579 			FILTER_CTRL_CFEND) ? 1 : 0);
1580 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1581 			CTRL, 1111,
1582 			(htt_tlv_filter->md_ctrl_filter &
1583 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1584 		/* TYPE: DATA */
1585 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1586 			DATA, MCAST,
1587 			(htt_tlv_filter->md_data_filter &
1588 			FILTER_DATA_MCAST) ? 1 : 0);
1589 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1590 			DATA, UCAST,
1591 			(htt_tlv_filter->md_data_filter &
1592 			FILTER_DATA_UCAST) ? 1 : 0);
1593 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1594 			DATA, NULL,
1595 			(htt_tlv_filter->md_data_filter &
1596 			FILTER_DATA_NULL) ? 1 : 0);
1597 	}
1598 
1599 	if (htt_tlv_filter->enable_mo) {
1600 		/* TYPE: CTRL */
1601 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1602 			CTRL, 1010,
1603 			(htt_tlv_filter->mo_ctrl_filter &
1604 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1605 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1606 			CTRL, 1011,
1607 			(htt_tlv_filter->mo_ctrl_filter &
1608 			FILTER_CTRL_RTS) ? 1 : 0);
1609 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1610 			CTRL, 1100,
1611 			(htt_tlv_filter->mo_ctrl_filter &
1612 			FILTER_CTRL_CTS) ? 1 : 0);
1613 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1614 			CTRL, 1101,
1615 			(htt_tlv_filter->mo_ctrl_filter &
1616 			FILTER_CTRL_ACK) ? 1 : 0);
1617 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1618 			CTRL, 1110,
1619 			(htt_tlv_filter->mo_ctrl_filter &
1620 			FILTER_CTRL_CFEND) ? 1 : 0);
1621 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1622 			CTRL, 1111,
1623 			(htt_tlv_filter->mo_ctrl_filter &
1624 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1625 		/* TYPE: DATA */
1626 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1627 			DATA, MCAST,
1628 			(htt_tlv_filter->mo_data_filter &
1629 			FILTER_DATA_MCAST) ? 1 : 0);
1630 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1631 			DATA, UCAST,
1632 			(htt_tlv_filter->mo_data_filter &
1633 			FILTER_DATA_UCAST) ? 1 : 0);
1634 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1635 			DATA, NULL,
1636 			(htt_tlv_filter->mo_data_filter &
1637 			FILTER_DATA_NULL) ? 1 : 0);
1638 	}
1639 
1640 	/* word 6 */
1641 	msg_word++;
1642 	*msg_word = 0;
1643 	tlv_filter = 0;
1644 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1645 		htt_tlv_filter->mpdu_start);
1646 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1647 		htt_tlv_filter->msdu_start);
1648 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1649 		htt_tlv_filter->packet);
1650 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1651 		htt_tlv_filter->msdu_end);
1652 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1653 		htt_tlv_filter->mpdu_end);
1654 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1655 		htt_tlv_filter->packet_header);
1656 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1657 		htt_tlv_filter->attention);
1658 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1659 		htt_tlv_filter->ppdu_start);
1660 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1661 		htt_tlv_filter->ppdu_end);
1662 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1663 		htt_tlv_filter->ppdu_end_user_stats);
1664 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1665 		PPDU_END_USER_STATS_EXT,
1666 		htt_tlv_filter->ppdu_end_user_stats_ext);
1667 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1668 		htt_tlv_filter->ppdu_end_status_done);
1669 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter */
1670 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1671 		htt_tlv_filter->header_per_msdu);
1672 
1673 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1674 
1675 	msg_word++;
1676 	*msg_word = 0;
1677 	if (htt_tlv_filter->offset_valid) {
1678 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1679 					htt_tlv_filter->rx_packet_offset);
1680 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1681 					htt_tlv_filter->rx_header_offset);
1682 
1683 		msg_word++;
1684 		*msg_word = 0;
1685 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1686 					htt_tlv_filter->rx_mpdu_end_offset);
1687 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1688 					htt_tlv_filter->rx_mpdu_start_offset);
1689 
1690 		msg_word++;
1691 		*msg_word = 0;
1692 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1693 					htt_tlv_filter->rx_msdu_end_offset);
1694 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1695 					htt_tlv_filter->rx_msdu_start_offset);
1696 
1697 		msg_word++;
1698 		*msg_word = 0;
1699 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1700 					htt_tlv_filter->rx_attn_offset);
1701 		msg_word++;
1702 		*msg_word = 0;
1703 	} else {
1704 		msg_word += 4;
1705 		*msg_word = 0;
1706 	}
1707 
1708 	if (mon_drop_th > 0)
1709 		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1710 								mon_drop_th);
1711 
1712 	/* "response_required" field should be set if an HTT response message is
1713 	 * required after setting up the ring.
1714 	 */
1715 	pkt = htt_htc_pkt_alloc(soc);
1716 	if (!pkt)
1717 		goto fail1;
1718 
1719 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1720 
1721 	SET_HTC_PACKET_INFO_TX(
1722 		&pkt->htc_pkt,
1723 		dp_htt_h2t_send_complete_free_netbuf,
1724 		qdf_nbuf_data(htt_msg),
1725 		qdf_nbuf_len(htt_msg),
1726 		soc->htc_endpoint,
1727 		1); /* tag - not relevant here */
1728 
1729 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1730 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1731 			    htt_logger_bufp);
1732 	return QDF_STATUS_SUCCESS;
1733 
1734 fail1:
1735 	qdf_nbuf_free(htt_msg);
1736 fail0:
1737 	return QDF_STATUS_E_FAILURE;
1738 }
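
/*
 * Editor's note: an illustrative, self-contained sketch of the word-packing
 * pattern used by the htt_rx_ring_pkt_enable_subtype_set() calls above --
 * each filter flag is shifted to its bit position and OR'd into a 32-bit
 * message word. The macro name and bit position below are hypothetical,
 * not real HTT definitions.
 */
#if 0
#include <stdint.h>

#define EX_FILTER_BIT_S 5	/* hypothetical bit position */

static void ex_pack_enable_bit(uint32_t *msg_word, int enable)
{
	/* clear the field, then set it to 0 or 1 */
	*msg_word &= ~((uint32_t)1 << EX_FILTER_BIT_S);
	*msg_word |= ((uint32_t)(enable ? 1 : 0) << EX_FILTER_BIT_S);
}
#endif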
1739 
1740 #if defined(HTT_STATS_ENABLE)
1741 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1742 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1744 {
1745 	uint32_t pdev_id;
1746 	uint32_t *msg_word = NULL;
1747 	uint32_t msg_remain_len = 0;
1748 
1749 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1750 
1751 	/* COOKIE MSB */
1752 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1753 
1754 	/* stats message length + 16 bytes of HTT header */
1755 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1756 				(uint32_t)DP_EXT_MSG_LENGTH);
1757 
1758 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1759 			msg_word,  msg_remain_len,
1760 			WDI_NO_VAL, pdev_id);
1761 
1762 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1763 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1764 	}
1765 	/* Needs to be freed here, as the WDI handler will
1766 	 * make a copy of the pkt to send the data to the application
1767 	 */
1768 	qdf_nbuf_free(htt_msg);
1769 	return QDF_STATUS_SUCCESS;
1770 }
1771 #else
1772 static inline QDF_STATUS
1773 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1774 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
1775 {
1776 	return QDF_STATUS_E_NOSUPPORT;
1777 }
1778 #endif
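
/*
 * Editor's note: a hedged sketch (not driver code) of the cookie decode used
 * above -- the third message word carries the cookie MSB, and its two low
 * bits (HTT_PID_BIT_MASK == 0x3) select one of up to four pdevs.
 */
#if 0
#include <stdint.h>

static uint32_t ex_extract_pdev_id(const uint32_t *msg_word)
{
	/* msg_word[2] is the cookie MSB; the low two bits carry the pdev id */
	return msg_word[2] & 0x3;
}
#endif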
1779 /**
1780  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1781  * @htt_stats: htt stats info
1782  *
1783  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1784  * contains sub messages which are identified by a TLV header.
1785  * In this function we will process the stream of T2H messages and read all the
1786  * TLV contained in the message.
1787  *
1788  * The following cases are handled:
1789  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1790  *		In this case the buffer will contain multiple tlvs.
1791  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1792  *		Only one tlv will be contained in the HTT message and this tag
1793  *		will extend onto the next buffer.
1794  * Case 3: When the buffer is the continuation of the previous message
1795  * Case 4: When the TLV length is 0, which indicates the end of the message
1796  *
1797  * return: void
1798  */
1799 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
1800 					struct dp_soc *soc)
1801 {
1802 	htt_tlv_tag_t tlv_type = 0xff;
1803 	qdf_nbuf_t htt_msg = NULL;
1804 	uint32_t *msg_word;
1805 	uint8_t *tlv_buf_head = NULL;
1806 	uint8_t *tlv_buf_tail = NULL;
1807 	uint32_t msg_remain_len = 0;
1808 	uint32_t tlv_remain_len = 0;
1809 	uint32_t *tlv_start;
1810 	int cookie_val;
1811 	int cookie_msb;
1812 	int pdev_id;
1813 	bool copy_stats = false;
1814 	struct dp_pdev *pdev;
1815 
1816 	/* Process node in the HTT message queue */
1817 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1818 		!= NULL) {
1819 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1820 		cookie_val = *(msg_word + 1);
1821 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
1822 					*(msg_word +
1823 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
1824 
1825 		if (cookie_val) {
1826 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
1827 					== QDF_STATUS_SUCCESS) {
1828 				continue;
1829 			}
1830 		}
1831 
1832 		cookie_msb = *(msg_word + 2);
1833 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1834 		pdev = soc->pdev_list[pdev_id];
1835 
1836 		if (cookie_msb >> 2) {
1837 			copy_stats = true;
1838 		}
1839 
1840 		/* read 5th word */
1841 		msg_word = msg_word + 4;
1842 		msg_remain_len = qdf_min(htt_stats->msg_len,
1843 				(uint32_t) DP_EXT_MSG_LENGTH);
1844 		/* Keep processing the node till node length is 0 */
1845 		while (msg_remain_len) {
1846 			/*
1847 			 * if message is not a continuation of previous message
1848 			 * read the tlv type and tlv length
1849 			 */
1850 			if (!tlv_buf_head) {
1851 				tlv_type = HTT_STATS_TLV_TAG_GET(
1852 						*msg_word);
1853 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
1854 						*msg_word);
1855 			}
1856 
1857 			if (tlv_remain_len == 0) {
1858 				msg_remain_len = 0;
1859 
1860 				if (tlv_buf_head) {
1861 					qdf_mem_free(tlv_buf_head);
1862 					tlv_buf_head = NULL;
1863 					tlv_buf_tail = NULL;
1864 				}
1865 
1866 				goto error;
1867 			}
1868 
1869 			if (!tlv_buf_head)
1870 				tlv_remain_len += HTT_TLV_HDR_LEN;
1871 
1872 			if (tlv_remain_len <= msg_remain_len) {
1873 				/* Case 3 */
1874 				if (tlv_buf_head) {
1875 					qdf_mem_copy(tlv_buf_tail,
1876 							(uint8_t *)msg_word,
1877 							tlv_remain_len);
1878 					tlv_start = (uint32_t *)tlv_buf_head;
1879 				} else {
1880 					/* Case 1 */
1881 					tlv_start = msg_word;
1882 				}
1883 
1884 				if (copy_stats)
1885 					dp_htt_stats_copy_tag(pdev,
1886 							      tlv_type,
1887 							      tlv_start);
1888 				else
1889 					dp_htt_stats_print_tag(pdev,
1890 							       tlv_type,
1891 							       tlv_start);
1892 
1893 				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
1894 				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
1895 					dp_peer_update_inactive_time(pdev,
1896 								     tlv_type,
1897 								     tlv_start);
1898 
1899 				msg_remain_len -= tlv_remain_len;
1900 
1901 				msg_word = (uint32_t *)
1902 					(((uint8_t *)msg_word) +
1903 					tlv_remain_len);
1904 
1905 				tlv_remain_len = 0;
1906 
1907 				if (tlv_buf_head) {
1908 					qdf_mem_free(tlv_buf_head);
1909 					tlv_buf_head = NULL;
1910 					tlv_buf_tail = NULL;
1911 				}
1912 
1913 			} else { /* tlv_remain_len > msg_remain_len */
1914 				/* Case 2 & 3 */
1915 				if (!tlv_buf_head) {
1916 					tlv_buf_head = qdf_mem_malloc(
1917 							tlv_remain_len);
1918 
1919 					if (!tlv_buf_head) {
1920 						QDF_TRACE(QDF_MODULE_ID_TXRX,
1921 								QDF_TRACE_LEVEL_ERROR,
1922 								"Alloc failed");
1923 						goto error;
1924 					}
1925 
1926 					tlv_buf_tail = tlv_buf_head;
1927 				}
1928 
1929 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
1930 						msg_remain_len);
1931 				tlv_remain_len -= msg_remain_len;
1932 				tlv_buf_tail += msg_remain_len;
1933 			}
1934 		}
1935 
1936 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1937 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1938 		}
1939 
1940 		qdf_nbuf_free(htt_msg);
1941 	}
1942 	return;
1943 
1944 error:
1945 	qdf_nbuf_free(htt_msg);
1946 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1947 			!= NULL)
1948 		qdf_nbuf_free(htt_msg);
1949 }
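
/*
 * Editor's note: a compact, hypothetical sketch of the reassembly performed
 * above. A TLV either fits in the remaining buffer and is consumed in place
 * (Cases 1/3), or it spans buffers and its remainder is carried over to the
 * next one (Case 2). ex_tlv_total_len() is an invented header parser
 * standing in for HTT_STATS_TLV_LENGTH_GET() plus the header size.
 */
#if 0
#include <stdint.h>

uint32_t ex_tlv_total_len(const uint8_t *buf);	/* hypothetical */

static void ex_consume_tlvs(const uint8_t *buf, uint32_t buf_len,
			    uint32_t *pending)
{
	while (buf_len) {
		uint32_t need = *pending ? *pending : ex_tlv_total_len(buf);

		if (need <= buf_len) {
			/* Cases 1/3: the TLV completes inside this buffer */
			buf += need;
			buf_len -= need;
			*pending = 0;
		} else {
			/* Case 2: the TLV spans into the next buffer */
			*pending = need - buf_len;
			buf_len = 0;
		}
	}
}
#endif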
1950 
1951 void htt_t2h_stats_handler(void *context)
1952 {
1953 	struct dp_soc *soc = (struct dp_soc *)context;
1954 	struct htt_stats_context htt_stats;
1955 	uint32_t *msg_word;
1956 	qdf_nbuf_t htt_msg = NULL;
1957 	uint8_t done;
1958 	uint32_t rem_stats;
1959 
1960 	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
1961 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1962 			"soc: 0x%pK, init_done: %d", soc,
1963 			qdf_atomic_read(&soc->cmn_init_done));
1964 		return;
1965 	}
1966 
1967 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1968 	qdf_nbuf_queue_init(&htt_stats.msg);
1969 
1970 	/* pull one completed stats from soc->htt_stats_msg and process */
1971 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1972 	if (!soc->htt_stats.num_stats) {
1973 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1974 		return;
1975 	}
1976 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1977 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1978 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1979 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1980 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1981 		/*
1982 		 * Done bit signifies that this is the last T2H buffer in the
1983 		 * stream of HTT EXT STATS message
1984 		 */
1985 		if (done)
1986 			break;
1987 	}
1988 	rem_stats = --soc->htt_stats.num_stats;
1989 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1990 
1991 	/* If there are more stats to process, schedule the stats work again.
1992 	 * The work is scheduled before processing htt_stats so that it is
1993 	 * queued at an earlier index.
1994 	 */
1995 	if (rem_stats)
1996 		qdf_sched_work(0, &soc->htt_stats.work);
1997 
1998 	dp_process_htt_stat_msg(&htt_stats, soc);
1999 }
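
/*
 * Editor's note: hypothetical sketch of the segmentation handled above -- a
 * multi-buffer EXT STATS response is a run of T2H buffers ending with the
 * one whose DONE bit is set, so buffers are moved to a local queue until
 * that bit is seen. Names below are invented for illustration.
 */
#if 0
#include <stddef.h>

/* Return how many buffers form the first complete response, 0 if none. */
static size_t ex_segment_len(const int *done_bits, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (done_bits[i])	/* last T2H buffer of this response */
			return i + 1;
	}
	return 0;			/* response still in flight */
}
#endif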
2000 
2001 /*
2002  * dp_get_ppdu_info_user_index: Find, or allocate, the per-user descriptor
2003  * index for a peer id within a PPDU
2004  * @pdev: DP pdev handle
2005  * @peer_id: peer unique identifier
2006  * @ppdu_info: per ppdu tlv structure
2007  *
2008  * return: user index to be populated
2009  */
2010 #ifdef FEATURE_PERPKT_INFO
2011 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
2012 						uint16_t peer_id,
2013 						struct ppdu_info *ppdu_info)
2014 {
2015 	uint8_t user_index = 0;
2016 	struct cdp_tx_completion_ppdu *ppdu_desc;
2017 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2018 
2019 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2020 
2021 	while ((user_index + 1) <= ppdu_info->last_user) {
2022 		ppdu_user_desc = &ppdu_desc->user[user_index];
2023 		if (ppdu_user_desc->peer_id != peer_id) {
2024 			user_index++;
2025 			continue;
2026 		} else {
2027 			/* Max users possible is 8 so user array index should
2028 			 * not exceed 7
2029 			 */
2030 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
2031 			return user_index;
2032 		}
2033 	}
2034 
2035 	ppdu_info->last_user++;
2036 	/* Max users possible is 8 so last user should not exceed 8 */
2037 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
2038 	return ppdu_info->last_user - 1;
2039 }
2040 
2041 /*
2042  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
2043  * @pdev: DP pdev handle
2044  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
2045  * @ppdu_info: per ppdu tlv structure
2046  *
2047  * return:void
2048  */
2049 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
2050 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
2051 {
2052 	uint16_t frame_type;
2053 	uint16_t frame_ctrl;
2054 	uint16_t freq;
2055 	struct dp_soc *soc = NULL;
2056 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2057 	uint64_t ppdu_start_timestamp;
2058 	uint32_t *start_tag_buf;
2059 
2060 	start_tag_buf = tag_buf;
2061 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2062 
2063 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
2064 	ppdu_info->sched_cmdid =
2065 		HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
2066 	ppdu_desc->num_users =
2067 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
2068 
2069 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
2070 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
2071 	ppdu_desc->htt_frame_type = frame_type;
2072 
2073 	frame_ctrl = ppdu_desc->frame_ctrl;
2074 
2075 	switch (frame_type) {
2076 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
2077 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
2078 	case HTT_STATS_FTYPE_SGEN_QOS_NULL:
2079 		/*
2080 		 * for management packets, the frame type comes as DATA_SU;
2081 		 * frame_ctrl needs to be checked before setting frame_type
2082 		 */
2083 		if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
2084 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2085 		else
2086 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
2087 	break;
2088 	case HTT_STATS_FTYPE_SGEN_MU_BAR:
2089 	case HTT_STATS_FTYPE_SGEN_BAR:
2090 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
2091 		ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
2092 	break;
2093 	default:
2094 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2095 	break;
2096 	}
2097 
2098 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
2099 	ppdu_desc->tx_duration = *tag_buf;
2100 
2101 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
2102 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
2103 
2104 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
2105 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
2106 	if (freq != ppdu_desc->channel) {
2107 		soc = pdev->soc;
2108 		ppdu_desc->channel = freq;
2109 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
2110 			pdev->operating_channel =
2111 				soc->cdp_soc.ol_ops->freq_to_channel(
2112 					soc->ctrl_psoc, pdev->pdev_id, freq);
2113 	}
2114 
2115 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
2116 
2117 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
2118 	ppdu_desc->beam_change =
2119 		HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
2120 
2121 	dp_tx_capture_htt_frame_counter(pdev, frame_type);
2122 
2123 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
2124 	ppdu_start_timestamp = *tag_buf;
2125 	ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
2126 					     HTT_SHIFT_UPPER_TIMESTAMP) &
2127 					    HTT_MASK_UPPER_TIMESTAMP);
2128 
2129 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2130 					ppdu_desc->tx_duration;
2131 	/* Ack time stamp is same as end time stamp */
2132 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
2133 
2137 	ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
2138 	ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
2139 	ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
2143 }
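
/*
 * Editor's note: the 64-bit PPDU start time above is assembled from two
 * 32-bit TLV words; a self-contained equivalent (hypothetical names):
 */
#if 0
#include <stdint.h>

static uint64_t ex_combine_timestamp(uint32_t tstmp_l32, uint32_t tstmp_u32)
{
	/* HTT_SHIFT_UPPER_TIMESTAMP is 32 and HTT_MASK_UPPER_TIMESTAMP keeps
	 * only the upper dword, so this reduces to a plain 64-bit compose
	 */
	return ((uint64_t)tstmp_u32 << 32) | tstmp_l32;
}
#endif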
2144 
2145 /*
2146  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
 * @pdev: DP pdev handle
2147  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
2148  * @ppdu_info: per ppdu tlv structure
2149  *
2150  * return:void
2151  */
2152 static void dp_process_ppdu_stats_user_common_tlv(
2153 		struct dp_pdev *pdev, uint32_t *tag_buf,
2154 		struct ppdu_info *ppdu_info)
2155 {
2156 	uint16_t peer_id;
2157 	struct cdp_tx_completion_ppdu *ppdu_desc;
2158 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2159 	uint8_t curr_user_index = 0;
2160 	struct dp_peer *peer;
2161 	struct dp_vdev *vdev;
2162 
2163 	ppdu_desc =
2164 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2165 
2166 	tag_buf++;
2167 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2168 
2169 	curr_user_index =
2170 		dp_get_ppdu_info_user_index(pdev,
2171 					    peer_id, ppdu_info);
2172 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2173 
2174 	if (peer_id == DP_SCAN_PEER_ID) {
2175 		ppdu_desc->vdev_id =
2176 			HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
2177 		vdev =
2178 		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
2179 							  ppdu_desc->vdev_id);
2180 		if (!vdev)
2181 			return;
2182 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
2183 			     QDF_MAC_ADDR_SIZE);
2184 	} else {
2185 		peer = dp_peer_find_by_id(pdev->soc, peer_id);
2186 		if (!peer)
2187 			return;
2188 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2189 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2190 		dp_peer_unref_del_find_by_id(peer);
2191 	}
2192 
2193 	ppdu_user_desc->peer_id = peer_id;
2194 
2195 	tag_buf++;
2196 
2197 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
2198 		ppdu_user_desc->delayed_ba = 1;
2199 		ppdu_desc->delayed_ba = 1;
2200 	}
2201 
2202 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
2203 		ppdu_user_desc->is_mcast = true;
2204 		ppdu_user_desc->mpdu_tried_mcast =
2205 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
2206 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
2207 	} else {
2208 		ppdu_user_desc->mpdu_tried_ucast =
2209 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
2210 	}
2211 
2212 	tag_buf++;
2213 
2214 	ppdu_user_desc->qos_ctrl =
2215 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
2216 	ppdu_user_desc->frame_ctrl =
2217 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
2218 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
2219 
2220 	if (ppdu_user_desc->delayed_ba)
2221 		ppdu_user_desc->mpdu_success = 0;
2222 
2223 	tag_buf += 3;
2224 
2225 	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
2226 		ppdu_user_desc->ppdu_cookie =
2227 			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
2228 		ppdu_user_desc->is_ppdu_cookie_valid = 1;
2229 	}
2230 }
2231 
2233 /**
2234  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
2235  * @pdev: DP pdev handle
2236  * @tag_buf: T2H message buffer carrying the user rate TLV
2237  * @ppdu_info: per ppdu tlv structure
2238  *
2239  * return:void
2240  */
2241 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
2242 		uint32_t *tag_buf,
2243 		struct ppdu_info *ppdu_info)
2244 {
2245 	uint16_t peer_id;
2246 	struct dp_peer *peer;
2247 	struct cdp_tx_completion_ppdu *ppdu_desc;
2248 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2249 	uint8_t curr_user_index = 0;
2250 	struct dp_vdev *vdev;
2251 
2252 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2253 
2254 	tag_buf++;
2255 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2256 
2257 	curr_user_index =
2258 		dp_get_ppdu_info_user_index(pdev,
2259 					    peer_id, ppdu_info);
2260 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2261 	if (peer_id == DP_SCAN_PEER_ID) {
2262 		vdev =
2263 		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
2264 							  ppdu_desc->vdev_id);
2265 		if (!vdev)
2266 			return;
2267 	} else {
2268 		peer = dp_peer_find_by_id(pdev->soc, peer_id);
2269 		if (!peer)
2270 			return;
2271 		dp_peer_unref_del_find_by_id(peer);
2272 	}
2273 
2274 	ppdu_user_desc->peer_id = peer_id;
2275 
2276 	ppdu_user_desc->tid =
2277 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
2278 
2279 	tag_buf += 1;
2280 
2281 	ppdu_user_desc->user_pos =
2282 		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
2283 	ppdu_user_desc->mu_group_id =
2284 		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
2285 
2286 	tag_buf += 1;
2287 
2288 	ppdu_user_desc->ru_start =
2289 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
2290 	ppdu_user_desc->ru_tones =
2291 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
2292 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
2293 
2294 	tag_buf += 2;
2295 
2296 	ppdu_user_desc->ppdu_type =
2297 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
2298 
2299 	tag_buf++;
2300 	ppdu_user_desc->tx_rate = *tag_buf;
2301 
2302 	ppdu_user_desc->ltf_size =
2303 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
2304 	ppdu_user_desc->stbc =
2305 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
2306 	ppdu_user_desc->he_re =
2307 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
2308 	ppdu_user_desc->txbf =
2309 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
2310 	ppdu_user_desc->bw =
2311 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
2312 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
2313 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
2314 	ppdu_user_desc->preamble =
2315 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
2316 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
2317 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
2318 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
2319 }
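
/*
 * Editor's note: the RU width above is derived from inclusive start/end
 * indices, hence the +1; a hedged standalone restatement:
 */
#if 0
#include <stdint.h>

static uint16_t ex_ru_tones(uint16_t ru_start, uint16_t ru_end)
{
	/* ru_start == ru_end describes a single allocation unit (width 1) */
	return (uint16_t)((ru_end - ru_start) + 1);
}
#endif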
2320 
2321 /*
2322  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
2323  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2324  * @pdev: DP PDEV handle
2325  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2326  * @ppdu_info: per ppdu tlv structure
2327  *
2328  * return:void
2329  */
2330 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2331 		struct dp_pdev *pdev, uint32_t *tag_buf,
2332 		struct ppdu_info *ppdu_info)
2333 {
2334 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
2335 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
2336 
2337 	struct cdp_tx_completion_ppdu *ppdu_desc;
2338 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2339 	uint8_t curr_user_index = 0;
2340 	uint16_t peer_id;
2341 	uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
2342 
2343 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2344 
2345 	tag_buf++;
2346 
2347 	peer_id =
2348 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2349 
2350 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2351 		return;
2352 
2353 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2354 
2355 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2356 	ppdu_user_desc->peer_id = peer_id;
2357 
2358 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2359 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2360 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2361 
2362 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2363 						   (void *)ppdu_user_desc,
2364 						   ppdu_info->ppdu_id,
2365 						   size);
2366 }
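
/*
 * Editor's note: an illustrative sketch (not the real
 * dp_process_ppdu_stats_update_failed_bitmap()) of how the enqueue bitmap is
 * interpreted -- bit i of the map corresponds to sequence number
 * start_seq + i, packed 32 bits per dword.
 */
#if 0
#include <stdint.h>

static int ex_mpdu_enqueued(const uint32_t *enq_bitmap,
			    uint32_t start_seq, uint32_t seq_no)
{
	uint32_t i = seq_no - start_seq;	/* bit index in the window */

	return (enq_bitmap[i >> 5] >> (i & 0x1f)) & 0x1;
}
#endif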
2367 
2368 /*
2369  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
2370  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2371  * @pdev: DP PDEV handle
2372  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2373  * @ppdu_info: per ppdu tlv structure
2374  *
2375  * return:void
2376  */
2377 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2378 		struct dp_pdev *pdev, uint32_t *tag_buf,
2379 		struct ppdu_info *ppdu_info)
2380 {
2381 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
2382 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
2383 
2384 	struct cdp_tx_completion_ppdu *ppdu_desc;
2385 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2386 	uint8_t curr_user_index = 0;
2387 	uint16_t peer_id;
2388 	uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
2389 
2390 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2391 
2392 	tag_buf++;
2393 
2394 	peer_id =
2395 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2396 
2397 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2398 		return;
2399 
2400 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2401 
2402 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2403 	ppdu_user_desc->peer_id = peer_id;
2404 
2405 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2406 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2407 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2408 
2409 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2410 						   (void *)ppdu_user_desc,
2411 						   ppdu_info->ppdu_id,
2412 						   size);
2413 }
2414 
2415 /*
2416  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
2417  * htt_ppdu_stats_user_cmpltn_common_tlv
2418  * @pdev: DP PDEV handle
2419  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
2420  * @ppdu_info: per ppdu tlv structure
2421  *
2422  * return:void
2423  */
2424 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
2425 		struct dp_pdev *pdev, uint32_t *tag_buf,
2426 		struct ppdu_info *ppdu_info)
2427 {
2428 	uint16_t peer_id;
2429 	struct cdp_tx_completion_ppdu *ppdu_desc;
2430 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2431 	uint8_t curr_user_index = 0;
2432 	uint8_t bw_iter;
2433 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
2434 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
2435 
2436 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2437 
2438 	tag_buf++;
2439 	peer_id =
2440 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
2441 
2442 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2443 		return;
2444 
2445 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2446 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2447 	ppdu_user_desc->peer_id = peer_id;
2448 	ppdu_desc->last_usr_index = curr_user_index;
2449 
2450 	ppdu_user_desc->completion_status =
2451 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
2452 				*tag_buf);
2453 
2454 	ppdu_user_desc->tid =
2455 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
2456 
2458 	tag_buf++;
2459 	if (qdf_likely(ppdu_user_desc->completion_status ==
2460 			HTT_PPDU_STATS_USER_STATUS_OK)) {
2461 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
2462 		ppdu_user_desc->ack_rssi_valid = 1;
2463 	} else {
2464 		ppdu_user_desc->ack_rssi_valid = 0;
2465 	}
2466 
2467 	tag_buf++;
2468 
2469 	ppdu_user_desc->mpdu_success =
2470 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
2471 
2472 	ppdu_user_desc->mpdu_failed =
2473 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
2474 						ppdu_user_desc->mpdu_success;
2475 
2476 	tag_buf++;
2477 
2478 	ppdu_user_desc->long_retries =
2479 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2480 
2481 	ppdu_user_desc->short_retries =
2482 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
2483 	ppdu_user_desc->retry_msdus =
2484 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
2485 
2486 	ppdu_user_desc->is_ampdu =
2487 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
2488 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
2489 
2490 	ppdu_desc->resp_type =
2491 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
2492 	ppdu_desc->mprot_type =
2493 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
2494 	ppdu_desc->rts_success =
2495 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
2496 	ppdu_desc->rts_failure =
2497 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);
2498 
2499 	/*
2500 	 * increase successful mpdu counter from
2501 	 * htt_ppdu_stats_user_cmpltn_common_tlv
2502 	 */
2503 	ppdu_info->mpdu_compltn_common_tlv += ppdu_user_desc->mpdu_success;
2504 
2505 	/*
2506 	 * An MU BAR may send a request to n users, but an ack may be received
2507 	 * from only m of them. To count how many users responded, a separate
2508 	 * per-PPDU counter, bar_num_users, is incremented for every
2509 	 * htt_ppdu_stats_user_cmpltn_common_tlv
2510 	 */
2511 	ppdu_desc->bar_num_users++;
2512 
2513 	tag_buf++;
2514 	for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
2515 		ppdu_user_desc->rssi_chain[bw_iter] =
2516 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
2517 		tag_buf++;
2518 	}
2519 
2520 	ppdu_user_desc->sa_tx_antenna =
2521 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);
2522 
2523 	tag_buf++;
2524 	ppdu_user_desc->sa_is_training =
2525 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
2526 	if (ppdu_user_desc->sa_is_training) {
2527 		ppdu_user_desc->sa_goodput =
2528 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
2529 	}
2530 
2531 	tag_buf++;
2532 	for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
2533 		ppdu_user_desc->sa_max_rates[bw_iter] =
2534 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
2535 	}
2536 
2537 	tag_buf += CDP_NUM_SA_BW;
2538 	ppdu_user_desc->current_rate_per =
2539 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
2540 }
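
/*
 * Editor's note: two of the per-user counters above are derived rather than
 * reported; a hedged restatement of that arithmetic with invented names:
 */
#if 0
#include <stdint.h>

static uint32_t ex_mpdu_failed(uint32_t mpdu_tried, uint32_t mpdu_success)
{
	/* the TLV reports tried and succeeded; failed is the difference */
	return mpdu_tried - mpdu_success;
}

static uint32_t ex_retry_msdus(uint32_t long_retries, uint32_t short_retries)
{
	/* total retried msdus is the sum of long and short retry counts */
	return long_retries + short_retries;
}
#endif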
2541 
2542 /*
2543  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2544  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2545  * @pdev: DP PDEV handle
2546  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2547  * @ppdu_info: per ppdu tlv structure
2548  *
2549  * return:void
2550  */
2551 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2552 		struct dp_pdev *pdev, uint32_t *tag_buf,
2553 		struct ppdu_info *ppdu_info)
2554 {
2555 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2556 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2557 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2558 	struct cdp_tx_completion_ppdu *ppdu_desc;
2559 	uint8_t curr_user_index = 0;
2560 	uint16_t peer_id;
2561 
2562 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2563 
2564 	tag_buf++;
2565 
2566 	peer_id =
2567 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2568 
2569 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2570 		return;
2571 
2572 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2573 
2574 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2575 	ppdu_user_desc->peer_id = peer_id;
2576 
2577 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2578 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2579 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2580 	ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
2581 }
2582 
2583 /*
2584  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2585  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2586  * @pdev: DP PDEV handle
2587  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2588  * @ppdu_info: per ppdu tlv structure
2589  *
2590  * return:void
2591  */
2592 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2593 		struct dp_pdev *pdev, uint32_t *tag_buf,
2594 		struct ppdu_info *ppdu_info)
2595 {
2596 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2597 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2598 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2599 	struct cdp_tx_completion_ppdu *ppdu_desc;
2600 	uint8_t curr_user_index = 0;
2601 	uint16_t peer_id;
2602 
2603 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2604 
2605 	tag_buf++;
2606 
2607 	peer_id =
2608 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2609 
2610 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2611 		return;
2612 
2613 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2614 
2615 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2616 	ppdu_user_desc->peer_id = peer_id;
2617 
2618 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2619 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2620 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2621 	ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
2622 }
2623 
2624 /*
2625  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2626  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2627  * @pdev: DP PDEV handle
2628  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2629  * @ppdu_info: per ppdu tlv structure
2630  *
2631  * return:void
2632  */
2633 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2634 		struct dp_pdev *pdev, uint32_t *tag_buf,
2635 		struct ppdu_info *ppdu_info)
2636 {
2637 	uint16_t peer_id;
2638 	struct cdp_tx_completion_ppdu *ppdu_desc;
2639 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2640 	uint8_t curr_user_index = 0;
2641 
2642 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2643 
2644 	tag_buf += 2;
2645 	peer_id =
2646 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2647 
2648 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2649 		return;
2650 
2651 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2652 
2653 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2654 	ppdu_user_desc->peer_id = peer_id;
2655 
2656 	tag_buf++;
2657 	/* do not update ppdu_desc->tid from this TLV */
2658 	ppdu_user_desc->num_mpdu =
2659 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2660 
2661 	ppdu_user_desc->num_msdu =
2662 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2663 
2664 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2665 
2666 	tag_buf++;
2667 	ppdu_user_desc->start_seq =
2668 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
2669 			*tag_buf);
2670 
2671 	tag_buf++;
2672 	ppdu_user_desc->success_bytes = *tag_buf;
2673 
2674 	/* increase successful mpdu counter */
2675 	ppdu_info->mpdu_ack_ba_tlv += ppdu_user_desc->num_mpdu;
2676 }
2677 
2678 /*
2679  * dp_process_ppdu_stats_user_common_array_tlv: Process
2680  * htt_ppdu_stats_user_common_array_tlv
2681  * @pdev: DP PDEV handle
2682  * @tag_buf: buffer containing the htt_ppdu_stats_usr_common_array_tlv_v
2683  * @ppdu_info: per ppdu tlv structure
2684  *
2685  * return:void
2686  */
2687 static void dp_process_ppdu_stats_user_common_array_tlv(
2688 		struct dp_pdev *pdev, uint32_t *tag_buf,
2689 		struct ppdu_info *ppdu_info)
2690 {
2691 	uint32_t peer_id;
2692 	struct cdp_tx_completion_ppdu *ppdu_desc;
2693 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2694 	uint8_t curr_user_index = 0;
2695 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2696 
2697 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2698 
2699 	tag_buf++;
2700 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2701 	tag_buf += 3;
2702 	peer_id =
2703 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2704 
2705 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
2706 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2707 			"Invalid peer");
2708 		return;
2709 	}
2710 
2711 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2712 
2713 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2714 
2715 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2716 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2717 
2718 	tag_buf++;
2719 
2720 	ppdu_user_desc->success_msdus =
2721 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
	/* the TLV field is an MSDU count; writing it to retry_bytes would
	 * clobber the byte count copied above, so store it in retry_msdus
	 */
2722 	ppdu_user_desc->retry_msdus =
2723 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2724 	tag_buf++;
2725 	ppdu_user_desc->failed_msdus =
2726 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2727 }
2728 
2729 /*
2730  * dp_process_ppdu_stats_user_compltn_flush_tlv: Process
2731  * htt_ppdu_stats_flush_tlv
2732  * @pdev: DP PDEV handle
2733  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2734  * @ppdu_info: per ppdu tlv structure
2735  *
2736  * return:void
2737  */
2738 static void
2739 dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2740 					     uint32_t *tag_buf,
2741 					     struct ppdu_info *ppdu_info)
2742 {
2743 	struct cdp_tx_completion_ppdu *ppdu_desc;
2744 	uint32_t peer_id;
2745 	uint8_t tid;
2746 	struct dp_peer *peer;
2747 
2748 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2749 				qdf_nbuf_data(ppdu_info->nbuf);
2750 	ppdu_desc->is_flush = 1;
2751 
2752 	tag_buf++;
2753 	ppdu_desc->drop_reason = *tag_buf;
2754 
2755 	tag_buf++;
2756 	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2757 	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
2758 	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);
2759 
2760 	tag_buf++;
2761 	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2762 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2763 
2764 	ppdu_desc->user[0].peer_id = peer_id;
2765 	ppdu_desc->user[0].tid = tid;
2766 
2767 	ppdu_desc->queue_type =
2768 			HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);
2769 
2770 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2771 	if (!peer)
2772 		return;
2773 
2774 	if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2775 		DP_STATS_INC(peer,
2776 			     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2777 			     ppdu_desc->num_msdu);
2778 	}
2779 
2780 	dp_peer_unref_del_find_by_id(peer);
2781 }
2782 
2783 #ifndef WLAN_TX_PKT_CAPTURE_ENH
2784 /*
2785  * dp_deliver_mgmt_frm: Deliver a tx management frame to the upper layer
2786  * @pdev: DP PDEV handle
2787  * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2788  *
2789  * return: void
2790  */
2791 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
2792 {
2793 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2794 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2795 				     nbuf, HTT_INVALID_PEER,
2796 				     WDI_NO_VAL, pdev->pdev_id);
2797 	}
2798 }
2799 #endif
2800 
2801 /*
2802  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2803  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2804  * @pdev: DP PDEV handle
2805  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2806  * @ppdu_id: PPDU id of the frame
2807  *
2808  * return: QDF_STATUS_SUCCESS if the nbuf has to be freed by the caller
2809  */
2810 static QDF_STATUS
2811 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2812 					      qdf_nbuf_t tag_buf,
2813 					      uint32_t ppdu_id)
2814 {
2815 	uint32_t *nbuf_ptr;
2816 	uint8_t trim_size;
2817 	size_t head_size;
2818 	struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
2819 	uint32_t *msg_word;
2820 	uint32_t tsf_hdr;
2821 
2822 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
2823 	    (!pdev->bpr_enable) && (!pdev->tx_capture_enabled))
2824 		return QDF_STATUS_SUCCESS;
2825 
2826 	/*
2827 	 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
2828 	 */
2829 	msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
2830 	msg_word = msg_word + 2;
2831 	tsf_hdr = *msg_word;
2832 
2833 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
2834 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2835 		      qdf_nbuf_data(tag_buf));
2836 
2837 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2838 		return QDF_STATUS_SUCCESS;
2839 
2840 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2841 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
2842 
2843 	if (pdev->tx_capture_enabled) {
2844 		head_size = sizeof(struct cdp_tx_mgmt_comp_info);
2845 		if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
2846 			qdf_err("Failed to get headroom h_sz %zu h_avail %d\n",
2847 				head_size, qdf_nbuf_headroom(tag_buf));
2848 			qdf_assert_always(0);
2849 			return QDF_STATUS_E_NOMEM;
2850 		}
2851 		ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
2852 					qdf_nbuf_push_head(tag_buf, head_size);
2853 		qdf_assert_always(ptr_mgmt_comp_info);
2854 		ptr_mgmt_comp_info->ppdu_id = ppdu_id;
2855 		ptr_mgmt_comp_info->is_sgen_pkt = true;
2856 		ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
2857 	} else {
2858 		head_size = sizeof(ppdu_id);
2859 		nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
2860 		*nbuf_ptr = ppdu_id;
2861 	}
2862 
2863 	if (pdev->bpr_enable) {
2864 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2865 				     tag_buf, HTT_INVALID_PEER,
2866 				     WDI_NO_VAL, pdev->pdev_id);
2867 	}
2868 
2869 	dp_deliver_mgmt_frm(pdev, tag_buf);
2870 
2871 	return QDF_STATUS_E_ALREADY;
2872 }
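
/*
 * Editor's note: a hedged, plain-C stand-in (not the qdf_nbuf API) for the
 * prepend pattern above -- metadata is pushed into the buffer headroom so a
 * consumer can pop it off ahead of the frame body.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Return the new start of data, or NULL when headroom is insufficient. */
static uint8_t *ex_push_head(uint8_t *data, uint8_t *buf_start,
			     const void *hdr, size_t hdr_len)
{
	if ((size_t)(data - buf_start) < hdr_len)
		return NULL;		/* not enough headroom */

	data -= hdr_len;
	memcpy(data, hdr, hdr_len);
	return data;
}
#endif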
2873 
2874 /**
2875  * dp_validate_fix_ppdu_tlv(): Function to validate the length of a PPDU TLV
2876  *
2877  * If the TLV length sent as part of the PPDU TLV is less than the expected
2878  * size, i.e. the size of the corresponding data structure, pad the remaining
2879  * bytes with zeros and continue processing the TLVs
2880  *
2881  * @pdev: DP pdev handle
2882  * @tag_buf: TLV buffer
2883  * @tlv_expected_size: Expected size of Tag
2884  * @tlv_len: TLV length received from FW
2885  *
2886  * Return: Pointer to updated TLV
2887  */
2888 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
2889 						 uint32_t *tag_buf,
2890 						 uint16_t tlv_expected_size,
2891 						 uint16_t tlv_len)
2892 {
2893 	uint32_t *tlv_desc = tag_buf;
2894 
2895 	qdf_assert_always(tlv_len != 0);
2896 
2897 	if (tlv_len < tlv_expected_size) {
2898 		qdf_mem_zero(pdev->ppdu_tlv_buf, tlv_expected_size);
2899 		qdf_mem_copy(pdev->ppdu_tlv_buf, tag_buf, tlv_len);
2900 		tlv_desc = pdev->ppdu_tlv_buf;
2901 	}
2902 
2903 	return tlv_desc;
2904 }
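
/*
 * Editor's note: a standalone restatement of the padding rule above, with
 * invented names -- a TLV shorter than the host structure is copied into a
 * zeroed scratch buffer so its missing trailing fields read as zero.
 */
#if 0
#include <stddef.h>
#include <string.h>

static const void *ex_fix_short_tlv(void *scratch, size_t expected_size,
				    const void *tlv, size_t tlv_len)
{
	if (tlv_len >= expected_size)
		return tlv;		/* full-sized: use it in place */

	memset(scratch, 0, expected_size);
	memcpy(scratch, tlv, tlv_len);	/* zero-pad the missing tail */
	return scratch;
}
#endif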
2905 
2906 /**
2907  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
2908  * @pdev: DP pdev handle
2909  * @tag_buf: TLV buffer
2910  * @tlv_len: length of tlv
2911  * @ppdu_info: per ppdu tlv structure
2912  *
2913  * return: void
2914  */
2915 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
2916 		uint32_t tlv_len, struct ppdu_info *ppdu_info)
2917 {
2918 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2919 	uint16_t tlv_expected_size;
2920 	uint32_t *tlv_desc;
2921 
2922 	switch (tlv_type) {
2923 	case HTT_PPDU_STATS_COMMON_TLV:
2924 		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
2925 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2926 						    tlv_expected_size, tlv_len);
2927 		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
2928 		break;
2929 	case HTT_PPDU_STATS_USR_COMMON_TLV:
2930 		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
2931 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2932 						    tlv_expected_size, tlv_len);
2933 		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
2934 						      ppdu_info);
2935 		break;
2936 	case HTT_PPDU_STATS_USR_RATE_TLV:
2937 		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
2938 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2939 						    tlv_expected_size, tlv_len);
2940 		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
2941 						    ppdu_info);
2942 		break;
2943 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
2944 		tlv_expected_size =
2945 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
2946 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2947 						    tlv_expected_size, tlv_len);
2948 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2949 				pdev, tlv_desc, ppdu_info);
2950 		break;
2951 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
2952 		tlv_expected_size =
2953 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
2954 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2955 						    tlv_expected_size, tlv_len);
2956 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2957 				pdev, tlv_desc, ppdu_info);
2958 		break;
2959 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
2960 		tlv_expected_size =
2961 			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
2962 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2963 						    tlv_expected_size, tlv_len);
2964 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
2965 				pdev, tlv_desc, ppdu_info);
2966 		break;
2967 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
2968 		tlv_expected_size =
2969 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
2970 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2971 						    tlv_expected_size, tlv_len);
2972 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2973 				pdev, tlv_desc, ppdu_info);
2974 		break;
2975 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
2976 		tlv_expected_size =
2977 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
2978 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2979 						    tlv_expected_size, tlv_len);
2980 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2981 				pdev, tlv_desc, ppdu_info);
2982 		break;
2983 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
2984 		tlv_expected_size =
2985 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
2986 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2987 						    tlv_expected_size, tlv_len);
2988 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2989 				pdev, tlv_desc, ppdu_info);
2990 		break;
2991 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
2992 		tlv_expected_size =
2993 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
2994 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2995 						    tlv_expected_size, tlv_len);
2996 		dp_process_ppdu_stats_user_common_array_tlv(
2997 				pdev, tlv_desc, ppdu_info);
2998 		break;
2999 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
3000 		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
3001 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3002 						    tlv_expected_size, tlv_len);
3003 		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
3004 							     ppdu_info);
3005 		break;
3006 	default:
3007 		break;
3008 	}
3009 }
3010 
3011 /**
3012  * dp_ppdu_desc_user_stats_update(): Function to update TX user stats
3013  * @pdev: DP pdev handle
3014  * @ppdu_info: per PPDU TLV descriptor
3015  *
3016  * return: void
3017  */
3018 void
3019 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
3020 			       struct ppdu_info *ppdu_info)
3021 {
3022 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3023 	struct dp_peer *peer = NULL;
3024 	uint32_t tlv_bitmap_expected;
3025 	uint32_t tlv_bitmap_default;
3026 	uint16_t i;
3027 	uint32_t num_users;
3028 
3029 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3030 		qdf_nbuf_data(ppdu_info->nbuf);
3031 
3032 	ppdu_desc->num_users = ppdu_info->last_user;
3033 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
3034 
3035 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
3036 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
3037 	    pdev->tx_capture_enabled) {
3038 		if (ppdu_info->is_ampdu)
3039 			tlv_bitmap_expected =
3040 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
3041 					ppdu_info->tlv_bitmap);
3042 	}
3043 
3044 	tlv_bitmap_default = tlv_bitmap_expected;
3045 
3046 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
3047 		num_users = ppdu_desc->bar_num_users;
3048 		ppdu_desc->num_users = ppdu_desc->bar_num_users;
3049 	} else {
3050 		num_users = ppdu_desc->num_users;
3051 	}
3052 
3053 	for (i = 0; i < num_users; i++) {
3054 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
3055 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
3056 
3057 		peer = dp_peer_find_by_id(pdev->soc,
3058 					  ppdu_desc->user[i].peer_id);
3059 		/**
3060 		 * This check is to make sure peer is not deleted
3061 		 * after processing the TLVs.
3062 		 */
3063 		if (!peer)
3064 			continue;
3065 
3066 		ppdu_desc->user[i].cookie = (void *)peer->wlanstats_ctx;
3067 
3068 		/*
3069 		 * Different frame types like DATA, BAR or CTRL have
3070 		 * different expected tlv bitmaps. Apart from the
3071 		 * ACK_BA_STATUS TLV, the other tlvs arrive in order /
3072 		 * sequentially from the fw. Since the ACK_BA_STATUS TLV
3073 		 * comes from hardware it is asynchronous, so some tlv is
3074 		 * needed to confirm that all tlvs for a ppdu have been
3075 		 * received. Hence both HTT_PPDU_STATS_COMMON_TLV and
3076 		 * ACK_BA_STATUS_TLV are relied upon; for a failed packet
3077 		 * the ACK_BA_STATUS_TLV is not received.
3078 		 */
3079 		if (!(ppdu_info->tlv_bitmap &
3080 		      (1 << HTT_PPDU_STATS_COMMON_TLV)) ||
3081 		    (!(ppdu_info->tlv_bitmap &
3082 		       (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
3083 		     (ppdu_desc->user[i].completion_status ==
3084 		      HTT_PPDU_STATS_USER_STATUS_OK))) {
3085 			dp_peer_unref_del_find_by_id(peer);
3086 			continue;
3087 		}
3088 
3089 		/**
3090 		 * Update tx stats for data frames having Qos as well as
3091 		 * non-Qos data tid
3092 		 */
3093 
3094 		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
3095 		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
3096 		     (ppdu_desc->htt_frame_type ==
3097 		      HTT_STATS_FTYPE_SGEN_QOS_NULL)) &&
3098 		      (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {
3099 
3100 			dp_tx_stats_update(pdev, peer,
3101 					   &ppdu_desc->user[i],
3102 					   ppdu_desc->ack_rssi);
3103 			dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
3104 		}
3105 
3106 		dp_peer_unref_del_find_by_id(peer);
3107 		tlv_bitmap_expected = tlv_bitmap_default;
3108 	}
3109 }
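
/*
 * Editor's note: the bitmap test above, restated as a standalone predicate
 * with hypothetical tag parameters (the real tag values come from the HTT
 * headers). A user's stats are trusted when the COMMON TLV arrived and, for
 * users reported OK, the ACK_BA_STATUS TLV arrived as well.
 */
#if 0
#include <stdint.h>

static int ex_user_stats_complete(uint32_t tlv_bitmap, unsigned int common_tag,
				  unsigned int ack_ba_tag, int status_ok)
{
	if (!(tlv_bitmap & (1u << common_tag)))
		return 0;		/* COMMON TLV missing */
	if (status_ok && !(tlv_bitmap & (1u << ack_ba_tag)))
		return 0;		/* acked user but no ACK_BA TLV yet */
	return 1;
}
#endif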
3110 
3111 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3112 
3113 /**
3114  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
3115  * to upper layer
3116  * @pdev: DP pdev handle
3117  * @ppdu_info: per PPDU TLV descriptor
3118  *
3119  * return: void
3120  */
3121 static
3122 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
3123 			  struct ppdu_info *ppdu_info)
3124 {
3125 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3126 	qdf_nbuf_t nbuf;
3127 
3128 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3129 		qdf_nbuf_data(ppdu_info->nbuf);
3130 
3131 	dp_ppdu_desc_user_stats_update(pdev, ppdu_info);
3132 
3133 	/*
3134 	 * Remove from the list
3135 	 */
3136 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
3137 	nbuf = ppdu_info->nbuf;
3138 	pdev->list_depth--;
3139 	qdf_mem_free(ppdu_info);
3140 
3141 	qdf_assert_always(nbuf);
3142 
3143 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3144 		qdf_nbuf_data(nbuf);
3145 
3146 	/**
3147 	 * Deliver PPDU stats only for valid (acked) data frames if
3148 	 * sniffer mode is not enabled.
3149 	 * If sniffer mode is enabled, PPDU stats for all frames
3150 	 * including mgmt/control frames should be delivered to upper layer
3151 	 */
3152 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3153 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
3154 				nbuf, HTT_INVALID_PEER,
3155 				WDI_NO_VAL, pdev->pdev_id);
3156 	} else {
3157 		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
3158 				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
3159 
3160 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3161 					pdev->soc, nbuf, HTT_INVALID_PEER,
3162 					WDI_NO_VAL, pdev->pdev_id);
3163 		} else
3164 			qdf_nbuf_free(nbuf);
3165 	}
3166 	return;
3167 }
3168 
3169 #endif
3170 
3171 /**
3172  * dp_get_ppdu_desc(): Function to allocate new PPDU status
3173  * desc for new ppdu id
3174  * @pdev: DP pdev handle
3175  * @ppdu_id: PPDU unique identifier
3176  * @tlv_type: TLV type received
3177  *
3178  * return: ppdu_info per ppdu tlv structure
3179  */
3180 static
3181 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
3182 			uint8_t tlv_type)
3183 {
3184 	struct ppdu_info *ppdu_info = NULL;
3185 
3186 	/*
3187 	 * Find ppdu_id node exists or not
3188 	 */
3189 	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {
3190 
3191 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
3192 			break;
3193 		}
3194 	}
3195 
3196 	if (ppdu_info) {
3197 		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
3198 			/**
3199 			 * If a tlv_type arrives that has already been
3200 			 * processed for this ppdu, a new ppdu with the same
3201 			 * ppdu id has started, so flush the older ppdu.
3202 			 * For MU-MIMO and OFDMA, a PPDU carries multiple
3203 			 * users with the same tlv types; the tlv bitmap is
3204 			 * used to tell SU from MU_MIMO/OFDMA
3205 			 */
3206 			if (!(ppdu_info->tlv_bitmap &
3207 			    (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
3208 				return ppdu_info;
3209 
3210 			/**
3211 			 * Apart from the ACK BA STATUS TLV, all TLVs arrive
3212 			 * in order, so if the tlv type is not the ACK BA
3213 			 * STATUS TLV the ppdu_info can be delivered
3214 			 */
3215 			if (tlv_type ==
3216 			    HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)
3217 				return ppdu_info;
3218 
3219 			dp_ppdu_desc_deliver(pdev, ppdu_info);
3220 		} else {
3221 			return ppdu_info;
3222 		}
3223 	}
3224 
3225 	/**
3226 	 * Flush the head ppdu descriptor if ppdu desc list reaches max
3227 	 * threshold
3228 	 */
3229 	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
3230 		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
3231 		dp_ppdu_desc_deliver(pdev, ppdu_info);
3232 	}
3233 
3234 	/*
3235 	 * Allocate new ppdu_info node
3236 	 */
3237 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
3238 	if (!ppdu_info)
3239 		return NULL;
3240 
3241 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
3242 			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
3243 			TRUE);
3244 	if (!ppdu_info->nbuf) {
3245 		qdf_mem_free(ppdu_info);
3246 		return NULL;
3247 	}
3248 
3249 	ppdu_info->ppdu_desc =
3250 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3251 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
3252 			sizeof(struct cdp_tx_completion_ppdu));
3253 
3254 	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
3255 			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
3256 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3257 				"No tailroom for HTT PPDU");
3258 		qdf_nbuf_free(ppdu_info->nbuf);
3259 		ppdu_info->nbuf = NULL;
3260 		ppdu_info->last_user = 0;
3261 		qdf_mem_free(ppdu_info);
3262 		return NULL;
3263 	}
3264 
3265 	/**
3266 	 * No lock is needed because all PPDU TLVs are processed in
3267 	 * same context and this list is updated in same context
3268 	 */
3269 	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
3270 			ppdu_info_list_elem);
3271 	pdev->list_depth++;
3272 	return ppdu_info;
3273 }
3274 
3275 /**
3276  * dp_htt_process_tlv(): Function to process each PPDU TLVs
3277  * @pdev: DP pdev handle
3278  * @htt_t2h_msg: HTT target to host message
3279  *
3280  * return: ppdu_info ready for delivery once all of its TLVs are
3280  * processed, NULL otherwise
3281  */
3282 
3283 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
3284 		qdf_nbuf_t htt_t2h_msg)
3285 {
3286 	uint32_t length;
3287 	uint32_t ppdu_id;
3288 	uint8_t tlv_type;
3289 	uint32_t tlv_length, tlv_bitmap_expected;
3290 	uint8_t *tlv_buf;
3291 	struct ppdu_info *ppdu_info = NULL;
3292 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3293 	struct dp_peer *peer;
3294 	uint32_t i = 0;
3295 
3296 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
3297 
3298 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
3299 
3300 	msg_word = msg_word + 1;
3301 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
3302 
3303 
3304 	msg_word = msg_word + 3;
3305 	while (length > 0) {
3306 		tlv_buf = (uint8_t *)msg_word;
3307 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
3308 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
3309 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
3310 			pdev->stats.ppdu_stats_counter[tlv_type]++;
3311 
3312 		if (tlv_length == 0)
3313 			break;
3314 
3315 		tlv_length += HTT_TLV_HDR_LEN;
3316 
3317 		/**
3318 		 * Not allocating separate ppdu descriptor for MGMT Payload
3319 		 * TLV as this is sent as separate WDI indication and it
3320 		 * doesn't contain any ppdu information
3321 		 */
3322 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
3323 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
3324 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
3325 			pdev->mgmtctrl_frm_info.mgmt_buf_len =
3326 				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
3327 						(*(msg_word + 1));
3328 			msg_word =
3329 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
3330 			length -= (tlv_length);
3331 			continue;
3332 		}
3333 
3334 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
3335 		if (!ppdu_info)
3336 			return NULL;
3337 		ppdu_info->ppdu_desc->bss_color =
3338 			pdev->rx_mon_recv_status.bsscolor;
3339 
3340 		ppdu_info->ppdu_id = ppdu_id;
3341 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
3342 
3343 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
3344 
3345 		/**
3346 		 * Increment pdev level tlv count to monitor
3347 		 * missing TLVs
3348 		 */
3349 		pdev->tlv_count++;
3350 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
3351 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
3352 		length -= (tlv_length);
3353 	}
3354 
3355 	if (!ppdu_info)
3356 		return NULL;
3357 
3358 	pdev->last_ppdu_id = ppdu_id;
3359 
3360 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
3361 
3362 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
3363 	    pdev->tx_capture_enabled) {
3364 		if (ppdu_info->is_ampdu)
3365 			tlv_bitmap_expected =
3366 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
3367 					ppdu_info->tlv_bitmap);
3368 	}
3369 
3370 	ppdu_desc = ppdu_info->ppdu_desc;
3371 
3372 	if (!ppdu_desc)
3373 		return NULL;
3374 
3375 	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
3376 	    HTT_PPDU_STATS_USER_STATUS_OK) {
3377 		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
3378 	}
3379 
3380 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
3381 	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV)) &&
3382 	    ppdu_desc->delayed_ba) {
3383 		for (i = 0; i < ppdu_desc->num_users; i++) {
3384 			uint32_t ppdu_id;
3385 
3386 			ppdu_id = ppdu_desc->ppdu_id;
3387 			peer = dp_peer_find_by_id(pdev->soc,
3388 						  ppdu_desc->user[i].peer_id);
3389 			/**
3390 			 * This check is to make sure peer is not deleted
3391 			 * after processing the TLVs.
3392 			 */
3393 			if (!peer)
3394 				continue;
3395 
3396 			/**
3397 			 * save delayed ba user info
3398 			 */
3399 			if (ppdu_desc->user[i].delayed_ba) {
3400 				dp_peer_copy_delay_stats(peer,
3401 							 &ppdu_desc->user[i]);
3402 				peer->last_delayed_ba_ppduid = ppdu_id;
3403 			}
3404 			dp_peer_unref_del_find_by_id(peer);
3405 		}
3406 	}
3407 
3408 	/*
3409 	 * when the frame type is BAR and the STATS_COMMON_TLV is set,
3410 	 * copy the stored peer delayed-BA info into the BAR status
3411 	 */
3412 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
3413 	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV))) {
3414 		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
3415 			peer = dp_peer_find_by_id(pdev->soc,
3416 						  ppdu_desc->user[i].peer_id);
3417 			/**
3418 			 * This check is to make sure peer is not deleted
3419 			 * after processing the TLVs.
3420 			 */
3421 			if (!peer)
3422 				continue;
3423 
3424 			if (peer->last_delayed_ba) {
3425 				dp_peer_copy_stats_to_bar(peer,
3426 							  &ppdu_desc->user[i]);
3427 				ppdu_desc->bar_ppdu_id = ppdu_desc->ppdu_id;
3428 				ppdu_desc->ppdu_id =
3429 					peer->last_delayed_ba_ppduid;
3430 			}
3431 			dp_peer_unref_del_find_by_id(peer);
3432 		}
3433 	}
3434 
3435 	/*
3436 	 * For frame types DATA and BAR, stats are updated per MSDU.
3437 	 * Successful msdu and mpdu counts are populated from the ACK BA
3438 	 * STATUS TLV, which arrives out of order; the successful mpdu count
3439 	 * is also populated from the COMPLTN COMMON TLV, which arrives in
3440 	 * order. For every ppdu_info we store the successful mpdu count from
3441 	 * both TLVs and compare them before delivering, to make sure the ACK
3442 	 * BA STATUS TLV was received. Some self-generated frames never get
3443 	 * an ack ba status tlv, so there is no need to wait for it.
3444 	 */
3445 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
3446 	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
3447 		/*
3448 		 * successful mpdu count should match with both tlv
3449 		 */
3450 		if (ppdu_info->mpdu_compltn_common_tlv !=
3451 		    ppdu_info->mpdu_ack_ba_tlv)
3452 			return NULL;
3453 	}
3454 
3455 	/**
3456 	 * Once all the TLVs for a given PPDU have been processed,
3457 	 * return the PPDU status to be delivered to the higher layer.
3458 	 * tlv_bitmap_expected cannot cover every frame type, but the
3459 	 * STATS COMMON TLV is always the last TLV from the FW for a ppdu;
3460 	 * apart from the ACK BA TLV, the FW sends the other TLVs in
3461 	 * sequential order. The flush tlv arrives separately.
3462 	 */
3463 	if ((ppdu_info->tlv_bitmap != 0 &&
3464 	     (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV))) ||
3465 	    (ppdu_info->tlv_bitmap &
3466 	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV)))
3467 		return ppdu_info;
3468 
3469 	return NULL;
3470 }
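
/*
 * Layout sketch of the PPDU stats indication parsed above, inferred
 * from the GET macros used (word offsets as the parser consumes
 * them; not a normative message definition):
 *
 *	word 0   : payload size  (HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET)
 *	word 1   : ppdu_id       (HTT_T2H_PPDU_STATS_PPDU_ID_GET)
 *	words 2-3: skipped by this parser
 *	word 4.. : TLV stream, each TLV led by a header word where
 *	           HTT_STATS_TLV_TAG_GET() yields the tag and
 *	           HTT_STATS_TLV_LENGTH_GET() the body length
 *	           (excluding the header, hence the HTT_TLV_HDR_LEN
 *	           adjustment above)
 */
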
3471 #endif /* FEATURE_PERPKT_INFO */
3472 
3473 /**
3474  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
3475  * @soc: DP SOC handle
3476  * @pdev_id: pdev id
3477  * @htt_t2h_msg: HTT message nbuf
3478  *
3479  * return:void
3480  */
3481 #if defined(WDI_EVENT_ENABLE)
3482 #ifdef FEATURE_PERPKT_INFO
3483 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
3484 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
3485 {
3486 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
3487 	struct ppdu_info *ppdu_info = NULL;
3488 	bool free_buf = true;
3489 
3490 	if (!pdev)
3491 		return true;
3492 
3493 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
3494 	    !pdev->mcopy_mode && !pdev->bpr_enable)
3495 		return free_buf;
3496 
3497 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
3498 
3499 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
3500 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
3501 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
3502 		    QDF_STATUS_SUCCESS)
3503 			free_buf = false;
3504 	}
3505 
3506 	if (ppdu_info)
3507 		dp_ppdu_desc_deliver(pdev, ppdu_info);
3508 
3509 	pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
3510 	pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
3511 	pdev->mgmtctrl_frm_info.ppdu_id = 0;
3512 
3513 	return free_buf;
3514 }
3515 #else
3516 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
3517 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
3518 {
3519 	return true;
3520 }
3521 #endif
3522 #endif
3523 
3524 /**
3525  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
3526  * @soc: DP SOC handle
3527  * @htt_t2h_msg: HTT message nbuf
3528  *
3529  * return:void
3530  */
3531 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
3532 		qdf_nbuf_t htt_t2h_msg)
3533 {
3534 	uint8_t done;
3535 	qdf_nbuf_t msg_copy;
3536 	uint32_t *msg_word;
3537 
3538 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
3539 	msg_word = msg_word + 3;
3540 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
3541 
3542 	/*
3543 	 * HTT EXT stats response comes as stream of TLVs which span over
3544 	 * multiple T2H messages.
3545 	 * The first message will carry length of the response.
3546 	 * For rest of the messages length will be zero.
3547 	 *
3548 	 * Clone the T2H message buffer and store it in a list to process
3549 	 * it later.
3550 	 *
3551 	 * The original T2H message buffers gets freed in the T2H HTT event
3552 	 * handler
3553 	 */
3554 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
3555 
3556 	if (!msg_copy) {
3557 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3558 				"T2H messge clone failed for HTT EXT STATS");
3559 		goto error;
3560 	}
3561 
3562 	qdf_spin_lock_bh(&soc->htt_stats.lock);
3563 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
3564 	/*
3565 	 * Done bit signifies that this is the last T2H buffer in the stream of
3566 	 * HTT EXT STATS message
3567 	 */
3568 	if (done) {
3569 		soc->htt_stats.num_stats++;
3570 		qdf_sched_work(0, &soc->htt_stats.work);
3571 	}
3572 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
3573 
3574 	return;
3575 
3576 error:
3577 	qdf_spin_lock_bh(&soc->htt_stats.lock);
3578 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
3579 			!= NULL) {
3580 		qdf_nbuf_free(msg_copy);
3581 	}
3582 	soc->htt_stats.num_stats = 0;
3583 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
3584 	return;
3585 
3586 }
3587 
3588 /*
3589  * htt_soc_attach_target() - SOC level HTT setup
3590  * @htt_soc:	HTT SOC handle
3591  *
3592  * Return: 0 on success; error code on failure
3593  */
3594 int htt_soc_attach_target(struct htt_soc *htt_soc)
3595 {
3596 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3597 
3598 	return htt_h2t_ver_req_msg(soc);
3599 }
3600 
3601 void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
3602 {
3603 	htt_soc->htc_soc = htc_soc;
3604 }
3605 
3606 HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
3607 {
3608 	return htt_soc->htc_soc;
3609 }
3610 
3611 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
3612 {
3613 	int i;
3614 	int j;
3615 	int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
3616 	struct htt_soc *htt_soc = NULL;
3617 
3618 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
3619 	if (!htt_soc) {
3620 		dp_err("HTT attach failed");
3621 		return NULL;
3622 	}
3623 
3624 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3625 		htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
3626 		if (!htt_soc->pdevid_tt[i].umac_ttt)
3627 			break;
3628 		qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
3629 		htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
3630 		if (!htt_soc->pdevid_tt[i].lmac_ttt) {
3631 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
3632 			break;
3633 		}
3634 		qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
3635 	}
3636 	if (i != MAX_PDEV_CNT) {
3637 		for (j = 0; j < i; j++) {
3638 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt);
3639 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt);
3640 		}
		/* also free the htt_soc itself to avoid leaking it */
		qdf_mem_free(htt_soc);
3641 		return NULL;
3642 	}
3643 
3644 	htt_soc->dp_soc = soc;
3645 	htt_soc->htc_soc = htc_handle;
3646 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
3647 
3648 	return htt_soc;
3649 }
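
/*
 * Usage sketch (illustrative; the real call sites live in the dp_soc
 * attach path): htt_soc_attach() pairs with htt_soc_detach(), which
 * releases the per-pdev umac/lmac timestamp arrays and the handle.
 *
 *	struct htt_soc *htt = htt_soc_attach(dp_soc, htc_handle);
 *
 *	if (!htt)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	htt_soc_detach(htt);
 */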
3650 
3651 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
3652 /*
3653  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
3654  * @htt_soc:	 HTT SOC handle
3655  * @msg_word:    Pointer to payload
3656  * @htt_t2h_msg: HTT msg nbuf
3657  *
3658  * Return: True if buffer should be freed by caller.
3659  */
3660 static bool
3661 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
3662 				uint32_t *msg_word,
3663 				qdf_nbuf_t htt_t2h_msg)
3664 {
3665 	u_int8_t pdev_id;
3666 	u_int8_t target_pdev_id;
3667 	bool free_buf;
3668 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
3669 	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
3670 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
3671 							 target_pdev_id);
3672 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
3673 					      htt_t2h_msg);
3674 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
3675 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
3676 		pdev_id);
3677 	return free_buf;
3678 }
3679 #else
3680 static bool
3681 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
3682 				uint32_t *msg_word,
3683 				qdf_nbuf_t htt_t2h_msg)
3684 {
3685 	return true;
3686 }
3687 #endif
3688 
3689 #if defined(WDI_EVENT_ENABLE) && \
3690 	!defined(REMOVE_PKT_LOG)
3691 /*
3692  * dp_pktlog_msg_handler() - Pktlog msg handler
3693  * @htt_soc:	 HTT SOC handle
3694  * @msg_word:    Pointer to payload
3695  *
3696  * Return: None
3697  */
3698 static void
3699 dp_pktlog_msg_handler(struct htt_soc *soc,
3700 		      uint32_t *msg_word)
3701 {
3702 	uint8_t pdev_id;
3703 	uint8_t target_pdev_id;
3704 	uint32_t *pl_hdr;
3705 
3706 	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
3707 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
3708 							 target_pdev_id);
3709 	pl_hdr = (msg_word + 1);
3710 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
3711 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
3712 		pdev_id);
3713 }
3714 #else
3715 static void
3716 dp_pktlog_msg_handler(struct htt_soc *soc,
3717 		      uint32_t *msg_word)
3718 {
3719 }
3720 #endif
3721 
3722 /*
3723  * time_allow_print() - time allow print
3724  * @htt_ring_tt:	ring_id-indexed array of timestamps
3725  * @ring_id:		ring_id (index)
3726  *
3727  * Return: 1 for successfully saving timestamp in array
3728  *	and 0 for timestamp falling within 2 seconds after last one
3729  */
3730 static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
3731 {
3732 	unsigned long tstamp;
3733 	unsigned long delta;
3734 
3735 	tstamp = qdf_get_system_timestamp();
3736 
3737 	if (!htt_ring_tt)
3738 		return 0; /* unable to print backpressure messages */
3739 
3740 	if (htt_ring_tt[ring_id] == -1) {
3741 		htt_ring_tt[ring_id] = tstamp;
3742 		return 1;
3743 	}
3744 	delta = tstamp - htt_ring_tt[ring_id];
3745 	if (delta >= 2000) {
3746 		htt_ring_tt[ring_id] = tstamp;
3747 		return 1;
3748 	}
3749 
3750 	return 0;
3751 }
3752 
3753 static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
3754 			       u_int8_t pdev_id, u_int8_t ring_id,
3755 			       u_int16_t hp_idx, u_int16_t tp_idx,
3756 			       u_int32_t bkp_time, char *ring_stype)
3757 {
3758 	dp_alert("msg_type: %d pdev_id: %d ring_type: %s ",
3759 		 msg_type, pdev_id, ring_stype);
3760 	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
3761 		 ring_id, hp_idx, tp_idx, bkp_time);
3762 }
3763 
3764 /*
3765  * dp_htt_bkp_event_alert() - htt backpressure event alert
3766  * @msg_word:	htt packet context
3767  * @htt_soc:	HTT SOC handle
3768  *
3769  * Return: after attempting to print stats
3770  */
3771 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
3772 {
3773 	u_int8_t ring_type;
3774 	u_int8_t pdev_id;
3775 	uint8_t target_pdev_id;
3776 	u_int8_t ring_id;
3777 	u_int16_t hp_idx;
3778 	u_int16_t tp_idx;
3779 	u_int32_t bkp_time;
3780 	enum htt_t2h_msg_type msg_type;
3781 	struct dp_soc *dpsoc;
3782 	struct dp_pdev *pdev;
3783 	struct dp_htt_timestamp *radio_tt;
3784 
3785 	if (!soc)
3786 		return;
3787 
3788 	dpsoc = (struct dp_soc *)soc->dp_soc;
3789 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3790 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
3791 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
3792 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
3793 							 target_pdev_id);
3794 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
3795 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
3796 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
3797 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
3798 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
3799 	radio_tt = &soc->pdevid_tt[pdev_id];
3800 
3801 	switch (ring_type) {
3802 	case HTT_SW_RING_TYPE_UMAC:
3803 		if (!time_allow_print(radio_tt->umac_ttt, ring_id))
3804 			return;
3805 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
3806 				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
3807 	break;
3808 	case HTT_SW_RING_TYPE_LMAC:
3809 		if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
3810 			return;
3811 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
3812 				   bkp_time, "HTT_SW_RING_TYPE_LMAC");
3813 	break;
3814 	default:
3815 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
3816 				   bkp_time, "UNKNOWN");
3817 	break;
3818 	}
3819 
3820 	dp_print_ring_stats(pdev);
3821 	dp_print_napi_stats(pdev->soc);
3822 }
3823 
3824 /*
3825  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
3826  * @context:	Opaque context (HTT SOC handle)
3827  * @pkt:	HTC packet
3828  */
3829 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
3830 {
3831 	struct htt_soc *soc = (struct htt_soc *) context;
3832 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
3833 	u_int32_t *msg_word;
3834 	enum htt_t2h_msg_type msg_type;
3835 	bool free_buf = true;
3836 
3837 	/* check for successful message reception */
3838 	if (pkt->Status != QDF_STATUS_SUCCESS) {
3839 		if (pkt->Status != QDF_STATUS_E_CANCELED)
3840 			soc->stats.htc_err_cnt++;
3841 
3842 		qdf_nbuf_free(htt_t2h_msg);
3843 		return;
3844 	}
3845 
3846 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
3847 
3848 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
3849 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3850 	htt_event_record(soc->htt_logger_handle,
3851 			 msg_type, (uint8_t *)msg_word);
3852 	switch (msg_type) {
3853 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
3854 	{
3855 		dp_htt_bkp_event_alert(msg_word, soc);
3856 		break;
3857 	}
3858 	case HTT_T2H_MSG_TYPE_PEER_MAP:
3859 		{
3860 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3861 			u_int8_t *peer_mac_addr;
3862 			u_int16_t peer_id;
3863 			u_int16_t hw_peer_id;
3864 			u_int8_t vdev_id;
3865 			u_int8_t is_wds;
3866 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
3867 
3868 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
3869 			hw_peer_id =
3870 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
3871 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
3872 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
3873 				(u_int8_t *) (msg_word+1),
3874 				&mac_addr_deswizzle_buf[0]);
3875 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3876 				QDF_TRACE_LEVEL_INFO,
3877 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3878 				peer_id, vdev_id);
3879 
3880 			/*
3881 			 * check if peer already exists for this peer_id, if so
3882 			 * this peer map event is in response for a wds peer add
3883 			 * wmi command sent during wds source port learning.
3884 			 * in this case just add the ast entry to the existing
3885 			 * peer ast_list.
3886 			 */
3887 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
3888 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
3889 					       vdev_id, peer_mac_addr, 0,
3890 					       is_wds);
3891 			break;
3892 		}
3893 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
3894 		{
3895 			u_int16_t peer_id;
3896 			u_int8_t vdev_id;
3897 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3898 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
3899 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
3900 
3901 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3902 						 vdev_id, mac_addr, 0);
3903 			break;
3904 		}
3905 	case HTT_T2H_MSG_TYPE_SEC_IND:
3906 		{
3907 			u_int16_t peer_id;
3908 			enum cdp_sec_type sec_type;
3909 			int is_unicast;
3910 
3911 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
3912 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
3913 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
3914 			/* point to the first part of the Michael key */
3915 			msg_word++;
3916 			dp_rx_sec_ind_handler(
3917 				soc->dp_soc, peer_id, sec_type, is_unicast,
3918 				msg_word, msg_word + 2);
3919 			break;
3920 		}
3921 
3922 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
3923 		{
3924 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
3925 							     htt_t2h_msg);
3926 			break;
3927 		}
3928 
3929 	case HTT_T2H_MSG_TYPE_PKTLOG:
3930 		{
3931 			dp_pktlog_msg_handler(soc, msg_word);
3932 			break;
3933 		}
3934 
3935 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3936 		{
3937 			htc_pm_runtime_put(soc->htc_soc);
3938 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3939 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3940 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3941 				"target uses HTT version %d.%d; host uses %d.%d",
3942 				soc->tgt_ver.major, soc->tgt_ver.minor,
3943 				HTT_CURRENT_VERSION_MAJOR,
3944 				HTT_CURRENT_VERSION_MINOR);
3945 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3946 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3947 					QDF_TRACE_LEVEL_ERROR,
3948 					"*** Incompatible host/target HTT versions!");
3949 			}
3950 			/* abort if the target is incompatible with the host */
3951 			qdf_assert(soc->tgt_ver.major ==
3952 				HTT_CURRENT_VERSION_MAJOR);
3953 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3954 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3955 					QDF_TRACE_LEVEL_WARN,
3956 					"*** Warning: host/target HTT versions"
3957 					" are different, though compatible!");
3958 			}
3959 			break;
3960 		}
3961 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3962 		{
3963 			uint16_t peer_id;
3964 			uint8_t tid;
3965 			uint8_t win_sz;
3966 			uint16_t status;
3967 			struct dp_peer *peer;
3968 
3969 			/*
3970 			 * Update REO Queue Desc with new values
3971 			 */
3972 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3973 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3974 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3975 			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);
3976 
3977 			/*
3978 			 * Window size needs to be incremented by 1
3979 			 * since fw needs to represent a value of 256
3980 			 * using just 8 bits
3981 			 */
3982 			if (peer) {
3983 				status = dp_addba_requestprocess_wifi3(
3984 					(struct cdp_soc_t *)soc->dp_soc,
3985 					peer->mac_addr.raw, peer->vdev->vdev_id,
3986 					0, tid, 0, win_sz + 1, 0xffff);
3987 
3988 				/*
3989 				 * If PEER_LOCK_REF_PROTECT is enabled, drop
3990 				 * the reference taken by dp_peer_find_by_id
3991 				 */
3992 				dp_peer_unref_del_find_by_id(peer);
3993 
3994 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3995 					QDF_TRACE_LEVEL_INFO,
3996 					FL("PeerID %d BAW %d TID %d stat %d"),
3997 					peer_id, win_sz, tid, status);
3998 
3999 			} else {
4000 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4001 					QDF_TRACE_LEVEL_ERROR,
4002 					FL("Peer not found peer id %d"),
4003 					peer_id);
4004 			}
4005 			break;
4006 		}
4007 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
4008 		{
4009 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
4010 			break;
4011 		}
4012 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
4013 		{
4014 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4015 			u_int8_t *peer_mac_addr;
4016 			u_int16_t peer_id;
4017 			u_int16_t hw_peer_id;
4018 			u_int8_t vdev_id;
4019 			bool is_wds;
4020 			u_int16_t ast_hash;
4021 
4022 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
4023 			hw_peer_id =
4024 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
4025 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
4026 			peer_mac_addr =
4027 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
4028 						   &mac_addr_deswizzle_buf[0]);
4029 			is_wds =
4030 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
4031 			ast_hash =
4032 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
4033 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4034 				  QDF_TRACE_LEVEL_INFO,
4035 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
4036 				  peer_id, vdev_id);
4037 
4038 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
4039 					       hw_peer_id, vdev_id,
4040 					       peer_mac_addr, ast_hash,
4041 					       is_wds);
4042 			break;
4043 		}
4044 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
4045 		{
4046 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4047 			u_int8_t *mac_addr;
4048 			u_int16_t peer_id;
4049 			u_int8_t vdev_id;
4050 			u_int8_t is_wds;
4051 
4052 			peer_id =
4053 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
4054 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
4055 			mac_addr =
4056 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
4057 						   &mac_addr_deswizzle_buf[0]);
4058 			is_wds =
4059 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
4060 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4061 				  QDF_TRACE_LEVEL_INFO,
4062 				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
4063 				  peer_id, vdev_id);
4064 
4065 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
4066 						 vdev_id, mac_addr,
4067 						 is_wds);
4068 			break;
4069 		}
4070 	default:
4071 		break;
4072 	};
4073 
4074 	/* Free the indication buffer */
4075 	if (free_buf)
4076 		qdf_nbuf_free(htt_t2h_msg);
4077 }
4078 
4079 /*
4080  * dp_htt_h2t_full() - Send full handler (called from HTC)
4081  * @context:	Opaque context (HTT SOC handle)
4082  * @pkt:	HTC packet
4083  *
4084  * Return: enum htc_send_full_action
4085  */
4086 static enum htc_send_full_action
4087 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
4088 {
4089 	return HTC_SEND_FULL_KEEP;
4090 }
4091 
4092 /*
4093  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
4094  * @context:	Opaque context (HTT SOC handle)
4095  * @nbuf:	nbuf containing T2H message
4096  * @pipe_id:	HIF pipe ID
4097  *
4098  * Return: QDF_STATUS
4099  *
4100  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
4101  * will be used for packet log and other high-priority HTT messages. Proper
4102  * HTC connection to be added later once required FW changes are available
4103  */
4104 static QDF_STATUS
4105 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
4106 {
4107 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
4108 	HTC_PACKET htc_pkt;
4109 
4110 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
4111 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
4112 	htc_pkt.Status = QDF_STATUS_SUCCESS;
4113 	htc_pkt.pPktContext = (void *)nbuf;
4114 	dp_htt_t2h_msg_handler(context, &htc_pkt);
4115 
4116 	return rc;
4117 }
4118 
4119 /*
4120  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
4121  * @htt_soc:	HTT SOC handle
4122  *
4123  * Return: QDF_STATUS
4124  */
4125 static QDF_STATUS
4126 htt_htc_soc_attach(struct htt_soc *soc)
4127 {
4128 	struct htc_service_connect_req connect;
4129 	struct htc_service_connect_resp response;
4130 	QDF_STATUS status;
4131 	struct dp_soc *dpsoc = soc->dp_soc;
4132 
4133 	qdf_mem_zero(&connect, sizeof(connect));
4134 	qdf_mem_zero(&response, sizeof(response));
4135 
4136 	connect.pMetaData = NULL;
4137 	connect.MetaDataLength = 0;
4138 	connect.EpCallbacks.pContext = soc;
4139 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
4140 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
4141 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
4142 
4143 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
4144 	connect.EpCallbacks.EpRecvRefill = NULL;
4145 
4146 	/* N/A, fill is done by HIF */
4147 	connect.EpCallbacks.RecvRefillWaterMark = 1;
4148 
4149 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
4150 	/*
4151 	 * Specify how deep to let a queue get before htc_send_pkt will
4152 	 * call the EpSendFull function due to excessive send queue depth.
4153 	 */
4154 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
4155 
4156 	/* disable flow control for HTT data message service */
4157 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
4158 
4159 	/* connect to control service */
4160 	connect.service_id = HTT_DATA_MSG_SVC;
4161 
4162 	status = htc_connect_service(soc->htc_soc, &connect, &response);
4163 
4164 	if (status != QDF_STATUS_SUCCESS)
4165 		return status;
4166 
4167 	soc->htc_endpoint = response.Endpoint;
4168 
4169 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
4170 
4171 	htt_interface_logging_init(&soc->htt_logger_handle);
4172 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
4173 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
4174 
4175 	return QDF_STATUS_SUCCESS; /* success */
4176 }
4177 
4178 /*
4179  * htt_soc_initialize() - SOC level HTT initialization
4180  * @htt_soc: Opaque htt SOC handle
4181  * @ctrl_psoc: Opaque ctrl SOC handle
4182  * @htc_soc: SOC level HTC handle
4183  * @hal_soc_hdl: Opaque HAL SOC handle
4184  * @osdev: QDF device
4185  *
4186  * Return: HTT handle on success; NULL on failure
4187  */
4188 void *
4189 htt_soc_initialize(struct htt_soc *htt_soc,
4190 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
4191 		   HTC_HANDLE htc_soc,
4192 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
4193 {
4194 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
4195 
4196 	soc->osdev = osdev;
4197 	soc->ctrl_psoc = ctrl_psoc;
4198 	soc->htc_soc = htc_soc;
4199 	soc->hal_soc = hal_soc_hdl;
4200 
4201 	if (htt_htc_soc_attach(soc))
4202 		goto fail2;
4203 
4204 	return soc;
4205 
4206 fail2:
4207 	return NULL;
4208 }
4209 
4210 void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
4211 {
4212 	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
4213 	htt_htc_misc_pkt_pool_free(htt_handle);
4214 	htt_htc_pkt_pool_free(htt_handle);
4215 }
4216 
4217 /*
4218  * htt_soc_htc_prealloc() - HTC memory prealloc
4219  * @htt_soc: SOC level HTT handle
4220  *
4221  * Return: QDF_STATUS_SUCCESS on Success or
4222  * QDF_STATUS_E_NOMEM on allocation failure
4223  */
4224 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
4225 {
4226 	int i;
4227 
4228 	soc->htt_htc_pkt_freelist = NULL;
4229 	/* pre-allocate some HTC_PACKET objects */
4230 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
4231 		struct dp_htt_htc_pkt_union *pkt;
4232 		pkt = qdf_mem_malloc(sizeof(*pkt));
4233 		if (!pkt)
4234 			return QDF_STATUS_E_NOMEM;
4235 
4236 		htt_htc_pkt_free(soc, &pkt->u.pkt);
4237 	}
4238 	return QDF_STATUS_SUCCESS;
4239 }
4240 
4241 /*
4242  * htt_soc_detach() - Free SOC level HTT handle
4243  * @htt_hdl: HTT SOC handle
4244  */
4245 void htt_soc_detach(struct htt_soc *htt_hdl)
4246 {
4247 	int i;
4248 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
4249 
4250 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4251 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
4252 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
4253 	}
4254 
4255 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
4256 	qdf_mem_free(htt_handle);
4257 
4258 }
4259 
4260 /**
4261  * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
4262  * @pdev: DP PDEV handle
4263  * @stats_type_upload_mask: stats type requested by user
4264  * @config_param_0: extra configuration parameters
4265  * @config_param_1: extra configuration parameters
4266  * @config_param_2: extra configuration parameters
4267  * @config_param_3: extra configuration parameters
4268  * @mac_id: mac number
4269  *
4270  * return: QDF STATUS
4271  */
4272 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
4273 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
4274 		uint32_t config_param_1, uint32_t config_param_2,
4275 		uint32_t config_param_3, int cookie_val, int cookie_msb,
4276 		uint8_t mac_id)
4277 {
4278 	struct htt_soc *soc = pdev->soc->htt_handle;
4279 	struct dp_htt_htc_pkt *pkt;
4280 	qdf_nbuf_t msg;
4281 	uint32_t *msg_word;
4282 	uint8_t pdev_mask = 0;
4283 	uint8_t *htt_logger_bufp;
4284 	int mac_for_pdev;
4285 	int target_pdev_id;
4286 
4287 	msg = qdf_nbuf_alloc(
4288 			soc->osdev,
4289 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
4290 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4291 
4292 	if (!msg)
4293 		return QDF_STATUS_E_NOMEM;
4294 
4295 	/*TODO:Add support for SOC stats
4296 	 * Bit 0: SOC Stats
4297 	 * Bit 1: Pdev stats for pdev id 0
4298 	 * Bit 2: Pdev stats for pdev id 1
4299 	 * Bit 3: Pdev stats for pdev id 2
4300 	 */
4301 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4302 	target_pdev_id =
4303 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
4304 
4305 	pdev_mask = 1 << target_pdev_id;
4306 	/*
4307 	 * Set the length of the message.
4308 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4309 	 * separately during the below call to qdf_nbuf_push_head.
4310 	 * The contribution from the HTC header is added separately inside HTC.
4311 	 */
4312 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
4313 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4314 				"Failed to expand head for HTT_EXT_STATS");
4315 		qdf_nbuf_free(msg);
4316 		return QDF_STATUS_E_FAILURE;
4317 	}
4318 
4319 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4320 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
4321 		"config_param_1 %u\n config_param_2 %u\n"
4322 		"config_param_4 %u\n -------------",
4323 		__func__, __LINE__, cookie_val, config_param_0,
4324 		config_param_1, config_param_2,	config_param_3);
4325 
4326 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
4327 
4328 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4329 	htt_logger_bufp = (uint8_t *)msg_word;
4330 	*msg_word = 0;
4331 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
4332 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
4333 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
4334 
4335 	/* word 1 */
4336 	msg_word++;
4337 	*msg_word = 0;
4338 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
4339 
4340 	/* word 2 */
4341 	msg_word++;
4342 	*msg_word = 0;
4343 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
4344 
4345 	/* word 3 */
4346 	msg_word++;
4347 	*msg_word = 0;
4348 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
4349 
4350 	/* word 4 */
4351 	msg_word++;
4352 	*msg_word = 0;
4353 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
4354 
4355 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
4356 
4357 	/* word 5 */
4358 	msg_word++;
4359 
4360 	/* word 6 */
4361 	msg_word++;
4362 	*msg_word = 0;
4363 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
4364 
4365 	/* word 7 */
4366 	msg_word++;
4367 	*msg_word = 0;
4368 	/*Using last 2 bits for pdev_id */
4369 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
4370 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
4371 
4372 	pkt = htt_htc_pkt_alloc(soc);
4373 	if (!pkt) {
4374 		qdf_nbuf_free(msg);
4375 		return QDF_STATUS_E_NOMEM;
4376 	}
4377 
4378 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4379 
4380 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4381 			dp_htt_h2t_send_complete_free_netbuf,
4382 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4383 			soc->htc_endpoint,
4384 			/* tag for FW response msg not guaranteed */
4385 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4386 
4387 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4388 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
4389 			    htt_logger_bufp);
4390 	return 0;
4391 }
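
/*
 * Caller sketch (illustrative; the stats id and parameter values are
 * hypothetical): request pdev TX stats with all four config params,
 * cookie_val and cookie_msb zeroed, on mac 0. Note that word 7 above
 * packs pdev_id into the two LSBs of cookie_msb, so callers pass an
 * unshifted cookie.
 *
 *	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PDEV_TX,
 *				  0, 0, 0, 0, 0, 0, 0);
 */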
4392 
4393 /* This will be reverted once a proper HTT header defines
4394  * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the htt.h file
4395  */
4396 #if defined(WDI_EVENT_ENABLE)
4397 /**
4398  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
4399  * @pdev: DP PDEV handle
4400  * @stats_type_upload_mask: stats type requested by user
4401  * @mac_id: Mac id number
4402  *
4403  * return: QDF STATUS
4404  */
4405 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
4406 		uint32_t stats_type_upload_mask, uint8_t mac_id)
4407 {
4408 	struct htt_soc *soc = pdev->soc->htt_handle;
4409 	struct dp_htt_htc_pkt *pkt;
4410 	qdf_nbuf_t msg;
4411 	uint32_t *msg_word;
4412 	uint8_t pdev_mask;
4413 
4414 	msg = qdf_nbuf_alloc(
4415 			soc->osdev,
4416 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
4417 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
4418 
4419 	if (!msg) {
4420 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4421 		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
4422 		qdf_assert(0);
4423 		return QDF_STATUS_E_NOMEM;
4424 	}
4425 
4426 	/*TODO:Add support for SOC stats
4427 	 * Bit 0: SOC Stats
4428 	 * Bit 1: Pdev stats for pdev id 0
4429 	 * Bit 2: Pdev stats for pdev id 1
4430 	 * Bit 3: Pdev stats for pdev id 2
4431 	 */
4432 	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
4433 								mac_id);
4434 
4435 	/*
4436 	 * Set the length of the message.
4437 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4438 	 * separately during the below call to qdf_nbuf_push_head.
4439 	 * The contribution from the HTC header is added separately inside HTC.
4440 	 */
4441 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
4442 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4443 				"Failed to expand head for HTT_CFG_STATS");
4444 		qdf_nbuf_free(msg);
4445 		return QDF_STATUS_E_FAILURE;
4446 	}
4447 
4448 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
4449 
4450 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4451 	*msg_word = 0;
4452 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
4453 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
4454 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
4455 			stats_type_upload_mask);
4456 
4457 	pkt = htt_htc_pkt_alloc(soc);
4458 	if (!pkt) {
4459 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4460 				"Fail to allocate dp_htt_htc_pkt buffer");
4461 		qdf_assert(0);
4462 		qdf_nbuf_free(msg);
4463 		return QDF_STATUS_E_NOMEM;
4464 	}
4465 
4466 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4467 
4468 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4469 			dp_htt_h2t_send_complete_free_netbuf,
4470 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4471 			soc->htc_endpoint,
4472 			1); /* tag - not relevant here */
4473 
4474 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4475 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
4476 			    (uint8_t *)msg_word);
4477 	return 0;
4478 }
4479 #endif
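
/*
 * Usage sketch (illustrative): enabling PPDU stats delivery for a
 * mac means sending the desired TLV bitmask, e.g. (assuming the
 * DP_PPDU_STATS_CFG_SNIFFER mask used elsewhere in the DP layer):
 *
 *	dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER, mac_id);
 */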
4480 
4481 void
4482 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
4483 			     uint32_t *tag_buf)
4484 {
4485 	switch (tag_type) {
4486 	case HTT_STATS_PEER_DETAILS_TAG:
4487 	{
4488 		htt_peer_details_tlv *dp_stats_buf =
4489 			(htt_peer_details_tlv *)tag_buf;
4490 
4491 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
4492 	}
4493 	break;
4494 	case HTT_STATS_PEER_STATS_CMN_TAG:
4495 	{
4496 		htt_peer_stats_cmn_tlv *dp_stats_buf =
4497 			(htt_peer_stats_cmn_tlv *)tag_buf;
4498 
4499 		struct dp_peer *peer = dp_peer_find_by_id(pdev->soc,
4500 						pdev->fw_stats_peer_id);
4501 
4502 		if (peer && !peer->bss_peer) {
4503 			peer->stats.tx.inactive_time =
4504 				dp_stats_buf->inactive_time;
4505 			qdf_event_set(&pdev->fw_peer_stats_event);
4506 		}
4507 		if (peer)
4508 			dp_peer_unref_del_find_by_id(peer);
4509 	}
4510 	break;
4511 	default:
4512 		qdf_err("Invalid tag_type");
4513 	}
4514 }
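
/*
 * The two tags handled above arrive back to back in one HTT EXT
 * stats response: HTT_STATS_PEER_DETAILS_TAG first latches the
 * sw_peer_id into pdev->fw_stats_peer_id, then
 * HTT_STATS_PEER_STATS_CMN_TAG consumes it. Sketch of the expected
 * call sequence from the stats work handler (buffer names are
 * illustrative):
 *
 *	dp_peer_update_inactive_time(pdev, HTT_STATS_PEER_DETAILS_TAG,
 *				     details_tlv);
 *	dp_peer_update_inactive_time(pdev, HTT_STATS_PEER_STATS_CMN_TAG,
 *				     cmn_tlv);
 */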
4515 
4516 /**
4517  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
4518  * @pdev: DP pdev handle
4519  * @fse_setup_info: FST setup parameters
4520  *
4521  * Return: Success when HTT message is sent, error on failure
4522  */
4523 QDF_STATUS
4524 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
4525 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
4526 {
4527 	struct htt_soc *soc = pdev->soc->htt_handle;
4528 	struct dp_htt_htc_pkt *pkt;
4529 	qdf_nbuf_t msg;
4530 	u_int32_t *msg_word;
4531 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4532 	uint8_t *htt_logger_bufp;
4533 	u_int32_t *key;
4534 
4535 	msg = qdf_nbuf_alloc(
4536 		soc->osdev,
4537 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4538 		/* reserve room for the HTC header */
4539 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4540 
4541 	if (!msg)
4542 		return QDF_STATUS_E_NOMEM;
4543 
4544 	/*
4545 	 * Set the length of the message.
4546 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4547 	 * separately during the below call to qdf_nbuf_push_head.
4548 	 * The contribution from the HTC header is added separately inside HTC.
4549 	 */
4550 	if (!qdf_nbuf_put_tail(msg,
4551 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4552 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
		qdf_nbuf_free(msg);	/* free the nbuf on this error path */
4553 		return QDF_STATUS_E_FAILURE;
4554 	}
4555 
4556 	/* fill in the message contents */
4557 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4558 
4559 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4560 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4561 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4562 	htt_logger_bufp = (uint8_t *)msg_word;
4563 
4564 	*msg_word = 0;
4565 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4566 
4567 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4568 
4569 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4570 
4571 	msg_word++;
4572 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4573 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4574 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4575 					     fse_setup_info->ip_da_sa_prefix);
4576 
4577 	msg_word++;
4578 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4579 					  fse_setup_info->base_addr_lo);
4580 	msg_word++;
4581 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4582 					  fse_setup_info->base_addr_hi);
4583 
4584 	key = (u_int32_t *)fse_setup_info->hash_key;
4585 	fse_setup->toeplitz31_0 = *key++;
4586 	fse_setup->toeplitz63_32 = *key++;
4587 	fse_setup->toeplitz95_64 = *key++;
4588 	fse_setup->toeplitz127_96 = *key++;
4589 	fse_setup->toeplitz159_128 = *key++;
4590 	fse_setup->toeplitz191_160 = *key++;
4591 	fse_setup->toeplitz223_192 = *key++;
4592 	fse_setup->toeplitz255_224 = *key++;
4593 	fse_setup->toeplitz287_256 = *key++;
4594 	fse_setup->toeplitz314_288 = *key;
4595 
4596 	msg_word++;
4597 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4598 	msg_word++;
4599 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4600 	msg_word++;
4601 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4602 	msg_word++;
4603 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4604 	msg_word++;
4605 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4606 	msg_word++;
4607 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4608 	msg_word++;
4609 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4610 	msg_word++;
4611 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4612 	msg_word++;
4613 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4614 	msg_word++;
4615 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4616 					  fse_setup->toeplitz314_288);
4617 
4618 	pkt = htt_htc_pkt_alloc(soc);
4619 	if (!pkt) {
4620 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4621 		qdf_assert(0);
4622 		qdf_nbuf_free(msg);
4623 		return QDF_STATUS_E_RESOURCES; /* failure */
4624 	}
4625 
4626 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4627 
4628 	SET_HTC_PACKET_INFO_TX(
4629 		&pkt->htc_pkt,
4630 		dp_htt_h2t_send_complete_free_netbuf,
4631 		qdf_nbuf_data(msg),
4632 		qdf_nbuf_len(msg),
4633 		soc->htc_endpoint,
4634 		1); /* tag - not relevant here */
4635 
4636 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4637 
4638 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4639 			    htt_logger_bufp);
4640 
4641 	qdf_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4642 		 fse_setup_info->pdev_id);
4643 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4644 			   (void *)fse_setup_info->hash_key,
4645 			   fse_setup_info->hash_key_len);
4646 
4647 	return QDF_STATUS_SUCCESS;
4648 }
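
/*
 * Caller sketch (illustrative; the values are hypothetical and the
 * hash key would normally come from the Rx FST attach path):
 *
 *	struct dp_htt_rx_flow_fst_setup info = { 0 };
 *
 *	info.pdev_id = pdev->pdev_id;
 *	info.max_entries = 1024;
 *	info.max_search = 8;
 *	info.base_addr_lo = fst_paddr & 0xffffffff;
 *	info.base_addr_hi = fst_paddr >> 32;
 *	info.hash_key = toeplitz_key;
 *	info.hash_key_len = 40;
 *	dp_htt_rx_flow_fst_setup(pdev, &info);
 */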
4649 
4650 /**
4651  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
4652  * add/del a flow in HW
4653  * @pdev: DP pdev handle
4654  * @fse_op_info: Flow entry parameters
4655  *
4656  * Return: Success when HTT message is sent, error on failure
4657  */
4658 QDF_STATUS
4659 dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
4660 			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
4661 {
4662 	struct htt_soc *soc = pdev->soc->htt_handle;
4663 	struct dp_htt_htc_pkt *pkt;
4664 	qdf_nbuf_t msg;
4665 	u_int32_t *msg_word;
4666 	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
4667 	uint8_t *htt_logger_bufp;
4668 
4669 	msg = qdf_nbuf_alloc(
4670 		soc->osdev,
4671 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
4672 		/* reserve room for the HTC header */
4673 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4674 	if (!msg)
4675 		return QDF_STATUS_E_NOMEM;
4676 
4677 	/*
4678 	 * Set the length of the message.
4679 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4680 	 * separately during the below call to qdf_nbuf_push_head.
4681 	 * The contribution from the HTC header is added separately inside HTC.
4682 	 */
4683 	if (!qdf_nbuf_put_tail(msg,
4684 			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
4685 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4686 		return QDF_STATUS_E_FAILURE;
4687 	}
4688 
4689 	/* fill in the message contents */
4690 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4691 
4692 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
4693 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4694 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4695 	htt_logger_bufp = (uint8_t *)msg_word;
4696 
4697 	*msg_word = 0;
4698 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
4699 
4700 	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
4701 
4702 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
4703 	msg_word++;
4704 	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
4705 	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
4706 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4707 					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
4708 		msg_word++;
4709 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4710 		*msg_word,
4711 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
4712 		msg_word++;
4713 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4714 		*msg_word,
4715 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
4716 		msg_word++;
4717 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4718 		*msg_word,
4719 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
4720 		msg_word++;
4721 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4722 		*msg_word,
4723 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
4724 		msg_word++;
4725 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4726 		*msg_word,
4727 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
4728 		msg_word++;
4729 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4730 		*msg_word,
4731 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
4732 		msg_word++;
4733 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4734 		*msg_word,
4735 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
4736 		msg_word++;
4737 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4738 		*msg_word,
4739 		qdf_htonl(
4740 		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
4741 		msg_word++;
4742 		HTT_RX_FSE_SOURCEPORT_SET(
4743 			*msg_word,
4744 			fse_op_info->rx_flow->flow_tuple_info.src_port);
4745 		HTT_RX_FSE_DESTPORT_SET(
4746 			*msg_word,
4747 			fse_op_info->rx_flow->flow_tuple_info.dest_port);
4748 		msg_word++;
4749 		HTT_RX_FSE_L4_PROTO_SET(
4750 			*msg_word,
4751 			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
4752 	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
4753 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4754 					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
4755 	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
4756 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
4757 	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
4758 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
4759 	}
4760 
4761 	pkt = htt_htc_pkt_alloc(soc);
4762 	if (!pkt) {
4763 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4764 		qdf_assert(0);
4765 		qdf_nbuf_free(msg);
4766 		return QDF_STATUS_E_RESOURCES; /* failure */
4767 	}
4768 
4769 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4770 
4771 	SET_HTC_PACKET_INFO_TX(
4772 		&pkt->htc_pkt,
4773 		dp_htt_h2t_send_complete_free_netbuf,
4774 		qdf_nbuf_data(msg),
4775 		qdf_nbuf_len(msg),
4776 		soc->htc_endpoint,
4777 		1); /* tag - not relevant here */
4778 
4779 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4780 
4781 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
4782 			    htt_logger_bufp);
4783 
4784 	qdf_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
4785 		 fse_op_info->pdev_id);
4786 
4787 	return QDF_STATUS_SUCCESS;
4788 }
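
/*
 * Caller sketch (illustrative; values are hypothetical): invalidate
 * the cached FSE entry for a single 5-tuple. rx_flow carries the
 * flow_tuple_info consumed by the INVALIDATE_ENTRY branch above.
 *
 *	struct dp_htt_rx_flow_fst_operation op = { 0 };
 *
 *	op.pdev_id = pdev->pdev_id;
 *	op.op_code = DP_HTT_FST_CACHE_INVALIDATE_ENTRY;
 *	op.rx_flow = &flow;
 *	dp_htt_rx_flow_fse_operation(pdev, &op);
 */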
4789