1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_rx.h"
26 #include "dp_rx_mon.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "dp_htt.h"
30 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
31 #include "cdp_txrx_cmn_struct.h"
32 
33 #ifdef FEATURE_PERPKT_INFO
34 #include "dp_ratetable.h"
35 #endif
36 
37 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
38 
39 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
40 
41 #define HTT_MSG_BUF_SIZE(msg_bytes) \
42 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
43 
44 #define HTT_PID_BIT_MASK 0x3
45 
46 #define DP_EXT_MSG_LENGTH 2048
47 #define HTT_HEADER_LEN 16
48 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
49 
50 #define HTT_SHIFT_UPPER_TIMESTAMP 32
51 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
52 
/*
 * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get the ppdu stats tlv
 * bitmap for sniffer mode
 * @bitmap: received bitmap
 *
 * Return: the matching bitmap value; zero if the bitmap matches neither
 * the 64-bit nor the 256-bit Tx window tlv bitmap
 */
61 int
62 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
63 {
64 	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
65 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
66 	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
67 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
68 
69 	return 0;
70 }
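
/*
 * Example (illustrative only, not part of the driver): a hypothetical
 * caller can use the helper above to decide whether a sniffer-mode PPDU
 * descriptor has received its complete A-MPDU TLV set:
 *
 *	if (dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(tlv_bitmap))
 *		... all TLVs for a 64- or 256-MPDU BA window received ...
 *	else
 *		... keep waiting for the remaining TLVs ...
 */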
71 
72 #ifdef FEATURE_PERPKT_INFO
/*
 * dp_peer_find_by_id_valid - check if a peer exists for the given id
 * @soc: core DP soc context
 * @peer_id: peer id by which the peer object is looked up
 *
 * Return: true if the peer exists, false otherwise
 */
81 static
82 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
83 {
84 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
85 						     DP_MOD_ID_HTT);
86 
87 	if (peer) {
88 		/*
89 		 * Decrement the peer ref which is taken as part of
90 		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
91 		 */
92 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
93 
94 		return true;
95 	}
96 
97 	return false;
98 }
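
/*
 * Note on the reference-counting contract used above (a sketch, based on
 * the calls visible in this file): every successful
 * dp_peer_get_ref_by_id() must be balanced by dp_peer_unref_delete()
 * with the same DP_MOD_ID_* tag:
 *
 *	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
 *	if (peer) {
 *		... use peer ...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
 *	}
 */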
99 
/*
 * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
 * @peer: Datapath peer handle
 * @ppdu: User PPDU Descriptor
 * @cur_ppdu_id: PPDU_ID
 *
 * Return: None
 *
 * For a Tx data frame, the delayed-BA flag may be set in
 * htt_ppdu_stats_user_common_tlv, which means the Block Ack (BA) arrives
 * only after we send a Block Ack Request (BAR). The successful MSDU count
 * is known only after the BA, and peer stats need the successful MSDUs
 * (data frames), so the Tx data stats are held in delayed_ba until then.
 */
114 static void
115 dp_peer_copy_delay_stats(struct dp_peer *peer,
116 			 struct cdp_tx_completion_ppdu_user *ppdu,
117 			 uint32_t cur_ppdu_id)
118 {
119 	struct dp_pdev *pdev;
120 	struct dp_vdev *vdev;
121 
122 	if (peer->last_delayed_ba) {
123 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
124 			  "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]",
125 			  peer->last_delayed_ba_ppduid, cur_ppdu_id);
126 		vdev = peer->vdev;
127 		if (vdev) {
128 			pdev = vdev->pdev;
129 			pdev->stats.cdp_delayed_ba_not_recev++;
130 		}
131 	}
132 
133 	peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
134 	peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
135 	peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
136 	peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
137 	peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
138 	peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
139 	peer->delayed_ba_ppdu_stats.preamble = ppdu->preamble;
140 	peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
141 	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
142 	peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
144 	peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = ppdu->mpdu_tried_ucast;
145 	peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = ppdu->mpdu_tried_mcast;
146 	peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
147 	peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
149 
150 	peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
151 	peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
152 	peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
153 
154 	peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
155 	peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
156 
157 	peer->last_delayed_ba = true;
158 
159 	ppdu->debug_copied = true;
160 }
161 
/*
 * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 *
 * For a Tx BAR, the PPDU stats TLVs carry the Block Ack info. The PPDU
 * info from the BAR frame itself is not needed to populate peer stats,
 * but its successful MPDU/MSDU counts are needed to account for the
 * previously transmitted Tx data frame, so the BAR's ppdu stats are
 * overwritten with the previously stored ppdu stats.
 */
175 static void
176 dp_peer_copy_stats_to_bar(struct dp_peer *peer,
177 			  struct cdp_tx_completion_ppdu_user *ppdu)
178 {
179 	ppdu->ltf_size = peer->delayed_ba_ppdu_stats.ltf_size;
180 	ppdu->stbc = peer->delayed_ba_ppdu_stats.stbc;
181 	ppdu->he_re = peer->delayed_ba_ppdu_stats.he_re;
182 	ppdu->txbf = peer->delayed_ba_ppdu_stats.txbf;
183 	ppdu->bw = peer->delayed_ba_ppdu_stats.bw;
184 	ppdu->nss = peer->delayed_ba_ppdu_stats.nss;
185 	ppdu->preamble = peer->delayed_ba_ppdu_stats.preamble;
186 	ppdu->gi = peer->delayed_ba_ppdu_stats.gi;
187 	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
188 	ppdu->ldpc = peer->delayed_ba_ppdu_stats.ldpc;
190 	ppdu->mpdu_tried_ucast = peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
191 	ppdu->mpdu_tried_mcast = peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
192 	ppdu->frame_ctrl = peer->delayed_ba_ppdu_stats.frame_ctrl;
193 	ppdu->qos_ctrl = peer->delayed_ba_ppdu_stats.qos_ctrl;
195 
196 	ppdu->ru_start = peer->delayed_ba_ppdu_stats.ru_start;
197 	ppdu->ru_tones = peer->delayed_ba_ppdu_stats.ru_tones;
198 	ppdu->is_mcast = peer->delayed_ba_ppdu_stats.is_mcast;
199 
200 	ppdu->user_pos = peer->delayed_ba_ppdu_stats.user_pos;
201 	ppdu->mu_group_id = peer->delayed_ba_ppdu_stats.mu_group_id;
202 
203 	peer->last_delayed_ba = false;
204 
205 	ppdu->debug_copied = true;
206 }
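
/*
 * The two helpers above form a save/restore pair. A simplified view of
 * the flow (assuming a single outstanding delayed BA per peer):
 *
 *	// data PPDU completes, but its BA is still pending:
 *	dp_peer_copy_delay_stats(peer, user, ppdu_id);
 *	...
 *	// a later BAR/BA PPDU arrives for the same peer:
 *	dp_peer_copy_stats_to_bar(peer, user);
 *	// 'user' now carries the original data-frame rate info plus the
 *	// BA success counts, ready for dp_tx_stats_update()
 */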
207 
208 /*
 * dp_tx_rate_stats_update() - Update per-peer rate statistics
210  * @peer: Datapath peer handle
211  * @ppdu: PPDU Descriptor
212  *
213  * Return: None
214  */
215 static void
216 dp_tx_rate_stats_update(struct dp_peer *peer,
217 			struct cdp_tx_completion_ppdu_user *ppdu)
218 {
219 	uint32_t ratekbps = 0;
220 	uint64_t ppdu_tx_rate = 0;
221 	uint32_t rix;
222 	uint16_t ratecode = 0;
223 
224 	if (!peer || !ppdu)
225 		return;
226 
227 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
228 		return;
229 
230 	ratekbps = dp_getrateindex(ppdu->gi,
231 				   ppdu->mcs,
232 				   ppdu->nss,
233 				   ppdu->preamble,
234 				   ppdu->bw,
235 				   &rix,
236 				   &ratecode);
237 
238 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
239 
240 	if (!ratekbps)
241 		return;
242 
	/* Calculate goodput in the non-training period.
	 * In the training period, do nothing, as the pending
	 * packets are sent as goodput.
	 */
247 	if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
248 		ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
249 				(CDP_PERCENT_MACRO - ppdu->current_rate_per));
250 	}
251 	ppdu->rix = rix;
252 	ppdu->tx_ratekbps = ratekbps;
253 	ppdu->tx_ratecode = ratecode;
254 	peer->stats.tx.avg_tx_rate =
255 		dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
256 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
257 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
258 
259 	if (peer->vdev) {
260 		/*
261 		 * In STA mode:
262 		 *	We get ucast stats as BSS peer stats.
263 		 *
264 		 * In AP mode:
265 		 *	We get mcast stats as BSS peer stats.
266 		 *	We get ucast stats as assoc peer stats.
267 		 */
268 		if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
269 			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
270 			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
271 		} else {
272 			peer->vdev->stats.tx.last_tx_rate = ratekbps;
273 			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
274 		}
275 	}
276 }
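
/*
 * Worked example for the goodput estimate above (illustrative numbers,
 * assuming CDP_NUM_KB_IN_MB == 1000 and CDP_PERCENT_MACRO == 100):
 * with ratekbps = 144000 (144 Mbps) and current_rate_per = 10 (10% PER),
 *
 *	sa_goodput = (144000 / 1000) * (100 - 10) = 144 * 90 = 12960
 *
 * i.e. the estimate scales the PHY rate in Mbps by the percentage of
 * PPDUs expected to succeed.
 */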
277 
278 /*
279  * dp_tx_stats_update() - Update per-peer statistics
280  * @pdev: Datapath pdev handle
281  * @peer: Datapath peer handle
282  * @ppdu: PPDU Descriptor
283  * @ack_rssi: RSSI of last ack received
284  *
285  * Return: None
286  */
287 static void
288 dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
289 		   struct cdp_tx_completion_ppdu_user *ppdu,
290 		   uint32_t ack_rssi)
291 {
292 	uint8_t preamble, mcs;
293 	uint16_t num_msdu;
294 	uint16_t num_mpdu;
295 	uint16_t mpdu_tried;
296 	uint16_t mpdu_failed;
297 
298 	preamble = ppdu->preamble;
299 	mcs = ppdu->mcs;
300 	num_msdu = ppdu->num_msdu;
301 	num_mpdu = ppdu->mpdu_success;
302 	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
303 	mpdu_failed = mpdu_tried - num_mpdu;
304 
	/* If the peer statistics are already processed as part of
	 * the per-MSDU completion handler, do not process them again
	 * in the per-PPDU indications.
	 */
308 	if (pdev->soc->process_tx_status)
309 		return;
310 
311 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
		/*
		 * Every failed MPDU is retried, so increment the retry
		 * count by the number of failed MPDUs. Even on ack
		 * failure (i.e. long retries), mpdu_failed equals
		 * mpdu_tried.
		 */
318 		DP_STATS_INC(peer, tx.retries, mpdu_failed);
319 		DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
320 		return;
321 	}
322 
323 	if (ppdu->is_ppdu_cookie_valid)
324 		DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1);
325 
326 	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
327 	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
328 		if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
329 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
330 				  "mu_group_id out of bound!!\n");
331 		else
332 			DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
333 				     (ppdu->user_pos + 1));
334 	}
335 
336 	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
337 	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
338 		DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
339 		DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
340 		switch (ppdu->ru_tones) {
341 		case RU_26:
342 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu,
343 				     num_msdu);
344 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu,
345 				     num_mpdu);
346 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried,
347 				     mpdu_tried);
348 		break;
349 		case RU_52:
350 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu,
351 				     num_msdu);
352 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu,
353 				     num_mpdu);
354 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried,
355 				     mpdu_tried);
356 		break;
357 		case RU_106:
358 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu,
359 				     num_msdu);
360 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu,
361 				     num_mpdu);
362 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried,
363 				     mpdu_tried);
364 		break;
365 		case RU_242:
366 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu,
367 				     num_msdu);
368 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu,
369 				     num_mpdu);
370 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried,
371 				     mpdu_tried);
372 		break;
373 		case RU_484:
374 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu,
375 				     num_msdu);
376 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu,
377 				     num_mpdu);
378 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried,
379 				     mpdu_tried);
380 		break;
381 		case RU_996:
382 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu,
383 				     num_msdu);
384 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu,
385 				     num_mpdu);
386 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried,
387 				     mpdu_tried);
388 		break;
389 		}
390 	}
391 
	/*
	 * Every failed MPDU is retried, so increment the retry count
	 * by the number of failed MPDUs. Even on ack failure (i.e. long
	 * retries), mpdu_failed equals mpdu_tried.
	 */
398 	DP_STATS_INC(peer, tx.retries, mpdu_failed);
399 	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
400 
401 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
402 		     num_msdu);
403 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
404 		     num_mpdu);
405 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
406 		     mpdu_tried);
407 
408 	DP_STATS_INC_PKT(peer, tx.comp_pkt,
409 			num_msdu, (ppdu->success_bytes +
410 				ppdu->retry_bytes + ppdu->failed_bytes));
411 	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
412 	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
413 	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
414 	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
415 	if (ppdu->tid < CDP_DATA_TID_MAX)
416 		DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
417 			     num_msdu);
418 	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
419 	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
420 	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
421 		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);
422 
423 	DP_STATS_INCC(peer,
424 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
425 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
426 	DP_STATS_INCC(peer,
427 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
428 			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
429 	DP_STATS_INCC(peer,
430 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
431 			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
432 	DP_STATS_INCC(peer,
433 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
434 			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
435 	DP_STATS_INCC(peer,
436 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
437 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
438 	DP_STATS_INCC(peer,
439 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
440 			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
441 	DP_STATS_INCC(peer,
442 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
443 			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
444 	DP_STATS_INCC(peer,
445 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
446 			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
447 	DP_STATS_INCC(peer,
448 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
449 			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
450 	DP_STATS_INCC(peer,
451 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
452 			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
453 	DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
454 	DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
455 	DP_STATS_INCC(peer, tx.pream_punct_cnt, 1, ppdu->pream_punct);
456 
457 	dp_peer_stats_notify(pdev, peer);
458 
459 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
460 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
461 			     &peer->stats, ppdu->peer_id,
462 			     UPDATE_PEER_STATS, pdev->pdev_id);
463 #endif
464 }
465 #endif
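
/*
 * Note on the DP_STATS_INCC() chains in dp_tx_stats_update() above: for
 * each preamble type, an out-of-range MCS is folded into the last bucket
 * (MAX_MCS - 1) so a firmware-reported MCS can never index past the end
 * of mcs_count[]. Equivalent logic for one preamble (a sketch):
 *
 *	if (preamble == DOT11_A) {
 *		idx = (mcs < MAX_MCS_11A) ? mcs : (MAX_MCS - 1);
 *		peer->stats.tx.pkt_type[preamble].mcs_count[idx] += num_msdu;
 *	}
 */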
466 
467 #ifdef WLAN_TX_PKT_CAPTURE_ENH
468 #include "dp_tx_capture.h"
469 #else
470 static inline void
471 dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
472 					   void *data,
473 					   uint32_t ppdu_id,
474 					   uint32_t size)
475 {
476 }
477 #endif
478 
479 /*
480  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
481  * @htt_soc:	HTT SOC handle
482  *
 * Return: Pointer to an HTC packet buffer, or NULL on allocation failure
484  */
485 static struct dp_htt_htc_pkt *
486 htt_htc_pkt_alloc(struct htt_soc *soc)
487 {
488 	struct dp_htt_htc_pkt_union *pkt = NULL;
489 
490 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
491 	if (soc->htt_htc_pkt_freelist) {
492 		pkt = soc->htt_htc_pkt_freelist;
493 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
494 	}
495 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
496 
497 	if (!pkt)
498 		pkt = qdf_mem_malloc(sizeof(*pkt));
499 
500 	if (!pkt)
501 		return NULL;
502 
503 	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
504 
505 	return &pkt->u.pkt; /* not actually a dereference */
506 }
507 
/*
 * htt_htc_pkt_free() - Free HTC packet buffer
 * @htt_soc:	HTT SOC handle
 * @pkt:	pkt to be returned to the freelist
 */
512 static void
513 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
514 {
515 	struct dp_htt_htc_pkt_union *u_pkt =
516 		(struct dp_htt_htc_pkt_union *)pkt;
517 
518 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
519 	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
520 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
521 	soc->htt_htc_pkt_freelist = u_pkt;
522 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
523 }
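
/*
 * htt_htc_pkt_alloc()/htt_htc_pkt_free() implement a mutex-protected,
 * singly linked freelist: free pushes the union onto the list head,
 * alloc pops the head and falls back to qdf_mem_malloc() when the list
 * is empty. Typical lifecycle (a sketch):
 *
 *	pkt = htt_htc_pkt_alloc(soc);		// pop, or malloc
 *	... SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, ...) ...
 *	... on send failure: htt_htc_pkt_free(soc, pkt);  // push back
 */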
524 
525 /*
526  * htt_htc_pkt_pool_free() - Free HTC packet pool
527  * @htt_soc:	HTT SOC handle
528  */
529 void
530 htt_htc_pkt_pool_free(struct htt_soc *soc)
531 {
532 	struct dp_htt_htc_pkt_union *pkt, *next;
533 	pkt = soc->htt_htc_pkt_freelist;
534 	while (pkt) {
535 		next = pkt->u.next;
536 		qdf_mem_free(pkt);
537 		pkt = next;
538 	}
539 	soc->htt_htc_pkt_freelist = NULL;
540 }
541 
542 /*
543  * htt_htc_misc_pkt_list_trim() - trim misc list
544  * @htt_soc: HTT SOC handle
545  * @level: max no. of pkts in list
546  */
547 static void
548 htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
549 {
550 	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
551 	int i = 0;
552 	qdf_nbuf_t netbuf;
553 
554 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
555 	pkt = soc->htt_htc_pkt_misclist;
556 	while (pkt) {
557 		next = pkt->u.next;
		/* trim the overgrown list */
559 		if (++i > level) {
560 			netbuf =
561 				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
562 			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
563 			qdf_nbuf_free(netbuf);
564 			qdf_mem_free(pkt);
565 			pkt = NULL;
566 			if (prev)
567 				prev->u.next = NULL;
568 		}
569 		prev = pkt;
570 		pkt = next;
571 	}
572 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
573 }
574 
/*
 * htt_htc_misc_pkt_list_add() - Add a pkt to the misc list
 * @htt_soc:	HTT SOC handle
 * @pkt:	pkt to be added to the list
 */
580 static void
581 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
582 {
583 	struct dp_htt_htc_pkt_union *u_pkt =
584 				(struct dp_htt_htc_pkt_union *)pkt;
585 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
586 							pkt->htc_pkt.Endpoint)
587 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
588 
589 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
590 	if (soc->htt_htc_pkt_misclist) {
591 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
592 		soc->htt_htc_pkt_misclist = u_pkt;
593 	} else {
594 		soc->htt_htc_pkt_misclist = u_pkt;
595 	}
596 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
597 
	/* Only (CE pipe size + tx_queue_depth) packets could possibly be
	 * in use; free older packets in the misclist.
	 */
601 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
602 }
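
/*
 * The trim level computed above bounds the misclist to the number of
 * packets that can still be in flight: the HTC endpoint's current tx
 * queue depth plus DP_HTT_HTC_PKT_MISCLIST_SIZE (roughly the CE pipe
 * size, per the comment above). Anything beyond that count must already
 * have completed, so htt_htc_misc_pkt_list_trim() can free it safely.
 */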
603 
/**
 * DP_HTT_SEND_HTC_PKT() - Send an HTT packet from the host
 * @soc: HTT SOC handle
 * @pkt: pkt to be sent
 * @cmd: command to be recorded in the dp htt logger
 * @buf: pointer to the buffer to be recorded for the above cmd
 *
 * Return: QDF_STATUS of the HTC send operation
 */
613 static inline QDF_STATUS DP_HTT_SEND_HTC_PKT(struct htt_soc *soc,
614 					     struct dp_htt_htc_pkt *pkt,
615 					     uint8_t cmd, uint8_t *buf)
616 {
617 	QDF_STATUS status;
618 
619 	htt_command_record(soc->htt_logger_handle, cmd, buf);
620 
621 	status = htc_send_pkt(soc->htc_soc, &pkt->htc_pkt);
622 	if (status == QDF_STATUS_SUCCESS)
623 		htt_htc_misc_pkt_list_add(soc, pkt);
624 	else
625 		soc->stats.fail_count++;
626 	return status;
627 }
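
/*
 * Every H2T sender in this file follows the same pattern around
 * DP_HTT_SEND_HTC_PKT() (a sketch):
 *
 *	pkt = htt_htc_pkt_alloc(soc);
 *	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, ...);
 *	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
 *	status = DP_HTT_SEND_HTC_PKT(soc, pkt, cmd, htt_logger_bufp);
 *	if (status != QDF_STATUS_SUCCESS) {
 *		qdf_nbuf_free(msg);		// send failed: we own msg
 *		htt_htc_pkt_free(soc, pkt);
 *	}
 *
 * On success the packet is parked on the misclist and later reclaimed by
 * htt_htc_misc_pkt_list_trim() once the endpoint has drained.
 */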
628 
629 /*
630  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
631  * @htt_soc:	HTT SOC handle
632  */
633 static void
634 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
635 {
636 	struct dp_htt_htc_pkt_union *pkt, *next;
637 	qdf_nbuf_t netbuf;
638 
639 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
640 	pkt = soc->htt_htc_pkt_misclist;
641 
642 	while (pkt) {
643 		next = pkt->u.next;
644 		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
645 		    HTC_PACKET_MAGIC_COOKIE) {
646 			pkt = next;
647 			soc->stats.skip_count++;
648 			continue;
649 		}
650 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
651 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
652 
653 		soc->stats.htc_pkt_free++;
654 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
655 			 "%s: Pkt free count %d",
656 			 __func__, soc->stats.htc_pkt_free);
657 
658 		qdf_nbuf_free(netbuf);
659 		qdf_mem_free(pkt);
660 		pkt = next;
661 	}
662 	soc->htt_htc_pkt_misclist = NULL;
663 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
664 	dp_info("HTC Packets, fail count = %d, skip count = %d",
665 		soc->stats.fail_count, soc->stats.skip_count);
666 }
667 
668 /*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differs
670  * @tgt_mac_addr:	Target MAC
671  * @buffer:		Output buffer
672  */
673 static u_int8_t *
674 htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
675 {
676 #ifdef BIG_ENDIAN_HOST
677 	/*
678 	 * The host endianness is opposite of the target endianness.
679 	 * To make u_int32_t elements come out correctly, the target->host
680 	 * upload has swizzled the bytes in each u_int32_t element of the
681 	 * message.
682 	 * For byte-array message fields like the MAC address, this
683 	 * upload swizzling puts the bytes in the wrong order, and needs
684 	 * to be undone.
685 	 */
686 	buffer[0] = tgt_mac_addr[3];
687 	buffer[1] = tgt_mac_addr[2];
688 	buffer[2] = tgt_mac_addr[1];
689 	buffer[3] = tgt_mac_addr[0];
690 	buffer[4] = tgt_mac_addr[7];
691 	buffer[5] = tgt_mac_addr[6];
692 	return buffer;
693 #else
694 	/*
695 	 * The host endianness matches the target endianness -
696 	 * we can use the mac addr directly from the message buffer.
697 	 */
698 	return tgt_mac_addr;
699 #endif
700 }
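
/*
 * Worked example: if the target sends the MAC 00:11:22:33:44:55, a
 * big-endian host receives each uploaded u_int32_t byte-swapped, i.e.
 * tgt_mac_addr[] = { 33, 22, 11, 00, pad, pad, 55, 44 }. The deswizzle
 * above then restores
 *
 *	buffer[] = { tgt[3], tgt[2], tgt[1], tgt[0], tgt[7], tgt[6] }
 *	         = { 00, 11, 22, 33, 44, 55 }
 *
 * (tgt[4] and tgt[5] are the pad bytes of the second word).
 */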
701 
702 /*
703  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
704  * @soc:	SOC handle
705  * @status:	Completion status
706  * @netbuf:	HTT buffer
707  */
708 static void
709 dp_htt_h2t_send_complete_free_netbuf(
710 	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
711 {
712 	qdf_nbuf_free(netbuf);
713 }
714 
715 /*
716  * dp_htt_h2t_send_complete() - H2T completion handler
717  * @context:	Opaque context (HTT SOC handle)
718  * @htc_pkt:	HTC packet
719  */
720 static void
721 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
722 {
723 	void (*send_complete_part2)(
724 		void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
725 	struct htt_soc *soc =  (struct htt_soc *) context;
726 	struct dp_htt_htc_pkt *htt_pkt;
727 	qdf_nbuf_t netbuf;
728 
729 	send_complete_part2 = htc_pkt->pPktContext;
730 
731 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
732 
733 	/* process (free or keep) the netbuf that held the message */
734 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
735 	/*
736 	 * adf sendcomplete is required for windows only
737 	 */
738 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
739 	if (send_complete_part2) {
740 		send_complete_part2(
741 			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
742 	}
743 	/* free the htt_htc_pkt / HTC_PACKET object */
744 	htt_htc_pkt_free(soc, htt_pkt);
745 }
746 
747 /*
748  * htt_h2t_ver_req_msg() - Send HTT version request message to target
749  * @htt_soc:	HTT SOC handle
750  *
751  * Return: 0 on success; error code on failure
752  */
753 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
754 {
755 	struct dp_htt_htc_pkt *pkt;
756 	qdf_nbuf_t msg;
757 	uint32_t *msg_word;
758 	QDF_STATUS status;
759 
760 	msg = qdf_nbuf_alloc(
761 		soc->osdev,
762 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
763 		/* reserve room for the HTC header */
764 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
765 	if (!msg)
766 		return QDF_STATUS_E_NOMEM;
767 
768 	/*
769 	 * Set the length of the message.
770 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
771 	 * separately during the below call to qdf_nbuf_push_head.
772 	 * The contribution from the HTC header is added separately inside HTC.
773 	 */
	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
			__func__);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}
780 
781 	/* fill in the message contents */
782 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
783 
784 	/* rewind beyond alignment pad to get to the HTC header reserved area */
785 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
786 
787 	*msg_word = 0;
788 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
789 
790 	pkt = htt_htc_pkt_alloc(soc);
791 	if (!pkt) {
792 		qdf_nbuf_free(msg);
793 		return QDF_STATUS_E_FAILURE;
794 	}
795 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
796 
797 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
798 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
799 		qdf_nbuf_len(msg), soc->htc_endpoint,
800 		HTC_TX_PACKET_TAG_RTPM_PUT_RC);
801 
802 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
803 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
804 				     NULL);
805 
806 	if (status != QDF_STATUS_SUCCESS) {
807 		qdf_nbuf_free(msg);
808 		htt_htc_pkt_free(soc, pkt);
809 	}
810 
811 	return status;
812 }
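
/*
 * Buffer layout shared by the H2T message builders in this file
 * (a sketch):
 *
 *	msg = qdf_nbuf_alloc(osdev, HTT_MSG_BUF_SIZE(len),
 *			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
 *			     4, TRUE);
 *
 *	[ HTC header | align pad | HTT message body (len bytes) ]
 *	  reserved headroom        qdf_nbuf_put_tail(msg, len)
 *
 * qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING) then exposes the
 * alignment pad, and HTC prepends its own header inside htc_send_pkt().
 */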
813 
814 /*
815  * htt_srng_setup() - Send SRNG setup message to target
816  * @htt_soc:	HTT SOC handle
817  * @mac_id:	MAC Id
 * @hal_ring_hdl:	Opaque HAL SRNG handle
819  * @hal_ring_type:	SRNG ring type
820  *
821  * Return: 0 on success; error code on failure
822  */
823 int htt_srng_setup(struct htt_soc *soc, int mac_id,
824 		   hal_ring_handle_t hal_ring_hdl,
825 		   int hal_ring_type)
826 {
827 	struct dp_htt_htc_pkt *pkt;
828 	qdf_nbuf_t htt_msg;
829 	uint32_t *msg_word;
830 	struct hal_srng_params srng_params;
831 	qdf_dma_addr_t hp_addr, tp_addr;
832 	uint32_t ring_entry_size =
833 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
834 	int htt_ring_type, htt_ring_id;
835 	uint8_t *htt_logger_bufp;
836 	int target_pdev_id;
837 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
838 	QDF_STATUS status;
839 
840 	/* Sizes should be set in 4-byte words */
841 	ring_entry_size = ring_entry_size >> 2;
842 
843 	htt_msg = qdf_nbuf_alloc(soc->osdev,
844 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
845 		/* reserve room for the HTC header */
846 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
847 	if (!htt_msg)
848 		goto fail0;
849 
850 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
851 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
852 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
853 
854 	switch (hal_ring_type) {
855 	case RXDMA_BUF:
856 #ifdef QCA_HOST2FW_RXBUF_RING
857 		if (srng_params.ring_id ==
858 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
859 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
860 			htt_ring_type = HTT_SW_TO_SW_RING;
861 #ifdef IPA_OFFLOAD
862 		} else if (srng_params.ring_id ==
863 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
864 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
865 			htt_ring_type = HTT_SW_TO_SW_RING;
866 #endif
867 #else
868 		if (srng_params.ring_id ==
869 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
870 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
871 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
872 			htt_ring_type = HTT_SW_TO_HW_RING;
873 #endif
874 		} else if (srng_params.ring_id ==
875 #ifdef IPA_OFFLOAD
876 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
877 #else
878 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
879 #endif
880 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
881 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
882 			htt_ring_type = HTT_SW_TO_HW_RING;
883 		} else {
884 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
885 				   "%s: Ring %d currently not supported",
886 				   __func__, srng_params.ring_id);
887 			goto fail1;
888 		}
889 
890 		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
891 			hal_ring_type, srng_params.ring_id, htt_ring_id,
892 			(uint64_t)hp_addr,
893 			(uint64_t)tp_addr);
894 		break;
895 	case RXDMA_MONITOR_BUF:
896 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
897 		htt_ring_type = HTT_SW_TO_HW_RING;
898 		break;
899 	case RXDMA_MONITOR_STATUS:
900 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
901 		htt_ring_type = HTT_SW_TO_HW_RING;
902 		break;
903 	case RXDMA_MONITOR_DST:
904 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
905 		htt_ring_type = HTT_HW_TO_SW_RING;
906 		break;
907 	case RXDMA_MONITOR_DESC:
908 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
909 		htt_ring_type = HTT_SW_TO_HW_RING;
910 		break;
911 	case RXDMA_DST:
912 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
913 		htt_ring_type = HTT_HW_TO_SW_RING;
914 		break;
915 
916 	default:
917 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
918 			"%s: Ring currently not supported", __func__);
		goto fail1;
920 	}
921 
922 	/*
923 	 * Set the length of the message.
924 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
925 	 * separately during the below call to qdf_nbuf_push_head.
926 	 * The contribution from the HTC header is added separately inside HTC.
927 	 */
	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Failed to expand head for SRING_SETUP msg",
			__func__);
		goto fail1;
	}
934 
935 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
936 
937 	/* rewind beyond alignment pad to get to the HTC header reserved area */
938 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
939 
940 	/* word 0 */
941 	*msg_word = 0;
942 	htt_logger_bufp = (uint8_t *)msg_word;
943 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
944 	target_pdev_id =
945 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
946 
947 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
948 			(htt_ring_type == HTT_HW_TO_SW_RING))
949 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
950 	else
951 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
952 
953 	dp_info("mac_id %d", mac_id);
954 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
955 	/* TODO: Discuss with FW on changing this to unique ID and using
956 	 * htt_ring_type to send the type of ring
957 	 */
958 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
959 
960 	/* word 1 */
961 	msg_word++;
962 	*msg_word = 0;
963 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
964 		srng_params.ring_base_paddr & 0xffffffff);
965 
966 	/* word 2 */
967 	msg_word++;
968 	*msg_word = 0;
969 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
970 		(uint64_t)srng_params.ring_base_paddr >> 32);
971 
972 	/* word 3 */
973 	msg_word++;
974 	*msg_word = 0;
975 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
976 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
977 		(ring_entry_size * srng_params.num_entries));
978 	dp_info("entry_size %d", ring_entry_size);
979 	dp_info("num_entries %d", srng_params.num_entries);
980 	dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
981 	if (htt_ring_type == HTT_SW_TO_HW_RING)
982 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
983 						*msg_word, 1);
984 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
985 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
986 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
987 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
988 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
989 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
990 
991 	/* word 4 */
992 	msg_word++;
993 	*msg_word = 0;
994 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
995 		hp_addr & 0xffffffff);
996 
997 	/* word 5 */
998 	msg_word++;
999 	*msg_word = 0;
1000 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
1001 		(uint64_t)hp_addr >> 32);
1002 
1003 	/* word 6 */
1004 	msg_word++;
1005 	*msg_word = 0;
1006 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
1007 		tp_addr & 0xffffffff);
1008 
1009 	/* word 7 */
1010 	msg_word++;
1011 	*msg_word = 0;
1012 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
1013 		(uint64_t)tp_addr >> 32);
1014 
1015 	/* word 8 */
1016 	msg_word++;
1017 	*msg_word = 0;
1018 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
1019 		srng_params.msi_addr & 0xffffffff);
1020 
1021 	/* word 9 */
1022 	msg_word++;
1023 	*msg_word = 0;
1024 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
1025 		(uint64_t)(srng_params.msi_addr) >> 32);
1026 
1027 	/* word 10 */
1028 	msg_word++;
1029 	*msg_word = 0;
1030 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
1031 		qdf_cpu_to_le32(srng_params.msi_data));
1032 
1033 	/* word 11 */
1034 	msg_word++;
1035 	*msg_word = 0;
1036 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
1037 		srng_params.intr_batch_cntr_thres_entries *
1038 		ring_entry_size);
1039 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
1040 		srng_params.intr_timer_thres_us >> 3);
1041 
1042 	/* word 12 */
1043 	msg_word++;
1044 	*msg_word = 0;
1045 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
1046 		/* TODO: Setting low threshold to 1/8th of ring size - see
1047 		 * if this needs to be configurable
1048 		 */
1049 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
1050 			srng_params.low_threshold);
1051 	}
1052 	/* "response_required" field should be set if a HTT response message is
1053 	 * required after setting up the ring.
1054 	 */
1055 	pkt = htt_htc_pkt_alloc(soc);
1056 	if (!pkt)
1057 		goto fail1;
1058 
1059 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1060 
1061 	SET_HTC_PACKET_INFO_TX(
1062 		&pkt->htc_pkt,
1063 		dp_htt_h2t_send_complete_free_netbuf,
1064 		qdf_nbuf_data(htt_msg),
1065 		qdf_nbuf_len(htt_msg),
1066 		soc->htc_endpoint,
1067 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1068 
1069 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1070 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
1071 				     htt_logger_bufp);
1072 
1073 	if (status != QDF_STATUS_SUCCESS) {
1074 		qdf_nbuf_free(htt_msg);
1075 		htt_htc_pkt_free(soc, pkt);
1076 	}
1077 
1078 	return status;
1079 
1080 fail1:
1081 	qdf_nbuf_free(htt_msg);
1082 fail0:
1083 	return QDF_STATUS_E_FAILURE;
1084 }
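
/*
 * Summary of the HTT_H2T_MSG_TYPE_SRING_SETUP word layout built above:
 *
 *	word 0:    msg type, pdev/mac id, ring type, ring id
 *	words 1-2: ring base paddr lo/hi
 *	word 3:    entry size, ring size, misc cfg swap flags
 *	words 4-5: head-offset remote base paddr lo/hi
 *	words 6-7: tail-offset remote base paddr lo/hi
 *	words 8-9: MSI addr lo/hi
 *	word 10:   MSI data
 *	word 11:   interrupt batch-counter / timer thresholds
 *	word 12:   low-threshold interrupt config
 */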
1085 
1086 #ifdef QCA_SUPPORT_FULL_MON
1087 /**
 * htt_h2t_full_mon_cfg() - Send full monitor configuration msg to FW
 *
 * @htt_soc: HTT Soc handle
 * @pdev_id: Radio id
 * @config: enable/disable configuration
1093  *
1094  * Return: Success when HTT message is sent, error on failure
1095  */
1096 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
1097 			 uint8_t pdev_id,
1098 			 enum dp_full_mon_config config)
1099 {
1100 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
1101 	struct dp_htt_htc_pkt *pkt;
1102 	qdf_nbuf_t htt_msg;
1103 	uint32_t *msg_word;
1104 	uint8_t *htt_logger_bufp;
1105 
1106 	htt_msg = qdf_nbuf_alloc(soc->osdev,
1107 				 HTT_MSG_BUF_SIZE(
1108 				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
1109 				 /* reserve room for the HTC header */
1110 				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
1111 				 4,
1112 				 TRUE);
1113 	if (!htt_msg)
1114 		return QDF_STATUS_E_FAILURE;
1115 
1116 	/*
1117 	 * Set the length of the message.
1118 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1119 	 * separately during the below call to qdf_nbuf_push_head.
1120 	 * The contribution from the HTC header is added separately inside HTC.
1121 	 */
	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to expand head for full monitor mode msg",
1125 			  __func__);
1126 		goto fail1;
1127 	}
1128 
1129 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1130 
1131 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1132 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1133 
1134 	/* word 0 */
1135 	*msg_word = 0;
1136 	htt_logger_bufp = (uint8_t *)msg_word;
1137 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
1138 	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
1139 			*msg_word, DP_SW2HW_MACID(pdev_id));
1140 
1141 	msg_word++;
1142 	*msg_word = 0;
1143 	/* word 1 */
1144 	if (config == DP_FULL_MON_ENABLE) {
1145 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
1146 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
1147 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
1148 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
1149 	} else if (config == DP_FULL_MON_DISABLE) {
		/* As per the MAC team's suggestion, while disabling full
		 * monitor mode, set the 'en' bit to true in the full monitor
		 * mode register.
		 */
1153 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
1154 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
1155 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
1156 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
1157 	}
1158 
1159 	pkt = htt_htc_pkt_alloc(soc);
1160 	if (!pkt) {
1161 		qdf_err("HTC packet allocation failed");
1162 		goto fail1;
1163 	}
1164 
1165 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1166 
1167 	SET_HTC_PACKET_INFO_TX(
1168 		&pkt->htc_pkt,
1169 		dp_htt_h2t_send_complete_free_netbuf,
1170 		qdf_nbuf_data(htt_msg),
1171 		qdf_nbuf_len(htt_msg),
1172 		soc->htc_endpoint,
1173 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1174 
1175 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1176 	qdf_info("config: %d", config);
	if (DP_HTT_SEND_HTC_PKT(soc, pkt,
				HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
				htt_logger_bufp) != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(htt_msg);
		htt_htc_pkt_free(soc, pkt);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
1180 fail1:
1181 	qdf_nbuf_free(htt_msg);
1182 	return QDF_STATUS_E_FAILURE;
1183 }
1184 #else
1185 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
1186 			 uint8_t pdev_id,
1187 			 enum dp_full_mon_config config)
1188 {
1189 	return 0;
1190 }
1191 
1192 #endif
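
/*
 * In the full monitor setup message above, word 1 packs the enable,
 * zero-MPDU and non-zero-MPDU filter bits plus a release-ring selector
 * (0x2). Note that disabling the feature still leaves the enable bit
 * set, per the in-line comment; only the MPDU filter bits are cleared.
 */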
1193 
1194 /*
1195  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
1196  * config message to target
1197  * @htt_soc:	HTT SOC handle
1198  * @pdev_id:	WIN- PDEV Id, MCL- mac id
 * @hal_ring_hdl:	Opaque HAL SRNG handle
 * @hal_ring_type:	SRNG ring type
 * @ring_buf_size:	SRNG buffer size
 * @htt_tlv_filter:	Rx SRNG TLV and filter setting
 *
 * Return: 0 on success; error code on failure
1204  */
1205 int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
1206 			hal_ring_handle_t hal_ring_hdl,
1207 			int hal_ring_type, int ring_buf_size,
1208 			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
1209 {
1210 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
1211 	struct dp_htt_htc_pkt *pkt;
1212 	qdf_nbuf_t htt_msg;
1213 	uint32_t *msg_word;
1214 	struct hal_srng_params srng_params;
1215 	uint32_t htt_ring_type, htt_ring_id;
1216 	uint32_t tlv_filter;
1217 	uint8_t *htt_logger_bufp;
1218 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
1219 	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
1220 	int target_pdev_id;
1221 	QDF_STATUS status;
1222 
1223 	htt_msg = qdf_nbuf_alloc(soc->osdev,
1224 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
1225 	/* reserve room for the HTC header */
1226 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
1227 	if (!htt_msg)
1228 		goto fail0;
1229 
1230 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
1231 
1232 	switch (hal_ring_type) {
1233 	case RXDMA_BUF:
1234 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
1235 		htt_ring_type = HTT_SW_TO_HW_RING;
1236 		break;
1237 	case RXDMA_MONITOR_BUF:
1238 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
1239 		htt_ring_type = HTT_SW_TO_HW_RING;
1240 		break;
1241 	case RXDMA_MONITOR_STATUS:
1242 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
1243 		htt_ring_type = HTT_SW_TO_HW_RING;
1244 		break;
1245 	case RXDMA_MONITOR_DST:
1246 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
1247 		htt_ring_type = HTT_HW_TO_SW_RING;
1248 		break;
1249 	case RXDMA_MONITOR_DESC:
1250 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
1251 		htt_ring_type = HTT_SW_TO_HW_RING;
1252 		break;
1253 	case RXDMA_DST:
1254 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
1255 		htt_ring_type = HTT_HW_TO_SW_RING;
1256 		break;
1257 
1258 	default:
1259 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1260 			"%s: Ring currently not supported", __func__);
1261 		goto fail1;
1262 	}
1263 
1264 	/*
1265 	 * Set the length of the message.
1266 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1267 	 * separately during the below call to qdf_nbuf_push_head.
1268 	 * The contribution from the HTC header is added separately inside HTC.
1269 	 */
1270 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
1271 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1272 			"%s: Failed to expand head for RX Ring Cfg msg",
1273 			__func__);
1274 		goto fail1; /* failure */
1275 	}
1276 
1277 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1278 
1279 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1280 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1281 
1282 	/* word 0 */
1283 	htt_logger_bufp = (uint8_t *)msg_word;
1284 	*msg_word = 0;
1285 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
1286 
	/*
	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1;
	 * SW_TO_SW and SW_TO_HW rings are unaffected by this.
	 */
1291 	target_pdev_id =
1292 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);
1293 
1294 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
1295 			htt_ring_type == HTT_SW_TO_HW_RING)
1296 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
1297 						      target_pdev_id);
1298 
1299 	/* TODO: Discuss with FW on changing this to unique ID and using
1300 	 * htt_ring_type to send the type of ring
1301 	 */
1302 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
1303 
1304 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
1305 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
1306 
1307 	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
1308 						htt_tlv_filter->offset_valid);
1309 
1310 	if (mon_drop_th > 0)
1311 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1312 								   1);
1313 	else
1314 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1315 								   0);
1316 
1317 	/* word 1 */
1318 	msg_word++;
1319 	*msg_word = 0;
1320 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
1321 		ring_buf_size);
1322 
1323 	/* word 2 */
1324 	msg_word++;
1325 	*msg_word = 0;
1326 
1327 	if (htt_tlv_filter->enable_fp) {
1328 		/* TYPE: MGMT */
1329 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1330 			FP, MGMT, 0000,
1331 			(htt_tlv_filter->fp_mgmt_filter &
1332 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1333 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1334 			FP, MGMT, 0001,
1335 			(htt_tlv_filter->fp_mgmt_filter &
1336 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1337 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1338 			FP, MGMT, 0010,
1339 			(htt_tlv_filter->fp_mgmt_filter &
1340 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1341 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1342 			FP, MGMT, 0011,
1343 			(htt_tlv_filter->fp_mgmt_filter &
1344 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1345 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1346 			FP, MGMT, 0100,
1347 			(htt_tlv_filter->fp_mgmt_filter &
1348 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1349 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1350 			FP, MGMT, 0101,
1351 			(htt_tlv_filter->fp_mgmt_filter &
1352 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1353 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1354 			FP, MGMT, 0110,
1355 			(htt_tlv_filter->fp_mgmt_filter &
1356 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1357 		/* reserved */
1358 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
1359 			MGMT, 0111,
1360 			(htt_tlv_filter->fp_mgmt_filter &
1361 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1362 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1363 			FP, MGMT, 1000,
1364 			(htt_tlv_filter->fp_mgmt_filter &
1365 			FILTER_MGMT_BEACON) ? 1 : 0);
1366 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1367 			FP, MGMT, 1001,
1368 			(htt_tlv_filter->fp_mgmt_filter &
1369 			FILTER_MGMT_ATIM) ? 1 : 0);
1370 	}
1371 
1372 	if (htt_tlv_filter->enable_md) {
		/* TYPE: MGMT */
1374 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1375 			MD, MGMT, 0000,
1376 			(htt_tlv_filter->md_mgmt_filter &
1377 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1378 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1379 			MD, MGMT, 0001,
1380 			(htt_tlv_filter->md_mgmt_filter &
1381 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1382 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1383 			MD, MGMT, 0010,
1384 			(htt_tlv_filter->md_mgmt_filter &
1385 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1386 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1387 			MD, MGMT, 0011,
1388 			(htt_tlv_filter->md_mgmt_filter &
1389 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1390 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1391 			MD, MGMT, 0100,
1392 			(htt_tlv_filter->md_mgmt_filter &
1393 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1394 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1395 			MD, MGMT, 0101,
1396 			(htt_tlv_filter->md_mgmt_filter &
1397 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1398 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1399 			MD, MGMT, 0110,
1400 			(htt_tlv_filter->md_mgmt_filter &
1401 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1402 		/* reserved */
1403 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1404 			MGMT, 0111,
1405 			(htt_tlv_filter->md_mgmt_filter &
1406 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1407 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1408 			MD, MGMT, 1000,
1409 			(htt_tlv_filter->md_mgmt_filter &
1410 			FILTER_MGMT_BEACON) ? 1 : 0);
1411 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1412 			MD, MGMT, 1001,
1413 			(htt_tlv_filter->md_mgmt_filter &
1414 			FILTER_MGMT_ATIM) ? 1 : 0);
1415 	}
1416 
1417 	if (htt_tlv_filter->enable_mo) {
1418 		/* TYPE: MGMT */
1419 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1420 			MO, MGMT, 0000,
1421 			(htt_tlv_filter->mo_mgmt_filter &
1422 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1423 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1424 			MO, MGMT, 0001,
1425 			(htt_tlv_filter->mo_mgmt_filter &
1426 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1427 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1428 			MO, MGMT, 0010,
1429 			(htt_tlv_filter->mo_mgmt_filter &
1430 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1431 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1432 			MO, MGMT, 0011,
1433 			(htt_tlv_filter->mo_mgmt_filter &
1434 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1435 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1436 			MO, MGMT, 0100,
1437 			(htt_tlv_filter->mo_mgmt_filter &
1438 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1439 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1440 			MO, MGMT, 0101,
1441 			(htt_tlv_filter->mo_mgmt_filter &
1442 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1443 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1444 			MO, MGMT, 0110,
1445 			(htt_tlv_filter->mo_mgmt_filter &
1446 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1447 		/* reserved */
1448 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1449 			MGMT, 0111,
1450 			(htt_tlv_filter->mo_mgmt_filter &
1451 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1452 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1453 			MO, MGMT, 1000,
1454 			(htt_tlv_filter->mo_mgmt_filter &
1455 			FILTER_MGMT_BEACON) ? 1 : 0);
1456 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1457 			MO, MGMT, 1001,
1458 			(htt_tlv_filter->mo_mgmt_filter &
1459 			FILTER_MGMT_ATIM) ? 1 : 0);
1460 	}
1461 
1462 	/* word 3 */
1463 	msg_word++;
1464 	*msg_word = 0;
1465 
1466 	if (htt_tlv_filter->enable_fp) {
1467 		/* TYPE: MGMT */
1468 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1469 			FP, MGMT, 1010,
1470 			(htt_tlv_filter->fp_mgmt_filter &
1471 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1472 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1473 			FP, MGMT, 1011,
1474 			(htt_tlv_filter->fp_mgmt_filter &
1475 			FILTER_MGMT_AUTH) ? 1 : 0);
1476 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1477 			FP, MGMT, 1100,
1478 			(htt_tlv_filter->fp_mgmt_filter &
1479 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1480 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1481 			FP, MGMT, 1101,
1482 			(htt_tlv_filter->fp_mgmt_filter &
1483 			FILTER_MGMT_ACTION) ? 1 : 0);
1484 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1485 			FP, MGMT, 1110,
1486 			(htt_tlv_filter->fp_mgmt_filter &
1487 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1488 		/* reserved*/
1489 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1490 			MGMT, 1111,
1491 			(htt_tlv_filter->fp_mgmt_filter &
1492 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1493 	}
1494 
1495 	if (htt_tlv_filter->enable_md) {
		/* TYPE: MGMT */
1497 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1498 			MD, MGMT, 1010,
1499 			(htt_tlv_filter->md_mgmt_filter &
1500 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1501 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1502 			MD, MGMT, 1011,
1503 			(htt_tlv_filter->md_mgmt_filter &
1504 			FILTER_MGMT_AUTH) ? 1 : 0);
1505 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1506 			MD, MGMT, 1100,
1507 			(htt_tlv_filter->md_mgmt_filter &
1508 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1509 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1510 			MD, MGMT, 1101,
1511 			(htt_tlv_filter->md_mgmt_filter &
1512 			FILTER_MGMT_ACTION) ? 1 : 0);
1513 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1514 			MD, MGMT, 1110,
1515 			(htt_tlv_filter->md_mgmt_filter &
1516 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1517 	}
1518 
1519 	if (htt_tlv_filter->enable_mo) {
1520 		/* TYPE: MGMT */
1521 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1522 			MO, MGMT, 1010,
1523 			(htt_tlv_filter->mo_mgmt_filter &
1524 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1525 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1526 			MO, MGMT, 1011,
1527 			(htt_tlv_filter->mo_mgmt_filter &
1528 			FILTER_MGMT_AUTH) ? 1 : 0);
1529 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1530 			MO, MGMT, 1100,
1531 			(htt_tlv_filter->mo_mgmt_filter &
1532 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1533 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1534 			MO, MGMT, 1101,
1535 			(htt_tlv_filter->mo_mgmt_filter &
1536 			FILTER_MGMT_ACTION) ? 1 : 0);
1537 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1538 			MO, MGMT, 1110,
1539 			(htt_tlv_filter->mo_mgmt_filter &
1540 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1541 		/* reserved*/
1542 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1543 			MGMT, 1111,
1544 			(htt_tlv_filter->mo_mgmt_filter &
1545 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1546 	}
1547 
1548 	/* word 4 */
1549 	msg_word++;
1550 	*msg_word = 0;
1551 
1552 	if (htt_tlv_filter->enable_fp) {
1553 		/* TYPE: CTRL */
1554 		/* reserved */
1555 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1556 			CTRL, 0000,
1557 			(htt_tlv_filter->fp_ctrl_filter &
1558 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1559 		/* reserved */
1560 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1561 			CTRL, 0001,
1562 			(htt_tlv_filter->fp_ctrl_filter &
1563 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1564 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1565 			CTRL, 0010,
1566 			(htt_tlv_filter->fp_ctrl_filter &
1567 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1568 		/* reserved */
1569 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1570 			CTRL, 0011,
1571 			(htt_tlv_filter->fp_ctrl_filter &
1572 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1573 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1574 			CTRL, 0100,
1575 			(htt_tlv_filter->fp_ctrl_filter &
1576 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1577 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1578 			CTRL, 0101,
1579 			(htt_tlv_filter->fp_ctrl_filter &
1580 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1581 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1582 			CTRL, 0110,
1583 			(htt_tlv_filter->fp_ctrl_filter &
1584 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1585 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1586 			CTRL, 0111,
1587 			(htt_tlv_filter->fp_ctrl_filter &
1588 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1589 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1590 			CTRL, 1000,
1591 			(htt_tlv_filter->fp_ctrl_filter &
1592 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1593 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1594 			CTRL, 1001,
1595 			(htt_tlv_filter->fp_ctrl_filter &
1596 			FILTER_CTRL_BA) ? 1 : 0);
1597 	}
1598 
1599 	if (htt_tlv_filter->enable_md) {
1600 		/* TYPE: CTRL */
1601 		/* reserved */
1602 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1603 			CTRL, 0000,
1604 			(htt_tlv_filter->md_ctrl_filter &
1605 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1606 		/* reserved */
1607 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1608 			CTRL, 0001,
1609 			(htt_tlv_filter->md_ctrl_filter &
1610 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1611 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1612 			CTRL, 0010,
1613 			(htt_tlv_filter->md_ctrl_filter &
1614 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1615 		/* reserved */
1616 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1617 			CTRL, 0011,
1618 			(htt_tlv_filter->md_ctrl_filter &
1619 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1620 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1621 			CTRL, 0100,
1622 			(htt_tlv_filter->md_ctrl_filter &
1623 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1624 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1625 			CTRL, 0101,
1626 			(htt_tlv_filter->md_ctrl_filter &
1627 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1628 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1629 			CTRL, 0110,
1630 			(htt_tlv_filter->md_ctrl_filter &
1631 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1632 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1633 			CTRL, 0111,
1634 			(htt_tlv_filter->md_ctrl_filter &
1635 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1636 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1637 			CTRL, 1000,
1638 			(htt_tlv_filter->md_ctrl_filter &
1639 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1640 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1641 			CTRL, 1001,
1642 			(htt_tlv_filter->md_ctrl_filter &
1643 			FILTER_CTRL_BA) ? 1 : 0);
1644 	}
1645 
1646 	if (htt_tlv_filter->enable_mo) {
1647 		/* TYPE: CTRL */
1648 		/* reserved */
1649 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1650 			CTRL, 0000,
1651 			(htt_tlv_filter->mo_ctrl_filter &
1652 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1653 		/* reserved */
1654 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1655 			CTRL, 0001,
1656 			(htt_tlv_filter->mo_ctrl_filter &
1657 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1658 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1659 			CTRL, 0010,
1660 			(htt_tlv_filter->mo_ctrl_filter &
1661 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1662 		/* reserved */
1663 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1664 			CTRL, 0011,
1665 			(htt_tlv_filter->mo_ctrl_filter &
1666 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1667 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1668 			CTRL, 0100,
1669 			(htt_tlv_filter->mo_ctrl_filter &
1670 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1671 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1672 			CTRL, 0101,
1673 			(htt_tlv_filter->mo_ctrl_filter &
1674 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1675 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1676 			CTRL, 0110,
1677 			(htt_tlv_filter->mo_ctrl_filter &
1678 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1679 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1680 			CTRL, 0111,
1681 			(htt_tlv_filter->mo_ctrl_filter &
1682 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1683 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1684 			CTRL, 1000,
1685 			(htt_tlv_filter->mo_ctrl_filter &
1686 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1687 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1688 			CTRL, 1001,
1689 			(htt_tlv_filter->mo_ctrl_filter &
1690 			FILTER_CTRL_BA) ? 1 : 0);
1691 	}
1692 
1693 	/* word 5 */
1694 	msg_word++;
1695 	*msg_word = 0;
1696 	if (htt_tlv_filter->enable_fp) {
1697 		/* TYPE: CTRL */
1698 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1699 			CTRL, 1010,
1700 			(htt_tlv_filter->fp_ctrl_filter &
1701 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1702 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1703 			CTRL, 1011,
1704 			(htt_tlv_filter->fp_ctrl_filter &
1705 			FILTER_CTRL_RTS) ? 1 : 0);
1706 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1707 			CTRL, 1100,
1708 			(htt_tlv_filter->fp_ctrl_filter &
1709 			FILTER_CTRL_CTS) ? 1 : 0);
1710 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1711 			CTRL, 1101,
1712 			(htt_tlv_filter->fp_ctrl_filter &
1713 			FILTER_CTRL_ACK) ? 1 : 0);
1714 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1715 			CTRL, 1110,
1716 			(htt_tlv_filter->fp_ctrl_filter &
1717 			FILTER_CTRL_CFEND) ? 1 : 0);
1718 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1719 			CTRL, 1111,
1720 			(htt_tlv_filter->fp_ctrl_filter &
1721 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1722 		/* TYPE: DATA */
1723 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1724 			DATA, MCAST,
1725 			(htt_tlv_filter->fp_data_filter &
1726 			FILTER_DATA_MCAST) ? 1 : 0);
1727 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1728 			DATA, UCAST,
1729 			(htt_tlv_filter->fp_data_filter &
1730 			FILTER_DATA_UCAST) ? 1 : 0);
1731 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1732 			DATA, NULL,
1733 			(htt_tlv_filter->fp_data_filter &
1734 			FILTER_DATA_NULL) ? 1 : 0);
1735 	}
1736 
1737 	if (htt_tlv_filter->enable_md) {
1738 		/* TYPE: CTRL */
1739 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1740 			CTRL, 1010,
1741 			(htt_tlv_filter->md_ctrl_filter &
1742 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1743 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1744 			CTRL, 1011,
1745 			(htt_tlv_filter->md_ctrl_filter &
1746 			FILTER_CTRL_RTS) ? 1 : 0);
1747 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1748 			CTRL, 1100,
1749 			(htt_tlv_filter->md_ctrl_filter &
1750 			FILTER_CTRL_CTS) ? 1 : 0);
1751 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1752 			CTRL, 1101,
1753 			(htt_tlv_filter->md_ctrl_filter &
1754 			FILTER_CTRL_ACK) ? 1 : 0);
1755 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1756 			CTRL, 1110,
1757 			(htt_tlv_filter->md_ctrl_filter &
1758 			FILTER_CTRL_CFEND) ? 1 : 0);
1759 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1760 			CTRL, 1111,
1761 			(htt_tlv_filter->md_ctrl_filter &
1762 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1763 		/* TYPE: DATA */
1764 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1765 			DATA, MCAST,
1766 			(htt_tlv_filter->md_data_filter &
1767 			FILTER_DATA_MCAST) ? 1 : 0);
1768 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1769 			DATA, UCAST,
1770 			(htt_tlv_filter->md_data_filter &
1771 			FILTER_DATA_UCAST) ? 1 : 0);
1772 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1773 			DATA, NULL,
1774 			(htt_tlv_filter->md_data_filter &
1775 			FILTER_DATA_NULL) ? 1 : 0);
1776 	}
1777 
1778 	if (htt_tlv_filter->enable_mo) {
1779 		/* TYPE: CTRL */
1780 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1781 			CTRL, 1010,
1782 			(htt_tlv_filter->mo_ctrl_filter &
1783 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1784 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1785 			CTRL, 1011,
1786 			(htt_tlv_filter->mo_ctrl_filter &
1787 			FILTER_CTRL_RTS) ? 1 : 0);
1788 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1789 			CTRL, 1100,
1790 			(htt_tlv_filter->mo_ctrl_filter &
1791 			FILTER_CTRL_CTS) ? 1 : 0);
1792 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1793 			CTRL, 1101,
1794 			(htt_tlv_filter->mo_ctrl_filter &
1795 			FILTER_CTRL_ACK) ? 1 : 0);
1796 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1797 			CTRL, 1110,
1798 			(htt_tlv_filter->mo_ctrl_filter &
1799 			FILTER_CTRL_CFEND) ? 1 : 0);
1800 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1801 			CTRL, 1111,
1802 			(htt_tlv_filter->mo_ctrl_filter &
1803 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1804 		/* TYPE: DATA */
1805 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1806 			DATA, MCAST,
1807 			(htt_tlv_filter->mo_data_filter &
1808 			FILTER_DATA_MCAST) ? 1 : 0);
1809 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1810 			DATA, UCAST,
1811 			(htt_tlv_filter->mo_data_filter &
1812 			FILTER_DATA_UCAST) ? 1 : 0);
1813 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1814 			DATA, NULL,
1815 			(htt_tlv_filter->mo_data_filter &
1816 			FILTER_DATA_NULL) ? 1 : 0);
1817 	}
1818 
1819 	/* word 6 */
1820 	msg_word++;
1821 	*msg_word = 0;
1822 	tlv_filter = 0;
1823 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1824 		htt_tlv_filter->mpdu_start);
1825 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1826 		htt_tlv_filter->msdu_start);
1827 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1828 		htt_tlv_filter->packet);
1829 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1830 		htt_tlv_filter->msdu_end);
1831 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1832 		htt_tlv_filter->mpdu_end);
1833 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1834 		htt_tlv_filter->packet_header);
1835 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1836 		htt_tlv_filter->attention);
1837 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1838 		htt_tlv_filter->ppdu_start);
1839 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1840 		htt_tlv_filter->ppdu_end);
1841 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1842 		htt_tlv_filter->ppdu_end_user_stats);
1843 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1844 		PPDU_END_USER_STATS_EXT,
1845 		htt_tlv_filter->ppdu_end_user_stats_ext);
1846 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1847 		htt_tlv_filter->ppdu_end_status_done);
1848 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter */
1849 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1850 		htt_tlv_filter->header_per_msdu);
1851 
1852 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1853 
1854 	msg_word++;
1855 	*msg_word = 0;
1856 	if (htt_tlv_filter->offset_valid) {
1857 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1858 					htt_tlv_filter->rx_packet_offset);
1859 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1860 					htt_tlv_filter->rx_header_offset);
1861 
1862 		msg_word++;
1863 		*msg_word = 0;
1864 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1865 					htt_tlv_filter->rx_mpdu_end_offset);
1866 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1867 					htt_tlv_filter->rx_mpdu_start_offset);
1868 
1869 		msg_word++;
1870 		*msg_word = 0;
1871 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1872 					htt_tlv_filter->rx_msdu_end_offset);
1873 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1874 					htt_tlv_filter->rx_msdu_start_offset);
1875 
1876 		msg_word++;
1877 		*msg_word = 0;
1878 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1879 					htt_tlv_filter->rx_attn_offset);
1880 		msg_word++;
1881 		*msg_word = 0;
1882 	} else {
1883 		msg_word += 4;
1884 		*msg_word = 0;
1885 	}
1886 
1887 	if (mon_drop_th > 0)
1888 		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1889 								mon_drop_th);
1890 
1891 	/* "response_required" field should be set if an HTT response message is
1892 	 * required after setting up the ring.
1893 	 */
1894 	pkt = htt_htc_pkt_alloc(soc);
1895 	if (!pkt)
1896 		goto fail1;
1897 
1898 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1899 
1900 	SET_HTC_PACKET_INFO_TX(
1901 		&pkt->htc_pkt,
1902 		dp_htt_h2t_send_complete_free_netbuf,
1903 		qdf_nbuf_data(htt_msg),
1904 		qdf_nbuf_len(htt_msg),
1905 		soc->htc_endpoint,
1906 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1907 
1908 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1909 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
1910 				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1911 				     htt_logger_bufp);
1912 
1913 	if (status != QDF_STATUS_SUCCESS) {
1914 		qdf_nbuf_free(htt_msg);
1915 		htt_htc_pkt_free(soc, pkt);
1916 	}
1917 
1918 	return status;
1919 
1920 fail1:
1921 	qdf_nbuf_free(htt_msg);
1922 fail0:
1923 	return QDF_STATUS_E_FAILURE;
1924 }
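
/*
 * A minimal standalone sketch of the per-subtype enable packing that the
 * FLAG2/FLAG3 runs above perform through the
 * htt_rx_ring_pkt_enable_subtype_set() macros. The bit layout chosen here
 * (mode-major, 10 ctrl subtypes per mode) is an illustrative assumption;
 * the real positions come from the HTT filter TLV flag definitions in htt.h.
 */
#include <stdint.h>

enum pkt_mode { MODE_FP, MODE_MD, MODE_MO };	/* filter-pass modes */

/* Set or clear one enable bit for a (mode, ctrl subtype) pair. */
static inline uint32_t subtype_enable_set(uint32_t word, enum pkt_mode mode,
					  unsigned int subtype, int enable)
{
	/* subtypes 0000b..1001b above -> 10 bits per mode, 30 bits total */
	unsigned int shift = (unsigned int)mode * 10 + subtype;

	return enable ? (word | (1u << shift)) : (word & ~(1u << shift));
}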
1925 
1926 #if defined(HTT_STATS_ENABLE)
1927 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1928 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1930 {
1931 	uint32_t pdev_id;
1932 	uint32_t *msg_word = NULL;
1933 	uint32_t msg_remain_len = 0;
1934 
1935 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1936 
1937 	/*COOKIE MSB*/
1938 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1939 
1940 	/* stats message length + 16-byte HTT header */
1941 	msg_remain_len = qdf_min(htt_stats->msg_len + HTT_HEADER_LEN,
1942 				(uint32_t)DP_EXT_MSG_LENGTH);
1943 
1944 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1945 			msg_word,  msg_remain_len,
1946 			WDI_NO_VAL, pdev_id);
1947 
1948 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1949 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1950 	}
1951 	/* Needs to be freed here as the WDI handler will
1952 	 * make a copy of the pkt to send the data to the application
1953 	 */
1954 	qdf_nbuf_free(htt_msg);
1955 	return QDF_STATUS_SUCCESS;
1956 }
1957 #else
1958 static inline QDF_STATUS
1959 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1960 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
1961 {
1962 	return QDF_STATUS_E_NOSUPPORT;
1963 }
1964 #endif
1965 
1966 #ifdef HTT_STATS_DEBUGFS_SUPPORT
1967 /* dp_htt_stats_dbgfs_send_msg() - Function to send HTT stats data to the upper layer
1968  * @pdev: dp pdev handle
1969  * @msg_word: HTT msg
1970  * @msg_len: Length of HTT msg sent
1971  *
1972  * Return: none
1973  */
1974 static inline void
1975 dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
1976 			    uint32_t msg_len)
1977 {
1978 	struct htt_dbgfs_cfg dbgfs_cfg;
1979 	int done = 0;
1980 
1981 	/* send 5th word of HTT msg to upper layer */
1982 	dbgfs_cfg.msg_word = (msg_word + 4);
1983 	dbgfs_cfg.m = pdev->dbgfs_cfg->m;
1984 
1985 	/* stats message length + 16-byte HTT header */
1986 	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);
1987 
1988 	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
1989 		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
1990 							     (msg_len - HTT_HEADER_LEN));
1991 
1992 	/* Get TLV Done bit from 4th msg word */
1993 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
1994 	if (done) {
1995 		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
1996 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1997 				  "Failed to set event for debugfs htt stats");
1998 	}
1999 }
2000 #else
2001 static inline void
2002 dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
2003 			    uint32_t msg_len)
2004 {
2005 }
2006 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
2007 
2008 /**
2009  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
2010  * @htt_stats: htt stats info
2011  *
2012  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
2013  * contains sub messages which are identified by a TLV header.
2014  * In this function we will process the stream of T2H messages and read all the
2015  * TLV contained in the message.
2016  *
2017  * The following cases have been taken care of:
2018  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
2019  *		In this case the buffer will contain multiple tlvs.
2020  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
2021  *		Only one tlv will be contained in the HTT message and this tag
2022  *		will extend onto the next buffer.
2023  * Case 3: When the buffer is the continuation of the previous message
2024  * Case 4: TLV length is 0, which indicates the end of the message
 * (a standalone sketch of this reassembly follows the function body)
2025  *
2026  * return: void
2027  */
2028 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
2029 					struct dp_soc *soc)
2030 {
2031 	htt_tlv_tag_t tlv_type = 0xff;
2032 	qdf_nbuf_t htt_msg = NULL;
2033 	uint32_t *msg_word;
2034 	uint8_t *tlv_buf_head = NULL;
2035 	uint8_t *tlv_buf_tail = NULL;
2036 	uint32_t msg_remain_len = 0;
2037 	uint32_t tlv_remain_len = 0;
2038 	uint32_t *tlv_start;
2039 	int cookie_val = 0;
2040 	int cookie_msb = 0;
2041 	int pdev_id;
2042 	bool copy_stats = false;
2043 	struct dp_pdev *pdev;
2044 
2045 	/* Process node in the HTT message queue */
2046 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
2047 		!= NULL) {
2048 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
2049 		cookie_val = *(msg_word + 1);
2050 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
2051 					*(msg_word +
2052 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
2053 
2054 		if (cookie_val) {
2055 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
2056 					== QDF_STATUS_SUCCESS) {
2057 				continue;
2058 			}
2059 		}
2060 
2061 		cookie_msb = *(msg_word + 2);
2062 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
2063 		pdev = soc->pdev_list[pdev_id];
2064 
2065 		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
2066 			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
2067 						    htt_stats->msg_len);
2068 			qdf_nbuf_free(htt_msg);
2069 			continue;
2070 		}
2071 
2072 		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
2073 			copy_stats = true;
2074 
2075 		/* read 5th word */
2076 		msg_word = msg_word + 4;
2077 		msg_remain_len = qdf_min(htt_stats->msg_len,
2078 				(uint32_t) DP_EXT_MSG_LENGTH);
2079 		/* Keep processing the message till the remaining length is 0 */
2080 		while (msg_remain_len) {
2081 			/*
2082 			 * if message is not a continuation of previous message
2083 			 * read the tlv type and tlv length
2084 			 */
2085 			if (!tlv_buf_head) {
2086 				tlv_type = HTT_STATS_TLV_TAG_GET(
2087 						*msg_word);
2088 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
2089 						*msg_word);
2090 			}
2091 
2092 			if (tlv_remain_len == 0) {
2093 				msg_remain_len = 0;
2094 
2095 				if (tlv_buf_head) {
2096 					qdf_mem_free(tlv_buf_head);
2097 					tlv_buf_head = NULL;
2098 					tlv_buf_tail = NULL;
2099 				}
2100 
2101 				goto error;
2102 			}
2103 
2104 			if (!tlv_buf_head)
2105 				tlv_remain_len += HTT_TLV_HDR_LEN;
2106 
2107 			if (tlv_remain_len <= msg_remain_len) {
2108 				/* Case 3 */
2109 				if (tlv_buf_head) {
2110 					qdf_mem_copy(tlv_buf_tail,
2111 							(uint8_t *)msg_word,
2112 							tlv_remain_len);
2113 					tlv_start = (uint32_t *)tlv_buf_head;
2114 				} else {
2115 					/* Case 1 */
2116 					tlv_start = msg_word;
2117 				}
2118 
2119 				if (copy_stats)
2120 					dp_htt_stats_copy_tag(pdev,
2121 							      tlv_type,
2122 							      tlv_start);
2123 				else
2124 					dp_htt_stats_print_tag(pdev,
2125 							       tlv_type,
2126 							       tlv_start);
2127 
2128 				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
2129 				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
2130 					dp_peer_update_inactive_time(pdev,
2131 								     tlv_type,
2132 								     tlv_start);
2133 
2134 				msg_remain_len -= tlv_remain_len;
2135 
2136 				msg_word = (uint32_t *)
2137 					(((uint8_t *)msg_word) +
2138 					tlv_remain_len);
2139 
2140 				tlv_remain_len = 0;
2141 
2142 				if (tlv_buf_head) {
2143 					qdf_mem_free(tlv_buf_head);
2144 					tlv_buf_head = NULL;
2145 					tlv_buf_tail = NULL;
2146 				}
2147 
2148 			} else { /* tlv_remain_len > msg_remain_len */
2149 				/* Case 2 & 3 */
2150 				if (!tlv_buf_head) {
2151 					tlv_buf_head = qdf_mem_malloc(
2152 							tlv_remain_len);
2153 
2154 					if (!tlv_buf_head) {
2155 						QDF_TRACE(QDF_MODULE_ID_TXRX,
2156 								QDF_TRACE_LEVEL_ERROR,
2157 								"Alloc failed");
2158 						goto error;
2159 					}
2160 
2161 					tlv_buf_tail = tlv_buf_head;
2162 				}
2163 
2164 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
2165 						msg_remain_len);
2166 				tlv_remain_len -= msg_remain_len;
2167 				tlv_buf_tail += msg_remain_len;
2168 			}
2169 		}
2170 
2171 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
2172 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
2173 		}
2174 
2175 		qdf_nbuf_free(htt_msg);
2176 	}
2177 	return;
2178 
2179 error:
2180 	qdf_nbuf_free(htt_msg);
2181 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
2182 			!= NULL)
2183 		qdf_nbuf_free(htt_msg);
2184 }
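
/*
 * A standalone sketch of the cross-buffer TLV reassembly described in the
 * comment block above: a TLV that extends past the end of one T2H buffer
 * (Case 2) has its prefix staged in a scratch buffer, and the remainder is
 * appended from the next buffer (Case 3). The struct and helper names are
 * illustrative assumptions, not driver APIs.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct tlv_reasm {
	uint8_t *head;		/* scratch buffer holding the partial TLV */
	uint8_t *tail;		/* append position within the scratch buffer */
	uint32_t remain;	/* TLV bytes still expected */
};

/* Feed one message fragment; returns 1 when the TLV is complete, -1 on OOM. */
static int tlv_reasm_feed(struct tlv_reasm *r, const uint8_t *buf,
			  uint32_t len, uint32_t tlv_len)
{
	if (!r->head) {				/* first fragment of this TLV */
		r->head = malloc(tlv_len);
		if (!r->head)
			return -1;
		r->tail = r->head;
		r->remain = tlv_len;
	}

	if (len >= r->remain) {			/* completing fragment */
		memcpy(r->tail, buf, r->remain);
		return 1;			/* caller processes and frees head */
	}

	memcpy(r->tail, buf, len);		/* partial fragment, keep staging */
	r->tail += len;
	r->remain -= len;
	return 0;
}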
2185 
2186 void htt_t2h_stats_handler(void *context)
2187 {
2188 	struct dp_soc *soc = (struct dp_soc *)context;
2189 	struct htt_stats_context htt_stats;
2190 	uint32_t *msg_word;
2191 	qdf_nbuf_t htt_msg = NULL;
2192 	uint8_t done;
2193 	uint32_t rem_stats;
2194 
2195 	if (!soc) {
2196 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2197 			  "soc is NULL");
2198 		return;
2199 	}
2200 
2201 	if (!qdf_atomic_read(&soc->cmn_init_done)) {
2202 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2203 			  "soc: 0x%pK, init_done: %d", soc,
2204 			  qdf_atomic_read(&soc->cmn_init_done));
2205 		return;
2206 	}
2207 
2208 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
2209 	qdf_nbuf_queue_init(&htt_stats.msg);
2210 
2211 	/* pull one completed stats from soc->htt_stats.msg and process it */
2212 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2213 	if (!soc->htt_stats.num_stats) {
2214 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
2215 		return;
2216 	}
2217 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
2218 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
2219 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
2220 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2221 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
2222 		/*
2223 		 * Done bit signifies that this is the last T2H buffer in the
2224 		 * stream of HTT EXT STATS message
2225 		 */
2226 		if (done)
2227 			break;
2228 	}
2229 	rem_stats = --soc->htt_stats.num_stats;
2230 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2231 
2232 	/* If there are more stats to process, schedule the stats work again.
2233 	 * Scheduling prior to processing htt_stats queues the work with an
2234 	 * early index
2235 	 */
2236 	if (rem_stats)
2237 		qdf_sched_work(0, &soc->htt_stats.work);
2238 
2239 	dp_process_htt_stat_msg(&htt_stats, soc);
2240 }
2241 
2242 /*
2243  * dp_get_ppdu_info_user_index: Find or allocate the per-user descriptor
2244  * index for a PPDU when a new peer id arrives in that PPDU
2245  * @pdev: DP pdev handle
2246  * @peer_id: peer unique identifier
2247  * @ppdu_info: per ppdu tlv structure
2248  *
2249  * return: user index to be populated
2250  */
2251 #ifdef FEATURE_PERPKT_INFO
2252 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
2253 						uint16_t peer_id,
2254 						struct ppdu_info *ppdu_info)
2255 {
2256 	uint8_t user_index = 0;
2257 	struct cdp_tx_completion_ppdu *ppdu_desc;
2258 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2259 
2260 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2261 
2262 	while ((user_index + 1) <= ppdu_info->last_user) {
2263 		ppdu_user_desc = &ppdu_desc->user[user_index];
2264 		if (ppdu_user_desc->peer_id != peer_id) {
2265 			user_index++;
2266 			continue;
2267 		} else {
2268 			/* Max users possible is 8 so user array index should
2269 			 * not exceed 7
2270 			 */
2271 			qdf_assert_always(user_index <= (ppdu_desc->max_users - 1));
2272 			return user_index;
2273 		}
2274 	}
2275 
2276 	ppdu_info->last_user++;
2277 	/* Max users possible is 8 so last user should not exceed 8 */
2278 	qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users);
2279 	return ppdu_info->last_user - 1;
2280 }
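
/*
 * A standalone sketch of the search-or-append pattern that
 * dp_get_ppdu_info_user_index() implements above: scan the already-claimed
 * user slots for a matching peer_id, otherwise claim the next free slot.
 * The fixed 8-entry table mirrors the "max users possible is 8" invariant;
 * the slot struct is an illustrative assumption.
 */
#include <stdint.h>
#include <assert.h>

#define MAX_USERS 8

struct user_slot { uint16_t peer_id; };

static uint8_t user_index_get(struct user_slot *slots, uint8_t *last_user,
			      uint16_t peer_id)
{
	uint8_t i;

	for (i = 0; i < *last_user; i++)	/* existing entry for this peer? */
		if (slots[i].peer_id == peer_id)
			return i;

	assert(*last_user < MAX_USERS);		/* claim a new slot */
	slots[*last_user].peer_id = peer_id;
	return (*last_user)++;
}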
2281 
2282 /*
2283  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
2284  * pdev: DP pdev handle
2285  * @pdev: DP pdev handle
2286  * @ppdu_info: per ppdu tlv structure
2287  *
2288  * return:void
2289  */
2290 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
2291 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
2292 {
2293 	uint16_t frame_type;
2294 	uint16_t frame_ctrl;
2295 	uint16_t freq;
2296 	struct dp_soc *soc = NULL;
2297 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2298 	uint64_t ppdu_start_timestamp;
2299 	uint32_t *start_tag_buf;
2300 
2301 	start_tag_buf = tag_buf;
2302 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2303 
2304 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2305 
2306 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
2307 	ppdu_info->sched_cmdid =
2308 		HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
2309 	ppdu_desc->num_users =
2310 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
2311 
2312 	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
2313 
2314 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
2315 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
2316 	ppdu_desc->htt_frame_type = frame_type;
2317 
2318 	frame_ctrl = ppdu_desc->frame_ctrl;
2319 
2320 	ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
2321 
2322 	switch (frame_type) {
2323 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
2324 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
2325 	case HTT_STATS_FTYPE_SGEN_QOS_NULL:
2326 		/*
2327 		 * for management packets, the frame type comes as DATA_SU;
2328 		 * we need to check frame_ctrl before setting frame_type
2329 		 */
2330 		if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
2331 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2332 		else
2333 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
2334 	break;
2335 	case HTT_STATS_FTYPE_SGEN_MU_BAR:
2336 	case HTT_STATS_FTYPE_SGEN_BAR:
2337 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
2338 	break;
2339 	default:
2340 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2341 	break;
2342 	}
2343 
2344 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
2345 	ppdu_desc->tx_duration = *tag_buf;
2346 
2347 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
2348 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
2349 
2350 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
2351 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
2352 	if (freq != ppdu_desc->channel) {
2353 		soc = pdev->soc;
2354 		ppdu_desc->channel = freq;
2355 		pdev->operating_channel.freq = freq;
2356 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
2357 			pdev->operating_channel.num =
2358 			    soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
2359 								 pdev->pdev_id,
2360 								 freq);
2361 
2362 		if (soc && soc->cdp_soc.ol_ops->freq_to_band)
2363 			pdev->operating_channel.band =
2364 			       soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
2365 								 pdev->pdev_id,
2366 								 freq);
2367 	}
2368 
2369 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
2370 
2371 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
2372 	ppdu_desc->phy_ppdu_tx_time_us =
2373 		HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf);
2374 	ppdu_desc->beam_change =
2375 		HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
2376 	ppdu_desc->doppler =
2377 		HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf);
2378 	ppdu_desc->spatial_reuse =
2379 		HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf);
2380 
2381 	dp_tx_capture_htt_frame_counter(pdev, frame_type);
2382 
2383 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
2384 	ppdu_start_timestamp = *tag_buf;
2385 	ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
2386 					     HTT_SHIFT_UPPER_TIMESTAMP) &
2387 					    HTT_MASK_UPPER_TIMESTAMP);
2388 
2389 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2390 					ppdu_desc->tx_duration;
2391 	/* Ack timestamp is the same as the end timestamp */
2392 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
2393 
2397 	ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
2398 	ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
2399 	ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
2403 }
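
/*
 * A standalone sketch of the 64-bit PPDU timestamp assembly performed above
 * from the START_TSTMP_L32_US and START_TSTMP_U32_US words, using the same
 * shift-and-mask idea as HTT_SHIFT_UPPER_TIMESTAMP and
 * HTT_MASK_UPPER_TIMESTAMP.
 */
#include <stdint.h>

static uint64_t ppdu_tstamp_assemble(uint32_t lo_us, uint32_t hi_us)
{
	uint64_t ts = lo_us;	/* lower 32 bits first */

	/* e.g. lo = 0x1234, hi = 0x1 -> 0x0000000100001234 */
	ts |= ((uint64_t)hi_us << 32) & 0xFFFFFFFF00000000ULL;
	return ts;
}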
2404 
2405 /*
2406  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
 * @pdev: DP pdev handle
2407  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
2408  * @ppdu_info: per ppdu tlv structure
2409  *
2410  * return:void
2411  */
2412 static void dp_process_ppdu_stats_user_common_tlv(
2413 		struct dp_pdev *pdev, uint32_t *tag_buf,
2414 		struct ppdu_info *ppdu_info)
2415 {
2416 	uint16_t peer_id;
2417 	struct cdp_tx_completion_ppdu *ppdu_desc;
2418 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2419 	uint8_t curr_user_index = 0;
2420 	struct dp_peer *peer;
2421 	struct dp_vdev *vdev;
2422 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2423 
2424 	ppdu_desc =
2425 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2426 
2427 	tag_buf++;
2428 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2429 
2430 	curr_user_index =
2431 		dp_get_ppdu_info_user_index(pdev,
2432 					    peer_id, ppdu_info);
2433 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2434 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2435 
2436 	ppdu_desc->vdev_id =
2437 		HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
2438 
2439 	ppdu_user_desc->peer_id = peer_id;
2440 
2441 	tag_buf++;
2442 
2443 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
2444 		ppdu_user_desc->delayed_ba = 1;
2445 		ppdu_desc->delayed_ba = 1;
2446 	}
2447 
2448 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
2449 		ppdu_user_desc->is_mcast = true;
2450 		ppdu_user_desc->mpdu_tried_mcast =
2451 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
2452 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
2453 	} else {
2454 		ppdu_user_desc->mpdu_tried_ucast =
2455 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
2456 	}
2457 
2458 	tag_buf++;
2459 
2460 	ppdu_user_desc->qos_ctrl =
2461 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
2462 	ppdu_user_desc->frame_ctrl =
2463 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
2464 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
2465 
2466 	if (ppdu_user_desc->delayed_ba)
2467 		ppdu_user_desc->mpdu_success = 0;
2468 
2469 	tag_buf += 3;
2470 
2471 	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
2472 		ppdu_user_desc->ppdu_cookie =
2473 			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
2474 		ppdu_user_desc->is_ppdu_cookie_valid = 1;
2475 	}
2476 
2477 	/* returning early would leave the other fields unpopulated */
2478 	if (peer_id == DP_SCAN_PEER_ID) {
2479 		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
2480 					     DP_MOD_ID_TX_PPDU_STATS);
2481 		if (!vdev)
2482 			return;
2483 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
2484 			     QDF_MAC_ADDR_SIZE);
2485 		dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS);
2486 	} else {
2487 		peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
2488 					     DP_MOD_ID_TX_PPDU_STATS);
2489 		if (!peer) {
2490 			/*
2491 			 * fw sends a peer_id which is about to be removed, but
2492 			 * it was already removed on the host.
2493 			 * e.g. for disassoc, fw sends ppdu stats
2494 			 * with a peer_id equal to the previously associated
2495 			 * peer's peer_id, but that peer was already removed
2496 			 */
2497 			vdev = dp_vdev_get_ref_by_id(pdev->soc,
2498 						     ppdu_desc->vdev_id,
2499 						     DP_MOD_ID_TX_PPDU_STATS);
2500 			if (!vdev)
2501 				return;
2502 			qdf_mem_copy(ppdu_user_desc->mac_addr,
2503 				     vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2504 			dp_vdev_unref_delete(pdev->soc, vdev,
2505 					     DP_MOD_ID_TX_PPDU_STATS);
2506 			return;
2507 		}
2508 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2509 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2510 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
2511 	}
2512 }
2513 
2514 
2515 /**
2516  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
2517  * @pdev: DP pdev handle
2518  * @tag_buf: T2H message buffer carrying the user rate TLV
2519  * @ppdu_info: per ppdu tlv structure
2520  *
2521  * return:void
2522  */
2523 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
2524 		uint32_t *tag_buf,
2525 		struct ppdu_info *ppdu_info)
2526 {
2527 	uint16_t peer_id;
2528 	struct cdp_tx_completion_ppdu *ppdu_desc;
2529 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2530 	uint8_t curr_user_index = 0;
2531 	struct dp_vdev *vdev;
2532 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2533 
2534 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2535 
2536 	tag_buf++;
2537 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2538 
2539 	curr_user_index =
2540 		dp_get_ppdu_info_user_index(pdev,
2541 					    peer_id, ppdu_info);
2542 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2543 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2544 	if (peer_id == DP_SCAN_PEER_ID) {
2545 		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
2546 					     DP_MOD_ID_TX_PPDU_STATS);
2547 		if (!vdev)
2548 			return;
2549 		dp_vdev_unref_delete(pdev->soc, vdev,
2550 				     DP_MOD_ID_TX_PPDU_STATS);
2551 	}
2552 	ppdu_user_desc->peer_id = peer_id;
2553 
2554 	ppdu_user_desc->tid =
2555 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
2556 
2557 	tag_buf += 1;
2558 
2559 	ppdu_user_desc->user_pos =
2560 		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
2561 	ppdu_user_desc->mu_group_id =
2562 		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
2563 
2564 	tag_buf += 1;
2565 
2566 	ppdu_user_desc->ru_start =
2567 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
2568 	ppdu_user_desc->ru_tones =
2569 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
2570 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
2571 	ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones;
2572 
2573 	tag_buf += 2;
2574 
2575 	ppdu_user_desc->ppdu_type =
2576 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
2577 
2578 	tag_buf++;
2579 	ppdu_user_desc->tx_rate = *tag_buf;
2580 
2581 	ppdu_user_desc->ltf_size =
2582 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
2583 	ppdu_user_desc->stbc =
2584 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
2585 	ppdu_user_desc->he_re =
2586 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
2587 	ppdu_user_desc->txbf =
2588 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
2589 	ppdu_user_desc->bw =
2590 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
2591 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
2592 	ppdu_desc->usr_nss_sum += ppdu_user_desc->nss;
2593 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
2594 	ppdu_user_desc->preamble =
2595 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
2596 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
2597 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
2598 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
2599 }
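
/*
 * A standalone sketch of the RU tone-count derivation used above: the user
 * rate TLV carries RU start and end indices, and the tone count is the
 * inclusive span between them.
 */
#include <stdint.h>

static uint16_t ru_tones_from_span(uint16_t ru_start, uint16_t ru_end)
{
	/* inclusive range: start = 0, end = 36 yields 37 tones */
	return (uint16_t)(ru_end - ru_start + 1);
}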
2600 
2601 /*
2602  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
2603  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2604  * @pdev: DP PDEV handle
2605  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2606  * @ppdu_info: per ppdu tlv structure
2607  *
2608  * return:void
2609  */
2610 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2611 		struct dp_pdev *pdev, uint32_t *tag_buf,
2612 		struct ppdu_info *ppdu_info)
2613 {
2614 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
2615 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
2616 
2617 	struct cdp_tx_completion_ppdu *ppdu_desc;
2618 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2619 	uint8_t curr_user_index = 0;
2620 	uint16_t peer_id;
2621 	uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
2622 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2623 
2624 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2625 
2626 	tag_buf++;
2627 
2628 	peer_id =
2629 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2630 
2631 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2632 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2633 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2634 	ppdu_user_desc->peer_id = peer_id;
2635 
2636 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2637 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2638 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2639 
2640 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2641 						   (void *)ppdu_user_desc,
2642 						   ppdu_info->ppdu_id,
2643 						   size);
2644 }
2645 
2646 /*
2647  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
2648  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2649  * @pdev: DP PDEV handle
2650  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2651  * @ppdu_info: per ppdu tlv structure
2652  *
2653  * return:void
2654  */
2655 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2656 		struct dp_pdev *pdev, uint32_t *tag_buf,
2657 		struct ppdu_info *ppdu_info)
2658 {
2659 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
2660 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
2661 
2662 	struct cdp_tx_completion_ppdu *ppdu_desc;
2663 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2664 	uint8_t curr_user_index = 0;
2665 	uint16_t peer_id;
2666 	uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
2667 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2668 
2669 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2670 
2671 	tag_buf++;
2672 
2673 	peer_id =
2674 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2675 
2676 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2677 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2678 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2679 	ppdu_user_desc->peer_id = peer_id;
2680 
2681 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2682 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2683 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2684 
2685 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2686 						   (void *)ppdu_user_desc,
2687 						   ppdu_info->ppdu_id,
2688 						   size);
2689 }
2690 
2691 /*
2692  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
2693  * htt_ppdu_stats_user_cmpltn_common_tlv
2694  * @pdev: DP PDEV handle
2695  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
2696  * @ppdu_info: per ppdu tlv structure
2697  *
2698  * return:void
2699  */
2700 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
2701 		struct dp_pdev *pdev, uint32_t *tag_buf,
2702 		struct ppdu_info *ppdu_info)
2703 {
2704 	uint16_t peer_id;
2705 	struct cdp_tx_completion_ppdu *ppdu_desc;
2706 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2707 	uint8_t curr_user_index = 0;
2708 	uint8_t bw_iter;
2709 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
2710 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
2711 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2712 
2713 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2714 
2715 	tag_buf++;
2716 	peer_id =
2717 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
2718 
2719 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2720 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2721 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2722 	ppdu_user_desc->peer_id = peer_id;
2723 
2724 	ppdu_user_desc->completion_status =
2725 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
2726 				*tag_buf);
2727 
2728 	ppdu_user_desc->tid =
2729 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
2730 
2732 	tag_buf++;
2733 	if (qdf_likely(ppdu_user_desc->completion_status ==
2734 			HTT_PPDU_STATS_USER_STATUS_OK)) {
2735 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
2736 		ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi;
2737 		ppdu_user_desc->ack_rssi_valid = 1;
2738 	} else {
2739 		ppdu_user_desc->ack_rssi_valid = 0;
2740 	}
2741 
2742 	tag_buf++;
2743 
2744 	ppdu_user_desc->mpdu_success =
2745 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
2746 
2747 	ppdu_user_desc->mpdu_failed =
2748 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
2749 						ppdu_user_desc->mpdu_success;
2750 
2751 	tag_buf++;
2752 
2753 	ppdu_user_desc->long_retries =
2754 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2755 
2756 	ppdu_user_desc->short_retries =
2757 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
2758 	ppdu_user_desc->retry_msdus =
2759 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
2760 
2761 	ppdu_user_desc->is_ampdu =
2762 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
2763 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
2764 
2765 	ppdu_desc->resp_type =
2766 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
2767 	ppdu_desc->mprot_type =
2768 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
2769 	ppdu_desc->rts_success =
2770 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
2771 	ppdu_desc->rts_failure =
2772 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);
2773 	ppdu_user_desc->pream_punct =
2774 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf);
2775 
2776 	ppdu_info->compltn_common_tlv++;
2777 
2778 	/*
2779 	 * MU BAR may send a request to n users, but we may receive acks from
2780 	 * only m users. To count the number of users that responded, we keep a
2781 	 * separate per-PPDU counter, bar_num_users, that is incremented for every
2782 	 * htt_ppdu_stats_user_cmpltn_common_tlv
2783 	 */
2784 	ppdu_desc->bar_num_users++;
2785 
2786 	tag_buf++;
2787 	for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
2788 		ppdu_user_desc->rssi_chain[bw_iter] =
2789 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
2790 		tag_buf++;
2791 	}
2792 
2793 	ppdu_user_desc->sa_tx_antenna =
2794 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);
2795 
2796 	tag_buf++;
2797 	ppdu_user_desc->sa_is_training =
2798 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
2799 	if (ppdu_user_desc->sa_is_training) {
2800 		ppdu_user_desc->sa_goodput =
2801 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
2802 	}
2803 
2804 	tag_buf++;
2805 	for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
2806 		ppdu_user_desc->sa_max_rates[bw_iter] =
2807 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
2808 	}
2809 
2810 	tag_buf += CDP_NUM_SA_BW;
2811 	ppdu_user_desc->current_rate_per =
2812 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
2813 }
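
/*
 * A standalone sketch of the derived completion counters computed above:
 * failed MPDUs are tried minus successful, and retried MSDUs are the sum of
 * long and short retries. The struct is an illustrative assumption.
 */
#include <stdint.h>

struct cmpltn_counts {
	uint32_t mpdu_failed;
	uint32_t retry_msdus;
};

static struct cmpltn_counts derive_counts(uint32_t mpdu_tried,
					  uint32_t mpdu_success,
					  uint32_t long_retries,
					  uint32_t short_retries)
{
	struct cmpltn_counts c;

	c.mpdu_failed = mpdu_tried - mpdu_success;	/* tried includes successes */
	c.retry_msdus = long_retries + short_retries;
	return c;
}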
2814 
2815 /*
2816  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2817  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2818  * @pdev: DP PDEV handle
2819  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2820  * @ppdu_info: per ppdu tlv structure
2821  *
2822  * return:void
2823  */
2824 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2825 		struct dp_pdev *pdev, uint32_t *tag_buf,
2826 		struct ppdu_info *ppdu_info)
2827 {
2828 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2829 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2830 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2831 	struct cdp_tx_completion_ppdu *ppdu_desc;
2832 	uint8_t curr_user_index = 0;
2833 	uint16_t peer_id;
2834 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2835 
2836 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2837 
2838 	tag_buf++;
2839 
2840 	peer_id =
2841 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2842 
2843 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2844 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2845 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2846 	ppdu_user_desc->peer_id = peer_id;
2847 
2848 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2849 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2850 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2851 	ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
2852 }
2853 
2854 /*
2855  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2856  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2857  * @pdev: DP PDEV handle
2858  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2859  * @ppdu_info: per ppdu tlv structure
2860  *
2861  * return:void
2862  */
2863 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2864 		struct dp_pdev *pdev, uint32_t *tag_buf,
2865 		struct ppdu_info *ppdu_info)
2866 {
2867 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2868 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2869 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2870 	struct cdp_tx_completion_ppdu *ppdu_desc;
2871 	uint8_t curr_user_index = 0;
2872 	uint16_t peer_id;
2873 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2874 
2875 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2876 
2877 	tag_buf++;
2878 
2879 	peer_id =
2880 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2881 
2882 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2883 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2884 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2885 	ppdu_user_desc->peer_id = peer_id;
2886 
2887 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2888 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2889 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2890 	ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
2891 }
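
/*
 * A standalone sketch of how an MPDU sequence number indexes into the BA
 * bitmaps copied above; ba_size is the bitmap width in bits
 * (CDP_BA_*_BIT_MAP_SIZE_DWORDS * 32). The helper is an illustrative
 * assumption, not a driver API.
 */
#include <stdint.h>

static int ba_bitmap_is_acked(const uint32_t *bitmap, uint32_t ba_size,
			      uint16_t ba_seq_no, uint16_t seq)
{
	/* modular distance in the 12-bit 802.11 sequence space */
	uint16_t off = (uint16_t)(seq - ba_seq_no) & 0xFFF;

	if (off >= ba_size)
		return 0;
	return (bitmap[off / 32] >> (off % 32)) & 1;
}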
2892 
2893 /*
2894  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2895  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2896  * @pdev: DP PDEV handle
2897  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2898  * @ppdu_info: per ppdu tlv structure
2899  *
2900  * return:void
2901  */
2902 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2903 		struct dp_pdev *pdev, uint32_t *tag_buf,
2904 		struct ppdu_info *ppdu_info)
2905 {
2906 	uint16_t peer_id;
2907 	struct cdp_tx_completion_ppdu *ppdu_desc;
2908 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2909 	uint8_t curr_user_index = 0;
2910 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2911 
2912 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2913 
2914 	tag_buf += 2;
2915 	peer_id =
2916 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2917 
2918 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2919 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2920 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2921 	if (!ppdu_user_desc->ack_ba_tlv) {
2922 		ppdu_user_desc->ack_ba_tlv = 1;
2923 	} else {
2924 		pdev->stats.ack_ba_comes_twice++;
2925 		return;
2926 	}
2927 
2928 	ppdu_user_desc->peer_id = peer_id;
2929 
2930 	tag_buf++;
2931 	/* do not update ppdu_desc->tid from this TLV */
2932 	ppdu_user_desc->num_mpdu =
2933 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2934 
2935 	ppdu_user_desc->num_msdu =
2936 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2937 
2938 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2939 
2940 	tag_buf++;
2941 	ppdu_user_desc->start_seq =
2942 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
2943 			*tag_buf);
2944 
2945 	tag_buf++;
2946 	ppdu_user_desc->success_bytes = *tag_buf;
2947 
2948 	/* increase ack ba tlv counter on successful mpdu */
2949 	if (ppdu_user_desc->num_mpdu)
2950 		ppdu_info->ack_ba_tlv++;
2951 
2952 	if (ppdu_user_desc->ba_size == 0) {
2953 		ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
2954 		ppdu_user_desc->ba_bitmap[0] = 1;
2955 		ppdu_user_desc->ba_size = 1;
2956 	}
2957 }
2958 
2959 /*
2960  * dp_process_ppdu_stats_user_common_array_tlv: Process
2961  * htt_ppdu_stats_user_common_array_tlv
2962  * @pdev: DP PDEV handle
2963  * @tag_buf: buffer containing the htt_ppdu_stats_user_common_array_tlv
2964  * @ppdu_info: per ppdu tlv structure
2965  *
2966  * return:void
2967  */
2968 static void dp_process_ppdu_stats_user_common_array_tlv(
2969 		struct dp_pdev *pdev, uint32_t *tag_buf,
2970 		struct ppdu_info *ppdu_info)
2971 {
2972 	uint32_t peer_id;
2973 	struct cdp_tx_completion_ppdu *ppdu_desc;
2974 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2975 	uint8_t curr_user_index = 0;
2976 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2977 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2978 
2979 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2980 
2981 	tag_buf++;
2982 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2983 	tag_buf += 3;
2984 	peer_id =
2985 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2986 
2987 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
2988 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2989 			"Invalid peer");
2990 		return;
2991 	}
2992 
2993 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2994 
2995 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2996 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2997 
2998 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2999 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
3000 
3001 	tag_buf++;
3002 
3003 	ppdu_user_desc->success_msdus =
3004 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
3005 	ppdu_user_desc->retry_msdus =
3006 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
3007 	tag_buf++;
3008 	ppdu_user_desc->failed_msdus =
3009 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
3010 }
3011 
3012 /*
3013  * dp_process_ppdu_stats_user_compltn_flush_tlv: Process
3014  * htt_ppdu_stats_flush_tlv
3015  * @pdev: DP PDEV handle
3016  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
3017  * @ppdu_info: per ppdu tlv structure
3018  *
3019  * return:void
3020  */
3021 static void
3022 dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
3023 					     uint32_t *tag_buf,
3024 					     struct ppdu_info *ppdu_info)
3025 {
3026 	struct cdp_tx_completion_ppdu *ppdu_desc;
3027 	uint32_t peer_id;
3028 	uint8_t tid;
3029 	struct dp_peer *peer;
3030 
3031 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3032 				qdf_nbuf_data(ppdu_info->nbuf);
3033 	ppdu_desc->is_flush = 1;
3034 
3035 	tag_buf++;
3036 	ppdu_desc->drop_reason = *tag_buf;
3037 
3038 	tag_buf++;
3039 	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
3040 	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
3041 	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);
3042 
3043 	tag_buf++;
3044 	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
3045 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
3046 
3047 	ppdu_desc->num_users = 1;
3048 	ppdu_desc->user[0].peer_id = peer_id;
3049 	ppdu_desc->user[0].tid = tid;
3050 
3051 	ppdu_desc->queue_type =
3052 			HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);
3053 
3054 	peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
3055 				     DP_MOD_ID_TX_PPDU_STATS);
3056 	if (!peer)
3057 		goto add_ppdu_to_sched_list;
3058 
3059 	if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
3060 		DP_STATS_INC(peer,
3061 			     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
3062 			     ppdu_desc->num_msdu);
3063 	}
3064 
3065 	dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3066 
3067 add_ppdu_to_sched_list:
3068 	ppdu_info->done = 1;
3069 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
3070 	pdev->list_depth--;
3071 	TAILQ_INSERT_TAIL(&pdev->sched_comp_ppdu_list, ppdu_info,
3072 			  ppdu_info_list_elem);
3073 	pdev->sched_comp_list_depth++;
3074 }
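
/*
 * A standalone sketch of the list migration performed at the end of the
 * flush handler above (and again in the sched-cmd-status handler below):
 * a node moves from the pending list to the completed list with both depth
 * counters adjusted. It uses the same BSD sys/queue.h TAILQ macros; the
 * node type is an illustrative assumption.
 */
#include <sys/queue.h>

struct node {
	TAILQ_ENTRY(node) elem;
};
TAILQ_HEAD(node_list, node);

static void move_to_completed(struct node_list *pending, int *pending_depth,
			      struct node_list *completed, int *completed_depth,
			      struct node *n)
{
	TAILQ_REMOVE(pending, n, elem);
	(*pending_depth)--;
	TAILQ_INSERT_TAIL(completed, n, elem);
	(*completed_depth)++;
}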
3075 
3076 /**
3077  * dp_process_ppdu_stats_sch_cmd_status_tlv: Process the schedule command
3078  * status tlv; the tlv buffer itself is not parsed here.
3079  * @pdev: DP PDEV handle
3080  * @ppdu_info: per ppdu tlv structure
3081  *
3082  * return:void
3083  */
3084 static void
3085 dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev,
3086 					 struct ppdu_info *ppdu_info)
3087 {
3088 	struct cdp_tx_completion_ppdu *ppdu_desc;
3089 	struct dp_peer *peer;
3090 	uint8_t num_users;
3091 	uint8_t i;
3092 
3093 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3094 				qdf_nbuf_data(ppdu_info->nbuf);
3095 
3096 	num_users = ppdu_desc->bar_num_users;
3097 
3098 	for (i = 0; i < num_users; i++) {
3099 		if (ppdu_desc->user[i].user_pos == 0) {
3100 			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
3101 				/* update phy mode for bar frame */
3102 				ppdu_desc->phy_mode =
3103 					ppdu_desc->user[i].preamble;
3104 				ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs;
3105 				break;
3106 			}
3107 			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) {
3108 				ppdu_desc->frame_ctrl =
3109 					ppdu_desc->user[i].frame_ctrl;
3110 				break;
3111 			}
3112 		}
3113 	}
3114 
3115 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
3116 	    ppdu_desc->delayed_ba) {
3117 		qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
3118 
3119 		for (i = 0; i < ppdu_desc->num_users; i++) {
3120 			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
3121 			uint64_t start_tsf;
3122 			uint64_t end_tsf;
3123 			uint32_t ppdu_id;
3124 
3125 			ppdu_id = ppdu_desc->ppdu_id;
3126 			peer = dp_peer_get_ref_by_id
3127 				(pdev->soc, ppdu_desc->user[i].peer_id,
3128 				 DP_MOD_ID_TX_PPDU_STATS);
3129 			/*
3130 			 * This check is to make sure the peer is not deleted
3131 			 * after processing the TLVs.
3132 			 */
3133 			if (!peer)
3134 				continue;
3135 
3136 			delay_ppdu = &peer->delayed_ba_ppdu_stats;
3137 			start_tsf = ppdu_desc->ppdu_start_timestamp;
3138 			end_tsf = ppdu_desc->ppdu_end_timestamp;
3139 			/*
3140 			 * save delayed ba user info
3141 			 */
3142 			if (ppdu_desc->user[i].delayed_ba) {
3143 				dp_peer_copy_delay_stats(peer,
3144 							 &ppdu_desc->user[i],
3145 							 ppdu_id);
3146 				peer->last_delayed_ba_ppduid = ppdu_id;
3147 				delay_ppdu->ppdu_start_timestamp = start_tsf;
3148 				delay_ppdu->ppdu_end_timestamp = end_tsf;
3149 			}
3150 			ppdu_desc->user[i].peer_last_delayed_ba =
3151 				peer->last_delayed_ba;
3152 
3153 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3154 
3155 			if (ppdu_desc->user[i].delayed_ba &&
3156 			    !ppdu_desc->user[i].debug_copied) {
3157 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3158 					  QDF_TRACE_LEVEL_INFO_MED,
3159 					  "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n",
3160 					  __func__, __LINE__,
3161 					  ppdu_desc->ppdu_id,
3162 					  ppdu_desc->bar_ppdu_id,
3163 					  ppdu_desc->num_users,
3164 					  i,
3165 					  ppdu_desc->htt_frame_type);
3166 			}
3167 		}
3168 	}
3169 
3170 	/*
3171 	 * when the frame type is BAR and STATS_COMMON_TLV is set,
3172 	 * copy the stored peer delayed-BA info to the BAR status
3173 	 */
3174 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
3175 		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
3176 			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
3177 			uint64_t start_tsf;
3178 			uint64_t end_tsf;
3179 
3180 			peer = dp_peer_get_ref_by_id
3181 				(pdev->soc,
3182 				 ppdu_desc->user[i].peer_id,
3183 				 DP_MOD_ID_TX_PPDU_STATS);
3184 			/*
3185 			 * This check is to make sure the peer is not deleted
3186 			 * after processing the TLVs.
3187 			 */
3188 			if (!peer)
3189 				continue;
3190 
3191 			if (ppdu_desc->user[i].completion_status !=
3192 			    HTT_PPDU_STATS_USER_STATUS_OK) {
3193 				dp_peer_unref_delete(peer,
3194 						     DP_MOD_ID_TX_PPDU_STATS);
3195 				continue;
3196 			}
3197 
3198 			delay_ppdu = &peer->delayed_ba_ppdu_stats;
3199 			start_tsf = delay_ppdu->ppdu_start_timestamp;
3200 			end_tsf = delay_ppdu->ppdu_end_timestamp;
3201 
3202 			if (peer->last_delayed_ba) {
3203 				dp_peer_copy_stats_to_bar(peer,
3204 							  &ppdu_desc->user[i]);
3205 				ppdu_desc->ppdu_id =
3206 					peer->last_delayed_ba_ppduid;
3207 				ppdu_desc->ppdu_start_timestamp = start_tsf;
3208 				ppdu_desc->ppdu_end_timestamp = end_tsf;
3209 			}
3210 			ppdu_desc->user[i].peer_last_delayed_ba =
3211 				peer->last_delayed_ba;
3212 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3213 		}
3214 	}
3215 
3216 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
3217 	pdev->list_depth--;
3218 	TAILQ_INSERT_TAIL(&pdev->sched_comp_ppdu_list, ppdu_info,
3219 			  ppdu_info_list_elem);
3220 	pdev->sched_comp_list_depth++;
3221 }
3222 
3223 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3224 /*
3225  * dp_deliver_mgmt_frm: Deliver a tx management frame to the upper layer
3226  * @pdev: DP PDEV handle
3227  * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3228  *
3229  * return: void
3230  */
3231 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
3232 {
3233 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3234 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
3235 				     nbuf, HTT_INVALID_PEER,
3236 				     WDI_NO_VAL, pdev->pdev_id);
3237 	} else {
3238 		if (!pdev->bpr_enable)
3239 			qdf_nbuf_free(nbuf);
3240 	}
3241 }
3242 #endif
3243 
3244 /*
3245  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
3246  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3247  * @pdev: DP PDEV handle
3248  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3249  * @length: tlv_length
3250  *
3251  * return: QDF_STATUS_SUCCESS if the nbuf is to be freed by the caller
3252  */
3253 static QDF_STATUS
3254 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
3255 					      qdf_nbuf_t tag_buf,
3256 					      uint32_t ppdu_id)
3257 {
3258 	uint32_t *nbuf_ptr;
3259 	uint8_t trim_size;
3260 	size_t head_size;
3261 	struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
3262 	uint32_t *msg_word;
3263 	uint32_t tsf_hdr;
3264 
3265 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
3266 	    (!pdev->bpr_enable) && (!pdev->tx_capture_enabled))
3267 		return QDF_STATUS_SUCCESS;
3268 
3269 	/*
3270 	 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
3271 	 */
3272 	msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
3273 	msg_word = msg_word + 2;
3274 	tsf_hdr = *msg_word;
3275 
3276 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
3277 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
3278 		      qdf_nbuf_data(tag_buf));
3279 
3280 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
3281 		return QDF_STATUS_SUCCESS;
3282 
3283 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
3284 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
3285 
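	/*
	 * At this point the nbuf has been trimmed to exactly the mgmt frame
	 * payload of mgmt_buf_len bytes; a completion header (ppdu_id or
	 * cdp_tx_mgmt_comp_info) is pushed in front of it below.
	 */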
3286 	if (pdev->tx_capture_enabled) {
3287 		head_size = sizeof(struct cdp_tx_mgmt_comp_info);
3288 		if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
3289 			qdf_err("Failed to get headroom, need %zu have %u\n",
3290 				head_size, qdf_nbuf_headroom(tag_buf));
3291 			qdf_assert_always(0);
3292 			return QDF_STATUS_E_NOMEM;
3293 		}
3294 		ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
3295 					qdf_nbuf_push_head(tag_buf, head_size);
3296 		qdf_assert_always(ptr_mgmt_comp_info);
3297 		ptr_mgmt_comp_info->ppdu_id = ppdu_id;
3298 		ptr_mgmt_comp_info->is_sgen_pkt = true;
3299 		ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
3300 	} else {
3301 		head_size = sizeof(ppdu_id);
3302 		nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
3303 		*nbuf_ptr = ppdu_id;
3304 	}
3305 
3306 	if (pdev->bpr_enable) {
3307 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
3308 				     tag_buf, HTT_INVALID_PEER,
3309 				     WDI_NO_VAL, pdev->pdev_id);
3310 	}
3311 
3312 	dp_deliver_mgmt_frm(pdev, tag_buf);
3313 
3314 	return QDF_STATUS_E_ALREADY;
3315 }
3316 
3317 /**
3318  * dp_validate_fix_ppdu_tlv(): Function to validate the length of PPDU
3319  * dp_validate_fix_ppdu_tlv(): Function to validate the length of a PPDU TLV
3320  *
3321  * If the TLV length sent as part of a PPDU TLV is less than the expected
3322  * size, i.e. the size of the corresponding data structure, pad the
3323  * remaining bytes with zeros and continue processing the TLVs
3324  * @pdev: DP pdev handle
3325  * @tag_buf: TLV buffer
3326  * @tlv_expected_size: Expected size of Tag
3327  * @tlv_len: TLV length received from FW
3328  *
3329  * Return: Pointer to updated TLV
3330  */
3331 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
3332 						 uint32_t *tag_buf,
3333 						 uint16_t tlv_expected_size,
3334 						 uint16_t tlv_len)
3335 {
3336 	uint32_t *tlv_desc = tag_buf;
3337 
3338 	qdf_assert_always(tlv_len != 0);
3339 
3340 	if (tlv_len < tlv_expected_size) {
3341 		qdf_mem_zero(pdev->ppdu_tlv_buf, tlv_expected_size);
3342 		qdf_mem_copy(pdev->ppdu_tlv_buf, tag_buf, tlv_len);
3343 		tlv_desc = pdev->ppdu_tlv_buf;
3344 	}
3345 
3346 	return tlv_desc;
3347 }
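
/*
 * Illustrative example for dp_validate_fix_ppdu_tlv (hypothetical sizes):
 * if the FW sends tlv_len = 40 for a tag whose host structure is 48 bytes,
 * the 40 received bytes are copied into pdev->ppdu_tlv_buf, the trailing
 * 8 bytes remain zero, and the returned pointer aliases ppdu_tlv_buf
 * instead of the original tag_buf.
 */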
3348 
3349 /**
3350  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
3351  * @pdev: DP pdev handle
3352  * @tag_buf: TLV buffer
3353  * @tlv_len: length of tlv
3354  * @ppdu_info: per ppdu tlv structure
3355  *
3356  * return: void
3357  */
3358 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
3359 		uint32_t tlv_len, struct ppdu_info *ppdu_info)
3360 {
3361 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3362 	uint16_t tlv_expected_size;
3363 	uint32_t *tlv_desc;
3364 
3365 	switch (tlv_type) {
3366 	case HTT_PPDU_STATS_COMMON_TLV:
3367 		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
3368 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3369 						    tlv_expected_size, tlv_len);
3370 		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
3371 		break;
3372 	case HTT_PPDU_STATS_USR_COMMON_TLV:
3373 		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
3374 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3375 						    tlv_expected_size, tlv_len);
3376 		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
3377 						      ppdu_info);
3378 		break;
3379 	case HTT_PPDU_STATS_USR_RATE_TLV:
3380 		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
3381 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3382 						    tlv_expected_size, tlv_len);
3383 		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
3384 						    ppdu_info);
3385 		break;
3386 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
3387 		tlv_expected_size =
3388 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
3389 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3390 						    tlv_expected_size, tlv_len);
3391 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
3392 				pdev, tlv_desc, ppdu_info);
3393 		break;
3394 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
3395 		tlv_expected_size =
3396 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
3397 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3398 						    tlv_expected_size, tlv_len);
3399 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
3400 				pdev, tlv_desc, ppdu_info);
3401 		break;
3402 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
3403 		tlv_expected_size =
3404 			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
3405 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3406 						    tlv_expected_size, tlv_len);
3407 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
3408 				pdev, tlv_desc, ppdu_info);
3409 		break;
3410 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
3411 		tlv_expected_size =
3412 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
3413 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3414 						    tlv_expected_size, tlv_len);
3415 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
3416 				pdev, tlv_desc, ppdu_info);
3417 		break;
3418 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
3419 		tlv_expected_size =
3420 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
3421 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3422 						    tlv_expected_size, tlv_len);
3423 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
3424 				pdev, tlv_desc, ppdu_info);
3425 		break;
3426 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
3427 		tlv_expected_size =
3428 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
3429 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3430 						    tlv_expected_size, tlv_len);
3431 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
3432 				pdev, tlv_desc, ppdu_info);
3433 		break;
3434 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
3435 		tlv_expected_size =
3436 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
3437 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3438 						    tlv_expected_size, tlv_len);
3439 		dp_process_ppdu_stats_user_common_array_tlv(
3440 				pdev, tlv_desc, ppdu_info);
3441 		break;
3442 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
3443 		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
3444 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3445 						    tlv_expected_size, tlv_len);
3446 		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
3447 							     ppdu_info);
3448 		break;
3449 	case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV:
3450 		dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info);
3451 		break;
3452 	default:
3453 		break;
3454 	}
3455 }
3456 
3457 #ifdef WLAN_ATF_ENABLE
3458 static void
3459 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
3460 				     struct cdp_tx_completion_ppdu *ppdu_desc,
3461 				     struct cdp_tx_completion_ppdu_user *user)
3462 {
3463 	uint32_t nss_ru_width_sum = 0;
3464 
3465 	if (!pdev || !ppdu_desc || !user)
3466 		return;
3467 
3468 	if (!pdev->dp_atf_stats_enable)
3469 		return;
3470 
3471 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA)
3472 		return;
3473 
3474 	nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
3475 	if (!nss_ru_width_sum)
3476 		nss_ru_width_sum = 1;
3477 
3478 	/**
3479 	 * For an SU-MIMO PPDU, the phy Tx time is the same for the single user.
3480 	 * For MU-MIMO, the phy Tx time is calculated per user as below:
3481 	 *     user phy tx time =
3482 	 *           Entire PPDU duration * MU Ratio * OFDMA Ratio
3483 	 *     MU Ratio = usr_nss / Sum_of_nss_of_all_users
3484 	 *     OFDMA Ratio = usr_ru_width / Sum_of_ru_width_of_all_users
3485 	 *     usr_ru_width = ru_end - ru_start + 1
3486 	 */
3487 	if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
3488 		user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us;
3489 	} else {
3490 		user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us *
3491 				user->nss * user->ru_tones) / nss_ru_width_sum;
3492 	}
3493 }
3494 #else
3495 static void
3496 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
3497 				     struct cdp_tx_completion_ppdu *ppdu_desc,
3498 				     struct cdp_tx_completion_ppdu_user *user)
3499 {
3500 }
3501 #endif
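
/*
 * Worked example for the MU apportioning above (hypothetical numbers):
 * a 2-user MU PPDU with phy_ppdu_tx_time_us = 400, user0 (nss = 2,
 * ru_tones = 106) and user1 (nss = 1, ru_tones = 106) gives
 * usr_nss_sum = 3, usr_ru_tones_sum = 212 and nss_ru_width_sum = 636,
 * so user0 gets 400 * 2 * 106 / 636 = 133 us and user1 gets
 * 400 * 1 * 106 / 636 = 66 us (integer division).
 */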
3502 
3503 /**
3504  * dp_ppdu_desc_user_stats_update(): Function to update TX user stats
3505  * @pdev: DP pdev handle
3506  * @ppdu_info: per PPDU TLV descriptor
3507  *
3508  * return: void
3509  */
3510 void
3511 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
3512 			       struct ppdu_info *ppdu_info)
3513 {
3514 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3515 	struct dp_peer *peer = NULL;
3516 	uint32_t tlv_bitmap_expected;
3517 	uint32_t tlv_bitmap_default;
3518 	uint16_t i;
3519 	uint32_t num_users;
3520 
3521 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3522 		qdf_nbuf_data(ppdu_info->nbuf);
3523 
3524 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR)
3525 		ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
3526 
3527 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
3528 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
3529 	    pdev->tx_capture_enabled) {
3530 		if (ppdu_info->is_ampdu)
3531 			tlv_bitmap_expected =
3532 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
3533 					ppdu_info->tlv_bitmap);
3534 	}
3535 
3536 	tlv_bitmap_default = tlv_bitmap_expected;
3537 
3538 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
3539 		num_users = ppdu_desc->bar_num_users;
3540 		ppdu_desc->num_users = ppdu_desc->bar_num_users;
3541 	} else {
3542 		num_users = ppdu_desc->num_users;
3543 	}
3544 	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
3545 
3546 	for (i = 0; i < num_users; i++) {
3547 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
3548 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
3549 
3550 		peer = dp_peer_get_ref_by_id(pdev->soc,
3551 					     ppdu_desc->user[i].peer_id,
3552 					     DP_MOD_ID_TX_PPDU_STATS);
3553 		/**
3554 		 * This check is to make sure peer is not deleted
3555 		 * after processing the TLVs.
3556 		 */
3557 		if (!peer)
3558 			continue;
3559 
3560 		/*
3561 		 * Different frame types (DATA, BAR, CTRL) have different
3562 		 * expected tlv bitmaps. Apart from the ACK_BA_STATUS TLV,
3563 		 * all TLVs arrive in order from the fw. Since the
3564 		 * ACK_BA_STATUS TLV comes from hardware, it is asynchronous,
3565 		 * so we need to depend on some TLV to confirm that all TLVs
3566 		 * have been received for a ppdu. Hence we depend on both
3567 		 * SCHED_CMD_STATUS_TLV and ACK_BA_STATUS_TLV; for failed
3568 		 * packets we won't get the ACK_BA_STATUS_TLV.
3570 		 */
3571 		if (!(ppdu_info->tlv_bitmap &
3572 		      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) ||
3573 		    (!(ppdu_info->tlv_bitmap &
3574 		       (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
3575 		     (ppdu_desc->user[i].completion_status ==
3576 		      HTT_PPDU_STATS_USER_STATUS_OK))) {
3577 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3578 			continue;
3579 		}
3580 
3581 		/**
3582 		 * Update tx stats for data frames having QoS as well as
3583 		 * non-QoS data tids
3584 		 */
3585 
3586 		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
3587 		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
3588 		     (ppdu_desc->htt_frame_type ==
3589 		      HTT_STATS_FTYPE_SGEN_QOS_NULL) ||
3590 		     ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) &&
3591 		      (ppdu_desc->num_mpdu > 1))) &&
3592 		      (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {
3593 
3594 			dp_tx_stats_update(pdev, peer,
3595 					   &ppdu_desc->user[i],
3596 					   ppdu_desc->ack_rssi);
3597 			dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
3598 		}
3599 
3600 		dp_ppdu_desc_user_phy_tx_time_update(pdev, ppdu_desc,
3601 						     &ppdu_desc->user[i]);
3602 
3603 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3604 		tlv_bitmap_expected = tlv_bitmap_default;
3605 	}
3606 }
3607 
3608 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3609 
3610 /**
3611  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
3612  * to upper layer
3613  * @pdev: DP pdev handle
3614  * @ppdu_info: per PPDU TLV descriptor
3615  *
3616  * return: void
3617  */
3618 static
3619 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
3620 			  struct ppdu_info *ppdu_info)
3621 {
3622 	struct ppdu_info *s_ppdu_info = NULL;
3623 	struct ppdu_info *ppdu_info_next = NULL;
3624 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3625 	qdf_nbuf_t nbuf;
3626 	uint32_t time_delta = 0;
3627 	bool starved = 0;
3628 	bool matched = 0;
3629 	bool recv_ack_ba_done = 0;
3630 
3631 	if (ppdu_info->tlv_bitmap &
3632 	    (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
3633 	    ppdu_info->done)
3634 		recv_ack_ba_done = 1;
3635 
3636 	pdev->last_sched_cmdid = ppdu_info->sched_cmdid;
3637 
3638 	s_ppdu_info = TAILQ_FIRST(&pdev->sched_comp_ppdu_list);
3639 
3640 	TAILQ_FOREACH_SAFE(s_ppdu_info, &pdev->sched_comp_ppdu_list,
3641 			   ppdu_info_list_elem, ppdu_info_next) {
3642 		if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32)
3643 			time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) +
3644 					ppdu_info->tsf_l32;
3645 		else
3646 			time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32;
3647 
3648 		if (!s_ppdu_info->done && !recv_ack_ba_done) {
3649 			if (time_delta < MAX_SCHED_STARVE) {
3650 				dp_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]",
3651 					pdev->pdev_id,
3652 					s_ppdu_info->ppdu_id,
3653 					s_ppdu_info->sched_cmdid,
3654 					s_ppdu_info->tlv_bitmap,
3655 					s_ppdu_info->tsf_l32,
3656 					s_ppdu_info->done);
3657 				break;
3658 			}
3659 			starved = 1;
3660 		}
3661 
3662 		pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid;
3663 		TAILQ_REMOVE(&pdev->sched_comp_ppdu_list, s_ppdu_info,
3664 			     ppdu_info_list_elem);
3665 		pdev->sched_comp_list_depth--;
3666 
3667 		nbuf = s_ppdu_info->nbuf;
3668 		qdf_assert_always(nbuf);
3669 		ppdu_desc = (struct cdp_tx_completion_ppdu *)
3670 				qdf_nbuf_data(nbuf);
3671 		ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap;
3672 
3673 		if (starved) {
3674 			dp_err("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n",
3675 			       ppdu_desc->frame_ctrl,
3676 			       ppdu_desc->htt_frame_type,
3677 			       ppdu_desc->tlv_bitmap,
3678 			       ppdu_desc->user[0].completion_status);
3679 			starved = 0;
3680 		}
3681 
3682 		if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id &&
3683 		    ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid)
3684 			matched = 1;
3685 
3686 		dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info);
3687 
3688 		qdf_mem_free(s_ppdu_info);
3689 
3690 		/**
3691 		 * Deliver PPDU stats only for valid (acked) data
3692 		 * frames if sniffer mode is not enabled.
3693 		 * If sniffer mode is enabled, PPDU stats
3694 		 * for all frames including mgmt/control
3695 		 * frames should be delivered to upper layer
3696 		 */
3697 		if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3698 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3699 					     pdev->soc,
3700 					     nbuf, HTT_INVALID_PEER,
3701 					     WDI_NO_VAL,
3702 					     pdev->pdev_id);
3703 		} else {
3704 			if (ppdu_desc->num_mpdu != 0 &&
3705 			    ppdu_desc->num_users != 0 &&
3706 			    ppdu_desc->frame_ctrl &
3707 			    HTT_FRAMECTRL_DATATYPE) {
3708 				dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3709 						     pdev->soc,
3710 						     nbuf, HTT_INVALID_PEER,
3711 						     WDI_NO_VAL,
3712 						     pdev->pdev_id);
3713 			} else {
3714 				qdf_nbuf_free(nbuf);
3715 			}
3716 		}
3717 
3718 		if (matched)
3719 			break;
3720 	}
3721 	return;
3722 }
3723 
3724 #endif
3725 
3726 /**
3727  * dp_get_ppdu_desc(): Function to allocate new PPDU status
3728  * desc for new ppdu id
3729  * @pdev: DP pdev handle
3730  * @ppdu_id: PPDU unique identifier
3731  * @tlv_type: TLV type received
3732  * @tsf_l32: timestamp received along with ppdu stats indication header
3733  * @max_users: Maximum number of users for that particular ppdu
3734  *
3735  * return: ppdu_info per ppdu tlv structure
3736  */
3737 static
3738 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
3739 				   uint8_t tlv_type, uint32_t tsf_l32,
3740 				   uint8_t max_users)
3741 {
3742 	struct ppdu_info *ppdu_info = NULL;
3743 	struct ppdu_info *s_ppdu_info = NULL;
3744 	struct ppdu_info *ppdu_info_next = NULL;
3745 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3746 	uint32_t size = 0;
3747 	struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL;
3748 	struct cdp_tx_completion_ppdu_user *tmp_user;
3749 	uint32_t time_delta;
3750 
3751 	/*
3752 	 * Check whether a node for this ppdu_id already exists
3753 	 */
3754 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3755 			   ppdu_info_list_elem, ppdu_info_next) {
3756 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
3757 			if (ppdu_info->tsf_l32 > tsf_l32)
3758 				time_delta  = (MAX_TSF_32 -
3759 					       ppdu_info->tsf_l32) + tsf_l32;
3760 			else
3761 				time_delta  = tsf_l32 - ppdu_info->tsf_l32;
3762 
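			/*
			 * Example of the wrap-safe delta computed above
			 * (hypothetical values, with MAX_TSF_32 as the 32-bit
			 * wrap point): a stored tsf_l32 of 0xFFFFFF00 and an
			 * incoming tsf_l32 of 0x200 yield time_delta =
			 * (MAX_TSF_32 - 0xFFFFFF00) + 0x200 rather than an
			 * unsigned underflow.
			 */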
3763 			if (time_delta > WRAP_DROP_TSF_DELTA) {
3764 				TAILQ_REMOVE(&pdev->ppdu_info_list,
3765 					     ppdu_info, ppdu_info_list_elem);
3766 				pdev->list_depth--;
3767 				pdev->stats.ppdu_wrap_drop++;
3768 				tmp_ppdu_desc =
3769 					(struct cdp_tx_completion_ppdu *)
3770 					qdf_nbuf_data(ppdu_info->nbuf);
3771 				tmp_user = &tmp_ppdu_desc->user[0];
3772 				QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
3773 					  QDF_TRACE_LEVEL_INFO_MED,
3774 					  "S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n",
3775 					  ppdu_info->ppdu_id,
3776 					  ppdu_info->tsf_l32,
3777 					  ppdu_info->tlv_bitmap,
3778 					  tmp_user->completion_status,
3779 					  ppdu_info->compltn_common_tlv,
3780 					  ppdu_info->ack_ba_tlv,
3781 					  ppdu_id, tsf_l32, tlv_type);
3782 				qdf_nbuf_free(ppdu_info->nbuf);
3783 				ppdu_info->nbuf = NULL;
3784 				qdf_mem_free(ppdu_info);
3785 			} else {
3786 				break;
3787 			}
3788 		}
3789 	}
3790 
3791 	/*
3792 	 * check if it is ack ba tlv and if it is not there in ppdu info
3793 	 * list then check it in sched completion ppdu list
3794 	 */
3795 	if (!ppdu_info &&
3796 	    tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) {
3797 		TAILQ_FOREACH(s_ppdu_info,
3798 			      &pdev->sched_comp_ppdu_list,
3799 			      ppdu_info_list_elem) {
3800 			if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) {
3801 				if (s_ppdu_info->tsf_l32 > tsf_l32)
3802 					time_delta  = (MAX_TSF_32 -
3803 						       s_ppdu_info->tsf_l32) +
3804 							tsf_l32;
3805 				else
3806 					time_delta  = tsf_l32 -
3807 						s_ppdu_info->tsf_l32;
3808 				if (time_delta < WRAP_DROP_TSF_DELTA) {
3809 					ppdu_info = s_ppdu_info;
3810 					break;
3811 				}
3812 			} else {
3813 				/*
3814 				 * ACK BA STATUS TLVs arrive in sequential
3815 				 * order, so if we received the ack ba status
3816 				 * tlv for the second ppdu while the first ppdu
3817 				 * is still waiting for its ACK BA STATUS TLV,
3818 				 * then per the fw it will not arrive later,
3819 				 * and we can mark the ppdu info as done.
3820 				 */
3821 				if (s_ppdu_info)
3822 					s_ppdu_info->done = 1;
3823 			}
3824 		}
3825 	}
3826 
3827 	if (ppdu_info) {
3828 		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
3829 			/**
3830 			 * If we get a tlv_type that has already been processed
3831 			 * for this ppdu, it means we got a new ppdu with the
3832 			 * same ppdu id; hence flush the older ppdu. For
3833 			 * MU-MIMO and OFDMA, a single PPDU carries multiple
3834 			 * users with the same tlv types, so the tlv bitmap is
3835 			 * used to distinguish SU from MU-MIMO/OFDMA.
3836 			 */
3837 			if (!(ppdu_info->tlv_bitmap &
3838 			    (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
3839 				return ppdu_info;
3840 
3841 			ppdu_desc = (struct cdp_tx_completion_ppdu *)
3842 				qdf_nbuf_data(ppdu_info->nbuf);
3843 
3844 			/**
3845 			 * Apart from the ACK BA STATUS TLV, the rest all come
3846 			 * in order, so if the tlv type is not ACK BA STATUS
3847 			 * TLV we can deliver the ppdu_info
3848 			 */
3849 			if ((tlv_type ==
3850 			     HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
3851 			    (ppdu_desc->htt_frame_type ==
3852 			     HTT_STATS_FTYPE_SGEN_MU_BAR))
3853 				return ppdu_info;
3854 
3855 			dp_ppdu_desc_deliver(pdev, ppdu_info);
3856 		} else {
3857 			return ppdu_info;
3858 		}
3859 	}
3860 
3861 	/**
3862 	 * Flush the head ppdu descriptor if ppdu desc list reaches max
3863 	 * threshold
3864 	 */
3865 	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
3866 		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
3867 		TAILQ_REMOVE(&pdev->ppdu_info_list,
3868 			     ppdu_info, ppdu_info_list_elem);
3869 		pdev->list_depth--;
3870 		pdev->stats.ppdu_drop++;
3871 		qdf_nbuf_free(ppdu_info->nbuf);
3872 		ppdu_info->nbuf = NULL;
3873 		qdf_mem_free(ppdu_info);
3874 	}
3875 
3876 	size = sizeof(struct cdp_tx_completion_ppdu) +
3877 			(max_users * sizeof(struct cdp_tx_completion_ppdu_user));
3878 
3879 	/*
3880 	 * Allocate new ppdu_info node
3881 	 */
3882 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
3883 	if (!ppdu_info)
3884 		return NULL;
3885 
3886 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size,
3887 					 0, 4, TRUE);
3888 	if (!ppdu_info->nbuf) {
3889 		qdf_mem_free(ppdu_info);
3890 		return NULL;
3891 	}
3892 
3893 	ppdu_info->ppdu_desc =
3894 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3895 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size);
3896 
3897 	if (qdf_nbuf_put_tail(ppdu_info->nbuf, size) == NULL) {
3898 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3899 				"No tailroom for HTT PPDU");
3900 		qdf_nbuf_free(ppdu_info->nbuf);
3901 		ppdu_info->nbuf = NULL;
3902 		ppdu_info->last_user = 0;
3903 		qdf_mem_free(ppdu_info);
3904 		return NULL;
3905 	}
3906 
3907 	ppdu_info->ppdu_desc->max_users = max_users;
3908 	ppdu_info->tsf_l32 = tsf_l32;
3909 	/**
3910 	 * No lock is needed because all PPDU TLVs are processed in
3911 	 * the same context, and this list is updated in that same context
3912 	 */
3913 	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
3914 			ppdu_info_list_elem);
3915 	pdev->list_depth++;
3916 	return ppdu_info;
3917 }
3918 
3919 /**
3920  * dp_htt_process_tlv(): Function to process each PPDU TLVs
3921  * @pdev: DP pdev handle
3922  * @htt_t2h_msg: HTT target to host message
3923  *
3924  * return: ppdu_info per ppdu tlv structure
3925  */
3926 
3927 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
3928 		qdf_nbuf_t htt_t2h_msg)
3929 {
3930 	uint32_t length;
3931 	uint32_t ppdu_id;
3932 	uint8_t tlv_type;
3933 	uint32_t tlv_length, tlv_bitmap_expected;
3934 	uint8_t *tlv_buf;
3935 	struct ppdu_info *ppdu_info = NULL;
3936 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3937 	uint8_t max_users = CDP_MU_MAX_USERS;
3938 	uint32_t tsf_l32;
3939 
3940 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
3941 
3942 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
3943 
3944 	msg_word = msg_word + 1;
3945 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
3946 
3947 	msg_word = msg_word + 1;
3948 	tsf_l32 = (uint32_t)(*msg_word);
3949 
3950 	msg_word = msg_word + 2;
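
	/*
	 * The indication header parsed above spans four words:
	 *   word 0: msg type and payload size
	 *   word 1: ppdu_id
	 *   word 2: tsf_l32
	 *   word 3: skipped here (remaining header word)
	 * msg_word now points at the first TLV header.
	 */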
3951 	while (length > 0) {
3952 		tlv_buf = (uint8_t *)msg_word;
3953 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
3954 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
3955 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
3956 			pdev->stats.ppdu_stats_counter[tlv_type]++;
3957 
3958 		if (tlv_length == 0)
3959 			break;
3960 
3961 		tlv_length += HTT_TLV_HDR_LEN;
3962 
3963 		/**
3964 		 * Not allocating a separate ppdu descriptor for the MGMT
3965 		 * payload TLV, as it is sent as a separate WDI indication
3966 		 * and doesn't contain any ppdu information
3967 		 */
3968 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
3969 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
3970 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
3971 			pdev->mgmtctrl_frm_info.mgmt_buf_len =
3972 				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
3973 						(*(msg_word + 1));
3974 			msg_word =
3975 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
3976 			length -= (tlv_length);
3977 			continue;
3978 		}
3979 
3980 		/*
3981 		 * retrieve max_users if it's USERS_INFO,
3982 		 * else, it's 1 for COMPLTN_FLUSH,
3983 		 * else, use CDP_MU_MAX_USERS
3984 		 */
3985 		if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) {
3986 			max_users =
3987 				HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1));
3988 		} else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) {
3989 			max_users = 1;
3990 		}
3991 
3992 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type,
3993 					     tsf_l32, max_users);
3994 		if (!ppdu_info)
3995 			return NULL;
3996 
3997 		ppdu_info->ppdu_desc->bss_color =
3998 			pdev->rx_mon_recv_status.bsscolor;
3999 
4000 		ppdu_info->ppdu_id = ppdu_id;
4001 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
4002 
4003 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
4004 
4005 		/**
4006 		 * Increment pdev level tlv count to monitor
4007 		 * missing TLVs
4008 		 */
4009 		pdev->tlv_count++;
4010 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
4011 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
4012 		length -= (tlv_length);
4013 	}
4014 
4015 	if (!ppdu_info)
4016 		return NULL;
4017 
4018 	pdev->last_ppdu_id = ppdu_id;
4019 
4020 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
4021 
4022 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
4023 	    pdev->tx_capture_enabled) {
4024 		if (ppdu_info->is_ampdu)
4025 			tlv_bitmap_expected =
4026 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
4027 					ppdu_info->tlv_bitmap);
4028 	}
4029 
4030 	ppdu_desc = ppdu_info->ppdu_desc;
4031 
4032 	if (!ppdu_desc)
4033 		return NULL;
4034 
4035 	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
4036 	    HTT_PPDU_STATS_USER_STATUS_OK) {
4037 		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
4038 	}
4039 
4040 	/*
4041 	 * For frame types DATA and BAR, we update stats per MSDU.
4042 	 * Successful msdu and mpdu counts are populated from the ACK BA
4043 	 * STATUS TLV, which arrives out of order; the successful mpdu count
4044 	 * is also populated from the COMPLTN COMMON TLV, which arrives in
4045 	 * order. For every ppdu_info we store the successful mpdu counts from
4046 	 * both TLVs and compare them before delivering, to make sure the ACK
4047 	 * BA STATUS TLV was received. For some self-generated frames we won't
4048 	 * get an ack ba status tlv, so there is no need to wait for it.
4049 	 */
4050 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
4051 	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
4052 		/*
4053 		 * Most of the time a BAR frame will have a duplicate ack ba
4054 		 * status tlv.
4055 		 */
4056 		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
4057 		    (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))
4058 			return NULL;
4059 		/*
4060 		 * For data frames, the compltn common tlv must match the ack ba
4061 		 * status tlv and the completion status. We check the first user
4062 		 * because for ofdma, completion is seen at the next MU BAR frm,
4063 		 * while for mimo only the first user's completion is immediate.
4064 		 */
4065 		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
4066 		    (ppdu_desc->user[0].completion_status == 0 &&
4067 		     (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)))
4068 			return NULL;
4069 	}
4070 
4071 	/**
4072 	 * Once all the TLVs for a given PPDU have been processed,
4073 	 * return the PPDU status to be delivered to the higher layer.
4074 	 * tlv_bitmap_expected may differ per frame type, but the SCHED CMD
4075 	 * STATUS TLV is the last TLV from the FW for a ppdu; apart from the
4076 	 * ACK BA TLV, the FW sends the other TLVs in sequential order, and
4077 	 * the flush tlv comes separately.
4078 	 */
4079 	if ((ppdu_info->tlv_bitmap != 0 &&
4080 	     (ppdu_info->tlv_bitmap &
4081 	      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) ||
4082 	    (ppdu_info->tlv_bitmap &
4083 	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) {
4084 		ppdu_info->done = 1;
4085 		return ppdu_info;
4086 	}
4087 
4088 	return NULL;
4089 }
4090 #endif /* FEATURE_PERPKT_INFO */
4091 
4092 /**
4093  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
4094  * @soc: DP SOC handle
4095  * @pdev_id: pdev id
4096  * @htt_t2h_msg: HTT message nbuf
4097  *
4098  * return: true if the buffer should be freed by the caller
4099  */
4100 #if defined(WDI_EVENT_ENABLE)
4101 #ifdef FEATURE_PERPKT_INFO
4102 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
4103 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
4104 {
4105 	struct dp_pdev *pdev;
4106 	struct ppdu_info *ppdu_info = NULL;
4107 	bool free_buf = true;
4108 
4109 	if (pdev_id >= MAX_PDEV_CNT)
4110 		return true;
4111 
4112 	pdev = soc->pdev_list[pdev_id];
4113 	if (!pdev)
4114 		return true;
4115 
4116 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
4117 	    !pdev->mcopy_mode && !pdev->bpr_enable)
4118 		return free_buf;
4119 
4120 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
4121 
4122 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
4123 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
4124 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
4125 		    QDF_STATUS_SUCCESS)
4126 			free_buf = false;
4127 	}
4128 
4129 	if (ppdu_info)
4130 		dp_ppdu_desc_deliver(pdev, ppdu_info);
4131 
4132 	pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
4133 	pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
4134 	pdev->mgmtctrl_frm_info.ppdu_id = 0;
4135 
4136 	return free_buf;
4137 }
4138 #else
4139 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
4140 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
4141 {
4142 	return true;
4143 }
4144 #endif
4145 #endif
4146 
4147 /**
4148  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
4149  * @soc: DP SOC handle
4150  * @htt_t2h_msg: HTT message nbuf
4151  *
4152  * return:void
4153  */
4154 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
4155 		qdf_nbuf_t htt_t2h_msg)
4156 {
4157 	uint8_t done;
4158 	qdf_nbuf_t msg_copy;
4159 	uint32_t *msg_word;
4160 
4161 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
4162 	msg_word = msg_word + 3;
4163 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
4164 
4165 	/*
4166 	 * HTT EXT stats response comes as stream of TLVs which span over
4167 	 * multiple T2H messages.
4168 	 * The first message will carry length of the response.
4169 	 * For rest of the messages length will be zero.
4170 	 *
4171 	 * Clone the T2H message buffer and store it in a list to process
4172 	 * it later.
4173 	 *
4174 	 * The original T2H message buffer gets freed in the T2H HTT event
4175 	 * handler
4176 	 */
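	/*
	 * For example, a response split across three T2H buffers arrives as
	 * buf1 (length = total, done = 0), buf2 (length = 0, done = 0) and
	 * buf3 (length = 0, done = 1); each clone is queued below and only
	 * the final one bumps num_stats and schedules the work.
	 */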
4177 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
4178 
4179 	if (!msg_copy) {
4180 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
4181 				"T2H message clone failed for HTT EXT STATS");
4182 		goto error;
4183 	}
4184 
4185 	qdf_spin_lock_bh(&soc->htt_stats.lock);
4186 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
4187 	/*
4188 	 * Done bit signifies that this is the last T2H buffer in the stream of
4189 	 * HTT EXT STATS message
4190 	 */
4191 	if (done) {
4192 		soc->htt_stats.num_stats++;
4193 		qdf_sched_work(0, &soc->htt_stats.work);
4194 	}
4195 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
4196 
4197 	return;
4198 
4199 error:
4200 	qdf_spin_lock_bh(&soc->htt_stats.lock);
4201 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
4202 			!= NULL) {
4203 		qdf_nbuf_free(msg_copy);
4204 	}
4205 	soc->htt_stats.num_stats = 0;
4206 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
4209 }
4210 
4211 /*
4212  * htt_soc_attach_target() - SOC level HTT setup
4213  * @htt_soc:	HTT SOC handle
4214  *
4215  * Return: 0 on success; error code on failure
4216  */
4217 int htt_soc_attach_target(struct htt_soc *htt_soc)
4218 {
4219 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
4220 
4221 	return htt_h2t_ver_req_msg(soc);
4222 }
4223 
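/*
 * htt_set_htc_handle() - store the HTC handle in the HTT SOC
 * @htt_soc:	HTT SOC handle
 * @htc_soc:	HTC SOC handle
 *
 * Return: None
 */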
4224 void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
4225 {
4226 	htt_soc->htc_soc = htc_soc;
4227 }
4228 
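/*
 * htt_get_htc_handle() - retrieve the HTC handle from the HTT SOC
 * @htt_soc:	HTT SOC handle
 *
 * Return: HTC SOC handle
 */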
4229 HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
4230 {
4231 	return htt_soc->htc_soc;
4232 }
4233 
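/*
 * htt_soc_attach() - allocate and initialize the SOC level HTT handle
 * @soc:	DP SOC handle
 * @htc_handle:	HTC SOC handle
 *
 * Allocates the htt_soc instance along with the per-pdev UMAC and LMAC
 * backpressure timestamp arrays, each entry initialized to -1.
 *
 * Return: HTT SOC handle on success; NULL on allocation failure
 */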
4234 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
4235 {
4236 	int i;
4237 	int j;
4238 	int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
4239 	struct htt_soc *htt_soc = NULL;
4240 
4241 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
4242 	if (!htt_soc) {
4243 		dp_err("HTT attach failed");
4244 		return NULL;
4245 	}
4246 
4247 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4248 		htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
4249 		if (!htt_soc->pdevid_tt[i].umac_ttt)
4250 			break;
4251 		qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
4252 		htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
4253 		if (!htt_soc->pdevid_tt[i].lmac_ttt) {
4254 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
4255 			break;
4256 		}
4257 		qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
4258 	}
4259 	if (i != MAX_PDEV_CNT) {
4260 		for (j = 0; j < i; j++) {
4261 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt);
4262 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt);
4263 		}
4264 		qdf_mem_free(htt_soc);
4265 		return NULL;
4266 	}
4267 
4268 	htt_soc->dp_soc = soc;
4269 	htt_soc->htc_soc = htc_handle;
4270 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
4271 
4272 	return htt_soc;
4273 }
4274 
4275 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
4276 /*
4277  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
4278  * @htt_soc:	 HTT SOC handle
4279  * @msg_word:    Pointer to payload
4280  * @htt_t2h_msg: HTT msg nbuf
4281  *
4282  * Return: True if buffer should be freed by caller.
4283  */
4284 static bool
4285 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
4286 				uint32_t *msg_word,
4287 				qdf_nbuf_t htt_t2h_msg)
4288 {
4289 	u_int8_t pdev_id;
4290 	u_int8_t target_pdev_id;
4291 	bool free_buf;
4292 
4293 	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
4294 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4295 							 target_pdev_id);
4296 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
4297 			     htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
4298 			     pdev_id);
4299 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
4300 					      htt_t2h_msg);
4301 	return free_buf;
4302 }
4303 #else
4304 static bool
4305 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
4306 				uint32_t *msg_word,
4307 				qdf_nbuf_t htt_t2h_msg)
4308 {
4309 	return true;
4310 }
4311 #endif
4312 
4313 #if defined(WDI_EVENT_ENABLE) && \
4314 	!defined(REMOVE_PKT_LOG)
4315 /*
4316  * dp_pktlog_msg_handler() - Pktlog msg handler
4317  * @htt_soc:	 HTT SOC handle
4318  * @msg_word:    Pointer to payload
4319  *
4320  * Return: None
4321  */
4322 static void
4323 dp_pktlog_msg_handler(struct htt_soc *soc,
4324 		      uint32_t *msg_word)
4325 {
4326 	uint8_t pdev_id;
4327 	uint8_t target_pdev_id;
4328 	uint32_t *pl_hdr;
4329 
4330 	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
4331 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4332 							 target_pdev_id);
4333 	pl_hdr = (msg_word + 1);
4334 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
4335 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
4336 		pdev_id);
4337 }
4338 #else
4339 static void
4340 dp_pktlog_msg_handler(struct htt_soc *soc,
4341 		      uint32_t *msg_word)
4342 {
4343 }
4344 #endif
4345 
4346 /*
4347  * time_allow_print() - time allow print
4348  * @htt_ring_tt:	ring_id-indexed array of timestamps
4349  * @ring_id:		ring_id (index)
4350  *
4351  * Return: 1 for successfully saving timestamp in array
4352  *	and 0 for timestamp falling within 2 seconds after last one
4353  */
4354 static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
4355 {
4356 	unsigned long tstamp;
4357 	unsigned long delta;
4358 
4359 	tstamp = qdf_get_system_timestamp();
4360 
4361 	if (!htt_ring_tt)
4362 		return 0; /* unable to print backpressure messages */
4363 
4364 	if (htt_ring_tt[ring_id] == -1) {
4365 		htt_ring_tt[ring_id] = tstamp;
4366 		return 1;
4367 	}
4368 	delta = tstamp - htt_ring_tt[ring_id];
4369 	if (delta >= 2000) {
4370 		htt_ring_tt[ring_id] = tstamp;
4371 		return 1;
4372 	}
4373 
4374 	return 0;
4375 }
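
/*
 * Example of the 2-second throttle above (hypothetical timestamps in ms):
 * an event at t = 0 stores the timestamp and returns 1, an event on the
 * same ring at t = 1500 returns 0 (suppressed, timestamp unchanged), and
 * an event at t = 2100 returns 1 again since the delta from the last
 * printed event is >= 2000.
 */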
4376 
4377 static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
4378 			       u_int8_t pdev_id, u_int8_t ring_id,
4379 			       u_int16_t hp_idx, u_int16_t tp_idx,
4380 			       u_int32_t bkp_time, char *ring_stype)
4381 {
4382 	dp_alert("msg_type: %d pdev_id: %d ring_type: %s ",
4383 		 msg_type, pdev_id, ring_stype);
4384 	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
4385 		 ring_id, hp_idx, tp_idx, bkp_time);
4386 }
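
/*
 * Illustrative output of the two dp_alert() calls above (values are
 * hypothetical):
 *   msg_type: 36 pdev_id: 0 ring_type: HTT_SW_RING_TYPE_UMAC
 *   ring_id: 2 hp_idx: 100 tp_idx: 99 bkpressure_time_ms: 250
 */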
4387 
4388 /*
4389  * dp_htt_bkp_event_alert() - htt backpressure event alert
4390  * @msg_word:	htt packet context
4391  * @htt_soc:	HTT SOC handle
4392  *
4393  * Return: none
4394  */
4395 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
4396 {
4397 	u_int8_t ring_type;
4398 	u_int8_t pdev_id;
4399 	uint8_t target_pdev_id;
4400 	u_int8_t ring_id;
4401 	u_int16_t hp_idx;
4402 	u_int16_t tp_idx;
4403 	u_int32_t bkp_time;
4404 	enum htt_t2h_msg_type msg_type;
4405 	struct dp_soc *dpsoc;
4406 	struct dp_pdev *pdev;
4407 	struct dp_htt_timestamp *radio_tt;
4408 
4409 	if (!soc)
4410 		return;
4411 
4412 	dpsoc = (struct dp_soc *)soc->dp_soc;
4413 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
4414 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
4415 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
4416 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4417 							 target_pdev_id);
4418 	if (pdev_id >= MAX_PDEV_CNT) {
4419 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
4420 			  "pdev id %d is invalid", pdev_id);
4421 		return;
4422 	}
4423 
4424 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
4425 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
4426 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
4427 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
4428 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
4429 	radio_tt = &soc->pdevid_tt[pdev_id];
4430 
4431 	switch (ring_type) {
4432 	case HTT_SW_RING_TYPE_UMAC:
4433 		if (!time_allow_print(radio_tt->umac_ttt, ring_id))
4434 			return;
4435 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4436 				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
4437 	break;
4438 	case HTT_SW_RING_TYPE_LMAC:
4439 		if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
4440 			return;
4441 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4442 				   bkp_time, "HTT_SW_RING_TYPE_LMAC");
4443 	break;
4444 	default:
4445 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4446 				   bkp_time, "UNKNOWN");
4447 	break;
4448 	}
4449 
4450 	dp_print_ring_stats(pdev);
4451 	dp_print_napi_stats(pdev->soc);
4452 }
4453 
4454 /*
4455  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
4456  * @context:	Opaque context (HTT SOC handle)
4457  * @pkt:	HTC packet
4458  */
4459 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
4460 {
4461 	struct htt_soc *soc = (struct htt_soc *) context;
4462 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
4463 	u_int32_t *msg_word;
4464 	enum htt_t2h_msg_type msg_type;
4465 	bool free_buf = true;
4466 
4467 	/* check for successful message reception */
4468 	if (pkt->Status != QDF_STATUS_SUCCESS) {
4469 		if (pkt->Status != QDF_STATUS_E_CANCELED)
4470 			soc->stats.htc_err_cnt++;
4471 
4472 		qdf_nbuf_free(htt_t2h_msg);
4473 		return;
4474 	}
4475 
4476 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
4477 
4478 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
4479 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
4480 	htt_event_record(soc->htt_logger_handle,
4481 			 msg_type, (uint8_t *)msg_word);
4482 	switch (msg_type) {
4483 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
4484 	{
4485 		dp_htt_bkp_event_alert(msg_word, soc);
4486 		break;
4487 	}
4488 	case HTT_T2H_MSG_TYPE_PEER_MAP:
4489 		{
4490 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4491 			u_int8_t *peer_mac_addr;
4492 			u_int16_t peer_id;
4493 			u_int16_t hw_peer_id;
4494 			u_int8_t vdev_id;
4495 			u_int8_t is_wds;
4496 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
4497 
4498 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
4499 			hw_peer_id =
4500 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
4501 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
4502 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
4503 				(u_int8_t *) (msg_word+1),
4504 				&mac_addr_deswizzle_buf[0]);
4505 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4506 				QDF_TRACE_LEVEL_INFO,
4507 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d\n",
4508 				peer_id, vdev_id);
4509 
4510 			/*
4511 			 * Check if a peer already exists for this peer_id; if
4512 			 * so, this peer map event is in response to a wds peer
4513 			 * add wmi command sent during wds source port learning.
4514 			 * In this case just add the ast entry to the existing
4515 			 * peer ast_list.
4516 			 */
4517 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
4518 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
4519 					       vdev_id, peer_mac_addr, 0,
4520 					       is_wds);
4521 			break;
4522 		}
4523 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
4524 		{
4525 			u_int16_t peer_id;
4526 			u_int8_t vdev_id;
4527 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
4528 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
4529 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
4530 
4531 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
4532 						 vdev_id, mac_addr, 0,
4533 						 DP_PEER_WDS_COUNT_INVALID);
4534 			break;
4535 		}
4536 	case HTT_T2H_MSG_TYPE_SEC_IND:
4537 		{
4538 			u_int16_t peer_id;
4539 			enum cdp_sec_type sec_type;
4540 			int is_unicast;
4541 
4542 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
4543 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
4544 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
4545 			/* point to the first part of the Michael key */
4546 			msg_word++;
4547 			dp_rx_sec_ind_handler(
4548 				soc->dp_soc, peer_id, sec_type, is_unicast,
4549 				msg_word, msg_word + 2);
4550 			break;
4551 		}
4552 
4553 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
4554 		{
4555 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
4556 							     htt_t2h_msg);
4557 			break;
4558 		}
4559 
4560 	case HTT_T2H_MSG_TYPE_PKTLOG:
4561 		{
4562 			dp_pktlog_msg_handler(soc, msg_word);
4563 			break;
4564 		}
4565 
4566 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
4567 		{
4568 			/*
4569 			 * HTC maintains runtime pm count for H2T messages that
4570 			 * have a response msg from FW. This count ensures that
4571 			 * if the FW does not send out the response, or the
4572 			 * host does not process this indication, runtime_put happens
4573 			 * properly in the cleanup path.
4574 			 */
4575 			if (htc_dec_return_runtime_cnt(soc->htc_soc) >= 0)
4576 				htc_pm_runtime_put(soc->htc_soc);
4577 			else
4578 				soc->stats.htt_ver_req_put_skip++;
4579 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
4580 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
4581 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4582 				"target uses HTT version %d.%d; host uses %d.%d",
4583 				soc->tgt_ver.major, soc->tgt_ver.minor,
4584 				HTT_CURRENT_VERSION_MAJOR,
4585 				HTT_CURRENT_VERSION_MINOR);
4586 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
4587 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4588 					QDF_TRACE_LEVEL_WARN,
4589 					"*** Incompatible host/target HTT versions!");
4590 			}
4591 			/* abort if the target is incompatible with the host */
4592 			qdf_assert(soc->tgt_ver.major ==
4593 				HTT_CURRENT_VERSION_MAJOR);
4594 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
4595 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4596 					QDF_TRACE_LEVEL_INFO_LOW,
4597 					"*** Warning: host/target HTT versions"
4598 					" are different, though compatible!");
4599 			}
4600 			break;
4601 		}
4602 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
4603 		{
4604 			uint16_t peer_id;
4605 			uint8_t tid;
4606 			uint8_t win_sz;
4607 			uint16_t status;
4608 			struct dp_peer *peer;
4609 
4610 			/*
4611 			 * Update REO Queue Desc with new values
4612 			 */
4613 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
4614 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
4615 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
4616 			peer = dp_peer_get_ref_by_id(soc->dp_soc, peer_id,
4617 						     DP_MOD_ID_HTT);
4618 
4619 			/*
4620 			 * Window size needs to be incremented by 1
4621 			 * since fw needs to represent a value of 256
4622 			 * using just 8 bits
4623 			 */
4624 			if (peer) {
4625 				status = dp_addba_requestprocess_wifi3(
4626 					(struct cdp_soc_t *)soc->dp_soc,
4627 					peer->mac_addr.raw, peer->vdev->vdev_id,
4628 					0, tid, 0, win_sz + 1, 0xffff);
4629 
4630 				/*
4631 				 * If PEER_LOCK_REF_PROTECT is enabled, drop
4632 				 * the ref taken by dp_peer_get_ref_by_id
4633 				 */
4634 				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4635 
4636 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4637 					QDF_TRACE_LEVEL_INFO,
4638 					FL("PeerID %d BAW %d TID %d stat %d"),
4639 					peer_id, win_sz, tid, status);
4640 
4641 			} else {
4642 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4643 					QDF_TRACE_LEVEL_ERROR,
4644 					FL("Peer not found peer id %d"),
4645 					peer_id);
4646 			}
4647 			break;
4648 		}
4649 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
4650 		{
4651 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
4652 			break;
4653 		}
4654 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
4655 		{
4656 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4657 			u_int8_t *peer_mac_addr;
4658 			u_int16_t peer_id;
4659 			u_int16_t hw_peer_id;
4660 			u_int8_t vdev_id;
4661 			bool is_wds;
4662 			u_int16_t ast_hash;
4663 			struct dp_ast_flow_override_info ast_flow_info;
4664 
4665 			qdf_mem_set(&ast_flow_info,
4666 				    sizeof(struct dp_ast_flow_override_info), 0);
4667 
4668 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
4669 			hw_peer_id =
4670 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
4671 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
4672 			peer_mac_addr =
4673 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
4674 						   &mac_addr_deswizzle_buf[0]);
4675 			is_wds =
4676 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
4677 			ast_hash =
4678 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
4679 			/*
4680 			 * Update the 4 ast indexes per peer, the ast valid
4681 			 * mask and the TID flow valid mask.
4682 			 * The AST valid mask is a 3-bit field corresponding to
4683 			 * ast_index[3:1]; ast_index 0 is always valid.
4684 			 */
4685 			ast_flow_info.ast_valid_mask =
4686 			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
4687 			ast_flow_info.ast_idx[0] = hw_peer_id;
4688 			ast_flow_info.ast_flow_mask[0] =
4689 			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
4690 			ast_flow_info.ast_idx[1] =
4691 			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
4692 			ast_flow_info.ast_flow_mask[1] =
4693 			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
4694 			ast_flow_info.ast_idx[2] =
4695 			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
4696 			ast_flow_info.ast_flow_mask[2] =
4697 			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
4698 			ast_flow_info.ast_idx[3] =
4699 			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
4700 			ast_flow_info.ast_flow_mask[3] =
4701 			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
4702 			/*
4703 			 * The TID valid mask is applicable only
4704 			 * for HI and LOW priority flows.
4705 			 * tid_valid_mask is an 8-bit field corresponding
4706 			 * to TID[7:0].
4707 			 */
4708 			ast_flow_info.tid_valid_low_pri_mask =
4709 			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
4710 			ast_flow_info.tid_valid_hi_pri_mask =
4711 			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));
4712 
4713 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4714 				  QDF_TRACE_LEVEL_INFO,
4715 				  "HTT_T2H_MSG_TYPE_PEER_MAP_V2 msg for peer id %d vdev id %d\n",
4716 				  peer_id, vdev_id);
4717 
4718 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
4719 					       hw_peer_id, vdev_id,
4720 					       peer_mac_addr, ast_hash,
4721 					       is_wds);
4722 
4723 			/*
4724 			 * Update ast indexes for flow override support
4725 			 * Applicable only for non wds peers
4726 			 */
4727 			dp_peer_ast_index_flow_queue_map_create(
4728 					    soc->dp_soc, is_wds,
4729 					    peer_id, peer_mac_addr,
4730 					    &ast_flow_info);
4731 
4732 			break;
4733 		}
4734 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
4735 		{
4736 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4737 			u_int8_t *mac_addr;
4738 			u_int16_t peer_id;
4739 			u_int8_t vdev_id;
4740 			u_int8_t is_wds;
4741 			u_int32_t free_wds_count;
4742 
4743 			peer_id =
4744 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
4745 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
4746 			mac_addr =
4747 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
4748 						   &mac_addr_deswizzle_buf[0]);
4749 			is_wds =
4750 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
4751 			free_wds_count =
4752 			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));
4753 
4754 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4755 				  QDF_TRACE_LEVEL_INFO,
4756 				  "HTT_T2H_MSG_TYPE_PEER_UNMAP_V2 msg for peer id %d vdev id %d\n",
4757 				  peer_id, vdev_id);
4758 
4759 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
4760 						 vdev_id, mac_addr,
4761 						 is_wds, free_wds_count);
4762 			break;
4763 		}
4764 	case HTT_T2H_MSG_TYPE_RX_DELBA:
4765 		{
4766 			uint16_t peer_id;
4767 			uint8_t tid;
4768 			uint8_t win_sz;
4769 			QDF_STATUS status;
4770 
4771 			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
4772 			tid = HTT_RX_DELBA_TID_GET(*msg_word);
4773 			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);
4774 
4775 			status = dp_rx_delba_ind_handler(
4776 				soc->dp_soc,
4777 				peer_id, tid, win_sz);
4778 
4779 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4780 				  QDF_TRACE_LEVEL_INFO,
4781 				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
4782 				  peer_id, win_sz, tid, status);
4783 			break;
4784 		}
4785 	case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
4786 		{
4787 			uint16_t num_entries;
4788 			uint32_t cmem_ba_lo;
4789 			uint32_t cmem_ba_hi;
4790 
4791 			num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
4792 			cmem_ba_lo = *(msg_word + 1);
4793 			cmem_ba_hi = *(msg_word + 2);
4794 
4795 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
4796 				  FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
4797 				  num_entries, cmem_ba_lo, cmem_ba_hi);
4798 
4799 			dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
4800 						     cmem_ba_lo, cmem_ba_hi);
			break;
4801 		}
4802 	default:
4803 		break;
4804 	}
4805 
4806 	/* Free the indication buffer */
4807 	if (free_buf)
4808 		qdf_nbuf_free(htt_t2h_msg);
4809 }
4810 
4811 /*
4812  * dp_htt_h2t_full() - Send full handler (called from HTC)
4813  * @context:	Opaque context (HTT SOC handle)
4814  * @pkt:	HTC packet
4815  *
4816  * Return: enum htc_send_full_action
4817  */
4818 static enum htc_send_full_action
4819 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
4820 {
4821 	return HTC_SEND_FULL_KEEP;
4822 }
4823 
4824 /*
4825  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
4826  * @context:	Opaque context (HTT SOC handle)
4827  * @nbuf:	nbuf containing T2H message
4828  * @pipe_id:	HIF pipe ID
4829  *
4830  * Return: QDF_STATUS
4831  *
4832  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
4833  * will be used for packet log and other high-priority HTT messages. Proper
4834  * HTC connection to be added later once required FW changes are available
4835  */
4836 static QDF_STATUS
4837 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
4838 {
4839 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
4840 	HTC_PACKET htc_pkt;
4841 
4842 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
4843 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
4844 	htc_pkt.Status = QDF_STATUS_SUCCESS;
4845 	htc_pkt.pPktContext = (void *)nbuf;
4846 	dp_htt_t2h_msg_handler(context, &htc_pkt);
4847 
4848 	return rc;
4849 }
4850 
4851 /*
4852  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
4853  * @htt_soc:	HTT SOC handle
4854  *
4855  * Return: QDF_STATUS
4856  */
4857 static QDF_STATUS
4858 htt_htc_soc_attach(struct htt_soc *soc)
4859 {
4860 	struct htc_service_connect_req connect;
4861 	struct htc_service_connect_resp response;
4862 	QDF_STATUS status;
4863 	struct dp_soc *dpsoc = soc->dp_soc;
4864 
4865 	qdf_mem_zero(&connect, sizeof(connect));
4866 	qdf_mem_zero(&response, sizeof(response));
4867 
4868 	connect.pMetaData = NULL;
4869 	connect.MetaDataLength = 0;
4870 	connect.EpCallbacks.pContext = soc;
4871 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
4872 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
4873 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
4874 
4875 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
4876 	connect.EpCallbacks.EpRecvRefill = NULL;
4877 
4878 	/* N/A, fill is done by HIF */
4879 	connect.EpCallbacks.RecvRefillWaterMark = 1;
4880 
4881 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
4882 	/*
4883 	 * Specify how deep to let a queue get before htc_send_pkt will
4884 	 * call the EpSendFull function due to excessive send queue depth.
4885 	 */
4886 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
4887 
4888 	/* disable flow control for HTT data message service */
4889 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
4890 
4891 	/* connect to control service */
4892 	connect.service_id = HTT_DATA_MSG_SVC;
4893 
4894 	status = htc_connect_service(soc->htc_soc, &connect, &response);
4895 
4896 	if (status != QDF_STATUS_SUCCESS)
4897 		return status;
4898 
4899 	soc->htc_endpoint = response.Endpoint;
4900 
4901 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
4902 
4903 	htt_interface_logging_init(&soc->htt_logger_handle);
4904 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
4905 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
4906 
4907 	return QDF_STATUS_SUCCESS; /* success */
4908 }
4909 
4910 /*
4911  * htt_soc_initialize() - SOC level HTT initialization
4912  * @htt_soc: Opaque htt SOC handle
4913  * @ctrl_psoc: Opaque ctrl SOC handle
4914  * @htc_soc: SOC level HTC handle
4915  * @hal_soc: Opaque HAL SOC handle
4916  * @osdev: QDF device
4917  *
4918  * Return: HTT handle on success; NULL on failure
4919  */
4920 void *
4921 htt_soc_initialize(struct htt_soc *htt_soc,
4922 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
4923 		   HTC_HANDLE htc_soc,
4924 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
4925 {
4926 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
4927 
4928 	soc->osdev = osdev;
4929 	soc->ctrl_psoc = ctrl_psoc;
4930 	soc->htc_soc = htc_soc;
4931 	soc->hal_soc = hal_soc_hdl;
4932 
4933 	if (htt_htc_soc_attach(soc))
4934 		goto fail2;
4935 
4936 	return soc;
4937 
4938 fail2:
4939 	return NULL;
4940 }
4941 
4942 void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
4943 {
4944 	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
4945 	htt_htc_misc_pkt_pool_free(htt_handle);
4946 	htt_htc_pkt_pool_free(htt_handle);
4947 }
4948 
4949 /*
4950  * htt_soc_htc_prealloc() - HTC memory prealloc
4951  * @htt_soc: SOC level HTT handle
4952  *
4953  * Return: QDF_STATUS_SUCCESS on Success or
4954  * QDF_STATUS_E_NOMEM on allocation failure
4955  */
4956 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
4957 {
4958 	int i;
4959 
4960 	soc->htt_htc_pkt_freelist = NULL;
4961 	/* pre-allocate some HTC_PACKET objects */
4962 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
4963 		struct dp_htt_htc_pkt_union *pkt;
4964 		pkt = qdf_mem_malloc(sizeof(*pkt));
4965 		if (!pkt)
4966 			return QDF_STATUS_E_NOMEM;
4967 
4968 		htt_htc_pkt_free(soc, &pkt->u.pkt);
4969 	}
4970 	return QDF_STATUS_SUCCESS;
4971 }
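
/*
 * For reference, the allocator counterpart (htt_htc_pkt_alloc, defined
 * earlier in this file) pops from this freelist before falling back to
 * the heap; in sketch form (the u.next linkage is an assumption about
 * dp_htt_htc_pkt_union):
 *
 *	pkt = soc->htt_htc_pkt_freelist;
 *	if (pkt)
 *		soc->htt_htc_pkt_freelist = pkt->u.next;
 *	else
 *		pkt = qdf_mem_malloc(sizeof(*pkt));
 */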
4972 
4973 /*
4974  * htt_soc_detach() - Free SOC level HTT handle
4975  * @htt_hdl: HTT SOC handle
4976  */
4977 void htt_soc_detach(struct htt_soc *htt_hdl)
4978 {
4979 	int i;
4980 	struct htt_soc *htt_handle = htt_hdl;
4981 
4982 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4983 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
4984 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
4985 	}
4986 
4987 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
4988 	qdf_mem_free(htt_handle);
4990 }
4991 
4992 /**
4993  * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
4994  * @pdev: DP PDEV handle
4995  * @stats_type_upload_mask: stats type requested by user
4996  * @config_param_0: extra configuration parameters
4997  * @config_param_1: extra configuration parameters
4998  * @config_param_2: extra configuration parameters
4999  * @config_param_3: extra configuration parameters
 * @cookie_val: cookie value echoed back by FW in the stats response
 * @cookie_msb: upper cookie word; its low bits carry the pdev id
5000  * @mac_id: mac number
5001  *
5002  * return: QDF STATUS
5003  */
5004 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
5005 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
5006 		uint32_t config_param_1, uint32_t config_param_2,
5007 		uint32_t config_param_3, int cookie_val, int cookie_msb,
5008 		uint8_t mac_id)
5009 {
5010 	struct htt_soc *soc = pdev->soc->htt_handle;
5011 	struct dp_htt_htc_pkt *pkt;
5012 	qdf_nbuf_t msg;
5013 	uint32_t *msg_word;
5014 	uint8_t pdev_mask = 0;
5015 	uint8_t *htt_logger_bufp;
5016 	int mac_for_pdev;
5017 	int target_pdev_id;
5018 	QDF_STATUS status;
5019 
5020 	msg = qdf_nbuf_alloc(
5021 			soc->osdev,
5022 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
5023 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5024 
5025 	if (!msg)
5026 		return QDF_STATUS_E_NOMEM;
5027 
5028 	/* TODO: Add support for SOC stats
5029 	 * Bit 0: SOC Stats
5030 	 * Bit 1: Pdev stats for pdev id 0
5031 	 * Bit 2: Pdev stats for pdev id 1
5032 	 * Bit 3: Pdev stats for pdev id 2
5033 	 */
5034 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
5035 	target_pdev_id =
5036 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
5037 
5038 	pdev_mask = 1 << target_pdev_id;
5039 
5040 	/*
5041 	 * Set the length of the message.
5042 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5043 	 * separately during the below call to qdf_nbuf_push_head.
5044 	 * The contribution from the HTC header is added separately inside HTC.
5045 	 */
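	/*
	 * Resulting nbuf layout, as a sketch: the headroom reserved at
	 * alloc time holds the HTC header plus alignment pad; put_tail
	 * below appends the HTT message, and push_head then exposes the
	 * pad so HTC can prepend its header at send time.
	 *
	 *   [ HTC header room | align pad | HTT EXT_STATS_REQ message ]
	 */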
5046 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
5047 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5048 				"Failed to expand head for HTT_EXT_STATS");
5049 		qdf_nbuf_free(msg);
5050 		return QDF_STATUS_E_FAILURE;
5051 	}
5052 
5053 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5054 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
5055 		"config_param_1 %u\n config_param_2 %u\n"
5056 		"config_param_3 %u\n -------------",
5057 		__func__, __LINE__, cookie_val, config_param_0,
5058 		config_param_1, config_param_2, config_param_3);
5059 
5060 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
5061 
5062 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5063 	htt_logger_bufp = (uint8_t *)msg_word;
5064 	*msg_word = 0;
5065 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
5066 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
5067 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
5068 
5069 	/* word 1 */
5070 	msg_word++;
5071 	*msg_word = 0;
5072 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
5073 
5074 	/* word 2 */
5075 	msg_word++;
5076 	*msg_word = 0;
5077 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
5078 
5079 	/* word 3 */
5080 	msg_word++;
5081 	*msg_word = 0;
5082 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
5083 
5084 	/* word 4 */
5085 	msg_word++;
5086 	*msg_word = 0;
5087 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
5088 
5089 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
5090 
5091 	/* word 5 */
5092 	msg_word++;
5093 
5094 	/* word 6 */
5095 	msg_word++;
5096 	*msg_word = 0;
5097 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
5098 
5099 	/* word 7 */
5100 	msg_word++;
5101 	*msg_word = 0;
5102 	/* The last 2 bits of cookie_msb currently carry the pdev_id;
5103 	 * 3 bits of cookie_msb are reserved for the pdev_id going forward.
5104 	 */
5105 	cookie_msb = (cookie_msb | pdev->pdev_id);
5106 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
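	/*
	 * Sketch of the matching decode on the T2H completion path,
	 * where the pdev id is recovered from the echoed cookie:
	 *
	 *	pdev_id = cookie_msb & HTT_PID_BIT_MASK;
	 */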
5107 
5108 	pkt = htt_htc_pkt_alloc(soc);
5109 	if (!pkt) {
5110 		qdf_nbuf_free(msg);
5111 		return QDF_STATUS_E_NOMEM;
5112 	}
5113 
5114 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5115 
5116 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5117 			dp_htt_h2t_send_complete_free_netbuf,
5118 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
5119 			soc->htc_endpoint,
5120 			/* tag for FW response msg not guaranteed */
5121 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
5122 
5123 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5124 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
5125 				     htt_logger_bufp);
5126 
5127 	if (status != QDF_STATUS_SUCCESS) {
5128 		qdf_nbuf_free(msg);
5129 		htt_htc_pkt_free(soc, pkt);
5130 	}
5131 
5132 	return status;
5133 }
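
/*
 * Usage sketch: request pdev TX stats from the FW. The stats id is one
 * of the HTT_DBG_EXT_STATS_* values from htt.h; the zero config params
 * and cookies here are purely illustrative:
 *
 *	status = dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PDEV_TX,
 *					   0, 0, 0, 0, 0, 0, mac_id);
 */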
5134 
5135 /**
5136  * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration
5137  * HTT message to pass to FW
5138  * @pdev: DP PDEV handle
5139  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
5140  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
 * @mac_id: MAC id, used to derive the target pdev id
5141  *
5142  * tuple_mask[1:0]:
5143  *   00 - Do not report 3 tuple hash value
5144  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
5145  *   01 - Report 3 tuple hash value in flow_id_toeplitz
5146  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
5147  *
5148  * return: QDF STATUS
5149  */
5150 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
5151 				     uint32_t tuple_mask, uint8_t mac_id)
5152 {
5153 	struct htt_soc *soc = pdev->soc->htt_handle;
5154 	struct dp_htt_htc_pkt *pkt;
5155 	qdf_nbuf_t msg;
5156 	uint32_t *msg_word;
5157 	uint8_t *htt_logger_bufp;
5158 	int mac_for_pdev;
5159 	int target_pdev_id;
	QDF_STATUS status;
5160 
5161 	msg = qdf_nbuf_alloc(
5162 			soc->osdev,
5163 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
5164 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5165 
5166 	if (!msg)
5167 		return QDF_STATUS_E_NOMEM;
5168 
5169 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
5170 	target_pdev_id =
5171 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
5172 
5173 	/*
5174 	 * Set the length of the message.
5175 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5176 	 * separately during the below call to qdf_nbuf_push_head.
5177 	 * The contribution from the HTC header is added separately inside HTC.
5178 	 */
5179 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
5180 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5181 			  "Failed to expand head for HTT_3TUPLE_CONFIG");
5182 		qdf_nbuf_free(msg);
5183 		return QDF_STATUS_E_FAILURE;
5184 	}
5185 
5186 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5187 		  "config_param_sent %s:%d 0x%x for target_pdev %d\n -------------",
5188 		  __func__, __LINE__, tuple_mask, target_pdev_id);
5189 
5190 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
5191 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5192 	htt_logger_bufp = (uint8_t *)msg_word;
5193 
5194 	*msg_word = 0;
5195 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
5196 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
5197 
5198 	msg_word++;
5199 	*msg_word = 0;
5200 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
5201 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
5202 
5203 	pkt = htt_htc_pkt_alloc(soc);
5204 	if (!pkt) {
5205 		qdf_nbuf_free(msg);
5206 		return QDF_STATUS_E_NOMEM;
5207 	}
5208 
5209 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5210 
5211 	SET_HTC_PACKET_INFO_TX(
5212 			&pkt->htc_pkt,
5213 			dp_htt_h2t_send_complete_free_netbuf,
5214 			qdf_nbuf_data(msg),
5215 			qdf_nbuf_len(msg),
5216 			soc->htc_endpoint,
5217 			/* tag for no FW response msg */
5218 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
5219 
5220 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5221 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
5222 				     htt_logger_bufp);
5223 
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

5224 	return status;
5225 }
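
/*
 * Usage sketch: per the tuple_mask encoding documented above, 0x3 asks
 * the target to report the 3-tuple hash in both toeplitz_2_or_4 and
 * flow_id_toeplitz:
 *
 *	dp_h2t_3tuple_config_send(pdev, 0x3, mac_id);
 */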
5226 
5227 /* This can be reverted once htt.h provides a proper definition of
5228  * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG.
5229  */
5230 #if defined(WDI_EVENT_ENABLE)
5231 /**
5232  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
5233  * @pdev: DP PDEV handle
5234  * @stats_type_upload_mask: stats type requested by user
5235  * @mac_id: Mac id number
5236  *
5237  * return: QDF STATUS
5238  */
5239 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
5240 		uint32_t stats_type_upload_mask, uint8_t mac_id)
5241 {
5242 	struct htt_soc *soc = pdev->soc->htt_handle;
5243 	struct dp_htt_htc_pkt *pkt;
5244 	qdf_nbuf_t msg;
5245 	uint32_t *msg_word;
5246 	uint8_t pdev_mask;
5247 	QDF_STATUS status;
5248 
5249 	msg = qdf_nbuf_alloc(
5250 			soc->osdev,
5251 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
5252 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
5253 
5254 	if (!msg) {
5255 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5256 			  "Failed to allocate HTT_H2T_PPDU_STATS_CFG msg buffer");
5257 		qdf_assert(0);
5258 		return QDF_STATUS_E_NOMEM;
5259 	}
5260 
5261 	/* TODO: Add support for SOC stats
5262 	 * Bit 0: SOC Stats
5263 	 * Bit 1: Pdev stats for pdev id 0
5264 	 * Bit 2: Pdev stats for pdev id 1
5265 	 * Bit 3: Pdev stats for pdev id 2
5266 	 */
5267 	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
5268 								mac_id);
5269 
5270 	/*
5271 	 * Set the length of the message.
5272 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5273 	 * separately during the below call to qdf_nbuf_push_head.
5274 	 * The contribution from the HTC header is added separately inside HTC.
5275 	 */
5276 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
5277 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5278 				"Failed to expand head for HTT_CFG_STATS");
5279 		qdf_nbuf_free(msg);
5280 		return QDF_STATUS_E_FAILURE;
5281 	}
5282 
5283 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
5284 
5285 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5286 	*msg_word = 0;
5287 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
5288 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
5289 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
5290 			stats_type_upload_mask);
5291 
5292 	pkt = htt_htc_pkt_alloc(soc);
5293 	if (!pkt) {
5294 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5295 				"Failed to allocate dp_htt_htc_pkt buffer");
5296 		qdf_assert(0);
5297 		qdf_nbuf_free(msg);
5298 		return QDF_STATUS_E_NOMEM;
5299 	}
5300 
5301 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5302 
5303 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5304 			dp_htt_h2t_send_complete_free_netbuf,
5305 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
5306 			soc->htc_endpoint,
5307 			/* tag for no FW response msg */
5308 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
5309 
5310 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5311 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
5312 				     (uint8_t *)msg_word);
5313 
5314 	if (status != QDF_STATUS_SUCCESS) {
5315 		qdf_nbuf_free(msg);
5316 		htt_htc_pkt_free(soc, pkt);
5317 	}
5318 
5319 	return status;
5320 }
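
/*
 * Usage sketch: subscribe one mac to PPDU stats TLVs. The mask macro is
 * assumed to be one of the DP_PPDU_STATS_CFG_* bitmaps from dp_htt.h:
 *
 *	dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
 *				  pdev->pdev_id);
 */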
5321 #endif
5322 
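/**
 * dp_peer_update_inactive_time() - update peer inactive time from HTT
 * stats TLVs
 * @pdev: DP pdev handle
 * @tag_type: HTT TLV tag type
 * @tag_buf: TLV buffer
 *
 * Caches the sw peer id from the peer-details TLV, then copies the
 * FW-reported inactive time into that peer's stats and signals waiters
 * on fw_peer_stats_event.
 */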
5323 void
5324 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
5325 			     uint32_t *tag_buf)
5326 {
5327 	struct dp_peer *peer = NULL;
5328 	switch (tag_type) {
5329 	case HTT_STATS_PEER_DETAILS_TAG:
5330 	{
5331 		htt_peer_details_tlv *dp_stats_buf =
5332 			(htt_peer_details_tlv *)tag_buf;
5333 
5334 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
5335 	}
5336 	break;
5337 	case HTT_STATS_PEER_STATS_CMN_TAG:
5338 	{
5339 		htt_peer_stats_cmn_tlv *dp_stats_buf =
5340 			(htt_peer_stats_cmn_tlv *)tag_buf;
5341 
5342 		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
5343 					     DP_MOD_ID_HTT);
5344 
5345 		if (peer && !peer->bss_peer) {
5346 			peer->stats.tx.inactive_time =
5347 				dp_stats_buf->inactive_time;
5348 			qdf_event_set(&pdev->fw_peer_stats_event);
5349 		}
5350 		if (peer)
5351 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5352 	}
5353 	break;
5354 	default:
5355 		qdf_err("Invalid tag_type");
5356 	}
5357 }
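
/*
 * Consumer sketch for the event signalled above: the stats request path
 * typically resets fw_peer_stats_event before sending the HTT request,
 * then blocks on it (the timeout value is illustrative):
 *
 *	qdf_event_reset(&pdev->fw_peer_stats_event);
 *	... issue the HTT peer stats request ...
 *	qdf_wait_single_event(&pdev->fw_peer_stats_event, 100);
 */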
5358 
5359 /**
5360  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
5361  * @pdev: DP pdev handle
5362  * @fse_setup_info: FST setup parameters
5363  *
5364  * Return: Success when HTT message is sent, error on failure
5365  */
5366 QDF_STATUS
5367 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
5368 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
5369 {
5370 	struct htt_soc *soc = pdev->soc->htt_handle;
5371 	struct dp_htt_htc_pkt *pkt;
5372 	qdf_nbuf_t msg;
5373 	u_int32_t *msg_word;
5374 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
5375 	uint8_t *htt_logger_bufp;
5376 	u_int32_t *key;
5377 	QDF_STATUS status;
5378 
5379 	msg = qdf_nbuf_alloc(
5380 		soc->osdev,
5381 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
5382 		/* reserve room for the HTC header */
5383 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5384 
5385 	if (!msg)
5386 		return QDF_STATUS_E_NOMEM;
5387 
5388 	/*
5389 	 * Set the length of the message.
5390 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5391 	 * separately during the below call to qdf_nbuf_push_head.
5392 	 * The contribution from the HTC header is added separately inside HTC.
5393 	 */
5394 	if (!qdf_nbuf_put_tail(msg,
5395 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
5396 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
		qdf_nbuf_free(msg);
5397 		return QDF_STATUS_E_FAILURE;
5398 	}
5399 
5400 	/* fill in the message contents */
5401 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5402 
5403 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
5404 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5405 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5406 	htt_logger_bufp = (uint8_t *)msg_word;
5407 
5408 	*msg_word = 0;
5409 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
5410 
5411 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
5412 
5413 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
5414 
5415 	msg_word++;
5416 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
5417 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
5418 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
5419 					     fse_setup_info->ip_da_sa_prefix);
5420 
5421 	msg_word++;
5422 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
5423 					  fse_setup_info->base_addr_lo);
5424 	msg_word++;
5425 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
5426 					  fse_setup_info->base_addr_hi);
5427 
5428 	key = (u_int32_t *)fse_setup_info->hash_key;
5429 	fse_setup->toeplitz31_0 = *key++;
5430 	fse_setup->toeplitz63_32 = *key++;
5431 	fse_setup->toeplitz95_64 = *key++;
5432 	fse_setup->toeplitz127_96 = *key++;
5433 	fse_setup->toeplitz159_128 = *key++;
5434 	fse_setup->toeplitz191_160 = *key++;
5435 	fse_setup->toeplitz223_192 = *key++;
5436 	fse_setup->toeplitz255_224 = *key++;
5437 	fse_setup->toeplitz287_256 = *key++;
5438 	fse_setup->toeplitz314_288 = *key;
5439 
5440 	msg_word++;
5441 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
5442 	msg_word++;
5443 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
5444 	msg_word++;
5445 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
5446 	msg_word++;
5447 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
5448 	msg_word++;
5449 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
5450 	msg_word++;
5451 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
5452 	msg_word++;
5453 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
5454 	msg_word++;
5455 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
5456 	msg_word++;
5457 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
5458 	msg_word++;
5459 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
5460 					  fse_setup->toeplitz314_288);
5461 
5462 	pkt = htt_htc_pkt_alloc(soc);
5463 	if (!pkt) {
5464 		qdf_err("Failed to allocate dp_htt_htc_pkt buffer");
5465 		qdf_assert(0);
5466 		qdf_nbuf_free(msg);
5467 		return QDF_STATUS_E_RESOURCES; /* failure */
5468 	}
5469 
5470 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5471 
5472 	SET_HTC_PACKET_INFO_TX(
5473 		&pkt->htc_pkt,
5474 		dp_htt_h2t_send_complete_free_netbuf,
5475 		qdf_nbuf_data(msg),
5476 		qdf_nbuf_len(msg),
5477 		soc->htc_endpoint,
5478 		/* tag for no FW response msg */
5479 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
5480 
5481 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5482 
5483 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
5484 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
5485 				     htt_logger_bufp);
5486 
5487 	if (status == QDF_STATUS_SUCCESS) {
5488 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
5489 			fse_setup_info->pdev_id);
5490 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
5491 				   (void *)fse_setup_info->hash_key,
5492 				   fse_setup_info->hash_key_len);
5493 	} else {
5494 		qdf_nbuf_free(msg);
5495 		htt_htc_pkt_free(soc, pkt);
5496 	}
5497 
5498 	return status;
5499 }
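
/*
 * Usage sketch: the FST attach path fills the setup params from its
 * table geometry; the right-hand sides below are illustrative
 * placeholders, not actual dp_rx_fst field names:
 *
 *	struct dp_htt_rx_flow_fst_setup info = {0};
 *
 *	info.pdev_id = pdev->pdev_id;
 *	info.max_entries = num_entries;
 *	info.max_search = max_skid_length;
 *	info.base_addr_lo = fst_base_paddr & 0xffffffff;
 *	info.base_addr_hi = fst_base_paddr >> 32;
 *	info.hash_key = hash_key;
 *	info.hash_key_len = hash_key_len;
 *	dp_htt_rx_flow_fst_setup(pdev, &info);
 */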
5500 
5501 /**
5502  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
5503  * add/del a flow in HW
5504  * @pdev: DP pdev handle
5505  * @fse_op_info: Flow entry parameters
5506  *
5507  * Return: Success when HTT message is sent, error on failure
5508  */
5509 QDF_STATUS
5510 dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
5511 			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
5512 {
5513 	struct htt_soc *soc = pdev->soc->htt_handle;
5514 	struct dp_htt_htc_pkt *pkt;
5515 	qdf_nbuf_t msg;
5516 	u_int32_t *msg_word;
5517 	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
5518 	uint8_t *htt_logger_bufp;
5519 	QDF_STATUS status;
5520 
5521 	msg = qdf_nbuf_alloc(
5522 		soc->osdev,
5523 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
5524 		/* reserve room for the HTC header */
5525 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5526 	if (!msg)
5527 		return QDF_STATUS_E_NOMEM;
5528 
5529 	/*
5530 	 * Set the length of the message.
5531 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5532 	 * separately during the below call to qdf_nbuf_push_head.
5533 	 * The contribution from the HTC header is added separately inside HTC.
5534 	 */
5535 	if (!qdf_nbuf_put_tail(msg,
5536 			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
5537 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
5538 		qdf_nbuf_free(msg);
5539 		return QDF_STATUS_E_FAILURE;
5540 	}
5541 
5542 	/* fill in the message contents */
5543 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5544 
5545 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
5546 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5547 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5548 	htt_logger_bufp = (uint8_t *)msg_word;
5549 
5550 	*msg_word = 0;
5551 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
5552 
5553 	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
5554 
5555 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
5556 	msg_word++;
5557 	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
5558 	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
5559 		HTT_RX_FSE_OPERATION_SET(*msg_word,
5560 					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
5561 		msg_word++;
5562 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5563 		*msg_word,
5564 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
5565 		msg_word++;
5566 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5567 		*msg_word,
5568 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
5569 		msg_word++;
5570 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5571 		*msg_word,
5572 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
5573 		msg_word++;
5574 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5575 		*msg_word,
5576 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
5577 		msg_word++;
5578 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5579 		*msg_word,
5580 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
5581 		msg_word++;
5582 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5583 		*msg_word,
5584 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
5585 		msg_word++;
5586 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5587 		*msg_word,
5588 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
5589 		msg_word++;
5590 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5591 		*msg_word,
5592 		qdf_htonl(
5593 		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
5594 		msg_word++;
5595 		HTT_RX_FSE_SOURCEPORT_SET(
5596 			*msg_word,
5597 			fse_op_info->rx_flow->flow_tuple_info.src_port);
5598 		HTT_RX_FSE_DESTPORT_SET(
5599 			*msg_word,
5600 			fse_op_info->rx_flow->flow_tuple_info.dest_port);
5601 		msg_word++;
5602 		HTT_RX_FSE_L4_PROTO_SET(
5603 			*msg_word,
5604 			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
5605 	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
5606 		HTT_RX_FSE_OPERATION_SET(*msg_word,
5607 					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
5608 	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
5609 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
5610 	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
5611 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
5612 	}
5613 
5614 	pkt = htt_htc_pkt_alloc(soc);
5615 	if (!pkt) {
5616 		qdf_err("Failed to allocate dp_htt_htc_pkt buffer");
5617 		qdf_assert(0);
5618 		qdf_nbuf_free(msg);
5619 		return QDF_STATUS_E_RESOURCES; /* failure */
5620 	}
5621 
5622 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5623 
5624 	SET_HTC_PACKET_INFO_TX(
5625 		&pkt->htc_pkt,
5626 		dp_htt_h2t_send_complete_free_netbuf,
5627 		qdf_nbuf_data(msg),
5628 		qdf_nbuf_len(msg),
5629 		soc->htc_endpoint,
5630 		/* tag for no FW response msg */
5631 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
5632 
5633 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5634 
5635 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
5636 				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
5637 				     htt_logger_bufp);
5638 
5639 	if (status == QDF_STATUS_SUCCESS) {
5640 		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
5641 			fse_op_info->pdev_id);
5642 	} else {
5643 		qdf_nbuf_free(msg);
5644 		htt_htc_pkt_free(soc, pkt);
5645 	}
5646 
5647 	return status;
5648 }
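
/*
 * Usage sketch: a full rx FSE cache invalidate needs only the op code
 * and pdev id; no flow tuple is required:
 *
 *	struct dp_htt_rx_flow_fst_operation op = {0};
 *
 *	op.pdev_id = pdev->pdev_id;
 *	op.op_code = DP_HTT_FST_CACHE_INVALIDATE_FULL;
 *	dp_htt_rx_flow_fse_operation(pdev, &op);
 */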
5649 
5650 /**
5651  * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
5652  * @pdev: DP pdev handle
5653  * @fisa_config: FISA configuration parameters
5654  *
5655  * Return: Success when HTT message is sent, error on failure
5656  */
5657 QDF_STATUS
5658 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
5659 		      struct dp_htt_rx_fisa_cfg *fisa_config)
5660 {
5661 	struct htt_soc *soc = pdev->soc->htt_handle;
5662 	struct dp_htt_htc_pkt *pkt;
5663 	qdf_nbuf_t msg;
5664 	u_int32_t *msg_word;
5665 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
5666 	uint8_t *htt_logger_bufp;
5667 	uint32_t len;
5668 	QDF_STATUS status;
5669 
5670 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
5671 
5672 	msg = qdf_nbuf_alloc(soc->osdev,
5673 			     len,
5674 			     /* reserve room for the HTC header */
5675 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
5676 			     4,
5677 			     TRUE);
5678 	if (!msg)
5679 		return QDF_STATUS_E_NOMEM;
5680 
5681 	/*
5682 	 * Set the length of the message.
5683 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5684 	 * separately during the below call to qdf_nbuf_push_head.
5685 	 * The contribution from the HTC header is added separately inside HTC.
5686 	 */
5687 	if (!qdf_nbuf_put_tail(msg,
5688 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
5689 		qdf_err("Failed to expand head for HTT_RX_FISA_CFG msg");
5690 		qdf_nbuf_free(msg);
5691 		return QDF_STATUS_E_FAILURE;
5692 	}
5693 
5694 	/* fill in the message contents */
5695 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5696 
5697 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
5698 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5699 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5700 	htt_logger_bufp = (uint8_t *)msg_word;
5701 
5702 	*msg_word = 0;
5703 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
5704 
5705 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
5706 
5707 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fisa_config->pdev_id);
5708 
5709 	msg_word++;
5710 	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
5711 	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);
5712 
5713 	msg_word++;
5714 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
5715 
5716 	pkt = htt_htc_pkt_alloc(soc);
5717 	if (!pkt) {
5718 		qdf_err("Failed to allocate dp_htt_htc_pkt buffer");
5719 		qdf_assert(0);
5720 		qdf_nbuf_free(msg);
5721 		return QDF_STATUS_E_RESOURCES; /* failure */
5722 	}
5723 
5724 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5725 
5726 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5727 			       dp_htt_h2t_send_complete_free_netbuf,
5728 			       qdf_nbuf_data(msg),
5729 			       qdf_nbuf_len(msg),
5730 			       soc->htc_endpoint,
5731 			       /* tag for no FW response msg */
5732 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
5733 
5734 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5735 
5736 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
5737 				     htt_logger_bufp);
5738 
5739 	if (status == QDF_STATUS_SUCCESS) {
5740 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
5741 			fisa_config->pdev_id);
5742 	} else {
5743 		qdf_nbuf_free(msg);
5744 		htt_htc_pkt_free(soc, pkt);
5745 	}
5746 
5747 	return status;
5748 }
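
/*
 * Usage sketch: FISA is enabled per pdev with an aggregation timeout
 * threshold (the value and its unit here are illustrative):
 *
 *	struct dp_htt_rx_fisa_cfg cfg;
 *
 *	cfg.pdev_id = pdev->pdev_id;
 *	cfg.fisa_timeout = 1000;
 *	dp_htt_rx_fisa_config(pdev, &cfg);
 */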
5749