xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 878d42c770e8f4f39f616b20412de44faeced7b9)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_rx.h"
26 #include "dp_rx_mon.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "dp_htt.h"
30 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
31 #include "cdp_txrx_cmn_struct.h"
32 
33 #ifdef FEATURE_PERPKT_INFO
34 #include "dp_ratetable.h"
35 #endif
36 
37 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
38 
39 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
40 
41 #define HTT_MSG_BUF_SIZE(msg_bytes) \
42 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
43 
44 #define HTT_PID_BIT_MASK 0x3
45 
46 #define DP_EXT_MSG_LENGTH 2048
47 #define HTT_HEADER_LEN 16
48 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
49 
50 #define HTT_SHIFT_UPPER_TIMESTAMP 32
51 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
52 
53 /*
54  * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
55  * bitmap for sniffer mode
56  * @bitmap: received bitmap
57  *
58  * Return: expected bitmap value, returns zero if doesn't match with
59  * either 64-bit Tx window or 256-bit window tlv bitmap
60  */
61 int
62 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
63 {
64 	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
65 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
66 	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
67 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
68 
69 	return 0;
70 }
71 
72 #ifdef FEATURE_PERPKT_INFO
73 /*
74  * dp_peer_find_by_id_valid - check if peer exists for given id
75  * @soc: core DP soc context
76  * @peer_id: peer id from peer object can be retrieved
77  *
78  * Return: true if peer exists of false otherwise
79  */
80 
81 static
82 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
83 {
84 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
85 						     DP_MOD_ID_HTT);
86 
87 	if (peer) {
88 		/*
89 		 * Decrement the peer ref which is taken as part of
90 		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
91 		 */
92 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
93 
94 		return true;
95 	}
96 
97 	return false;
98 }
99 
100 /*
101  * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
102  * @peer: Datapath peer handle
103  * @ppdu: User PPDU Descriptor
104  * @cur_ppdu_id: PPDU_ID
105  *
106  * Return: None
107  *
108  * on Tx data frame, we may get delayed ba set
109  * in htt_ppdu_stats_user_common_tlv. which mean we get Block Ack(BA) after we
110  * request Block Ack Request(BAR). Successful msdu is received only after Block
111  * Ack. To populate peer stats we need successful msdu(data frame).
112  * So we hold the Tx data stats on delayed_ba for stats update.
113  */
114 static void
115 dp_peer_copy_delay_stats(struct dp_peer *peer,
116 			 struct cdp_tx_completion_ppdu_user *ppdu,
117 			 uint32_t cur_ppdu_id)
118 {
119 	struct dp_pdev *pdev;
120 	struct dp_vdev *vdev;
121 
122 	if (peer->last_delayed_ba) {
123 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
124 			  "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]",
125 			  peer->last_delayed_ba_ppduid, cur_ppdu_id);
126 		vdev = peer->vdev;
127 		if (vdev) {
128 			pdev = vdev->pdev;
129 			pdev->stats.cdp_delayed_ba_not_recev++;
130 		}
131 	}
132 
133 	peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
134 	peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
135 	peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
136 	peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
137 	peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
138 	peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
139 	peer->delayed_ba_ppdu_stats.preamble = ppdu->preamble;
140 	peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
141 	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
142 	peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
143 	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
144 	peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = ppdu->mpdu_tried_ucast;
145 	peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = ppdu->mpdu_tried_mcast;
146 	peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
147 	peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
148 	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
149 
150 	peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
151 	peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
152 	peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
153 
154 	peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
155 	peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
156 	peer->delayed_ba_ppdu_stats.mcs = ppdu->mcs;
157 
158 	peer->last_delayed_ba = true;
159 
160 	ppdu->debug_copied = true;
161 }
162 
163 /*
164  * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
165  * @peer: Datapath peer handle
166  * @ppdu: PPDU Descriptor
167  *
168  * Return: None
169  *
170  * For Tx BAR, PPDU stats TLV include Block Ack info. PPDU info
171  * from Tx BAR frame not required to populate peer stats.
172  * But we need successful MPDU and MSDU to update previous
173  * transmitted Tx data frame. Overwrite ppdu stats with the previous
174  * stored ppdu stats.
175  */
176 static void
177 dp_peer_copy_stats_to_bar(struct dp_peer *peer,
178 			  struct cdp_tx_completion_ppdu_user *ppdu)
179 {
180 	ppdu->ltf_size = peer->delayed_ba_ppdu_stats.ltf_size;
181 	ppdu->stbc = peer->delayed_ba_ppdu_stats.stbc;
182 	ppdu->he_re = peer->delayed_ba_ppdu_stats.he_re;
183 	ppdu->txbf = peer->delayed_ba_ppdu_stats.txbf;
184 	ppdu->bw = peer->delayed_ba_ppdu_stats.bw;
185 	ppdu->nss = peer->delayed_ba_ppdu_stats.nss;
186 	ppdu->preamble = peer->delayed_ba_ppdu_stats.preamble;
187 	ppdu->gi = peer->delayed_ba_ppdu_stats.gi;
188 	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
189 	ppdu->ldpc = peer->delayed_ba_ppdu_stats.ldpc;
190 	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
191 	ppdu->mpdu_tried_ucast = peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
192 	ppdu->mpdu_tried_mcast = peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
193 	ppdu->frame_ctrl = peer->delayed_ba_ppdu_stats.frame_ctrl;
194 	ppdu->qos_ctrl = peer->delayed_ba_ppdu_stats.qos_ctrl;
195 	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
196 
197 	ppdu->ru_start = peer->delayed_ba_ppdu_stats.ru_start;
198 	ppdu->ru_tones = peer->delayed_ba_ppdu_stats.ru_tones;
199 	ppdu->is_mcast = peer->delayed_ba_ppdu_stats.is_mcast;
200 
201 	ppdu->user_pos = peer->delayed_ba_ppdu_stats.user_pos;
202 	ppdu->mu_group_id = peer->delayed_ba_ppdu_stats.mu_group_id;
203 	ppdu->mcs = peer->delayed_ba_ppdu_stats.mcs;
204 
205 	peer->last_delayed_ba = false;
206 
207 	ppdu->debug_copied = true;
208 }
209 
210 /*
211  * dp_tx_rate_stats_update() - Update rate per-peer statistics
212  * @peer: Datapath peer handle
213  * @ppdu: PPDU Descriptor
214  *
215  * Return: None
216  */
217 static void
218 dp_tx_rate_stats_update(struct dp_peer *peer,
219 			struct cdp_tx_completion_ppdu_user *ppdu)
220 {
221 	uint32_t ratekbps = 0;
222 	uint64_t ppdu_tx_rate = 0;
223 	uint32_t rix;
224 	uint16_t ratecode = 0;
225 
226 	if (!peer || !ppdu)
227 		return;
228 
229 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
230 		return;
231 
232 	ratekbps = dp_getrateindex(ppdu->gi,
233 				   ppdu->mcs,
234 				   ppdu->nss,
235 				   ppdu->preamble,
236 				   ppdu->bw,
237 				   &rix,
238 				   &ratecode);
239 
240 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
241 
242 	if (!ratekbps)
243 		return;
244 
245 	/* Calculate goodput in non-training period
246 	 * In training period, don't do anything as
247 	 * pending pkt is send as goodput.
248 	 */
249 	if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
250 		ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
251 				(CDP_PERCENT_MACRO - ppdu->current_rate_per));
252 	}
253 	ppdu->rix = rix;
254 	ppdu->tx_ratekbps = ratekbps;
255 	ppdu->tx_ratecode = ratecode;
256 	peer->stats.tx.avg_tx_rate =
257 		dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
258 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
259 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
260 
261 	if (peer->vdev) {
262 		/*
263 		 * In STA mode:
264 		 *	We get ucast stats as BSS peer stats.
265 		 *
266 		 * In AP mode:
267 		 *	We get mcast stats as BSS peer stats.
268 		 *	We get ucast stats as assoc peer stats.
269 		 */
270 		if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
271 			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
272 			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
273 		} else {
274 			peer->vdev->stats.tx.last_tx_rate = ratekbps;
275 			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
276 		}
277 	}
278 }
279 
280 /*
281  * dp_tx_stats_update() - Update per-peer statistics
282  * @pdev: Datapath pdev handle
283  * @peer: Datapath peer handle
284  * @ppdu: PPDU Descriptor
285  * @ack_rssi: RSSI of last ack received
286  *
287  * Return: None
288  */
289 static void
290 dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
291 		   struct cdp_tx_completion_ppdu_user *ppdu,
292 		   uint32_t ack_rssi)
293 {
294 	uint8_t preamble, mcs;
295 	uint16_t num_msdu;
296 	uint16_t num_mpdu;
297 	uint16_t mpdu_tried;
298 	uint16_t mpdu_failed;
299 
300 	preamble = ppdu->preamble;
301 	mcs = ppdu->mcs;
302 	num_msdu = ppdu->num_msdu;
303 	num_mpdu = ppdu->mpdu_success;
304 	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
305 	mpdu_failed = mpdu_tried - num_mpdu;
306 
307 	/* If the peer statistics are already processed as part of
308 	 * per-MSDU completion handler, do not process these again in per-PPDU
309 	 * indications */
310 	if (pdev->soc->process_tx_status)
311 		return;
312 
313 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
314 		/*
315 		 * All failed mpdu will be retried, so incrementing
316 		 * retries mpdu based on mpdu failed. Even for
317 		 * ack failure i.e for long retries we get
318 		 * mpdu failed equal mpdu tried.
319 		 */
320 		DP_STATS_INC(peer, tx.retries, mpdu_failed);
321 		DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
322 		return;
323 	}
324 
325 	if (ppdu->is_ppdu_cookie_valid)
326 		DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1);
327 
328 	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
329 	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
330 		if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
331 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
332 				  "mu_group_id out of bound!!\n");
333 		else
334 			DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
335 				     (ppdu->user_pos + 1));
336 	}
337 
338 	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
339 	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
340 		DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
341 		DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
342 		switch (ppdu->ru_tones) {
343 		case RU_26:
344 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu,
345 				     num_msdu);
346 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu,
347 				     num_mpdu);
348 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried,
349 				     mpdu_tried);
350 		break;
351 		case RU_52:
352 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu,
353 				     num_msdu);
354 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu,
355 				     num_mpdu);
356 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried,
357 				     mpdu_tried);
358 		break;
359 		case RU_106:
360 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu,
361 				     num_msdu);
362 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu,
363 				     num_mpdu);
364 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried,
365 				     mpdu_tried);
366 		break;
367 		case RU_242:
368 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu,
369 				     num_msdu);
370 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu,
371 				     num_mpdu);
372 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried,
373 				     mpdu_tried);
374 		break;
375 		case RU_484:
376 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu,
377 				     num_msdu);
378 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu,
379 				     num_mpdu);
380 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried,
381 				     mpdu_tried);
382 		break;
383 		case RU_996:
384 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu,
385 				     num_msdu);
386 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu,
387 				     num_mpdu);
388 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried,
389 				     mpdu_tried);
390 		break;
391 		}
392 	}
393 
394 	/*
395 	 * All failed mpdu will be retried, so incrementing
396 	 * retries mpdu based on mpdu failed. Even for
397 	 * ack failure i.e for long retries we get
398 	 * mpdu failed equal mpdu tried.
399 	 */
400 	DP_STATS_INC(peer, tx.retries, mpdu_failed);
401 	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
402 
403 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
404 		     num_msdu);
405 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
406 		     num_mpdu);
407 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
408 		     mpdu_tried);
409 
410 	DP_STATS_INC_PKT(peer, tx.comp_pkt,
411 			num_msdu, (ppdu->success_bytes +
412 				ppdu->retry_bytes + ppdu->failed_bytes));
413 	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
414 	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
415 	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
416 	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
417 	if (ppdu->tid < CDP_DATA_TID_MAX)
418 		DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
419 			     num_msdu);
420 	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
421 	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
422 	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
423 		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);
424 
425 	DP_STATS_INCC(peer,
426 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
427 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
428 	DP_STATS_INCC(peer,
429 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
430 			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
431 	DP_STATS_INCC(peer,
432 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
433 			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
434 	DP_STATS_INCC(peer,
435 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
436 			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
437 	DP_STATS_INCC(peer,
438 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
439 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
440 	DP_STATS_INCC(peer,
441 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
442 			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
443 	DP_STATS_INCC(peer,
444 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
445 			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
446 	DP_STATS_INCC(peer,
447 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
448 			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
449 	DP_STATS_INCC(peer,
450 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
451 			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
452 	DP_STATS_INCC(peer,
453 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
454 			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
455 	DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
456 	DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
457 	DP_STATS_INCC(peer, tx.pream_punct_cnt, 1, ppdu->pream_punct);
458 
459 	dp_peer_stats_notify(pdev, peer);
460 
461 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
462 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
463 			     &peer->stats, ppdu->peer_id,
464 			     UPDATE_PEER_STATS, pdev->pdev_id);
465 #endif
466 }
467 #endif
468 
/*
 * dp_rx_populate_cbf_hdr() - Prepend a synthetic PPDU_STATS_IND header and
 * RX_MGMTCTRL_PAYLOAD TLV to an mpdu and deliver it via WDI
 * @soc: core DP soc context
 * @mac_id: LMAC id used to look up the pdev
 * @event: WDI event id to deliver the frame on
 * @mpdu: network buffer carrying the payload; headroom must be large enough
 *        for the pushed header (pushed before and pulled after delivery,
 *        leaving the buffer as it was on entry)
 * @msdu_timestamp: timestamp written verbatim into the third header word
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL when no pdev exists
 * for @mac_id
 */
QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc,
				  uint32_t mac_id,
				  uint32_t event,
				  qdf_nbuf_t mpdu,
				  uint32_t msdu_timestamp)
{
	uint32_t data_size, hdr_size, ppdu_id, align4byte;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t *msg_word;

	if (!pdev)
		return QDF_STATUS_E_INVAL;

	/* ppdu_id of the monitor-status PPDU currently tracked on the pdev */
	ppdu_id = pdev->ppdu_info.com_info.ppdu_id;

	/* header = fixed PPDU_STATS_IND header + TLV header up to payload */
	hdr_size = HTT_T2H_PPDU_STATS_IND_HDR_SIZE
		+ qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload);

	data_size = qdf_nbuf_len(mpdu);

	/* make room in the headroom; msg_word then points at the new header */
	qdf_nbuf_push_head(mpdu, hdr_size);

	msg_word = (uint32_t *)qdf_nbuf_data(mpdu);
	/*
	 * Populate the PPDU Stats Indication header
	 */
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_T2H_MSG_TYPE_PPDU_STATS_IND);
	HTT_T2H_PPDU_STATS_MAC_ID_SET(*msg_word, mac_id);
	HTT_T2H_PPDU_STATS_PDEV_ID_SET(*msg_word, pdev->pdev_id);
	/* payload size rounded up to a 4-byte multiple */
	align4byte = ((data_size +
		qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
		+ 3) >> 2) << 2;
	HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_SET(*msg_word, align4byte);
	msg_word++;
	HTT_T2H_PPDU_STATS_PPDU_ID_SET(*msg_word, ppdu_id);
	msg_word++;

	*msg_word = msdu_timestamp;
	msg_word++;
	/* Skip reserved field */
	msg_word++;
	/*
	 * Populate MGMT_CTRL Payload TLV first
	 */
	HTT_STATS_TLV_TAG_SET(*msg_word,
			      HTT_PPDU_STATS_RX_MGMTCTRL_PAYLOAD_TLV);

	/* TLV length excludes the TLV header itself, 4-byte aligned */
	align4byte = ((data_size - sizeof(htt_tlv_hdr_t) +
		qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
		+ 3) >> 2) << 2;
	HTT_STATS_TLV_LENGTH_SET(*msg_word, align4byte);
	msg_word++;

	HTT_PPDU_STATS_RX_MGMTCTRL_TLV_FRAME_LENGTH_SET(
		*msg_word, data_size);
	msg_word++;

	/* hand the header+payload to WDI subscribers on @event */
	dp_wdi_event_handler(event, soc, (void *)mpdu,
			     HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);

	/* restore the nbuf to its original state for the caller */
	qdf_nbuf_pull_head(mpdu, hdr_size);

	return QDF_STATUS_SUCCESS;
}
533 
#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#else
/*
 * dp_process_ppdu_stats_update_failed_bitmap() - no-op stub used when
 * WLAN_TX_PKT_CAPTURE_ENH is disabled; the real implementation comes
 * from dp_tx_capture.h when the feature is compiled in.
 */
static inline void
dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
					   void *data,
					   uint32_t ppdu_id,
					   uint32_t size)
{
}
#endif
545 
546 /*
547  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
548  * @htt_soc:	HTT SOC handle
549  *
550  * Return: Pointer to htc packet buffer
551  */
552 static struct dp_htt_htc_pkt *
553 htt_htc_pkt_alloc(struct htt_soc *soc)
554 {
555 	struct dp_htt_htc_pkt_union *pkt = NULL;
556 
557 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
558 	if (soc->htt_htc_pkt_freelist) {
559 		pkt = soc->htt_htc_pkt_freelist;
560 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
561 	}
562 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
563 
564 	if (!pkt)
565 		pkt = qdf_mem_malloc(sizeof(*pkt));
566 
567 	if (!pkt)
568 		return NULL;
569 
570 	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
571 
572 	return &pkt->u.pkt; /* not actually a dereference */
573 }
574 
575 /*
576  * htt_htc_pkt_free() - Free HTC packet buffer
577  * @htt_soc:	HTT SOC handle
578  */
579 static void
580 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
581 {
582 	struct dp_htt_htc_pkt_union *u_pkt =
583 		(struct dp_htt_htc_pkt_union *)pkt;
584 
585 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
586 	htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
587 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
588 	soc->htt_htc_pkt_freelist = u_pkt;
589 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
590 }
591 
592 /*
593  * htt_htc_pkt_pool_free() - Free HTC packet pool
594  * @htt_soc:	HTT SOC handle
595  */
596 void
597 htt_htc_pkt_pool_free(struct htt_soc *soc)
598 {
599 	struct dp_htt_htc_pkt_union *pkt, *next;
600 	pkt = soc->htt_htc_pkt_freelist;
601 	while (pkt) {
602 		next = pkt->u.next;
603 		qdf_mem_free(pkt);
604 		pkt = next;
605 	}
606 	soc->htt_htc_pkt_freelist = NULL;
607 }
608 
/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no. of pkts to keep in the list
 *
 * Walks the misclist and, once more than @level nodes have been seen,
 * frees every remaining node (its network buffer is unmapped and freed
 * too) and terminates the list at the last kept node. Runs under the
 * tx mutex.
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			/* detach the freed tail from the last kept node;
			 * prev is non-NULL only on the first trimmed node,
			 * so the list is terminated exactly once
			 */
			if (prev)
				prev->u.next = NULL;
		}
		/* prev becomes NULL after the first trim, keeping the
		 * termination above a one-shot operation
		 */
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
641 
642 /*
643  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
644  * @htt_soc:	HTT SOC handle
645  * @dp_htt_htc_pkt: pkt to be added to list
646  */
647 static void
648 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
649 {
650 	struct dp_htt_htc_pkt_union *u_pkt =
651 				(struct dp_htt_htc_pkt_union *)pkt;
652 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
653 							pkt->htc_pkt.Endpoint)
654 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
655 
656 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
657 	if (soc->htt_htc_pkt_misclist) {
658 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
659 		soc->htt_htc_pkt_misclist = u_pkt;
660 	} else {
661 		soc->htt_htc_pkt_misclist = u_pkt;
662 	}
663 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
664 
665 	/* only ce pipe size + tx_queue_depth could possibly be in use
666 	 * free older packets in the misclist
667 	 */
668 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
669 }
670 
671 /**
672  * DP_HTT_SEND_HTC_PKT() - Send htt packet from host
673  * @soc : HTT SOC handle
674  * @pkt: pkt to be send
675  * @cmd : command to be recorded in dp htt logger
676  * @buf : Pointer to buffer needs to be recored for above cmd
677  *
678  * Return: None
679  */
680 static inline QDF_STATUS DP_HTT_SEND_HTC_PKT(struct htt_soc *soc,
681 					     struct dp_htt_htc_pkt *pkt,
682 					     uint8_t cmd, uint8_t *buf)
683 {
684 	QDF_STATUS status;
685 
686 	htt_command_record(soc->htt_logger_handle, cmd, buf);
687 
688 	status = htc_send_pkt(soc->htc_soc, &pkt->htc_pkt);
689 	if (status == QDF_STATUS_SUCCESS)
690 		htt_htc_misc_pkt_list_add(soc, pkt);
691 	else
692 		soc->stats.fail_count++;
693 	return status;
694 }
695 
696 /*
697  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
698  * @htt_soc:	HTT SOC handle
699  */
700 static void
701 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
702 {
703 	struct dp_htt_htc_pkt_union *pkt, *next;
704 	qdf_nbuf_t netbuf;
705 
706 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
707 	pkt = soc->htt_htc_pkt_misclist;
708 
709 	while (pkt) {
710 		next = pkt->u.next;
711 		if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
712 		    HTC_PACKET_MAGIC_COOKIE) {
713 			pkt = next;
714 			soc->stats.skip_count++;
715 			continue;
716 		}
717 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
718 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
719 
720 		soc->stats.htc_pkt_free++;
721 		dp_htt_info("%pK: Pkt free count %d",
722 			    soc->dp_soc, soc->stats.htc_pkt_free);
723 
724 		qdf_nbuf_free(netbuf);
725 		qdf_mem_free(pkt);
726 		pkt = next;
727 	}
728 	soc->htt_htc_pkt_misclist = NULL;
729 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
730 	dp_info("HTC Packets, fail count = %d, skip count = %d",
731 		soc->stats.fail_count, soc->stats.skip_count);
732 }
733 
/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
 * @tgt_mac_addr:	Target MAC as laid out in the message buffer
 * @buffer:		Output buffer (used only on big-endian hosts)
 *
 * Return: pointer to the deswizzled MAC address (either @buffer or
 * @tgt_mac_addr itself when no swizzling is needed)
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness.
	 * To make u_int32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each u_int32_t element of the
	 * message.
	 * For byte-array message fields like the MAC address, this
	 * upload swizzling puts the bytes in the wrong order, and needs
	 * to be undone: reverse the 4 bytes of the first word, then the
	 * first 2 bytes of the second word.
	 */
	int i;

	for (i = 0; i < 4; i++)
		buffer[i] = tgt_mac_addr[3 - i];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
767 
/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc:	SOC handle (unused)
 * @status:	Completion status (unused; the buffer is freed regardless)
 * @netbuf:	HTT buffer to release
 *
 * Send-done callback that simply frees the message buffer.
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
780 
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context:	Opaque context (HTT SOC handle)
 * @htc_pkt:	completed HTC packet
 *
 * Invokes the per-message part-2 completion callback (stored in
 * pPktContext at send time) with the message netbuf, then returns the
 * dp_htt_htc_pkt wrapper to the freelist.
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*send_complete_part2)(
		void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	/* optional second-stage callback registered by the sender */
	send_complete_part2 = htc_pkt->pPktContext;

	/* recover the wrapper that embeds this HTC_PACKET */
	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2) {
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}
812 
/*
 * htt_h2t_ver_req_msg() - Send HTT version request message to target
 * @soc:	HTT SOC handle
 *
 * Allocates a message buffer, writes the single-word VERSION_REQ
 * message, wraps it in an HTC packet and sends it. On any failure the
 * message buffer (and packet wrapper, if allocated) is released.
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS error code on
 * failure (note the declared return type is int but QDF_STATUS values
 * are returned throughout)
 */
static int htt_h2t_ver_req_msg(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
			__func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *) qdf_nbuf_data(msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}
	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
		qdf_nbuf_len(msg), soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RTPM_PUT_RC);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
				     NULL);

	/* on send failure the completion callback will not run; clean up */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
879 
880 /*
881  * htt_srng_setup() - Send SRNG setup message to target
882  * @htt_soc:	HTT SOC handle
883  * @mac_id:	MAC Id
884  * @hal_srng:	Opaque HAL SRNG pointer
885  * @hal_ring_type:	SRNG ring type
886  *
887  * Return: 0 on success; error code on failure
888  */
889 int htt_srng_setup(struct htt_soc *soc, int mac_id,
890 		   hal_ring_handle_t hal_ring_hdl,
891 		   int hal_ring_type)
892 {
893 	struct dp_htt_htc_pkt *pkt;
894 	qdf_nbuf_t htt_msg;
895 	uint32_t *msg_word;
896 	struct hal_srng_params srng_params;
897 	qdf_dma_addr_t hp_addr, tp_addr;
898 	uint32_t ring_entry_size =
899 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
900 	int htt_ring_type, htt_ring_id;
901 	uint8_t *htt_logger_bufp;
902 	int target_pdev_id;
903 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
904 	QDF_STATUS status;
905 
906 	/* Sizes should be set in 4-byte words */
907 	ring_entry_size = ring_entry_size >> 2;
908 
909 	htt_msg = qdf_nbuf_alloc(soc->osdev,
910 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
911 		/* reserve room for the HTC header */
912 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
913 	if (!htt_msg)
914 		goto fail0;
915 
916 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
917 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
918 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
919 
920 	switch (hal_ring_type) {
921 	case RXDMA_BUF:
922 #ifdef QCA_HOST2FW_RXBUF_RING
923 		if (srng_params.ring_id ==
924 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
925 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
926 			htt_ring_type = HTT_SW_TO_SW_RING;
927 #ifdef IPA_OFFLOAD
928 		} else if (srng_params.ring_id ==
929 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
930 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
931 			htt_ring_type = HTT_SW_TO_SW_RING;
932 #endif
933 #else
934 		if (srng_params.ring_id ==
935 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
936 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
937 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
938 			htt_ring_type = HTT_SW_TO_HW_RING;
939 #endif
940 		} else if (srng_params.ring_id ==
941 #ifdef IPA_OFFLOAD
942 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
943 #else
944 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
945 #endif
946 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
947 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
948 			htt_ring_type = HTT_SW_TO_HW_RING;
949 		} else {
950 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
951 				   "%s: Ring %d currently not supported",
952 				   __func__, srng_params.ring_id);
953 			goto fail1;
954 		}
955 
956 		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
957 			hal_ring_type, srng_params.ring_id, htt_ring_id,
958 			(uint64_t)hp_addr,
959 			(uint64_t)tp_addr);
960 		break;
961 	case RXDMA_MONITOR_BUF:
962 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
963 		htt_ring_type = HTT_SW_TO_HW_RING;
964 		break;
965 	case RXDMA_MONITOR_STATUS:
966 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
967 		htt_ring_type = HTT_SW_TO_HW_RING;
968 		break;
969 	case RXDMA_MONITOR_DST:
970 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
971 		htt_ring_type = HTT_HW_TO_SW_RING;
972 		break;
973 	case RXDMA_MONITOR_DESC:
974 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
975 		htt_ring_type = HTT_SW_TO_HW_RING;
976 		break;
977 	case RXDMA_DST:
978 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
979 		htt_ring_type = HTT_HW_TO_SW_RING;
980 		break;
981 
982 	default:
983 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
984 			"%s: Ring currently not supported", __func__);
985 			goto fail1;
986 	}
987 
988 	/*
989 	 * Set the length of the message.
990 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
991 	 * separately during the below call to qdf_nbuf_push_head.
992 	 * The contribution from the HTC header is added separately inside HTC.
993 	 */
994 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
995 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
996 			"%s: Failed to expand head for SRING_SETUP msg",
997 			__func__);
998 		return QDF_STATUS_E_FAILURE;
999 	}
1000 
1001 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1002 
1003 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1004 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1005 
1006 	/* word 0 */
1007 	*msg_word = 0;
1008 	htt_logger_bufp = (uint8_t *)msg_word;
1009 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
1010 	target_pdev_id =
1011 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
1012 
1013 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
1014 			(htt_ring_type == HTT_HW_TO_SW_RING))
1015 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
1016 	else
1017 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
1018 
1019 	dp_info("mac_id %d", mac_id);
1020 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
1021 	/* TODO: Discuss with FW on changing this to unique ID and using
1022 	 * htt_ring_type to send the type of ring
1023 	 */
1024 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
1025 
1026 	/* word 1 */
1027 	msg_word++;
1028 	*msg_word = 0;
1029 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
1030 		srng_params.ring_base_paddr & 0xffffffff);
1031 
1032 	/* word 2 */
1033 	msg_word++;
1034 	*msg_word = 0;
1035 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
1036 		(uint64_t)srng_params.ring_base_paddr >> 32);
1037 
1038 	/* word 3 */
1039 	msg_word++;
1040 	*msg_word = 0;
1041 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
1042 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
1043 		(ring_entry_size * srng_params.num_entries));
1044 	dp_info("entry_size %d", ring_entry_size);
1045 	dp_info("num_entries %d", srng_params.num_entries);
1046 	dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
1047 	if (htt_ring_type == HTT_SW_TO_HW_RING)
1048 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
1049 						*msg_word, 1);
1050 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
1051 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
1052 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
1053 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
1054 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
1055 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
1056 
1057 	/* word 4 */
1058 	msg_word++;
1059 	*msg_word = 0;
1060 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
1061 		hp_addr & 0xffffffff);
1062 
1063 	/* word 5 */
1064 	msg_word++;
1065 	*msg_word = 0;
1066 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
1067 		(uint64_t)hp_addr >> 32);
1068 
1069 	/* word 6 */
1070 	msg_word++;
1071 	*msg_word = 0;
1072 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
1073 		tp_addr & 0xffffffff);
1074 
1075 	/* word 7 */
1076 	msg_word++;
1077 	*msg_word = 0;
1078 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
1079 		(uint64_t)tp_addr >> 32);
1080 
1081 	/* word 8 */
1082 	msg_word++;
1083 	*msg_word = 0;
1084 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
1085 		srng_params.msi_addr & 0xffffffff);
1086 
1087 	/* word 9 */
1088 	msg_word++;
1089 	*msg_word = 0;
1090 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
1091 		(uint64_t)(srng_params.msi_addr) >> 32);
1092 
1093 	/* word 10 */
1094 	msg_word++;
1095 	*msg_word = 0;
1096 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
1097 		qdf_cpu_to_le32(srng_params.msi_data));
1098 
1099 	/* word 11 */
1100 	msg_word++;
1101 	*msg_word = 0;
1102 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
1103 		srng_params.intr_batch_cntr_thres_entries *
1104 		ring_entry_size);
1105 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
1106 		srng_params.intr_timer_thres_us >> 3);
1107 
1108 	/* word 12 */
1109 	msg_word++;
1110 	*msg_word = 0;
1111 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
1112 		/* TODO: Setting low threshold to 1/8th of ring size - see
1113 		 * if this needs to be configurable
1114 		 */
1115 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
1116 			srng_params.low_threshold);
1117 	}
1118 	/* "response_required" field should be set if a HTT response message is
1119 	 * required after setting up the ring.
1120 	 */
1121 	pkt = htt_htc_pkt_alloc(soc);
1122 	if (!pkt)
1123 		goto fail1;
1124 
1125 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1126 
1127 	SET_HTC_PACKET_INFO_TX(
1128 		&pkt->htc_pkt,
1129 		dp_htt_h2t_send_complete_free_netbuf,
1130 		qdf_nbuf_data(htt_msg),
1131 		qdf_nbuf_len(htt_msg),
1132 		soc->htc_endpoint,
1133 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1134 
1135 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1136 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
1137 				     htt_logger_bufp);
1138 
1139 	if (status != QDF_STATUS_SUCCESS) {
1140 		qdf_nbuf_free(htt_msg);
1141 		htt_htc_pkt_free(soc, pkt);
1142 	}
1143 
1144 	return status;
1145 
1146 fail1:
1147 	qdf_nbuf_free(htt_msg);
1148 fail0:
1149 	return QDF_STATUS_E_FAILURE;
1150 }
1151 
1152 #ifdef QCA_SUPPORT_FULL_MON
1153 /**
1154  * htt_h2t_full_mon_cfg() - Send full monitor configuarion msg to FW
1155  *
1156  * @htt_soc: HTT Soc handle
1157  * @pdev_id: Radio id
1158  * @dp_full_mon_config: enabled/disable configuration
1159  *
1160  * Return: Success when HTT message is sent, error on failure
1161  */
1162 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
1163 			 uint8_t pdev_id,
1164 			 enum dp_full_mon_config config)
1165 {
1166 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
1167 	struct dp_htt_htc_pkt *pkt;
1168 	qdf_nbuf_t htt_msg;
1169 	uint32_t *msg_word;
1170 	uint8_t *htt_logger_bufp;
1171 
1172 	htt_msg = qdf_nbuf_alloc(soc->osdev,
1173 				 HTT_MSG_BUF_SIZE(
1174 				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
1175 				 /* reserve room for the HTC header */
1176 				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
1177 				 4,
1178 				 TRUE);
1179 	if (!htt_msg)
1180 		return QDF_STATUS_E_FAILURE;
1181 
1182 	/*
1183 	 * Set the length of the message.
1184 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1185 	 * separately during the below call to qdf_nbuf_push_head.
1186 	 * The contribution from the HTC header is added separately inside HTC.
1187 	 */
1188 	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ)) {
1189 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1190 			  "%s: Failed to expand head for RX Ring Cfg msg",
1191 			  __func__);
1192 		goto fail1;
1193 	}
1194 
1195 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1196 
1197 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1198 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1199 
1200 	/* word 0 */
1201 	*msg_word = 0;
1202 	htt_logger_bufp = (uint8_t *)msg_word;
1203 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
1204 	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
1205 			*msg_word, DP_SW2HW_MACID(pdev_id));
1206 
1207 	msg_word++;
1208 	*msg_word = 0;
1209 	/* word 1 */
1210 	if (config == DP_FULL_MON_ENABLE) {
1211 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
1212 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
1213 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
1214 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
1215 	} else if (config == DP_FULL_MON_DISABLE) {
1216 		/* As per MAC team's suggestion, While disbaling full monitor
1217 		 * mode, Set 'en' bit to true in full monitor mode register.
1218 		 */
1219 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
1220 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
1221 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
1222 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
1223 	}
1224 
1225 	pkt = htt_htc_pkt_alloc(soc);
1226 	if (!pkt) {
1227 		qdf_err("HTC packet allocation failed");
1228 		goto fail1;
1229 	}
1230 
1231 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1232 
1233 	SET_HTC_PACKET_INFO_TX(
1234 		&pkt->htc_pkt,
1235 		dp_htt_h2t_send_complete_free_netbuf,
1236 		qdf_nbuf_data(htt_msg),
1237 		qdf_nbuf_len(htt_msg),
1238 		soc->htc_endpoint,
1239 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1240 
1241 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1242 	qdf_info("config: %d", config);
1243 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
1244 			    htt_logger_bufp);
1245 	return QDF_STATUS_SUCCESS;
1246 fail1:
1247 	qdf_nbuf_free(htt_msg);
1248 	return QDF_STATUS_E_FAILURE;
1249 }
1250 #else
/* Stub used when QCA_SUPPORT_FULL_MON is not compiled in: full monitor
 * mode configuration is a no-op and always reports success (0).
 */
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	return 0;
}
1257 
1258 #endif
1259 
1260 /*
1261  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
1262  * config message to target
1263  * @htt_soc:	HTT SOC handle
1264  * @pdev_id:	WIN- PDEV Id, MCL- mac id
1265  * @hal_srng:	Opaque HAL SRNG pointer
1266  * @hal_ring_type:	SRNG ring type
1267  * @ring_buf_size:	SRNG buffer size
1268  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
1269  * Return: 0 on success; error code on failure
1270  */
1271 int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
1272 			hal_ring_handle_t hal_ring_hdl,
1273 			int hal_ring_type, int ring_buf_size,
1274 			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
1275 {
1276 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
1277 	struct dp_htt_htc_pkt *pkt;
1278 	qdf_nbuf_t htt_msg;
1279 	uint32_t *msg_word;
1280 	struct hal_srng_params srng_params;
1281 	uint32_t htt_ring_type, htt_ring_id;
1282 	uint32_t tlv_filter;
1283 	uint8_t *htt_logger_bufp;
1284 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
1285 	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
1286 	int target_pdev_id;
1287 	QDF_STATUS status;
1288 
1289 	htt_msg = qdf_nbuf_alloc(soc->osdev,
1290 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
1291 	/* reserve room for the HTC header */
1292 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
1293 	if (!htt_msg)
1294 		goto fail0;
1295 
1296 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
1297 
1298 	switch (hal_ring_type) {
1299 	case RXDMA_BUF:
1300 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
1301 		htt_ring_type = HTT_SW_TO_HW_RING;
1302 		break;
1303 	case RXDMA_MONITOR_BUF:
1304 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
1305 		htt_ring_type = HTT_SW_TO_HW_RING;
1306 		break;
1307 	case RXDMA_MONITOR_STATUS:
1308 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
1309 		htt_ring_type = HTT_SW_TO_HW_RING;
1310 		break;
1311 	case RXDMA_MONITOR_DST:
1312 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
1313 		htt_ring_type = HTT_HW_TO_SW_RING;
1314 		break;
1315 	case RXDMA_MONITOR_DESC:
1316 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
1317 		htt_ring_type = HTT_SW_TO_HW_RING;
1318 		break;
1319 	case RXDMA_DST:
1320 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
1321 		htt_ring_type = HTT_HW_TO_SW_RING;
1322 		break;
1323 
1324 	default:
1325 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1326 			"%s: Ring currently not supported", __func__);
1327 		goto fail1;
1328 	}
1329 
1330 	/*
1331 	 * Set the length of the message.
1332 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1333 	 * separately during the below call to qdf_nbuf_push_head.
1334 	 * The contribution from the HTC header is added separately inside HTC.
1335 	 */
1336 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
1337 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1338 			"%s: Failed to expand head for RX Ring Cfg msg",
1339 			__func__);
1340 		goto fail1; /* failure */
1341 	}
1342 
1343 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1344 
1345 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1346 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1347 
1348 	/* word 0 */
1349 	htt_logger_bufp = (uint8_t *)msg_word;
1350 	*msg_word = 0;
1351 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
1352 
1353 	/*
1354 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
1355 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
1356 	 */
1357 	target_pdev_id =
1358 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);
1359 
1360 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
1361 			htt_ring_type == HTT_SW_TO_HW_RING)
1362 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
1363 						      target_pdev_id);
1364 
1365 	/* TODO: Discuss with FW on changing this to unique ID and using
1366 	 * htt_ring_type to send the type of ring
1367 	 */
1368 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
1369 
1370 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
1371 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
1372 
1373 	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
1374 						htt_tlv_filter->offset_valid);
1375 
1376 	if (mon_drop_th > 0)
1377 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1378 								   1);
1379 	else
1380 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1381 								   0);
1382 
1383 	/* word 1 */
1384 	msg_word++;
1385 	*msg_word = 0;
1386 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
1387 		ring_buf_size);
1388 
1389 	/* word 2 */
1390 	msg_word++;
1391 	*msg_word = 0;
1392 
1393 	if (htt_tlv_filter->enable_fp) {
1394 		/* TYPE: MGMT */
1395 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1396 			FP, MGMT, 0000,
1397 			(htt_tlv_filter->fp_mgmt_filter &
1398 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1399 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1400 			FP, MGMT, 0001,
1401 			(htt_tlv_filter->fp_mgmt_filter &
1402 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1403 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1404 			FP, MGMT, 0010,
1405 			(htt_tlv_filter->fp_mgmt_filter &
1406 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1407 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1408 			FP, MGMT, 0011,
1409 			(htt_tlv_filter->fp_mgmt_filter &
1410 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1411 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1412 			FP, MGMT, 0100,
1413 			(htt_tlv_filter->fp_mgmt_filter &
1414 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1415 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1416 			FP, MGMT, 0101,
1417 			(htt_tlv_filter->fp_mgmt_filter &
1418 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1419 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1420 			FP, MGMT, 0110,
1421 			(htt_tlv_filter->fp_mgmt_filter &
1422 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1423 		/* reserved */
1424 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
1425 			MGMT, 0111,
1426 			(htt_tlv_filter->fp_mgmt_filter &
1427 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1428 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1429 			FP, MGMT, 1000,
1430 			(htt_tlv_filter->fp_mgmt_filter &
1431 			FILTER_MGMT_BEACON) ? 1 : 0);
1432 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1433 			FP, MGMT, 1001,
1434 			(htt_tlv_filter->fp_mgmt_filter &
1435 			FILTER_MGMT_ATIM) ? 1 : 0);
1436 	}
1437 
1438 	if (htt_tlv_filter->enable_md) {
1439 			/* TYPE: MGMT */
1440 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1441 			MD, MGMT, 0000,
1442 			(htt_tlv_filter->md_mgmt_filter &
1443 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1444 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1445 			MD, MGMT, 0001,
1446 			(htt_tlv_filter->md_mgmt_filter &
1447 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1448 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1449 			MD, MGMT, 0010,
1450 			(htt_tlv_filter->md_mgmt_filter &
1451 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1452 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1453 			MD, MGMT, 0011,
1454 			(htt_tlv_filter->md_mgmt_filter &
1455 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1456 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1457 			MD, MGMT, 0100,
1458 			(htt_tlv_filter->md_mgmt_filter &
1459 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1460 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1461 			MD, MGMT, 0101,
1462 			(htt_tlv_filter->md_mgmt_filter &
1463 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1464 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1465 			MD, MGMT, 0110,
1466 			(htt_tlv_filter->md_mgmt_filter &
1467 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1468 		/* reserved */
1469 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1470 			MGMT, 0111,
1471 			(htt_tlv_filter->md_mgmt_filter &
1472 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1473 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1474 			MD, MGMT, 1000,
1475 			(htt_tlv_filter->md_mgmt_filter &
1476 			FILTER_MGMT_BEACON) ? 1 : 0);
1477 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1478 			MD, MGMT, 1001,
1479 			(htt_tlv_filter->md_mgmt_filter &
1480 			FILTER_MGMT_ATIM) ? 1 : 0);
1481 	}
1482 
1483 	if (htt_tlv_filter->enable_mo) {
1484 		/* TYPE: MGMT */
1485 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1486 			MO, MGMT, 0000,
1487 			(htt_tlv_filter->mo_mgmt_filter &
1488 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1489 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1490 			MO, MGMT, 0001,
1491 			(htt_tlv_filter->mo_mgmt_filter &
1492 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1493 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1494 			MO, MGMT, 0010,
1495 			(htt_tlv_filter->mo_mgmt_filter &
1496 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1497 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1498 			MO, MGMT, 0011,
1499 			(htt_tlv_filter->mo_mgmt_filter &
1500 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1501 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1502 			MO, MGMT, 0100,
1503 			(htt_tlv_filter->mo_mgmt_filter &
1504 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1505 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1506 			MO, MGMT, 0101,
1507 			(htt_tlv_filter->mo_mgmt_filter &
1508 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1509 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1510 			MO, MGMT, 0110,
1511 			(htt_tlv_filter->mo_mgmt_filter &
1512 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1513 		/* reserved */
1514 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1515 			MGMT, 0111,
1516 			(htt_tlv_filter->mo_mgmt_filter &
1517 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1518 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1519 			MO, MGMT, 1000,
1520 			(htt_tlv_filter->mo_mgmt_filter &
1521 			FILTER_MGMT_BEACON) ? 1 : 0);
1522 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1523 			MO, MGMT, 1001,
1524 			(htt_tlv_filter->mo_mgmt_filter &
1525 			FILTER_MGMT_ATIM) ? 1 : 0);
1526 	}
1527 
1528 	/* word 3 */
1529 	msg_word++;
1530 	*msg_word = 0;
1531 
1532 	if (htt_tlv_filter->enable_fp) {
1533 		/* TYPE: MGMT */
1534 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1535 			FP, MGMT, 1010,
1536 			(htt_tlv_filter->fp_mgmt_filter &
1537 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1538 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1539 			FP, MGMT, 1011,
1540 			(htt_tlv_filter->fp_mgmt_filter &
1541 			FILTER_MGMT_AUTH) ? 1 : 0);
1542 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1543 			FP, MGMT, 1100,
1544 			(htt_tlv_filter->fp_mgmt_filter &
1545 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1546 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1547 			FP, MGMT, 1101,
1548 			(htt_tlv_filter->fp_mgmt_filter &
1549 			FILTER_MGMT_ACTION) ? 1 : 0);
1550 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1551 			FP, MGMT, 1110,
1552 			(htt_tlv_filter->fp_mgmt_filter &
1553 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1554 		/* reserved*/
1555 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1556 			MGMT, 1111,
1557 			(htt_tlv_filter->fp_mgmt_filter &
1558 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1559 	}
1560 
1561 	if (htt_tlv_filter->enable_md) {
1562 			/* TYPE: MGMT */
1563 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1564 			MD, MGMT, 1010,
1565 			(htt_tlv_filter->md_mgmt_filter &
1566 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1567 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1568 			MD, MGMT, 1011,
1569 			(htt_tlv_filter->md_mgmt_filter &
1570 			FILTER_MGMT_AUTH) ? 1 : 0);
1571 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1572 			MD, MGMT, 1100,
1573 			(htt_tlv_filter->md_mgmt_filter &
1574 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1575 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1576 			MD, MGMT, 1101,
1577 			(htt_tlv_filter->md_mgmt_filter &
1578 			FILTER_MGMT_ACTION) ? 1 : 0);
1579 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1580 			MD, MGMT, 1110,
1581 			(htt_tlv_filter->md_mgmt_filter &
1582 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1583 	}
1584 
1585 	if (htt_tlv_filter->enable_mo) {
1586 		/* TYPE: MGMT */
1587 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1588 			MO, MGMT, 1010,
1589 			(htt_tlv_filter->mo_mgmt_filter &
1590 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1591 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1592 			MO, MGMT, 1011,
1593 			(htt_tlv_filter->mo_mgmt_filter &
1594 			FILTER_MGMT_AUTH) ? 1 : 0);
1595 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1596 			MO, MGMT, 1100,
1597 			(htt_tlv_filter->mo_mgmt_filter &
1598 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1599 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1600 			MO, MGMT, 1101,
1601 			(htt_tlv_filter->mo_mgmt_filter &
1602 			FILTER_MGMT_ACTION) ? 1 : 0);
1603 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1604 			MO, MGMT, 1110,
1605 			(htt_tlv_filter->mo_mgmt_filter &
1606 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1607 		/* reserved*/
1608 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1609 			MGMT, 1111,
1610 			(htt_tlv_filter->mo_mgmt_filter &
1611 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1612 	}
1613 
1614 	/* word 4 */
1615 	msg_word++;
1616 	*msg_word = 0;
1617 
1618 	if (htt_tlv_filter->enable_fp) {
1619 		/* TYPE: CTRL */
1620 		/* reserved */
1621 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1622 			CTRL, 0000,
1623 			(htt_tlv_filter->fp_ctrl_filter &
1624 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1625 		/* reserved */
1626 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1627 			CTRL, 0001,
1628 			(htt_tlv_filter->fp_ctrl_filter &
1629 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1630 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1631 			CTRL, 0010,
1632 			(htt_tlv_filter->fp_ctrl_filter &
1633 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1634 		/* reserved */
1635 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1636 			CTRL, 0011,
1637 			(htt_tlv_filter->fp_ctrl_filter &
1638 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1639 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1640 			CTRL, 0100,
1641 			(htt_tlv_filter->fp_ctrl_filter &
1642 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1643 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1644 			CTRL, 0101,
1645 			(htt_tlv_filter->fp_ctrl_filter &
1646 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1647 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1648 			CTRL, 0110,
1649 			(htt_tlv_filter->fp_ctrl_filter &
1650 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1651 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1652 			CTRL, 0111,
1653 			(htt_tlv_filter->fp_ctrl_filter &
1654 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1655 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1656 			CTRL, 1000,
1657 			(htt_tlv_filter->fp_ctrl_filter &
1658 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1659 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1660 			CTRL, 1001,
1661 			(htt_tlv_filter->fp_ctrl_filter &
1662 			FILTER_CTRL_BA) ? 1 : 0);
1663 	}
1664 
1665 	if (htt_tlv_filter->enable_md) {
1666 		/* TYPE: CTRL */
1667 		/* reserved */
1668 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1669 			CTRL, 0000,
1670 			(htt_tlv_filter->md_ctrl_filter &
1671 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1672 		/* reserved */
1673 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1674 			CTRL, 0001,
1675 			(htt_tlv_filter->md_ctrl_filter &
1676 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1677 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1678 			CTRL, 0010,
1679 			(htt_tlv_filter->md_ctrl_filter &
1680 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1681 		/* reserved */
1682 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1683 			CTRL, 0011,
1684 			(htt_tlv_filter->md_ctrl_filter &
1685 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1686 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1687 			CTRL, 0100,
1688 			(htt_tlv_filter->md_ctrl_filter &
1689 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1690 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1691 			CTRL, 0101,
1692 			(htt_tlv_filter->md_ctrl_filter &
1693 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1694 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1695 			CTRL, 0110,
1696 			(htt_tlv_filter->md_ctrl_filter &
1697 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1698 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1699 			CTRL, 0111,
1700 			(htt_tlv_filter->md_ctrl_filter &
1701 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1702 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1703 			CTRL, 1000,
1704 			(htt_tlv_filter->md_ctrl_filter &
1705 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1706 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1707 			CTRL, 1001,
1708 			(htt_tlv_filter->md_ctrl_filter &
1709 			FILTER_CTRL_BA) ? 1 : 0);
1710 	}
1711 
1712 	if (htt_tlv_filter->enable_mo) {
1713 		/* TYPE: CTRL */
1714 		/* reserved */
1715 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1716 			CTRL, 0000,
1717 			(htt_tlv_filter->mo_ctrl_filter &
1718 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1719 		/* reserved */
1720 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1721 			CTRL, 0001,
1722 			(htt_tlv_filter->mo_ctrl_filter &
1723 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1724 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1725 			CTRL, 0010,
1726 			(htt_tlv_filter->mo_ctrl_filter &
1727 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1728 		/* reserved */
1729 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1730 			CTRL, 0011,
1731 			(htt_tlv_filter->mo_ctrl_filter &
1732 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1733 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1734 			CTRL, 0100,
1735 			(htt_tlv_filter->mo_ctrl_filter &
1736 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1737 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1738 			CTRL, 0101,
1739 			(htt_tlv_filter->mo_ctrl_filter &
1740 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1741 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1742 			CTRL, 0110,
1743 			(htt_tlv_filter->mo_ctrl_filter &
1744 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1745 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1746 			CTRL, 0111,
1747 			(htt_tlv_filter->mo_ctrl_filter &
1748 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1749 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1750 			CTRL, 1000,
1751 			(htt_tlv_filter->mo_ctrl_filter &
1752 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1753 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1754 			CTRL, 1001,
1755 			(htt_tlv_filter->mo_ctrl_filter &
1756 			FILTER_CTRL_BA) ? 1 : 0);
1757 	}
1758 
1759 	/* word 5 */
1760 	msg_word++;
1761 	*msg_word = 0;
1762 	if (htt_tlv_filter->enable_fp) {
1763 		/* TYPE: CTRL */
1764 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1765 			CTRL, 1010,
1766 			(htt_tlv_filter->fp_ctrl_filter &
1767 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1768 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1769 			CTRL, 1011,
1770 			(htt_tlv_filter->fp_ctrl_filter &
1771 			FILTER_CTRL_RTS) ? 1 : 0);
1772 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1773 			CTRL, 1100,
1774 			(htt_tlv_filter->fp_ctrl_filter &
1775 			FILTER_CTRL_CTS) ? 1 : 0);
1776 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1777 			CTRL, 1101,
1778 			(htt_tlv_filter->fp_ctrl_filter &
1779 			FILTER_CTRL_ACK) ? 1 : 0);
1780 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1781 			CTRL, 1110,
1782 			(htt_tlv_filter->fp_ctrl_filter &
1783 			FILTER_CTRL_CFEND) ? 1 : 0);
1784 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1785 			CTRL, 1111,
1786 			(htt_tlv_filter->fp_ctrl_filter &
1787 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1788 		/* TYPE: DATA */
1789 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1790 			DATA, MCAST,
1791 			(htt_tlv_filter->fp_data_filter &
1792 			FILTER_DATA_MCAST) ? 1 : 0);
1793 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1794 			DATA, UCAST,
1795 			(htt_tlv_filter->fp_data_filter &
1796 			FILTER_DATA_UCAST) ? 1 : 0);
1797 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1798 			DATA, NULL,
1799 			(htt_tlv_filter->fp_data_filter &
1800 			FILTER_DATA_NULL) ? 1 : 0);
1801 	}
1802 
1803 	if (htt_tlv_filter->enable_md) {
1804 		/* TYPE: CTRL */
1805 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1806 			CTRL, 1010,
1807 			(htt_tlv_filter->md_ctrl_filter &
1808 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1809 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1810 			CTRL, 1011,
1811 			(htt_tlv_filter->md_ctrl_filter &
1812 			FILTER_CTRL_RTS) ? 1 : 0);
1813 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1814 			CTRL, 1100,
1815 			(htt_tlv_filter->md_ctrl_filter &
1816 			FILTER_CTRL_CTS) ? 1 : 0);
1817 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1818 			CTRL, 1101,
1819 			(htt_tlv_filter->md_ctrl_filter &
1820 			FILTER_CTRL_ACK) ? 1 : 0);
1821 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1822 			CTRL, 1110,
1823 			(htt_tlv_filter->md_ctrl_filter &
1824 			FILTER_CTRL_CFEND) ? 1 : 0);
1825 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1826 			CTRL, 1111,
1827 			(htt_tlv_filter->md_ctrl_filter &
1828 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1829 		/* TYPE: DATA */
1830 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1831 			DATA, MCAST,
1832 			(htt_tlv_filter->md_data_filter &
1833 			FILTER_DATA_MCAST) ? 1 : 0);
1834 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1835 			DATA, UCAST,
1836 			(htt_tlv_filter->md_data_filter &
1837 			FILTER_DATA_UCAST) ? 1 : 0);
1838 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1839 			DATA, NULL,
1840 			(htt_tlv_filter->md_data_filter &
1841 			FILTER_DATA_NULL) ? 1 : 0);
1842 	}
1843 
1844 	if (htt_tlv_filter->enable_mo) {
1845 		/* TYPE: CTRL */
1846 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1847 			CTRL, 1010,
1848 			(htt_tlv_filter->mo_ctrl_filter &
1849 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1850 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1851 			CTRL, 1011,
1852 			(htt_tlv_filter->mo_ctrl_filter &
1853 			FILTER_CTRL_RTS) ? 1 : 0);
1854 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1855 			CTRL, 1100,
1856 			(htt_tlv_filter->mo_ctrl_filter &
1857 			FILTER_CTRL_CTS) ? 1 : 0);
1858 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1859 			CTRL, 1101,
1860 			(htt_tlv_filter->mo_ctrl_filter &
1861 			FILTER_CTRL_ACK) ? 1 : 0);
1862 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1863 			CTRL, 1110,
1864 			(htt_tlv_filter->mo_ctrl_filter &
1865 			FILTER_CTRL_CFEND) ? 1 : 0);
1866 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1867 			CTRL, 1111,
1868 			(htt_tlv_filter->mo_ctrl_filter &
1869 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1870 		/* TYPE: DATA */
1871 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1872 			DATA, MCAST,
1873 			(htt_tlv_filter->mo_data_filter &
1874 			FILTER_DATA_MCAST) ? 1 : 0);
1875 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1876 			DATA, UCAST,
1877 			(htt_tlv_filter->mo_data_filter &
1878 			FILTER_DATA_UCAST) ? 1 : 0);
1879 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1880 			DATA, NULL,
1881 			(htt_tlv_filter->mo_data_filter &
1882 			FILTER_DATA_NULL) ? 1 : 0);
1883 	}
1884 
1885 	/* word 6 */
1886 	msg_word++;
1887 	*msg_word = 0;
1888 	tlv_filter = 0;
1889 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1890 		htt_tlv_filter->mpdu_start);
1891 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1892 		htt_tlv_filter->msdu_start);
1893 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1894 		htt_tlv_filter->packet);
1895 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1896 		htt_tlv_filter->msdu_end);
1897 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1898 		htt_tlv_filter->mpdu_end);
1899 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1900 		htt_tlv_filter->packet_header);
1901 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1902 		htt_tlv_filter->attention);
1903 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1904 		htt_tlv_filter->ppdu_start);
1905 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1906 		htt_tlv_filter->ppdu_end);
1907 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1908 		htt_tlv_filter->ppdu_end_user_stats);
1909 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1910 		PPDU_END_USER_STATS_EXT,
1911 		htt_tlv_filter->ppdu_end_user_stats_ext);
1912 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1913 		htt_tlv_filter->ppdu_end_status_done);
1914 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1915 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1916 		 htt_tlv_filter->header_per_msdu);
1917 
1918 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1919 
1920 	msg_word++;
1921 	*msg_word = 0;
1922 	if (htt_tlv_filter->offset_valid) {
1923 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1924 					htt_tlv_filter->rx_packet_offset);
1925 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1926 					htt_tlv_filter->rx_header_offset);
1927 
1928 		msg_word++;
1929 		*msg_word = 0;
1930 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1931 					htt_tlv_filter->rx_mpdu_end_offset);
1932 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1933 					htt_tlv_filter->rx_mpdu_start_offset);
1934 
1935 		msg_word++;
1936 		*msg_word = 0;
1937 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1938 					htt_tlv_filter->rx_msdu_end_offset);
1939 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1940 					htt_tlv_filter->rx_msdu_start_offset);
1941 
1942 		msg_word++;
1943 		*msg_word = 0;
1944 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1945 					htt_tlv_filter->rx_attn_offset);
1946 		msg_word++;
1947 		*msg_word = 0;
1948 	} else {
1949 		msg_word += 4;
1950 		*msg_word = 0;
1951 	}
1952 
1953 	if (mon_drop_th > 0)
1954 		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1955 								mon_drop_th);
1956 
1957 	/* "response_required" field should be set if a HTT response message is
1958 	 * required after setting up the ring.
1959 	 */
1960 	pkt = htt_htc_pkt_alloc(soc);
1961 	if (!pkt)
1962 		goto fail1;
1963 
1964 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1965 
1966 	SET_HTC_PACKET_INFO_TX(
1967 		&pkt->htc_pkt,
1968 		dp_htt_h2t_send_complete_free_netbuf,
1969 		qdf_nbuf_data(htt_msg),
1970 		qdf_nbuf_len(htt_msg),
1971 		soc->htc_endpoint,
1972 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1973 
1974 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1975 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
1976 				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1977 				     htt_logger_bufp);
1978 
1979 	if (status != QDF_STATUS_SUCCESS) {
1980 		qdf_nbuf_free(htt_msg);
1981 		htt_htc_pkt_free(soc, pkt);
1982 	}
1983 
1984 	return status;
1985 
1986 fail1:
1987 	qdf_nbuf_free(htt_msg);
1988 fail0:
1989 	return QDF_STATUS_E_FAILURE;
1990 }
1991 
1992 #if defined(HTT_STATS_ENABLE)
1993 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1994 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1995 
1996 {
1997 	uint32_t pdev_id;
1998 	uint32_t *msg_word = NULL;
1999 	uint32_t msg_remain_len = 0;
2000 
2001 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
2002 
2003 	/*COOKIE MSB*/
2004 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
2005 
2006 	/* stats message length + 16 size of HTT header*/
2007 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
2008 				(uint32_t)DP_EXT_MSG_LENGTH);
2009 
2010 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
2011 			msg_word,  msg_remain_len,
2012 			WDI_NO_VAL, pdev_id);
2013 
2014 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
2015 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
2016 	}
2017 	/* Need to be freed here as WDI handler will
2018 	 * make a copy of pkt to send data to application
2019 	 */
2020 	qdf_nbuf_free(htt_msg);
2021 	return QDF_STATUS_SUCCESS;
2022 }
2023 #else
2024 static inline QDF_STATUS
2025 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
2026 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
2027 {
2028 	return QDF_STATUS_E_NOSUPPORT;
2029 }
2030 #endif
2031 
#ifdef HTT_STATS_DEBUGFS_SUPPORT
/* dp_htt_stats_dbgfs_send_msg() - Send one HTT stats buffer to the debugfs
 * consumer registered on the pdev
 * @pdev: dp pdev handle
 * @msg_word: HTT msg
 * @msg_len: Length of HTT msg sent
 *
 * Return: none
 */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
	struct htt_dbgfs_cfg dbgfs_cfg;
	int done = 0;

	/* send 5th word of HTT msg to upper layer */
	dbgfs_cfg.msg_word = (msg_word + 4);
	dbgfs_cfg.m = pdev->dbgfs_cfg->m;

	/* stats message length + HTT header size, capped at one ext msg */
	msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);

	/* hand the TLV payload (header stripped) to the debugfs callback */
	if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
		pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
							     (msg_len - HTT_HEADER_LEN));

	/* Get TLV Done bit from 4th msg word */
	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
	if (done) {
		/* last buffer of the stats stream: wake any waiter blocked
		 * on the debugfs htt stats event
		 */
		if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
			dp_htt_err("%pK: Failed to set event for debugfs htt stats"
				   , pdev->soc);
	}
}
#else
/* stub: debugfs support for HTT stats not compiled in */
static inline void
dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
			    uint32_t msg_len)
{
}
#endif /* HTT_STATS_DEBUGFS_SUPPORT */
2073 
2074 /**
2075  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
2076  * @htt_stats: htt stats info
2077  *
2078  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
2079  * contains sub messages which are identified by a TLV header.
2080  * In this function we will process the stream of T2H messages and read all the
2081  * TLV contained in the message.
2082  *
2083  * THe following cases have been taken care of
2084  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
2085  *		In this case the buffer will contain multiple tlvs.
2086  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
2087  *		Only one tlv will be contained in the HTT message and this tag
2088  *		will extend onto the next buffer.
2089  * Case 3: When the buffer is the continuation of the previous message
2090  * Case 4: tlv length is 0. which will indicate the end of message
2091  *
2092  * return: void
2093  */
2094 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
2095 					struct dp_soc *soc)
2096 {
2097 	htt_tlv_tag_t tlv_type = 0xff;
2098 	qdf_nbuf_t htt_msg = NULL;
2099 	uint32_t *msg_word;
2100 	uint8_t *tlv_buf_head = NULL;
2101 	uint8_t *tlv_buf_tail = NULL;
2102 	uint32_t msg_remain_len = 0;
2103 	uint32_t tlv_remain_len = 0;
2104 	uint32_t *tlv_start;
2105 	int cookie_val = 0;
2106 	int cookie_msb = 0;
2107 	int pdev_id;
2108 	bool copy_stats = false;
2109 	struct dp_pdev *pdev;
2110 
2111 	/* Process node in the HTT message queue */
2112 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
2113 		!= NULL) {
2114 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
2115 		cookie_val = *(msg_word + 1);
2116 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
2117 					*(msg_word +
2118 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
2119 
2120 		if (cookie_val) {
2121 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
2122 					== QDF_STATUS_SUCCESS) {
2123 				continue;
2124 			}
2125 		}
2126 
2127 		cookie_msb = *(msg_word + 2);
2128 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
2129 		pdev = soc->pdev_list[pdev_id];
2130 
2131 		if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
2132 			dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
2133 						    htt_stats->msg_len);
2134 			qdf_nbuf_free(htt_msg);
2135 			continue;
2136 		}
2137 
2138 		if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
2139 			copy_stats = true;
2140 
2141 		/* read 5th word */
2142 		msg_word = msg_word + 4;
2143 		msg_remain_len = qdf_min(htt_stats->msg_len,
2144 				(uint32_t) DP_EXT_MSG_LENGTH);
2145 		/* Keep processing the node till node length is 0 */
2146 		while (msg_remain_len) {
2147 			/*
2148 			 * if message is not a continuation of previous message
2149 			 * read the tlv type and tlv length
2150 			 */
2151 			if (!tlv_buf_head) {
2152 				tlv_type = HTT_STATS_TLV_TAG_GET(
2153 						*msg_word);
2154 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
2155 						*msg_word);
2156 			}
2157 
2158 			if (tlv_remain_len == 0) {
2159 				msg_remain_len = 0;
2160 
2161 				if (tlv_buf_head) {
2162 					qdf_mem_free(tlv_buf_head);
2163 					tlv_buf_head = NULL;
2164 					tlv_buf_tail = NULL;
2165 				}
2166 
2167 				goto error;
2168 			}
2169 
2170 			if (!tlv_buf_head)
2171 				tlv_remain_len += HTT_TLV_HDR_LEN;
2172 
2173 			if ((tlv_remain_len <= msg_remain_len)) {
2174 				/* Case 3 */
2175 				if (tlv_buf_head) {
2176 					qdf_mem_copy(tlv_buf_tail,
2177 							(uint8_t *)msg_word,
2178 							tlv_remain_len);
2179 					tlv_start = (uint32_t *)tlv_buf_head;
2180 				} else {
2181 					/* Case 1 */
2182 					tlv_start = msg_word;
2183 				}
2184 
2185 				if (copy_stats)
2186 					dp_htt_stats_copy_tag(pdev,
2187 							      tlv_type,
2188 							      tlv_start);
2189 				else
2190 					dp_htt_stats_print_tag(pdev,
2191 							       tlv_type,
2192 							       tlv_start);
2193 
2194 				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
2195 				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
2196 					dp_peer_update_inactive_time(pdev,
2197 								     tlv_type,
2198 								     tlv_start);
2199 
2200 				msg_remain_len -= tlv_remain_len;
2201 
2202 				msg_word = (uint32_t *)
2203 					(((uint8_t *)msg_word) +
2204 					tlv_remain_len);
2205 
2206 				tlv_remain_len = 0;
2207 
2208 				if (tlv_buf_head) {
2209 					qdf_mem_free(tlv_buf_head);
2210 					tlv_buf_head = NULL;
2211 					tlv_buf_tail = NULL;
2212 				}
2213 
2214 			} else { /* tlv_remain_len > msg_remain_len */
2215 				/* Case 2 & 3 */
2216 				if (!tlv_buf_head) {
2217 					tlv_buf_head = qdf_mem_malloc(
2218 							tlv_remain_len);
2219 
2220 					if (!tlv_buf_head) {
2221 						QDF_TRACE(QDF_MODULE_ID_TXRX,
2222 								QDF_TRACE_LEVEL_ERROR,
2223 								"Alloc failed");
2224 						goto error;
2225 					}
2226 
2227 					tlv_buf_tail = tlv_buf_head;
2228 				}
2229 
2230 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
2231 						msg_remain_len);
2232 				tlv_remain_len -= msg_remain_len;
2233 				tlv_buf_tail += msg_remain_len;
2234 			}
2235 		}
2236 
2237 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
2238 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
2239 		}
2240 
2241 		qdf_nbuf_free(htt_msg);
2242 	}
2243 	return;
2244 
2245 error:
2246 	qdf_nbuf_free(htt_msg);
2247 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
2248 			!= NULL)
2249 		qdf_nbuf_free(htt_msg);
2250 }
2251 
/*
 * htt_t2h_stats_handler() - Work handler that drains one completed HTT EXT
 * stats stream from the soc-level queue and processes it
 * @context: opaque pointer to the DP SOC
 *
 * Moves nbufs from soc->htt_stats.msg (under htt_stats.lock) into a local
 * queue until the buffer carrying the TLV "done" bit is found, then processes
 * that stream outside the lock. Reschedules itself first if more completed
 * streams remain queued.
 */
void htt_t2h_stats_handler(void *context)
{
	struct dp_soc *soc = (struct dp_soc *)context;
	struct htt_stats_context htt_stats;
	uint32_t *msg_word;
	qdf_nbuf_t htt_msg = NULL;
	uint8_t done;
	uint32_t rem_stats;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc is NULL");
		return;
	}

	/* ignore stats arriving before common init has completed */
	if (!qdf_atomic_read(&soc->cmn_init_done)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc: 0x%pK, init_done: %d", soc,
			  qdf_atomic_read(&soc->cmn_init_done));
		return;
	}

	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
	qdf_nbuf_queue_init(&htt_stats.msg);

	/* pull one completed stats from soc->htt_stats_msg and process */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	if (!soc->htt_stats.num_stats) {
		qdf_spin_unlock_bh(&soc->htt_stats.lock);
		return;
	}
	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
		/*
		 * Done bit signifies that this is the last T2H buffer in the
		 * stream of HTT EXT STATS message
		 */
		if (done)
			break;
	}
	/* one completed stream has been claimed by this invocation */
	rem_stats = --soc->htt_stats.num_stats;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	/* If there are more stats to process, schedule stats work again.
	 * Scheduling prior to processing ht_stats to queue with early
	 * index
	 */
	if (rem_stats)
		qdf_sched_work(0, &soc->htt_stats.work);

	dp_process_htt_stat_msg(&htt_stats, soc);
}
2307 
2308 /*
2309  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
2310  * if a new peer id arrives in a PPDU
2311  * pdev: DP pdev handle
2312  * @peer_id : peer unique identifier
2313  * @ppdu_info: per ppdu tlv structure
2314  *
2315  * return:user index to be populated
2316  */
2317 #ifdef FEATURE_PERPKT_INFO
2318 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
2319 						uint16_t peer_id,
2320 						struct ppdu_info *ppdu_info)
2321 {
2322 	uint8_t user_index = 0;
2323 	struct cdp_tx_completion_ppdu *ppdu_desc;
2324 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2325 
2326 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2327 
2328 	while ((user_index + 1) <= ppdu_info->last_user) {
2329 		ppdu_user_desc = &ppdu_desc->user[user_index];
2330 		if (ppdu_user_desc->peer_id != peer_id) {
2331 			user_index++;
2332 			continue;
2333 		} else {
2334 			/* Max users possible is 8 so user array index should
2335 			 * not exceed 7
2336 			 */
2337 			qdf_assert_always(user_index <= (ppdu_desc->max_users - 1));
2338 			return user_index;
2339 		}
2340 	}
2341 
2342 	ppdu_info->last_user++;
2343 	/* Max users possible is 8 so last user should not exceed 8 */
2344 	qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users);
2345 	return ppdu_info->last_user - 1;
2346 }
2347 
2348 /*
2349  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
2350  * pdev: DP pdev handle
2351  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
2352  * @ppdu_info: per ppdu tlv structure
2353  *
2354  * return:void
2355  */
2356 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
2357 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
2358 {
2359 	uint16_t frame_type;
2360 	uint16_t frame_ctrl;
2361 	uint16_t freq;
2362 	struct dp_soc *soc = NULL;
2363 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2364 	uint64_t ppdu_start_timestamp;
2365 	uint32_t *start_tag_buf;
2366 
2367 	start_tag_buf = tag_buf;
2368 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2369 
2370 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2371 
2372 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
2373 	ppdu_info->sched_cmdid =
2374 		HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
2375 	ppdu_desc->num_users =
2376 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
2377 
2378 	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
2379 
2380 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
2381 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
2382 	ppdu_desc->htt_frame_type = frame_type;
2383 
2384 	frame_ctrl = ppdu_desc->frame_ctrl;
2385 
2386 	ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
2387 
2388 	switch (frame_type) {
2389 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
2390 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
2391 	case HTT_STATS_FTYPE_SGEN_QOS_NULL:
2392 		/*
2393 		 * for management packet, frame type come as DATA_SU
2394 		 * need to check frame_ctrl before setting frame_type
2395 		 */
2396 		if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
2397 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2398 		else
2399 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
2400 	break;
2401 	case HTT_STATS_FTYPE_SGEN_MU_BAR:
2402 	case HTT_STATS_FTYPE_SGEN_BAR:
2403 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
2404 	break;
2405 	default:
2406 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2407 	break;
2408 	}
2409 
2410 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
2411 	ppdu_desc->tx_duration = *tag_buf;
2412 
2413 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
2414 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
2415 
2416 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
2417 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
2418 	if (freq != ppdu_desc->channel) {
2419 		soc = pdev->soc;
2420 		ppdu_desc->channel = freq;
2421 		pdev->operating_channel.freq = freq;
2422 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
2423 			pdev->operating_channel.num =
2424 			    soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
2425 								 pdev->pdev_id,
2426 								 freq);
2427 
2428 		if (soc && soc->cdp_soc.ol_ops->freq_to_band)
2429 			pdev->operating_channel.band =
2430 			       soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
2431 								 pdev->pdev_id,
2432 								 freq);
2433 	}
2434 
2435 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
2436 
2437 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
2438 	ppdu_desc->phy_ppdu_tx_time_us =
2439 		HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf);
2440 	ppdu_desc->beam_change =
2441 		HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
2442 	ppdu_desc->doppler =
2443 		HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf);
2444 	ppdu_desc->spatial_reuse =
2445 		HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf);
2446 
2447 	dp_tx_capture_htt_frame_counter(pdev, frame_type);
2448 
2449 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
2450 	ppdu_start_timestamp = *tag_buf;
2451 	ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
2452 					     HTT_SHIFT_UPPER_TIMESTAMP) &
2453 					    HTT_MASK_UPPER_TIMESTAMP);
2454 
2455 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2456 					ppdu_desc->tx_duration;
2457 	/* Ack time stamp is same as end time stamp*/
2458 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
2459 
2460 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2461 					ppdu_desc->tx_duration;
2462 
2463 	ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
2464 	ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
2465 	ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
2466 
2467 	/* Ack time stamp is same as end time stamp*/
2468 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
2469 }
2470 
2471 /*
2472  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
2473  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
2474  * @ppdu_info: per ppdu tlv structure
2475  *
2476  * return:void
2477  */
2478 static void dp_process_ppdu_stats_user_common_tlv(
2479 		struct dp_pdev *pdev, uint32_t *tag_buf,
2480 		struct ppdu_info *ppdu_info)
2481 {
2482 	uint16_t peer_id;
2483 	struct cdp_tx_completion_ppdu *ppdu_desc;
2484 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2485 	uint8_t curr_user_index = 0;
2486 	struct dp_peer *peer;
2487 	struct dp_vdev *vdev;
2488 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2489 
2490 	ppdu_desc =
2491 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2492 
2493 	tag_buf++;
2494 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2495 
2496 	curr_user_index =
2497 		dp_get_ppdu_info_user_index(pdev,
2498 					    peer_id, ppdu_info);
2499 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2500 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2501 
2502 	ppdu_desc->vdev_id =
2503 		HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
2504 
2505 	ppdu_user_desc->peer_id = peer_id;
2506 
2507 	tag_buf++;
2508 
2509 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
2510 		ppdu_user_desc->delayed_ba = 1;
2511 		ppdu_desc->delayed_ba = 1;
2512 	}
2513 
2514 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
2515 		ppdu_user_desc->is_mcast = true;
2516 		ppdu_user_desc->mpdu_tried_mcast =
2517 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
2518 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
2519 	} else {
2520 		ppdu_user_desc->mpdu_tried_ucast =
2521 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
2522 	}
2523 
2524 	ppdu_user_desc->is_seq_num_valid =
2525 	HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf);
2526 	tag_buf++;
2527 
2528 	ppdu_user_desc->qos_ctrl =
2529 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
2530 	ppdu_user_desc->frame_ctrl =
2531 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
2532 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
2533 
2534 	if (ppdu_user_desc->delayed_ba)
2535 		ppdu_user_desc->mpdu_success = 0;
2536 
2537 	tag_buf += 3;
2538 
2539 	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
2540 		ppdu_user_desc->ppdu_cookie =
2541 			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
2542 		ppdu_user_desc->is_ppdu_cookie_valid = 1;
2543 	}
2544 
2545 	/* returning earlier causes other feilds unpopulated */
2546 	if (peer_id == DP_SCAN_PEER_ID) {
2547 		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
2548 					     DP_MOD_ID_TX_PPDU_STATS);
2549 		if (!vdev)
2550 			return;
2551 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
2552 			     QDF_MAC_ADDR_SIZE);
2553 		dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS);
2554 	} else {
2555 		peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
2556 					     DP_MOD_ID_TX_PPDU_STATS);
2557 		if (!peer) {
2558 			/*
2559 			 * fw sends peer_id which is about to removed but
2560 			 * it was already removed in host.
2561 			 * eg: for disassoc, fw send ppdu stats
2562 			 * with peer id equal to previously associated
2563 			 * peer's peer_id but it was removed
2564 			 */
2565 			vdev = dp_vdev_get_ref_by_id(pdev->soc,
2566 						     ppdu_desc->vdev_id,
2567 						     DP_MOD_ID_TX_PPDU_STATS);
2568 			if (!vdev)
2569 				return;
2570 			qdf_mem_copy(ppdu_user_desc->mac_addr,
2571 				     vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2572 			dp_vdev_unref_delete(pdev->soc, vdev,
2573 					     DP_MOD_ID_TX_PPDU_STATS);
2574 			return;
2575 		}
2576 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2577 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2578 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
2579 	}
2580 }
2581 
2582 
2583 /**
2584  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
2585  * @pdev: DP pdev handle
2586  * @tag_buf: T2H message buffer carrying the user rate TLV
2587  * @ppdu_info: per ppdu tlv structure
2588  *
2589  * return:void
2590  */
2591 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
2592 		uint32_t *tag_buf,
2593 		struct ppdu_info *ppdu_info)
2594 {
2595 	uint16_t peer_id;
2596 	struct cdp_tx_completion_ppdu *ppdu_desc;
2597 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2598 	uint8_t curr_user_index = 0;
2599 	struct dp_vdev *vdev;
2600 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2601 
2602 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2603 
2604 	tag_buf++;
2605 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2606 
2607 	curr_user_index =
2608 		dp_get_ppdu_info_user_index(pdev,
2609 					    peer_id, ppdu_info);
2610 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2611 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2612 	if (peer_id == DP_SCAN_PEER_ID) {
2613 		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
2614 					     DP_MOD_ID_TX_PPDU_STATS);
2615 		if (!vdev)
2616 			return;
2617 		dp_vdev_unref_delete(pdev->soc, vdev,
2618 				     DP_MOD_ID_TX_PPDU_STATS);
2619 	}
2620 	ppdu_user_desc->peer_id = peer_id;
2621 
2622 	ppdu_user_desc->tid =
2623 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
2624 
2625 	tag_buf += 1;
2626 
2627 	ppdu_user_desc->user_pos =
2628 		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
2629 	ppdu_user_desc->mu_group_id =
2630 		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
2631 
2632 	tag_buf += 1;
2633 
2634 	ppdu_user_desc->ru_start =
2635 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
2636 	ppdu_user_desc->ru_tones =
2637 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
2638 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
2639 	ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones;
2640 
2641 	tag_buf += 2;
2642 
2643 	ppdu_user_desc->ppdu_type =
2644 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
2645 
2646 	tag_buf++;
2647 	ppdu_user_desc->tx_rate = *tag_buf;
2648 
2649 	ppdu_user_desc->ltf_size =
2650 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
2651 	ppdu_user_desc->stbc =
2652 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
2653 	ppdu_user_desc->he_re =
2654 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
2655 	ppdu_user_desc->txbf =
2656 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
2657 	ppdu_user_desc->bw =
2658 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
2659 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
2660 	ppdu_desc->usr_nss_sum += ppdu_user_desc->nss;
2661 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
2662 	ppdu_user_desc->preamble =
2663 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
2664 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
2665 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
2666 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
2667 }
2668 
2669 /*
2670  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
2671  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2672  * pdev: DP PDEV handle
2673  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2674  * @ppdu_info: per ppdu tlv structure
2675  *
2676  * return:void
2677  */
2678 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2679 		struct dp_pdev *pdev, uint32_t *tag_buf,
2680 		struct ppdu_info *ppdu_info)
2681 {
2682 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
2683 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
2684 
2685 	struct cdp_tx_completion_ppdu *ppdu_desc;
2686 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2687 	uint8_t curr_user_index = 0;
2688 	uint16_t peer_id;
2689 	uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
2690 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2691 
2692 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2693 
2694 	tag_buf++;
2695 
2696 	peer_id =
2697 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2698 
2699 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2700 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2701 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2702 	ppdu_user_desc->peer_id = peer_id;
2703 
2704 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2705 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2706 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2707 
2708 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2709 						   (void *)ppdu_user_desc,
2710 						   ppdu_info->ppdu_id,
2711 						   size);
2712 }
2713 
2714 /*
2715  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
2716  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2717  * soc: DP SOC handle
2718  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2719  * @ppdu_info: per ppdu tlv structure
2720  *
2721  * return:void
2722  */
2723 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2724 		struct dp_pdev *pdev, uint32_t *tag_buf,
2725 		struct ppdu_info *ppdu_info)
2726 {
2727 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
2728 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
2729 
2730 	struct cdp_tx_completion_ppdu *ppdu_desc;
2731 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2732 	uint8_t curr_user_index = 0;
2733 	uint16_t peer_id;
2734 	uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
2735 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2736 
2737 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2738 
2739 	tag_buf++;
2740 
2741 	peer_id =
2742 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2743 
2744 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2745 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2746 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2747 	ppdu_user_desc->peer_id = peer_id;
2748 
2749 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2750 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2751 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2752 
2753 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2754 						   (void *)ppdu_user_desc,
2755 						   ppdu_info->ppdu_id,
2756 						   size);
2757 }
2758 
/*
 * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
 * htt_ppdu_stats_user_cmpltn_common_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return:void
 */
static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	uint8_t bw_iter;
	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* word 1: sw_peer_id / completion status / tid */
	tag_buf++;
	peer_id =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->completion_status =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
				*tag_buf);

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);


	/* word 2: ack RSSI is only trustworthy when completion succeeded */
	tag_buf++;
	if (qdf_likely(ppdu_user_desc->completion_status ==
			HTT_PPDU_STATS_USER_STATUS_OK)) {
		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
		ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi;
		ppdu_user_desc->ack_rssi_valid = 1;
	} else {
		ppdu_user_desc->ack_rssi_valid = 0;
	}

	/* word 3: MPDU success/tried counters */
	tag_buf++;

	ppdu_user_desc->mpdu_success =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);

	/* failed = tried - success */
	ppdu_user_desc->mpdu_failed =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
						ppdu_user_desc->mpdu_success;

	/* word 4: retries, A-MPDU flag, response/protection outcome */
	tag_buf++;

	ppdu_user_desc->long_retries =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);

	ppdu_user_desc->short_retries =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
	ppdu_user_desc->retry_msdus =
		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;

	ppdu_user_desc->is_ampdu =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;

	ppdu_desc->resp_type =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
	ppdu_desc->mprot_type =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
	ppdu_desc->rts_success =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
	ppdu_desc->rts_failure =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);
	ppdu_user_desc->pream_punct =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf);

	ppdu_info->compltn_common_tlv++;

	/*
	 * MU BAR may send request to n users but we may received ack only from
	 * m users. To have count of number of users respond back, we have a
	 * separate counter bar_num_users per PPDU that get increment for every
	 * htt_ppdu_stats_user_cmpltn_common_tlv
	 */
	ppdu_desc->bar_num_users++;

	/* per-chain RSSI values, one 32-bit word per chain */
	tag_buf++;
	for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
		ppdu_user_desc->rssi_chain[bw_iter] =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
		tag_buf++;
	}

	/* smart-antenna (SA) fields follow the RSSI chain array */
	ppdu_user_desc->sa_tx_antenna =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);

	tag_buf++;
	ppdu_user_desc->sa_is_training =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
	if (ppdu_user_desc->sa_is_training) {
		/* goodput is only reported while SA training is active */
		ppdu_user_desc->sa_goodput =
			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
	}

	/* per-bandwidth SA max rates, one word each */
	tag_buf++;
	for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
		ppdu_user_desc->sa_max_rates[bw_iter] =
			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
	}

	tag_buf += CDP_NUM_SA_BW;
	ppdu_user_desc->current_rate_per =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
}
2882 
/*
 * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
 * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return:void
 */
static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	uint8_t curr_user_index = 0;
	uint16_t peer_id;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* step past the TLV header word to reach sw_peer_id */
	tag_buf++;

	peer_id =
	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	ppdu_user_desc->peer_id = peer_id;

	/* copy the 64-entry block-ack window bitmap (2 dwords) */
	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
	/* BA window size in bits */
	ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
}
2921 
/*
 * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
 * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return:void
 */
static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	uint8_t curr_user_index = 0;
	uint16_t peer_id;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* step past the TLV header word to reach sw_peer_id */
	tag_buf++;

	peer_id =
	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	ppdu_user_desc->peer_id = peer_id;

	/* copy the 256-entry block-ack window bitmap (8 dwords) */
	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
	/* BA window size in bits */
	ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
}
2960 
/*
 * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
 * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return:void
 */
static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* sw_peer_id sits in the third word of this TLV */
	tag_buf += 2;
	peer_id =
	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	/*
	 * Only the first ack/BA status TLV per user is processed; duplicates
	 * are counted in pdev stats and discarded.
	 */
	if (!ppdu_user_desc->ack_ba_tlv) {
		ppdu_user_desc->ack_ba_tlv = 1;
	} else {
		pdev->stats.ack_ba_comes_twice++;
		return;
	}

	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;
	/* not to update ppdu_desc->tid from this TLV */
	ppdu_user_desc->num_mpdu =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);

	ppdu_user_desc->num_msdu =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);

	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;

	tag_buf++;
	ppdu_user_desc->start_seq =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
			*tag_buf);

	tag_buf++;
	ppdu_user_desc->success_bytes = *tag_buf;

	/* increase ack ba tlv counter on successful mpdu */
	if (ppdu_user_desc->num_mpdu)
		ppdu_info->ack_ba_tlv++;

	/*
	 * No BA bitmap TLV was received for this user; synthesize a minimal
	 * one-entry window from the start sequence so later processing sees
	 * a valid window.
	 */
	if (ppdu_user_desc->ba_size == 0) {
		ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
		ppdu_user_desc->ba_bitmap[0] = 1;
		ppdu_user_desc->ba_size = 1;
	}
}
3026 
3027 /*
3028  * dp_process_ppdu_stats_user_common_array_tlv: Process
3029  * htt_ppdu_stats_user_common_array_tlv
3030  * pdev: DP PDEV handle
3031  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
3032  * @ppdu_info: per ppdu tlv structure
3033  *
3034  * return:void
3035  */
3036 static void dp_process_ppdu_stats_user_common_array_tlv(
3037 		struct dp_pdev *pdev, uint32_t *tag_buf,
3038 		struct ppdu_info *ppdu_info)
3039 {
3040 	uint32_t peer_id;
3041 	struct cdp_tx_completion_ppdu *ppdu_desc;
3042 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3043 	uint8_t curr_user_index = 0;
3044 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
3045 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3046 
3047 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3048 
3049 	tag_buf++;
3050 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
3051 	tag_buf += 3;
3052 	peer_id =
3053 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
3054 
3055 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
3056 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3057 			"Invalid peer");
3058 		return;
3059 	}
3060 
3061 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
3062 
3063 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3064 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3065 
3066 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
3067 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
3068 
3069 	tag_buf++;
3070 
3071 	ppdu_user_desc->success_msdus =
3072 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
3073 	ppdu_user_desc->retry_bytes =
3074 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
3075 	tag_buf++;
3076 	ppdu_user_desc->failed_msdus =
3077 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
3078 }
3079 
/*
 * dp_process_ppdu_stats_user_compltn_flush_tlv: Process
 * htt_ppdu_stats_flush_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return:void
 */
static void
dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
					     uint32_t *tag_buf,
					     struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc;
	uint32_t peer_id;
	uint8_t tid;
	struct dp_peer *peer;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
				qdf_nbuf_data(ppdu_info->nbuf);
	ppdu_desc->is_flush = 1;

	/* word 1: drop reason */
	tag_buf++;
	ppdu_desc->drop_reason = *tag_buf;

	/* word 2: flushed msdu/mpdu counts and flow type */
	tag_buf++;
	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);

	/* word 3: peer, tid and queue identification */
	tag_buf++;
	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);

	/* a flush TLV describes exactly one user */
	ppdu_desc->num_users = 1;
	ppdu_desc->user[0].peer_id = peer_id;
	ppdu_desc->user[0].tid = tid;

	ppdu_desc->queue_type =
			HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);

	peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
				     DP_MOD_ID_TX_PPDU_STATS);
	if (!peer)
		goto add_ppdu_to_sched_list;

	/* account msdus dropped due to excessive retries per WME AC */
	if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
		DP_STATS_INC(peer,
			     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
			     ppdu_desc->num_msdu);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);

add_ppdu_to_sched_list:
	/* mark done and move to the scheduler-completed list */
	ppdu_info->done = 1;
	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
	pdev->list_depth--;
	TAILQ_INSERT_TAIL(&pdev->sched_comp_ppdu_list, ppdu_info,
			  ppdu_info_list_elem);
	pdev->sched_comp_list_depth++;
}
3143 
/**
 * dp_process_ppdu_stats_sch_cmd_status_tlv: Process schedule command status tlv
 * Here we are not going to process the buffer.
 * @pdev: DP PDEV handle
 * @ppdu_info: per ppdu tlv structure
 *
 * return:void
 */
static void
dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev,
					 struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct dp_peer *peer;
	uint8_t num_users;
	uint8_t i;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
				qdf_nbuf_data(ppdu_info->nbuf);

	num_users = ppdu_desc->bar_num_users;

	/*
	 * For BAR/CTRL frames, lift per-user info (phy mode, mcs, frame
	 * control) from the first user at position 0 into the ppdu level.
	 */
	for (i = 0; i < num_users; i++) {
		if (ppdu_desc->user[i].user_pos == 0) {
			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
				/* update phy mode for bar frame */
				ppdu_desc->phy_mode =
					ppdu_desc->user[i].preamble;
				ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs;
				break;
			}
			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) {
				ppdu_desc->frame_ctrl =
					ppdu_desc->user[i].frame_ctrl;
				break;
			}
		}
	}

	/* data PPDU with delayed BA: stash per-user info on the peer so the
	 * later BAR completion can be matched back to this PPDU
	 */
	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
	    ppdu_desc->delayed_ba) {
		qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);

		for (i = 0; i < ppdu_desc->num_users; i++) {
			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
			uint64_t start_tsf;
			uint64_t end_tsf;
			uint32_t ppdu_id;

			ppdu_id = ppdu_desc->ppdu_id;
			peer = dp_peer_get_ref_by_id
				(pdev->soc, ppdu_desc->user[i].peer_id,
				 DP_MOD_ID_TX_PPDU_STATS);
			/**
			 * This check is to make sure peer is not deleted
			 * after processing the TLVs.
			 */
			if (!peer)
				continue;

			delay_ppdu = &peer->delayed_ba_ppdu_stats;
			start_tsf = ppdu_desc->ppdu_start_timestamp;
			end_tsf = ppdu_desc->ppdu_end_timestamp;
			/**
			 * save delayed ba user info
			 */
			if (ppdu_desc->user[i].delayed_ba) {
				dp_peer_copy_delay_stats(peer,
							 &ppdu_desc->user[i],
							 ppdu_id);
				peer->last_delayed_ba_ppduid = ppdu_id;
				delay_ppdu->ppdu_start_timestamp = start_tsf;
				delay_ppdu->ppdu_end_timestamp = end_tsf;
			}
			ppdu_desc->user[i].peer_last_delayed_ba =
				peer->last_delayed_ba;

			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);

			/* debug trace when delay stats were not copied */
			if (ppdu_desc->user[i].delayed_ba &&
			    !ppdu_desc->user[i].debug_copied) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO_MED,
					  "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n",
					  __func__, __LINE__,
					  ppdu_desc->ppdu_id,
					  ppdu_desc->bar_ppdu_id,
					  ppdu_desc->num_users,
					  i,
					  ppdu_desc->htt_frame_type);
			}
		}
	}

	/*
	 * when frame type is BAR and STATS_COMMON_TLV is set
	 * copy the store peer delayed info to BAR status
	 */
	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
			uint64_t start_tsf;
			uint64_t end_tsf;

			peer = dp_peer_get_ref_by_id
				(pdev->soc,
				 ppdu_desc->user[i].peer_id,
				 DP_MOD_ID_TX_PPDU_STATS);
			/**
			 * This check is to make sure peer is not deleted
			 * after processing the TLVs.
			 */
			if (!peer)
				continue;

			/* skip users whose completion did not succeed */
			if (ppdu_desc->user[i].completion_status !=
			    HTT_PPDU_STATS_USER_STATUS_OK) {
				dp_peer_unref_delete(peer,
						     DP_MOD_ID_TX_PPDU_STATS);
				continue;
			}

			delay_ppdu = &peer->delayed_ba_ppdu_stats;
			start_tsf = delay_ppdu->ppdu_start_timestamp;
			end_tsf = delay_ppdu->ppdu_end_timestamp;

			/* restore the saved data-PPDU identity/timestamps so
			 * the BAR status is reported against that PPDU
			 */
			if (peer->last_delayed_ba) {
				dp_peer_copy_stats_to_bar(peer,
							  &ppdu_desc->user[i]);
				ppdu_desc->ppdu_id =
					peer->last_delayed_ba_ppduid;
				ppdu_desc->ppdu_start_timestamp = start_tsf;
				ppdu_desc->ppdu_end_timestamp = end_tsf;
			}
			ppdu_desc->user[i].peer_last_delayed_ba =
				peer->last_delayed_ba;
			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
		}
	}

	/* move this ppdu_info to the scheduler-completed list */
	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
	pdev->list_depth--;
	TAILQ_INSERT_TAIL(&pdev->sched_comp_ppdu_list, ppdu_info,
			  ppdu_info_list_elem);
	pdev->sched_comp_list_depth++;
}
3290 
3291 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3292 /*
3293  * dp_deliver_mgmt_frm: Process
3294  * @pdev: DP PDEV handle
3295  * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3296  *
3297  * return: void
3298  */
3299 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
3300 {
3301 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3302 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
3303 				     nbuf, HTT_INVALID_PEER,
3304 				     WDI_NO_VAL, pdev->pdev_id);
3305 	} else {
3306 		if (!pdev->bpr_enable)
3307 			qdf_nbuf_free(nbuf);
3308 	}
3309 }
3310 #endif
3311 
/*
 * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
 * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: nbuf holding the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
 * @ppdu_id: ppdu id of the mgmt/ctrl frame
 *
 * return:QDF_STATUS_SUCCESS if nbuf has to be freed in caller,
 *        QDF_STATUS_E_ALREADY if ownership was handed off here
 */
static QDF_STATUS
dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
					      qdf_nbuf_t tag_buf,
					      uint32_t ppdu_id)
{
	uint32_t *nbuf_ptr;
	uint8_t trim_size;
	size_t head_size;
	struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
	uint32_t *msg_word;
	uint32_t tsf_hdr;

	/* nothing consumes the frame; let the caller free the nbuf */
	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
	    (!pdev->bpr_enable) && (!pdev->tx_capture_enabled))
		return QDF_STATUS_SUCCESS;

	/*
	 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
	 */
	msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
	msg_word = msg_word + 2;
	tsf_hdr = *msg_word;

	/*
	 * strip everything before the mgmt frame payload
	 * NOTE(review): trim_size is uint8_t — assumes the offset always
	 * fits in 8 bits; confirm against the HTT message layout.
	 */
	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
		      qdf_nbuf_data(tag_buf));

	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
		return QDF_STATUS_SUCCESS;

	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
			    pdev->mgmtctrl_frm_info.mgmt_buf_len);

	if (pdev->tx_capture_enabled) {
		/* prepend a completion-info header for the capture path */
		head_size = sizeof(struct cdp_tx_mgmt_comp_info);
		if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
			qdf_err("Fail to get headroom h_sz %zu h_avail %d\n",
				head_size, qdf_nbuf_headroom(tag_buf));
			qdf_assert_always(0);
			return QDF_STATUS_E_NOMEM;
		}
		ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
					qdf_nbuf_push_head(tag_buf, head_size);
		qdf_assert_always(ptr_mgmt_comp_info);
		ptr_mgmt_comp_info->ppdu_id = ppdu_id;
		ptr_mgmt_comp_info->is_sgen_pkt = true;
		ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
	} else {
		/* otherwise just prepend the 32-bit ppdu_id */
		head_size = sizeof(ppdu_id);
		nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
		*nbuf_ptr = ppdu_id;
	}

	if (pdev->bpr_enable) {
		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
				     tag_buf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	}

	/* hand the nbuf off; caller must not free it after this point */
	dp_deliver_mgmt_frm(pdev, tag_buf);

	return QDF_STATUS_E_ALREADY;
}
3384 
3385 /**
3386  * dp_validate_fix_ppdu_tlv(): Function to validate the length of PPDU
3387  *
3388  * If the TLV length sent as part of PPDU TLV is less that expected size i.e
3389  * size of corresponding data structure, pad the remaining bytes with zeros
3390  * and continue processing the TLVs
3391  *
3392  * @pdev: DP pdev handle
3393  * @tag_buf: TLV buffer
3394  * @tlv_expected_size: Expected size of Tag
3395  * @tlv_len: TLV length received from FW
3396  *
3397  * Return: Pointer to updated TLV
3398  */
3399 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
3400 						 uint32_t *tag_buf,
3401 						 uint16_t tlv_expected_size,
3402 						 uint16_t tlv_len)
3403 {
3404 	uint32_t *tlv_desc = tag_buf;
3405 
3406 	qdf_assert_always(tlv_len != 0);
3407 
3408 	if (tlv_len < tlv_expected_size) {
3409 		qdf_mem_zero(pdev->ppdu_tlv_buf, tlv_expected_size);
3410 		qdf_mem_copy(pdev->ppdu_tlv_buf, tag_buf, tlv_len);
3411 		tlv_desc = pdev->ppdu_tlv_buf;
3412 	}
3413 
3414 	return tlv_desc;
3415 }
3416 
/**
 * dp_process_ppdu_tag(): Function to process the PPDU TLVs
 *
 * Dispatches each known TLV type to its handler after length-validating
 * (and zero-padding, if short) the TLV via dp_validate_fix_ppdu_tlv().
 * Unknown TLV types are silently ignored.
 *
 * @pdev: DP pdev handle
 * @tag_buf: TLV buffer
 * @tlv_len: length of tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
 */
static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
		uint32_t tlv_len, struct ppdu_info *ppdu_info)
{
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
	uint16_t tlv_expected_size;
	uint32_t *tlv_desc;

	switch (tlv_type) {
	case HTT_PPDU_STATS_COMMON_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
						      ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_RATE_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
						    ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_cmpltn_common_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_common_array_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
							     ppdu_info);
		break;
	case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV:
		/* status TLV carries no payload to validate */
		dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info);
		break;
	default:
		break;
	}
}
3524 
#ifdef WLAN_ATF_ENABLE
/**
 * dp_ppdu_desc_user_phy_tx_time_update() - Compute per-user PHY Tx airtime
 * @pdev: DP pdev handle
 * @ppdu_desc: completed PPDU descriptor
 * @user: per-user completion descriptor to update
 *
 * Only runs for data PPDUs when ATF stats collection is enabled; the total
 * PPDU airtime is apportioned across users by NSS and RU width.
 *
 * return: void
 */
static void
dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
				     struct cdp_tx_completion_ppdu *ppdu_desc,
				     struct cdp_tx_completion_ppdu_user *user)
{
	uint32_t nss_ru_width_sum = 0;

	if (!pdev || !ppdu_desc || !user)
		return;

	if (!pdev->dp_atf_stats_enable)
		return;

	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA)
		return;

	nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
	if (!nss_ru_width_sum)
		nss_ru_width_sum = 1;	/* guard the division below */

	/**
	 * For SU-MIMO PPDU phy Tx time is same for the single user.
	 * For MU-MIMO phy Tx time is calculated per user as below
	 *     user phy tx time =
	 *           Entire PPDU duration * MU Ratio * OFDMA Ratio
	 *     MU Ratio = usr_nss / Sum_of_nss_of_all_users
	 *     OFDMA_ratio = usr_ru_width / Sum_of_ru_width_of_all_users
	 *     usr_ru_width = ru_end - ru_start + 1
	 */
	if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
		user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us;
	} else {
		user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us *
				user->nss * user->ru_tones) / nss_ru_width_sum;
	}
}
#else
/* Stub when ATF (Air Time Fairness) stats support is compiled out */
static void
dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
				     struct cdp_tx_completion_ppdu *ppdu_desc,
				     struct cdp_tx_completion_ppdu_user *user)
{
}
#endif
3570 
/**
 * dp_ppdu_desc_user_stats_update(): Function to update TX user stats
 * @pdev: DP pdev handle
 * @ppdu_info: per PPDU TLV descriptor
 *
 * return: void
 */
void
dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
			       struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	struct dp_peer *peer = NULL;
	uint32_t tlv_bitmap_expected;
	uint32_t tlv_bitmap_default;
	uint16_t i;
	uint32_t num_users;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(ppdu_info->nbuf);

	/* for BAR frames the ppdu_id was already restored from the
	 * delayed-BA peer state; do not overwrite it
	 */
	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR)
		ppdu_desc->ppdu_id = ppdu_info->ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
	    pdev->tx_capture_enabled) {
		/* sniffer mode expects extra TLVs for A-MPDU PPDUs */
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
					ppdu_info->tlv_bitmap);
	}

	tlv_bitmap_default = tlv_bitmap_expected;

	/* for BAR PPDUs only the responding users are iterated */
	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
		num_users = ppdu_desc->bar_num_users;
		ppdu_desc->num_users = ppdu_desc->bar_num_users;
	} else {
		num_users = ppdu_desc->num_users;
	}
	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);

	for (i = 0; i < num_users; i++) {
		/* aggregate per-user counts to the PPDU level */
		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;

		peer = dp_peer_get_ref_by_id(pdev->soc,
					     ppdu_desc->user[i].peer_id,
					     DP_MOD_ID_TX_PPDU_STATS);
		/**
		 * This check is to make sure peer is not deleted
		 * after processing the TLVs.
		 */
		if (!peer)
			continue;

		ppdu_desc->user[i].is_bss_peer = peer->bss_peer;
		/*
		 * different frame like DATA, BAR or CTRL has different
		 * tlv bitmap expected. Apart from ACK_BA_STATUS TLV, we
		 * receive other tlv in-order/sequential from fw.
		 * Since ACK_BA_STATUS TLV come from Hardware it is
		 * asynchronous So we need to depend on some tlv to confirm
		 * all tlv is received for a ppdu.
		 * So we depend on both SCHED_CMD_STATUS_TLV and
		 * ACK_BA_STATUS_TLV. for failure packet we won't get
		 * ACK_BA_STATUS_TLV.
		 */
		if (!(ppdu_info->tlv_bitmap &
		      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) ||
		    (!(ppdu_info->tlv_bitmap &
		       (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
		     (ppdu_desc->user[i].completion_status ==
		      HTT_PPDU_STATS_USER_STATUS_OK))) {
			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
			continue;
		}

		/**
		 * Update tx stats for data frames having Qos as well as
		 * non-Qos data tid
		 */

		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
		     (ppdu_desc->htt_frame_type ==
		      HTT_STATS_FTYPE_SGEN_QOS_NULL) ||
		     ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) &&
		      (ppdu_desc->num_mpdu > 1))) &&
		      (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {

			dp_tx_stats_update(pdev, peer,
					   &ppdu_desc->user[i],
					   ppdu_desc->ack_rssi);
			dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
		}

		dp_ppdu_desc_user_phy_tx_time_update(pdev, ppdu_desc,
						     &ppdu_desc->user[i]);

		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
		tlv_bitmap_expected = tlv_bitmap_default;
	}
}
3676 
3677 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3678 
3679 /**
3680  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
3681  * to upper layer
3682  * @pdev: DP pdev handle
3683  * @ppdu_info: per PPDU TLV descriptor
3684  *
3685  * return: void
3686  */
3687 static
3688 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
3689 			  struct ppdu_info *ppdu_info)
3690 {
3691 	struct ppdu_info *s_ppdu_info = NULL;
3692 	struct ppdu_info *ppdu_info_next = NULL;
3693 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3694 	qdf_nbuf_t nbuf;
3695 	uint32_t time_delta = 0;
3696 	bool starved = 0;
3697 	bool matched = 0;
3698 	bool recv_ack_ba_done = 0;
3699 
3700 	if (ppdu_info->tlv_bitmap &
3701 	    (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
3702 	    ppdu_info->done)
3703 		recv_ack_ba_done = 1;
3704 
3705 	pdev->last_sched_cmdid = ppdu_info->sched_cmdid;
3706 
3707 	s_ppdu_info = TAILQ_FIRST(&pdev->sched_comp_ppdu_list);
3708 
3709 	TAILQ_FOREACH_SAFE(s_ppdu_info, &pdev->sched_comp_ppdu_list,
3710 			   ppdu_info_list_elem, ppdu_info_next) {
3711 		if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32)
3712 			time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) +
3713 					ppdu_info->tsf_l32;
3714 		else
3715 			time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32;
3716 
3717 		if (!s_ppdu_info->done && !recv_ack_ba_done) {
3718 			if (time_delta < MAX_SCHED_STARVE) {
3719 				dp_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]",
3720 					pdev->pdev_id,
3721 					s_ppdu_info->ppdu_id,
3722 					s_ppdu_info->sched_cmdid,
3723 					s_ppdu_info->tlv_bitmap,
3724 					s_ppdu_info->tsf_l32,
3725 					s_ppdu_info->done);
3726 				break;
3727 			}
3728 			starved = 1;
3729 		}
3730 
3731 		pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid;
3732 		TAILQ_REMOVE(&pdev->sched_comp_ppdu_list, s_ppdu_info,
3733 			     ppdu_info_list_elem);
3734 		pdev->sched_comp_list_depth--;
3735 
3736 		nbuf = s_ppdu_info->nbuf;
3737 		qdf_assert_always(nbuf);
3738 		ppdu_desc = (struct cdp_tx_completion_ppdu *)
3739 				qdf_nbuf_data(nbuf);
3740 		ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap;
3741 
3742 		if (starved) {
3743 			dp_err("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n",
3744 			       ppdu_desc->frame_ctrl,
3745 			       ppdu_desc->htt_frame_type,
3746 			       ppdu_desc->tlv_bitmap,
3747 			       ppdu_desc->user[0].completion_status);
3748 			starved = 0;
3749 		}
3750 
3751 		if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id &&
3752 		    ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid)
3753 			matched = 1;
3754 
3755 		dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info);
3756 
3757 		qdf_mem_free(s_ppdu_info);
3758 
3759 		/**
3760 		 * Deliver PPDU stats only for valid (acked) data
3761 		 * frames if sniffer mode is not enabled.
3762 		 * If sniffer mode is enabled, PPDU stats
3763 		 * for all frames including mgmt/control
3764 		 * frames should be delivered to upper layer
3765 		 */
3766 		if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3767 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3768 					     pdev->soc,
3769 					     nbuf, HTT_INVALID_PEER,
3770 					     WDI_NO_VAL,
3771 					     pdev->pdev_id);
3772 		} else {
3773 			if (ppdu_desc->num_mpdu != 0 &&
3774 			    ppdu_desc->num_users != 0 &&
3775 			    ppdu_desc->frame_ctrl &
3776 			    HTT_FRAMECTRL_DATATYPE) {
3777 				dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3778 						     pdev->soc,
3779 						     nbuf, HTT_INVALID_PEER,
3780 						     WDI_NO_VAL,
3781 						     pdev->pdev_id);
3782 			} else {
3783 				qdf_nbuf_free(nbuf);
3784 			}
3785 		}
3786 
3787 		if (matched)
3788 			break;
3789 	}
3790 	return;
3791 }
3792 
3793 #endif
3794 
3795 /**
3796  * dp_get_ppdu_desc(): Function to allocate new PPDU status
3797  * desc for new ppdu id
3798  * @pdev: DP pdev handle
3799  * @ppdu_id: PPDU unique identifier
3800  * @tlv_type: TLV type received
3801  * @tsf_l32: timestamp received along with ppdu stats indication header
3802  * @max_users: Maximum user for that particular ppdu
3803  *
3804  * return: ppdu_info per ppdu tlv structure
3805  */
3806 static
3807 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
3808 				   uint8_t tlv_type, uint32_t tsf_l32,
3809 				   uint8_t max_users)
3810 {
3811 	struct ppdu_info *ppdu_info = NULL;
3812 	struct ppdu_info *s_ppdu_info = NULL;
3813 	struct ppdu_info *ppdu_info_next = NULL;
3814 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3815 	uint32_t size = 0;
3816 	struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL;
3817 	struct cdp_tx_completion_ppdu_user *tmp_user;
3818 	uint32_t time_delta;
3819 
3820 	/*
3821 	 * Find ppdu_id node exists or not
3822 	 */
3823 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3824 			   ppdu_info_list_elem, ppdu_info_next) {
3825 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
3826 			if (ppdu_info->tsf_l32 > tsf_l32)
3827 				time_delta  = (MAX_TSF_32 -
3828 					       ppdu_info->tsf_l32) + tsf_l32;
3829 			else
3830 				time_delta  = tsf_l32 - ppdu_info->tsf_l32;
3831 
3832 			if (time_delta > WRAP_DROP_TSF_DELTA) {
3833 				TAILQ_REMOVE(&pdev->ppdu_info_list,
3834 					     ppdu_info, ppdu_info_list_elem);
3835 				pdev->list_depth--;
3836 				pdev->stats.ppdu_wrap_drop++;
3837 				tmp_ppdu_desc =
3838 					(struct cdp_tx_completion_ppdu *)
3839 					qdf_nbuf_data(ppdu_info->nbuf);
3840 				tmp_user = &tmp_ppdu_desc->user[0];
3841 				dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n",
3842 						     ppdu_info->ppdu_id,
3843 						     ppdu_info->tsf_l32,
3844 						     ppdu_info->tlv_bitmap,
3845 						     tmp_user->completion_status,
3846 						     ppdu_info->compltn_common_tlv,
3847 						     ppdu_info->ack_ba_tlv,
3848 						     ppdu_id, tsf_l32, tlv_type);
3849 				qdf_nbuf_free(ppdu_info->nbuf);
3850 				ppdu_info->nbuf = NULL;
3851 				qdf_mem_free(ppdu_info);
3852 			} else {
3853 				break;
3854 			}
3855 		}
3856 	}
3857 
3858 	/*
3859 	 * check if it is ack ba tlv and if it is not there in ppdu info
3860 	 * list then check it in sched completion ppdu list
3861 	 */
3862 	if (!ppdu_info &&
3863 	    tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) {
3864 		TAILQ_FOREACH(s_ppdu_info,
3865 			      &pdev->sched_comp_ppdu_list,
3866 			      ppdu_info_list_elem) {
3867 			if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) {
3868 				if (s_ppdu_info->tsf_l32 > tsf_l32)
3869 					time_delta  = (MAX_TSF_32 -
3870 						       s_ppdu_info->tsf_l32) +
3871 							tsf_l32;
3872 				else
3873 					time_delta  = tsf_l32 -
3874 						s_ppdu_info->tsf_l32;
3875 				if (time_delta < WRAP_DROP_TSF_DELTA) {
3876 					ppdu_info = s_ppdu_info;
3877 					break;
3878 				}
3879 			} else {
3880 				/*
3881 				 * ACK BA STATUS TLV comes sequential order
3882 				 * if we received ack ba status tlv for second
3883 				 * ppdu and first ppdu is still waiting for
3884 				 * ACK BA STATUS TLV. Based on fw comment
3885 				 * we won't receive it tlv later. So we can
3886 				 * set ppdu info done.
3887 				 */
3888 				if (s_ppdu_info)
3889 					s_ppdu_info->done = 1;
3890 			}
3891 		}
3892 	}
3893 
3894 	if (ppdu_info) {
3895 		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
3896 			/**
3897 			 * if we get tlv_type that is already been processed
3898 			 * for ppdu, that means we got a new ppdu with same
3899 			 * ppdu id. Hence Flush the older ppdu
3900 			 * for MUMIMO and OFDMA, In a PPDU we have
3901 			 * multiple user with same tlv types. tlv bitmap is
3902 			 * used to check whether SU or MU_MIMO/OFDMA
3903 			 */
3904 			if (!(ppdu_info->tlv_bitmap &
3905 			    (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
3906 				return ppdu_info;
3907 
3908 			ppdu_desc = (struct cdp_tx_completion_ppdu *)
3909 				qdf_nbuf_data(ppdu_info->nbuf);
3910 
3911 			/**
3912 			 * apart from ACK BA STATUS TLV rest all comes in order
3913 			 * so if tlv type not ACK BA STATUS TLV we can deliver
3914 			 * ppdu_info
3915 			 */
3916 			if ((tlv_type ==
3917 			     HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
3918 			    (ppdu_desc->htt_frame_type ==
3919 			     HTT_STATS_FTYPE_SGEN_MU_BAR))
3920 				return ppdu_info;
3921 
3922 			dp_ppdu_desc_deliver(pdev, ppdu_info);
3923 		} else {
3924 			return ppdu_info;
3925 		}
3926 	}
3927 
3928 	/**
3929 	 * Flush the head ppdu descriptor if ppdu desc list reaches max
3930 	 * threshold
3931 	 */
3932 	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
3933 		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
3934 		TAILQ_REMOVE(&pdev->ppdu_info_list,
3935 			     ppdu_info, ppdu_info_list_elem);
3936 		pdev->list_depth--;
3937 		pdev->stats.ppdu_drop++;
3938 		qdf_nbuf_free(ppdu_info->nbuf);
3939 		ppdu_info->nbuf = NULL;
3940 		qdf_mem_free(ppdu_info);
3941 	}
3942 
3943 	size = sizeof(struct cdp_tx_completion_ppdu) +
3944 			(max_users * sizeof(struct cdp_tx_completion_ppdu_user));
3945 
3946 	/*
3947 	 * Allocate new ppdu_info node
3948 	 */
3949 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
3950 	if (!ppdu_info)
3951 		return NULL;
3952 
3953 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size,
3954 									 0, 4, TRUE);
3955 	if (!ppdu_info->nbuf) {
3956 		qdf_mem_free(ppdu_info);
3957 		return NULL;
3958 	}
3959 
3960 	ppdu_info->ppdu_desc =
3961 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3962 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size);
3963 
3964 	if (qdf_nbuf_put_tail(ppdu_info->nbuf, size) == NULL) {
3965 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3966 				"No tailroom for HTT PPDU");
3967 		qdf_nbuf_free(ppdu_info->nbuf);
3968 		ppdu_info->nbuf = NULL;
3969 		ppdu_info->last_user = 0;
3970 		qdf_mem_free(ppdu_info);
3971 		return NULL;
3972 	}
3973 
3974 	ppdu_info->ppdu_desc->max_users = max_users;
3975 	ppdu_info->tsf_l32 = tsf_l32;
3976 	/**
3977 	 * No lock is needed because all PPDU TLVs are processed in
3978 	 * same context and this list is updated in same context
3979 	 */
3980 	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
3981 			ppdu_info_list_elem);
3982 	pdev->list_depth++;
3983 	return ppdu_info;
3984 }
3985 
3986 /**
3987  * dp_htt_process_tlv(): Function to process each PPDU TLVs
3988  * @pdev: DP pdev handle
3989  * @htt_t2h_msg: HTT target to host message
3990  *
3991  * return: ppdu_info per ppdu tlv structure
3992  */
3993 
3994 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
3995 		qdf_nbuf_t htt_t2h_msg)
3996 {
3997 	uint32_t length;
3998 	uint32_t ppdu_id;
3999 	uint8_t tlv_type;
4000 	uint32_t tlv_length, tlv_bitmap_expected;
4001 	uint8_t *tlv_buf;
4002 	struct ppdu_info *ppdu_info = NULL;
4003 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
4004 	uint8_t max_users = CDP_MU_MAX_USERS;
4005 	uint32_t tsf_l32;
4006 
4007 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
4008 
4009 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
4010 
4011 	msg_word = msg_word + 1;
4012 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
4013 
4014 	msg_word = msg_word + 1;
4015 	tsf_l32 = (uint32_t)(*msg_word);
4016 
4017 	msg_word = msg_word + 2;
4018 	while (length > 0) {
4019 		tlv_buf = (uint8_t *)msg_word;
4020 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
4021 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
4022 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
4023 			pdev->stats.ppdu_stats_counter[tlv_type]++;
4024 
4025 		if (tlv_length == 0)
4026 			break;
4027 
4028 		tlv_length += HTT_TLV_HDR_LEN;
4029 
4030 		/**
4031 		 * Not allocating separate ppdu descriptor for MGMT Payload
4032 		 * TLV as this is sent as separate WDI indication and it
4033 		 * doesn't contain any ppdu information
4034 		 */
4035 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
4036 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
4037 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
4038 			pdev->mgmtctrl_frm_info.mgmt_buf_len =
4039 				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
4040 						(*(msg_word + 1));
4041 			msg_word =
4042 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
4043 			length -= (tlv_length);
4044 			continue;
4045 		}
4046 
4047 		/*
4048 		 * retrieve max_users if it's USERS_INFO,
4049 		 * else, it's 1 for COMPLTN_FLUSH,
4050 		 * else, use CDP_MU_MAX_USERS
4051 		 */
4052 		if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) {
4053 			max_users =
4054 				HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1));
4055 		} else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) {
4056 			max_users = 1;
4057 		}
4058 
4059 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type,
4060 					     tsf_l32, max_users);
4061 		if (!ppdu_info)
4062 			return NULL;
4063 
4064 		ppdu_info->ppdu_desc->bss_color =
4065 			pdev->rx_mon_recv_status.bsscolor;
4066 
4067 		ppdu_info->ppdu_id = ppdu_id;
4068 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
4069 
4070 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
4071 
4072 		/**
4073 		 * Increment pdev level tlv count to monitor
4074 		 * missing TLVs
4075 		 */
4076 		pdev->tlv_count++;
4077 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
4078 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
4079 		length -= (tlv_length);
4080 	}
4081 
4082 	if (!ppdu_info)
4083 		return NULL;
4084 
4085 	pdev->last_ppdu_id = ppdu_id;
4086 
4087 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
4088 
4089 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
4090 	    pdev->tx_capture_enabled) {
4091 		if (ppdu_info->is_ampdu)
4092 			tlv_bitmap_expected =
4093 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
4094 					ppdu_info->tlv_bitmap);
4095 	}
4096 
4097 	ppdu_desc = ppdu_info->ppdu_desc;
4098 
4099 	if (!ppdu_desc)
4100 		return NULL;
4101 
4102 	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
4103 	    HTT_PPDU_STATS_USER_STATUS_OK) {
4104 		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
4105 	}
4106 
4107 	/*
4108 	 * for frame type DATA and BAR, we update stats based on MSDU,
4109 	 * successful msdu and mpdu are populate from ACK BA STATUS TLV
4110 	 * which comes out of order. successful mpdu also populated from
4111 	 * COMPLTN COMMON TLV which comes in order. for every ppdu_info
4112 	 * we store successful mpdu from both tlv and compare before delivering
4113 	 * to make sure we received ACK BA STATUS TLV. For some self generated
4114 	 * frame we won't get ack ba status tlv so no need to wait for
4115 	 * ack ba status tlv.
4116 	 */
4117 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
4118 	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
4119 		/*
4120 		 * most of the time bar frame will have duplicate ack ba
4121 		 * status tlv
4122 		 */
4123 		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
4124 		    (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))
4125 			return NULL;
4126 		/*
4127 		 * For data frame, compltn common tlv should match ack ba status
4128 		 * tlv and completion status. Reason we are checking first user
4129 		 * for ofdma, completion seen at next MU BAR frm, for mimo
4130 		 * only for first user completion will be immediate.
4131 		 */
4132 		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
4133 		    (ppdu_desc->user[0].completion_status == 0 &&
4134 		     (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)))
4135 			return NULL;
4136 	}
4137 
4138 	/**
4139 	 * Once all the TLVs for a given PPDU has been processed,
4140 	 * return PPDU status to be delivered to higher layer.
4141 	 * tlv_bitmap_expected can't be available for different frame type.
4142 	 * But SCHED CMD STATS TLV is the last TLV from the FW for a ppdu.
4143 	 * apart from ACK BA TLV, FW sends other TLV in sequential order.
4144 	 * flush tlv comes separate.
4145 	 */
4146 	if ((ppdu_info->tlv_bitmap != 0 &&
4147 	     (ppdu_info->tlv_bitmap &
4148 	      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) ||
4149 	    (ppdu_info->tlv_bitmap &
4150 	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) {
4151 		ppdu_info->done = 1;
4152 		return ppdu_info;
4153 	}
4154 
4155 	return NULL;
4156 }
4157 #endif /* FEATURE_PERPKT_INFO */
4158 
4159 /**
4160  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
4161  * @soc: DP SOC handle
4162  * @pdev_id: pdev id
4163  * @htt_t2h_msg: HTT message nbuf
4164  *
4165  * return:void
4166  */
4167 #if defined(WDI_EVENT_ENABLE)
4168 #ifdef FEATURE_PERPKT_INFO
4169 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
4170 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
4171 {
4172 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
4173 	struct ppdu_info *ppdu_info = NULL;
4174 	bool free_buf = true;
4175 
4176 	if (pdev_id >= MAX_PDEV_CNT)
4177 		return true;
4178 
4179 	pdev = soc->pdev_list[pdev_id];
4180 	if (!pdev)
4181 		return true;
4182 
4183 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
4184 	    !pdev->mcopy_mode && !pdev->bpr_enable)
4185 		return free_buf;
4186 
4187 	qdf_spin_lock_bh(&pdev->ppdu_stats_lock);
4188 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
4189 
4190 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
4191 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
4192 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
4193 		    QDF_STATUS_SUCCESS)
4194 			free_buf = false;
4195 	}
4196 
4197 	if (ppdu_info)
4198 		dp_ppdu_desc_deliver(pdev, ppdu_info);
4199 
4200 	pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
4201 	pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
4202 	pdev->mgmtctrl_frm_info.ppdu_id = 0;
4203 
4204 	qdf_spin_unlock_bh(&pdev->ppdu_stats_lock);
4205 
4206 	return free_buf;
4207 }
4208 #else
/* Stub used when FEATURE_PERPKT_INFO is disabled: no ppdu stats
 * processing, so the caller should always free the message buffer.
 */
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
4214 #endif
4215 #endif
4216 
4217 /**
4218  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
4219  * @soc: DP SOC handle
4220  * @htt_t2h_msg: HTT message nbuf
4221  *
4222  * return:void
4223  */
4224 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
4225 		qdf_nbuf_t htt_t2h_msg)
4226 {
4227 	uint8_t done;
4228 	qdf_nbuf_t msg_copy;
4229 	uint32_t *msg_word;
4230 
4231 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
4232 	msg_word = msg_word + 3;
4233 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
4234 
4235 	/*
4236 	 * HTT EXT stats response comes as stream of TLVs which span over
4237 	 * multiple T2H messages.
4238 	 * The first message will carry length of the response.
4239 	 * For rest of the messages length will be zero.
4240 	 *
4241 	 * Clone the T2H message buffer and store it in a list to process
4242 	 * it later.
4243 	 *
4244 	 * The original T2H message buffers gets freed in the T2H HTT event
4245 	 * handler
4246 	 */
4247 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
4248 
4249 	if (!msg_copy) {
4250 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
4251 				"T2H messge clone failed for HTT EXT STATS");
4252 		goto error;
4253 	}
4254 
4255 	qdf_spin_lock_bh(&soc->htt_stats.lock);
4256 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
4257 	/*
4258 	 * Done bit signifies that this is the last T2H buffer in the stream of
4259 	 * HTT EXT STATS message
4260 	 */
4261 	if (done) {
4262 		soc->htt_stats.num_stats++;
4263 		qdf_sched_work(0, &soc->htt_stats.work);
4264 	}
4265 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
4266 
4267 	return;
4268 
4269 error:
4270 	qdf_spin_lock_bh(&soc->htt_stats.lock);
4271 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
4272 			!= NULL) {
4273 		qdf_nbuf_free(msg_copy);
4274 	}
4275 	soc->htt_stats.num_stats = 0;
4276 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
4277 	return;
4278 
4279 }
4280 
4281 /*
4282  * htt_soc_attach_target() - SOC level HTT setup
4283  * @htt_soc:	HTT SOC handle
4284  *
4285  * Return: 0 on success; error code on failure
4286  */
4287 int htt_soc_attach_target(struct htt_soc *htt_soc)
4288 {
4289 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
4290 
4291 	return htt_h2t_ver_req_msg(soc);
4292 }
4293 
/*
 * htt_set_htc_handle() - store the HTC handle in the HTT SOC
 * @htt_soc:	HTT SOC handle
 * @htc_soc:	HTC handle to attach
 *
 * Return: None
 */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}
4298 
/*
 * htt_get_htc_handle() - retrieve the HTC handle from the HTT SOC
 * @htt_soc:	HTT SOC handle
 *
 * Return: the HTC handle previously set via htt_set_htc_handle()
 */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
4303 
4304 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
4305 {
4306 	int i;
4307 	int j;
4308 	int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
4309 	struct htt_soc *htt_soc = NULL;
4310 
4311 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
4312 	if (!htt_soc) {
4313 		dp_err("HTT attach failed");
4314 		return NULL;
4315 	}
4316 
4317 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4318 		htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
4319 		if (!htt_soc->pdevid_tt[i].umac_ttt)
4320 			break;
4321 		qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
4322 		htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
4323 		if (!htt_soc->pdevid_tt[i].lmac_ttt) {
4324 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
4325 			break;
4326 		}
4327 		qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
4328 	}
4329 	if (i != MAX_PDEV_CNT) {
4330 		for (j = 0; j < i; j++) {
4331 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt);
4332 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt);
4333 		}
4334 		qdf_mem_free(htt_soc);
4335 		return NULL;
4336 	}
4337 
4338 	htt_soc->dp_soc = soc;
4339 	htt_soc->htc_soc = htc_handle;
4340 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
4341 
4342 	return htt_soc;
4343 }
4344 
4345 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
4346 /*
4347  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
4348  * @htt_soc:	 HTT SOC handle
4349  * @msg_word:    Pointer to payload
4350  * @htt_t2h_msg: HTT msg nbuf
4351  *
4352  * Return: True if buffer should be freed by caller.
4353  */
4354 static bool
4355 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
4356 				uint32_t *msg_word,
4357 				qdf_nbuf_t htt_t2h_msg)
4358 {
4359 	u_int8_t pdev_id;
4360 	u_int8_t target_pdev_id;
4361 	bool free_buf;
4362 
4363 	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
4364 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4365 							 target_pdev_id);
4366 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
4367 			     htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
4368 			     pdev_id);
4369 
4370 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
4371 					      htt_t2h_msg);
4372 
4373 	return free_buf;
4374 }
4375 #else
/* Stub used when WDI events or pktlog are compiled out: caller should
 * always free the message buffer.
 */
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
				uint32_t *msg_word,
				qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
4383 #endif
4384 
4385 #if defined(WDI_EVENT_ENABLE) && \
4386 	!defined(REMOVE_PKT_LOG)
4387 /*
4388  * dp_pktlog_msg_handler() - Pktlog msg handler
4389  * @htt_soc:	 HTT SOC handle
4390  * @msg_word:    Pointer to payload
4391  *
4392  * Return: None
4393  */
4394 static void
4395 dp_pktlog_msg_handler(struct htt_soc *soc,
4396 		      uint32_t *msg_word)
4397 {
4398 	uint8_t pdev_id;
4399 	uint8_t target_pdev_id;
4400 	uint32_t *pl_hdr;
4401 
4402 	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
4403 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4404 							 target_pdev_id);
4405 	pl_hdr = (msg_word + 1);
4406 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
4407 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
4408 		pdev_id);
4409 }
4410 #else
/* Stub used when WDI events or pktlog are compiled out: nothing to do */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
4416 #endif
4417 
4418 /*
4419  * time_allow_print() - time allow print
4420  * @htt_ring_tt:	ringi_id array of timestamps
4421  * @ring_id:		ring_id (index)
4422  *
4423  * Return: 1 for successfully saving timestamp in array
4424  *	and 0 for timestamp falling within 2 seconds after last one
4425  */
4426 static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
4427 {
4428 	unsigned long tstamp;
4429 	unsigned long delta;
4430 
4431 	tstamp = qdf_get_system_timestamp();
4432 
4433 	if (!htt_ring_tt)
4434 		return 0; //unable to print backpressure messages
4435 
4436 	if (htt_ring_tt[ring_id] == -1) {
4437 		htt_ring_tt[ring_id] = tstamp;
4438 		return 1;
4439 	}
4440 	delta = tstamp - htt_ring_tt[ring_id];
4441 	if (delta >= 2000) {
4442 		htt_ring_tt[ring_id] = tstamp;
4443 		return 1;
4444 	}
4445 
4446 	return 0;
4447 }
4448 
/*
 * dp_htt_alert_print() - log a backpressure alert for one ring
 * @msg_type:	HTT T2H message type that triggered the alert
 * @pdev_id:	host pdev id
 * @ring_id:	ring index within the given ring type
 * @hp_idx:	ring head pointer index
 * @tp_idx:	ring tail pointer index
 * @bkp_time:	backpressure duration in ms
 * @ring_stype:	printable name of the ring type
 *
 * Return: None
 */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       u_int8_t pdev_id, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time, char *ring_stype)
{
	dp_alert("msg_type: %d pdev_id: %d ring_type: %s ",
		 msg_type, pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
}
4459 
4460 /*
4461  * dp_htt_bkp_event_alert() - htt backpressure event alert
4462  * @msg_word:	htt packet context
4463  * @htt_soc:	HTT SOC handle
4464  *
4465  * Return: after attempting to print stats
4466  */
4467 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
4468 {
4469 	u_int8_t ring_type;
4470 	u_int8_t pdev_id;
4471 	uint8_t target_pdev_id;
4472 	u_int8_t ring_id;
4473 	u_int16_t hp_idx;
4474 	u_int16_t tp_idx;
4475 	u_int32_t bkp_time;
4476 	enum htt_t2h_msg_type msg_type;
4477 	struct dp_soc *dpsoc;
4478 	struct dp_pdev *pdev;
4479 	struct dp_htt_timestamp *radio_tt;
4480 
4481 	if (!soc)
4482 		return;
4483 
4484 	dpsoc = (struct dp_soc *)soc->dp_soc;
4485 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
4486 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
4487 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
4488 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4489 							 target_pdev_id);
4490 	if (pdev_id >= MAX_PDEV_CNT) {
4491 		dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
4492 		return;
4493 	}
4494 
4495 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
4496 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
4497 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
4498 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
4499 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
4500 	radio_tt = &soc->pdevid_tt[pdev_id];
4501 
4502 	switch (ring_type) {
4503 	case HTT_SW_RING_TYPE_UMAC:
4504 		if (!time_allow_print(radio_tt->umac_ttt, ring_id))
4505 			return;
4506 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4507 				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
4508 	break;
4509 	case HTT_SW_RING_TYPE_LMAC:
4510 		if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
4511 			return;
4512 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4513 				   bkp_time, "HTT_SW_RING_TYPE_LMAC");
4514 	break;
4515 	default:
4516 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4517 				   bkp_time, "UNKNOWN");
4518 	break;
4519 	}
4520 
4521 	dp_print_ring_stats(pdev);
4522 	dp_print_napi_stats(pdev->soc);
4523 }
4524 
4525 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
4526 /*
4527  * dp_offload_ind_handler() - offload msg handler
4528  * @htt_soc: HTT SOC handle
4529  * @msg_word: Pointer to payload
4530  *
4531  * Return: None
4532  */
4533 static void
4534 dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
4535 {
4536 	u_int8_t pdev_id;
4537 	u_int8_t target_pdev_id;
4538 
4539 	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
4540 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4541 							 target_pdev_id);
4542 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA, soc->dp_soc,
4543 			     msg_word, HTT_INVALID_VDEV, WDI_NO_VAL,
4544 			     pdev_id);
4545 }
4546 #else
/* Stub used when WLAN_FEATURE_PKT_CAPTURE_V2 is disabled: nothing to do */
static void
dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
{
}
4551 #endif
4552 
4553 /*
4554  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
4555  * @context:	Opaque context (HTT SOC handle)
4556  * @pkt:	HTC packet
4557  */
4558 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
4559 {
4560 	struct htt_soc *soc = (struct htt_soc *) context;
4561 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
4562 	u_int32_t *msg_word;
4563 	enum htt_t2h_msg_type msg_type;
4564 	bool free_buf = true;
4565 
4566 	/* check for successful message reception */
4567 	if (pkt->Status != QDF_STATUS_SUCCESS) {
4568 		if (pkt->Status != QDF_STATUS_E_CANCELED)
4569 			soc->stats.htc_err_cnt++;
4570 
4571 		qdf_nbuf_free(htt_t2h_msg);
4572 		return;
4573 	}
4574 
4575 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
4576 
4577 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
4578 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
4579 	htt_event_record(soc->htt_logger_handle,
4580 			 msg_type, (uint8_t *)msg_word);
4581 	switch (msg_type) {
4582 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
4583 	{
4584 		dp_htt_bkp_event_alert(msg_word, soc);
4585 		break;
4586 	}
4587 	case HTT_T2H_MSG_TYPE_PEER_MAP:
4588 		{
4589 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4590 			u_int8_t *peer_mac_addr;
4591 			u_int16_t peer_id;
4592 			u_int16_t hw_peer_id;
4593 			u_int8_t vdev_id;
4594 			u_int8_t is_wds;
4595 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
4596 
4597 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
4598 			hw_peer_id =
4599 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
4600 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
4601 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
4602 				(u_int8_t *) (msg_word+1),
4603 				&mac_addr_deswizzle_buf[0]);
4604 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4605 				QDF_TRACE_LEVEL_INFO,
4606 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
4607 				peer_id, vdev_id);
4608 
4609 			/*
4610 			 * check if peer already exists for this peer_id, if so
4611 			 * this peer map event is in response for a wds peer add
4612 			 * wmi command sent during wds source port learning.
4613 			 * in this case just add the ast entry to the existing
4614 			 * peer ast_list.
4615 			 */
4616 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
4617 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
4618 					       vdev_id, peer_mac_addr, 0,
4619 					       is_wds);
4620 			break;
4621 		}
4622 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
4623 		{
4624 			u_int16_t peer_id;
4625 			u_int8_t vdev_id;
4626 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
4627 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
4628 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
4629 
4630 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
4631 						 vdev_id, mac_addr, 0,
4632 						 DP_PEER_WDS_COUNT_INVALID);
4633 			break;
4634 		}
4635 	case HTT_T2H_MSG_TYPE_SEC_IND:
4636 		{
4637 			u_int16_t peer_id;
4638 			enum cdp_sec_type sec_type;
4639 			int is_unicast;
4640 
4641 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
4642 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
4643 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
4644 			/* point to the first part of the Michael key */
4645 			msg_word++;
4646 			dp_rx_sec_ind_handler(
4647 				soc->dp_soc, peer_id, sec_type, is_unicast,
4648 				msg_word, msg_word + 2);
4649 			break;
4650 		}
4651 
4652 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
4653 		{
4654 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
4655 							     htt_t2h_msg);
4656 			break;
4657 		}
4658 
4659 	case HTT_T2H_MSG_TYPE_PKTLOG:
4660 		{
4661 			dp_pktlog_msg_handler(soc, msg_word);
4662 			break;
4663 		}
4664 
4665 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
4666 		{
4667 			/*
4668 			 * HTC maintains runtime pm count for H2T messages that
4669 			 * have a response msg from FW. This count ensures that
4670 			 * in the case FW does not sent out the response or host
4671 			 * did not process this indication runtime_put happens
4672 			 * properly in the cleanup path.
4673 			 */
4674 			if (htc_dec_return_runtime_cnt(soc->htc_soc) >= 0)
4675 				htc_pm_runtime_put(soc->htc_soc);
4676 			else
4677 				soc->stats.htt_ver_req_put_skip++;
4678 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
4679 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
4680 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4681 				"target uses HTT version %d.%d; host uses %d.%d",
4682 				soc->tgt_ver.major, soc->tgt_ver.minor,
4683 				HTT_CURRENT_VERSION_MAJOR,
4684 				HTT_CURRENT_VERSION_MINOR);
4685 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
4686 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4687 					QDF_TRACE_LEVEL_WARN,
4688 					"*** Incompatible host/target HTT versions!");
4689 			}
4690 			/* abort if the target is incompatible with the host */
4691 			qdf_assert(soc->tgt_ver.major ==
4692 				HTT_CURRENT_VERSION_MAJOR);
4693 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
4694 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4695 					QDF_TRACE_LEVEL_INFO_LOW,
4696 					"*** Warning: host/target HTT versions"
4697 					" are different, though compatible!");
4698 			}
4699 			break;
4700 		}
4701 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
4702 		{
4703 			uint16_t peer_id;
4704 			uint8_t tid;
4705 			uint8_t win_sz;
4706 			uint16_t status;
4707 			struct dp_peer *peer;
4708 
4709 			/*
4710 			 * Update REO Queue Desc with new values
4711 			 */
4712 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
4713 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
4714 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
4715 			peer = dp_peer_get_ref_by_id(soc->dp_soc, peer_id,
4716 						     DP_MOD_ID_HTT);
4717 
4718 			/*
4719 			 * Window size needs to be incremented by 1
4720 			 * since fw needs to represent a value of 256
4721 			 * using just 8 bits
4722 			 */
4723 			if (peer) {
4724 				status = dp_addba_requestprocess_wifi3(
4725 					(struct cdp_soc_t *)soc->dp_soc,
4726 					peer->mac_addr.raw, peer->vdev->vdev_id,
4727 					0, tid, 0, win_sz + 1, 0xffff);
4728 
4729 				/*
4730 				 * If PEER_LOCK_REF_PROTECT enbled dec ref
4731 				 * which is inc by dp_peer_get_ref_by_id
4732 				 */
4733 				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4734 
4735 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4736 					QDF_TRACE_LEVEL_INFO,
4737 					FL("PeerID %d BAW %d TID %d stat %d"),
4738 					peer_id, win_sz, tid, status);
4739 
4740 			} else {
4741 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4742 					QDF_TRACE_LEVEL_ERROR,
4743 					FL("Peer not found peer id %d"),
4744 					peer_id);
4745 			}
4746 			break;
4747 		}
4748 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
4749 		{
4750 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
4751 			break;
4752 		}
4753 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
4754 		{
4755 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4756 			u_int8_t *peer_mac_addr;
4757 			u_int16_t peer_id;
4758 			u_int16_t hw_peer_id;
4759 			u_int8_t vdev_id;
4760 			bool is_wds;
4761 			u_int16_t ast_hash;
4762 			struct dp_ast_flow_override_info ast_flow_info;
4763 
4764 			qdf_mem_set(&ast_flow_info, 0,
4765 					    sizeof(struct dp_ast_flow_override_info));
4766 
4767 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
4768 			hw_peer_id =
4769 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
4770 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
4771 			peer_mac_addr =
4772 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
4773 						   &mac_addr_deswizzle_buf[0]);
4774 			is_wds =
4775 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
4776 			ast_hash =
4777 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
4778 			/*
4779 			 * Update 4 ast_index per peer, ast valid mask
4780 			 * and TID flow valid mask.
4781 			 * AST valid mask is 3 bit field corresponds to
4782 			 * ast_index[3:1]. ast_index 0 is always valid.
4783 			 */
4784 			ast_flow_info.ast_valid_mask =
4785 			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
4786 			ast_flow_info.ast_idx[0] = hw_peer_id;
4787 			ast_flow_info.ast_flow_mask[0] =
4788 			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
4789 			ast_flow_info.ast_idx[1] =
4790 			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
4791 			ast_flow_info.ast_flow_mask[1] =
4792 			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
4793 			ast_flow_info.ast_idx[2] =
4794 			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
4795 			ast_flow_info.ast_flow_mask[2] =
4796 			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
4797 			ast_flow_info.ast_idx[3] =
4798 			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
4799 			ast_flow_info.ast_flow_mask[3] =
4800 			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
4801 			/*
4802 			 * TID valid mask is applicable only
4803 			 * for HI and LOW priority flows.
4804 			 * tid_valid_mas is 8 bit field corresponds
4805 			 * to TID[7:0]
4806 			 */
4807 			ast_flow_info.tid_valid_low_pri_mask =
4808 			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
4809 			ast_flow_info.tid_valid_hi_pri_mask =
4810 			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));
4811 
4812 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4813 				  QDF_TRACE_LEVEL_INFO,
4814 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
4815 				  peer_id, vdev_id);
4816 
4817 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
4818 					       hw_peer_id, vdev_id,
4819 					       peer_mac_addr, ast_hash,
4820 					       is_wds);
4821 
4822 			/*
4823 			 * Update ast indexes for flow override support
4824 			 * Applicable only for non wds peers
4825 			 */
4826 			dp_peer_ast_index_flow_queue_map_create(
4827 					    soc->dp_soc, is_wds,
4828 					    peer_id, peer_mac_addr,
4829 					    &ast_flow_info);
4830 
4831 			break;
4832 		}
4833 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
4834 		{
4835 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4836 			u_int8_t *mac_addr;
4837 			u_int16_t peer_id;
4838 			u_int8_t vdev_id;
4839 			u_int8_t is_wds;
4840 			u_int32_t free_wds_count;
4841 
4842 			peer_id =
4843 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
4844 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
4845 			mac_addr =
4846 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
4847 						   &mac_addr_deswizzle_buf[0]);
4848 			is_wds =
4849 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
4850 			free_wds_count =
4851 			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));
4852 
4853 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4854 				  QDF_TRACE_LEVEL_INFO,
4855 				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
4856 				  peer_id, vdev_id);
4857 
4858 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
4859 						 vdev_id, mac_addr,
4860 						 is_wds, free_wds_count);
4861 			break;
4862 		}
4863 	case HTT_T2H_MSG_TYPE_RX_DELBA:
4864 		{
4865 			uint16_t peer_id;
4866 			uint8_t tid;
4867 			uint8_t win_sz;
4868 			QDF_STATUS status;
4869 
4870 			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
4871 			tid = HTT_RX_DELBA_TID_GET(*msg_word);
4872 			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);
4873 
4874 			status = dp_rx_delba_ind_handler(
4875 				soc->dp_soc,
4876 				peer_id, tid, win_sz);
4877 
4878 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4879 				  QDF_TRACE_LEVEL_INFO,
4880 				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
4881 				  peer_id, win_sz, tid, status);
4882 			break;
4883 		}
4884 	case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
4885 		{
4886 			uint16_t num_entries;
4887 			uint32_t cmem_ba_lo;
4888 			uint32_t cmem_ba_hi;
4889 
4890 			num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
4891 			cmem_ba_lo = *(msg_word + 1);
4892 			cmem_ba_hi = *(msg_word + 2);
4893 
4894 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
4895 				  FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
4896 				  num_entries, cmem_ba_lo, cmem_ba_hi);
4897 
4898 			dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
4899 						     cmem_ba_lo, cmem_ba_hi);
4900 			break;
4901 		}
4902 	case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
4903 		{
4904 			dp_offload_ind_handler(soc, msg_word);
4905 			break;
4906 		}
4907 	default:
4908 		break;
4909 	};
4910 
4911 	/* Free the indication buffer */
4912 	if (free_buf)
4913 		qdf_nbuf_free(htt_t2h_msg);
4914 }
4915 
4916 /*
4917  * dp_htt_h2t_full() - Send full handler (called from HTC)
4918  * @context:	Opaque context (HTT SOC handle)
4919  * @pkt:	HTC packet
4920  *
4921  * Return: enum htc_send_full_action
4922  */
4923 static enum htc_send_full_action
4924 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
4925 {
4926 	return HTC_SEND_FULL_KEEP;
4927 }
4928 
4929 /*
4930  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
4931  * @context:	Opaque context (HTT SOC handle)
4932  * @nbuf:	nbuf containing T2H message
4933  * @pipe_id:	HIF pipe ID
4934  *
4935  * Return: QDF_STATUS
4936  *
4937  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
4938  * will be used for packet log and other high-priority HTT messages. Proper
4939  * HTC connection to be added later once required FW changes are available
4940  */
4941 static QDF_STATUS
4942 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
4943 {
4944 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
4945 	HTC_PACKET htc_pkt;
4946 
4947 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
4948 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
4949 	htc_pkt.Status = QDF_STATUS_SUCCESS;
4950 	htc_pkt.pPktContext = (void *)nbuf;
4951 	dp_htt_t2h_msg_handler(context, &htc_pkt);
4952 
4953 	return rc;
4954 }
4955 
4956 /*
4957  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
4958  * @htt_soc:	HTT SOC handle
4959  *
4960  * Return: QDF_STATUS
4961  */
4962 static QDF_STATUS
4963 htt_htc_soc_attach(struct htt_soc *soc)
4964 {
4965 	struct htc_service_connect_req connect;
4966 	struct htc_service_connect_resp response;
4967 	QDF_STATUS status;
4968 	struct dp_soc *dpsoc = soc->dp_soc;
4969 
4970 	qdf_mem_zero(&connect, sizeof(connect));
4971 	qdf_mem_zero(&response, sizeof(response));
4972 
4973 	connect.pMetaData = NULL;
4974 	connect.MetaDataLength = 0;
4975 	connect.EpCallbacks.pContext = soc;
4976 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
4977 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
4978 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
4979 
4980 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
4981 	connect.EpCallbacks.EpRecvRefill = NULL;
4982 
4983 	/* N/A, fill is done by HIF */
4984 	connect.EpCallbacks.RecvRefillWaterMark = 1;
4985 
4986 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
4987 	/*
4988 	 * Specify how deep to let a queue get before htc_send_pkt will
4989 	 * call the EpSendFull function due to excessive send queue depth.
4990 	 */
4991 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
4992 
4993 	/* disable flow control for HTT data message service */
4994 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
4995 
4996 	/* connect to control service */
4997 	connect.service_id = HTT_DATA_MSG_SVC;
4998 
4999 	status = htc_connect_service(soc->htc_soc, &connect, &response);
5000 
5001 	if (status != QDF_STATUS_SUCCESS)
5002 		return status;
5003 
5004 	soc->htc_endpoint = response.Endpoint;
5005 
5006 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
5007 
5008 	htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc);
5009 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
5010 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
5011 
5012 	return QDF_STATUS_SUCCESS; /* success */
5013 }
5014 
5015 /*
5016  * htt_soc_initialize() - SOC level HTT initialization
5017  * @htt_soc: Opaque htt SOC handle
5018  * @ctrl_psoc: Opaque ctrl SOC handle
5019  * @htc_soc: SOC level HTC handle
5020  * @hal_soc: Opaque HAL SOC handle
5021  * @osdev: QDF device
5022  *
5023  * Return: HTT handle on success; NULL on failure
5024  */
5025 void *
5026 htt_soc_initialize(struct htt_soc *htt_soc,
5027 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
5028 		   HTC_HANDLE htc_soc,
5029 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
5030 {
5031 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
5032 
5033 	soc->osdev = osdev;
5034 	soc->ctrl_psoc = ctrl_psoc;
5035 	soc->htc_soc = htc_soc;
5036 	soc->hal_soc = hal_soc_hdl;
5037 
5038 	if (htt_htc_soc_attach(soc))
5039 		goto fail2;
5040 
5041 	return soc;
5042 
5043 fail2:
5044 	return NULL;
5045 }
5046 
/* Release HTC-related HTT resources: logger state, then the misc packet
 * list and the pre-allocated HTC packet freelist built by
 * htt_soc_htc_prealloc().
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
5053 
5054 /*
5055  * htt_soc_htc_prealloc() - HTC memory prealloc
5056  * @htt_soc: SOC level HTT handle
5057  *
5058  * Return: QDF_STATUS_SUCCESS on Success or
5059  * QDF_STATUS_E_NOMEM on allocation failure
5060  */
5061 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
5062 {
5063 	int i;
5064 
5065 	soc->htt_htc_pkt_freelist = NULL;
5066 	/* pre-allocate some HTC_PACKET objects */
5067 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
5068 		struct dp_htt_htc_pkt_union *pkt;
5069 		pkt = qdf_mem_malloc(sizeof(*pkt));
5070 		if (!pkt)
5071 			return QDF_STATUS_E_NOMEM;
5072 
5073 		htt_htc_pkt_free(soc, &pkt->u.pkt);
5074 	}
5075 	return QDF_STATUS_SUCCESS;
5076 }
5077 
5078 /*
5079  * htt_soc_detach() - Free SOC level HTT handle
5080  * @htt_hdl: HTT SOC handle
5081  */
5082 void htt_soc_detach(struct htt_soc *htt_hdl)
5083 {
5084 	int i;
5085 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
5086 
5087 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5088 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
5089 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
5090 	}
5091 
5092 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
5093 	qdf_mem_free(htt_handle);
5094 
5095 }
5096 
5097 /**
5098  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
5099  * @pdev: DP PDEV handle
5100  * @stats_type_upload_mask: stats type requested by user
5101  * @config_param_0: extra configuration parameters
5102  * @config_param_1: extra configuration parameters
5103  * @config_param_2: extra configuration parameters
5104  * @config_param_3: extra configuration parameters
5105  * @mac_id: mac number
5106  *
5107  * return: QDF STATUS
5108  */
5109 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
5110 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
5111 		uint32_t config_param_1, uint32_t config_param_2,
5112 		uint32_t config_param_3, int cookie_val, int cookie_msb,
5113 		uint8_t mac_id)
5114 {
5115 	struct htt_soc *soc = pdev->soc->htt_handle;
5116 	struct dp_htt_htc_pkt *pkt;
5117 	qdf_nbuf_t msg;
5118 	uint32_t *msg_word;
5119 	uint8_t pdev_mask = 0;
5120 	uint8_t *htt_logger_bufp;
5121 	int mac_for_pdev;
5122 	int target_pdev_id;
5123 	QDF_STATUS status;
5124 
5125 	msg = qdf_nbuf_alloc(
5126 			soc->osdev,
5127 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
5128 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5129 
5130 	if (!msg)
5131 		return QDF_STATUS_E_NOMEM;
5132 
5133 	/*TODO:Add support for SOC stats
5134 	 * Bit 0: SOC Stats
5135 	 * Bit 1: Pdev stats for pdev id 0
5136 	 * Bit 2: Pdev stats for pdev id 1
5137 	 * Bit 3: Pdev stats for pdev id 2
5138 	 */
5139 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
5140 	target_pdev_id =
5141 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
5142 
5143 	pdev_mask = 1 << target_pdev_id;
5144 
5145 	/*
5146 	 * Set the length of the message.
5147 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5148 	 * separately during the below call to qdf_nbuf_push_head.
5149 	 * The contribution from the HTC header is added separately inside HTC.
5150 	 */
5151 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
5152 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5153 				"Failed to expand head for HTT_EXT_STATS");
5154 		qdf_nbuf_free(msg);
5155 		return QDF_STATUS_E_FAILURE;
5156 	}
5157 
5158 	dp_htt_tx_stats_info("%pK: cookie <-> %d\n config_param_0 %u\n"
5159 			     "config_param_1 %u\n config_param_2 %u\n"
5160 			     "config_param_4 %u\n -------------",
5161 			     pdev->soc, cookie_val,
5162 			     config_param_0,
5163 			     config_param_1, config_param_2, config_param_3);
5164 
5165 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
5166 
5167 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5168 	htt_logger_bufp = (uint8_t *)msg_word;
5169 	*msg_word = 0;
5170 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
5171 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
5172 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
5173 
5174 	/* word 1 */
5175 	msg_word++;
5176 	*msg_word = 0;
5177 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
5178 
5179 	/* word 2 */
5180 	msg_word++;
5181 	*msg_word = 0;
5182 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
5183 
5184 	/* word 3 */
5185 	msg_word++;
5186 	*msg_word = 0;
5187 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
5188 
5189 	/* word 4 */
5190 	msg_word++;
5191 	*msg_word = 0;
5192 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
5193 
5194 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
5195 
5196 	/* word 5 */
5197 	msg_word++;
5198 
5199 	/* word 6 */
5200 	msg_word++;
5201 	*msg_word = 0;
5202 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
5203 
5204 	/* word 7 */
5205 	msg_word++;
5206 	*msg_word = 0;
5207 	/* Currently Using last 2 bits for pdev_id
5208 	 * For future reference, reserving 3 bits in cookie_msb for pdev_id
5209 	 */
5210 	cookie_msb = (cookie_msb | pdev->pdev_id);
5211 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
5212 
5213 	pkt = htt_htc_pkt_alloc(soc);
5214 	if (!pkt) {
5215 		qdf_nbuf_free(msg);
5216 		return QDF_STATUS_E_NOMEM;
5217 	}
5218 
5219 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5220 
5221 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5222 			dp_htt_h2t_send_complete_free_netbuf,
5223 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
5224 			soc->htc_endpoint,
5225 			/* tag for FW response msg not guaranteed */
5226 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
5227 
5228 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5229 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
5230 				     htt_logger_bufp);
5231 
5232 	if (status != QDF_STATUS_SUCCESS) {
5233 		qdf_nbuf_free(msg);
5234 		htt_htc_pkt_free(soc, pkt);
5235 	}
5236 
5237 	return status;
5238 }
5239 
5240 /**
5241  * dp_h2t_3tuple_config_send(): function to contruct 3 tuple configuration
5242  * HTT message to pass to FW
5243  * @pdev: DP PDEV handle
5244  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
5245  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
5246  *
5247  * tuple_mask[1:0]:
5248  *   00 - Do not report 3 tuple hash value
5249  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
5250  *   01 - Report 3 tuple hash value in flow_id_toeplitz
5251  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
5252  *
5253  * return: QDF STATUS
5254  */
5255 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
5256 				     uint32_t tuple_mask, uint8_t mac_id)
5257 {
5258 	struct htt_soc *soc = pdev->soc->htt_handle;
5259 	struct dp_htt_htc_pkt *pkt;
5260 	qdf_nbuf_t msg;
5261 	uint32_t *msg_word;
5262 	uint8_t *htt_logger_bufp;
5263 	int mac_for_pdev;
5264 	int target_pdev_id;
5265 
5266 	msg = qdf_nbuf_alloc(
5267 			soc->osdev,
5268 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
5269 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5270 
5271 	if (!msg)
5272 		return QDF_STATUS_E_NOMEM;
5273 
5274 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
5275 	target_pdev_id =
5276 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
5277 
5278 	/*
5279 	 * Set the length of the message.
5280 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5281 	 * separately during the below call to qdf_nbuf_push_head.
5282 	 * The contribution from the HTC header is added separately inside HTC.
5283 	 */
5284 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
5285 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5286 			  "Failed to expand head for HTT_3TUPLE_CONFIG");
5287 		qdf_nbuf_free(msg);
5288 		return QDF_STATUS_E_FAILURE;
5289 	}
5290 
5291 	dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------",
5292 		    pdev->soc, tuple_mask, target_pdev_id);
5293 
5294 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
5295 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5296 	htt_logger_bufp = (uint8_t *)msg_word;
5297 
5298 	*msg_word = 0;
5299 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
5300 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
5301 
5302 	msg_word++;
5303 	*msg_word = 0;
5304 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
5305 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
5306 
5307 	pkt = htt_htc_pkt_alloc(soc);
5308 	if (!pkt) {
5309 		qdf_nbuf_free(msg);
5310 		return QDF_STATUS_E_NOMEM;
5311 	}
5312 
5313 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5314 
5315 	SET_HTC_PACKET_INFO_TX(
5316 			&pkt->htc_pkt,
5317 			dp_htt_h2t_send_complete_free_netbuf,
5318 			qdf_nbuf_data(msg),
5319 			qdf_nbuf_len(msg),
5320 			soc->htc_endpoint,
5321 			/* tag for no FW response msg */
5322 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
5323 
5324 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5325 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
5326 			    htt_logger_bufp);
5327 
5328 	return QDF_STATUS_SUCCESS;
5329 }
5330 
/* This will be reverted once a proper HTT header defines
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the htt.h file
 */
5334 #if defined(WDI_EVENT_ENABLE)
5335 /**
5336  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
5337  * @pdev: DP PDEV handle
5338  * @stats_type_upload_mask: stats type requested by user
5339  * @mac_id: Mac id number
5340  *
5341  * return: QDF STATUS
5342  */
5343 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
5344 		uint32_t stats_type_upload_mask, uint8_t mac_id)
5345 {
5346 	struct htt_soc *soc = pdev->soc->htt_handle;
5347 	struct dp_htt_htc_pkt *pkt;
5348 	qdf_nbuf_t msg;
5349 	uint32_t *msg_word;
5350 	uint8_t pdev_mask;
5351 	QDF_STATUS status;
5352 
5353 	msg = qdf_nbuf_alloc(
5354 			soc->osdev,
5355 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
5356 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
5357 
5358 	if (!msg) {
5359 		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer"
5360 			   , pdev->soc);
5361 		qdf_assert(0);
5362 		return QDF_STATUS_E_NOMEM;
5363 	}
5364 
5365 	/*TODO:Add support for SOC stats
5366 	 * Bit 0: SOC Stats
5367 	 * Bit 1: Pdev stats for pdev id 0
5368 	 * Bit 2: Pdev stats for pdev id 1
5369 	 * Bit 3: Pdev stats for pdev id 2
5370 	 */
5371 	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
5372 								mac_id);
5373 
5374 	/*
5375 	 * Set the length of the message.
5376 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5377 	 * separately during the below call to qdf_nbuf_push_head.
5378 	 * The contribution from the HTC header is added separately inside HTC.
5379 	 */
5380 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
5381 		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS"
5382 			   , pdev->soc);
5383 		qdf_nbuf_free(msg);
5384 		return QDF_STATUS_E_FAILURE;
5385 	}
5386 
5387 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
5388 
5389 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5390 	*msg_word = 0;
5391 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
5392 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
5393 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
5394 			stats_type_upload_mask);
5395 
5396 	pkt = htt_htc_pkt_alloc(soc);
5397 	if (!pkt) {
5398 		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer", pdev->soc);
5399 		qdf_assert(0);
5400 		qdf_nbuf_free(msg);
5401 		return QDF_STATUS_E_NOMEM;
5402 	}
5403 
5404 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5405 
5406 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5407 			dp_htt_h2t_send_complete_free_netbuf,
5408 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
5409 			soc->htc_endpoint,
5410 			/* tag for no FW response msg */
5411 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
5412 
5413 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5414 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
5415 				     (uint8_t *)msg_word);
5416 
5417 	if (status != QDF_STATUS_SUCCESS) {
5418 		qdf_nbuf_free(msg);
5419 		htt_htc_pkt_free(soc, pkt);
5420 	}
5421 
5422 	return status;
5423 }
5424 #endif
5425 
5426 void
5427 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
5428 			     uint32_t *tag_buf)
5429 {
5430 	struct dp_peer *peer = NULL;
5431 	switch (tag_type) {
5432 	case HTT_STATS_PEER_DETAILS_TAG:
5433 	{
5434 		htt_peer_details_tlv *dp_stats_buf =
5435 			(htt_peer_details_tlv *)tag_buf;
5436 
5437 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
5438 	}
5439 	break;
5440 	case HTT_STATS_PEER_STATS_CMN_TAG:
5441 	{
5442 		htt_peer_stats_cmn_tlv *dp_stats_buf =
5443 			(htt_peer_stats_cmn_tlv *)tag_buf;
5444 
5445 		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
5446 					     DP_MOD_ID_HTT);
5447 
5448 		if (peer && !peer->bss_peer) {
5449 			peer->stats.tx.inactive_time =
5450 				dp_stats_buf->inactive_time;
5451 			qdf_event_set(&pdev->fw_peer_stats_event);
5452 		}
5453 		if (peer)
5454 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5455 	}
5456 	break;
5457 	default:
5458 		qdf_err("Invalid tag_type");
5459 	}
5460 }
5461 
5462 /**
5463  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
5464  * @pdev: DP pdev handle
5465  * @fse_setup_info: FST setup parameters
5466  *
5467  * Return: Success when HTT message is sent, error on failure
5468  */
5469 QDF_STATUS
5470 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
5471 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
5472 {
5473 	struct htt_soc *soc = pdev->soc->htt_handle;
5474 	struct dp_htt_htc_pkt *pkt;
5475 	qdf_nbuf_t msg;
5476 	u_int32_t *msg_word;
5477 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
5478 	uint8_t *htt_logger_bufp;
5479 	u_int32_t *key;
5480 	QDF_STATUS status;
5481 
5482 	msg = qdf_nbuf_alloc(
5483 		soc->osdev,
5484 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
5485 		/* reserve room for the HTC header */
5486 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5487 
5488 	if (!msg)
5489 		return QDF_STATUS_E_NOMEM;
5490 
5491 	/*
5492 	 * Set the length of the message.
5493 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5494 	 * separately during the below call to qdf_nbuf_push_head.
5495 	 * The contribution from the HTC header is added separately inside HTC.
5496 	 */
5497 	if (!qdf_nbuf_put_tail(msg,
5498 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
5499 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
5500 		return QDF_STATUS_E_FAILURE;
5501 	}
5502 
5503 	/* fill in the message contents */
5504 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5505 
5506 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
5507 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5508 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5509 	htt_logger_bufp = (uint8_t *)msg_word;
5510 
5511 	*msg_word = 0;
5512 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
5513 
5514 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
5515 
5516 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
5517 
5518 	msg_word++;
5519 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
5520 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
5521 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
5522 					     fse_setup_info->ip_da_sa_prefix);
5523 
5524 	msg_word++;
5525 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
5526 					  fse_setup_info->base_addr_lo);
5527 	msg_word++;
5528 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
5529 					  fse_setup_info->base_addr_hi);
5530 
5531 	key = (u_int32_t *)fse_setup_info->hash_key;
5532 	fse_setup->toeplitz31_0 = *key++;
5533 	fse_setup->toeplitz63_32 = *key++;
5534 	fse_setup->toeplitz95_64 = *key++;
5535 	fse_setup->toeplitz127_96 = *key++;
5536 	fse_setup->toeplitz159_128 = *key++;
5537 	fse_setup->toeplitz191_160 = *key++;
5538 	fse_setup->toeplitz223_192 = *key++;
5539 	fse_setup->toeplitz255_224 = *key++;
5540 	fse_setup->toeplitz287_256 = *key++;
5541 	fse_setup->toeplitz314_288 = *key;
5542 
5543 	msg_word++;
5544 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
5545 	msg_word++;
5546 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
5547 	msg_word++;
5548 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
5549 	msg_word++;
5550 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
5551 	msg_word++;
5552 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
5553 	msg_word++;
5554 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
5555 	msg_word++;
5556 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
5557 	msg_word++;
5558 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
5559 	msg_word++;
5560 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
5561 	msg_word++;
5562 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
5563 					  fse_setup->toeplitz314_288);
5564 
5565 	pkt = htt_htc_pkt_alloc(soc);
5566 	if (!pkt) {
5567 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
5568 		qdf_assert(0);
5569 		qdf_nbuf_free(msg);
5570 		return QDF_STATUS_E_RESOURCES; /* failure */
5571 	}
5572 
5573 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5574 
5575 	SET_HTC_PACKET_INFO_TX(
5576 		&pkt->htc_pkt,
5577 		dp_htt_h2t_send_complete_free_netbuf,
5578 		qdf_nbuf_data(msg),
5579 		qdf_nbuf_len(msg),
5580 		soc->htc_endpoint,
5581 		/* tag for no FW response msg */
5582 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
5583 
5584 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5585 
5586 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
5587 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
5588 				     htt_logger_bufp);
5589 
5590 	if (status == QDF_STATUS_SUCCESS) {
5591 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
5592 			fse_setup_info->pdev_id);
5593 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
5594 				   (void *)fse_setup_info->hash_key,
5595 				   fse_setup_info->hash_key_len);
5596 	} else {
5597 		qdf_nbuf_free(msg);
5598 		htt_htc_pkt_free(soc, pkt);
5599 	}
5600 
5601 	return status;
5602 }
5603 
5604 /**
5605  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
5606  * add/del a flow in HW
5607  * @pdev: DP pdev handle
5608  * @fse_op_info: Flow entry parameters
5609  *
5610  * Return: Success when HTT message is sent, error on failure
5611  */
5612 QDF_STATUS
5613 dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
5614 			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
5615 {
5616 	struct htt_soc *soc = pdev->soc->htt_handle;
5617 	struct dp_htt_htc_pkt *pkt;
5618 	qdf_nbuf_t msg;
5619 	u_int32_t *msg_word;
5620 	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
5621 	uint8_t *htt_logger_bufp;
5622 	QDF_STATUS status;
5623 
5624 	msg = qdf_nbuf_alloc(
5625 		soc->osdev,
5626 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
5627 		/* reserve room for the HTC header */
5628 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5629 	if (!msg)
5630 		return QDF_STATUS_E_NOMEM;
5631 
5632 	/*
5633 	 * Set the length of the message.
5634 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5635 	 * separately during the below call to qdf_nbuf_push_head.
5636 	 * The contribution from the HTC header is added separately inside HTC.
5637 	 */
5638 	if (!qdf_nbuf_put_tail(msg,
5639 			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
5640 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
5641 		qdf_nbuf_free(msg);
5642 		return QDF_STATUS_E_FAILURE;
5643 	}
5644 
5645 	/* fill in the message contents */
5646 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5647 
5648 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
5649 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5650 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5651 	htt_logger_bufp = (uint8_t *)msg_word;
5652 
5653 	*msg_word = 0;
5654 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
5655 
5656 	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
5657 
5658 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
5659 	msg_word++;
5660 	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
5661 	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
5662 		HTT_RX_FSE_OPERATION_SET(*msg_word,
5663 					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
5664 		msg_word++;
5665 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5666 		*msg_word,
5667 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
5668 		msg_word++;
5669 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5670 		*msg_word,
5671 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
5672 		msg_word++;
5673 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5674 		*msg_word,
5675 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
5676 		msg_word++;
5677 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5678 		*msg_word,
5679 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
5680 		msg_word++;
5681 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5682 		*msg_word,
5683 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
5684 		msg_word++;
5685 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5686 		*msg_word,
5687 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
5688 		msg_word++;
5689 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5690 		*msg_word,
5691 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
5692 		msg_word++;
5693 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5694 		*msg_word,
5695 		qdf_htonl(
5696 		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
5697 		msg_word++;
5698 		HTT_RX_FSE_SOURCEPORT_SET(
5699 			*msg_word,
5700 			fse_op_info->rx_flow->flow_tuple_info.src_port);
5701 		HTT_RX_FSE_DESTPORT_SET(
5702 			*msg_word,
5703 			fse_op_info->rx_flow->flow_tuple_info.dest_port);
5704 		msg_word++;
5705 		HTT_RX_FSE_L4_PROTO_SET(
5706 			*msg_word,
5707 			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
5708 	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
5709 		HTT_RX_FSE_OPERATION_SET(*msg_word,
5710 					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
5711 	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
5712 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
5713 	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
5714 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
5715 	}
5716 
5717 	pkt = htt_htc_pkt_alloc(soc);
5718 	if (!pkt) {
5719 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
5720 		qdf_assert(0);
5721 		qdf_nbuf_free(msg);
5722 		return QDF_STATUS_E_RESOURCES; /* failure */
5723 	}
5724 
5725 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5726 
5727 	SET_HTC_PACKET_INFO_TX(
5728 		&pkt->htc_pkt,
5729 		dp_htt_h2t_send_complete_free_netbuf,
5730 		qdf_nbuf_data(msg),
5731 		qdf_nbuf_len(msg),
5732 		soc->htc_endpoint,
5733 		/* tag for no FW response msg */
5734 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
5735 
5736 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5737 
5738 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
5739 				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
5740 				     htt_logger_bufp);
5741 
5742 	if (status == QDF_STATUS_SUCCESS) {
5743 		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
5744 			fse_op_info->pdev_id);
5745 	} else {
5746 		qdf_nbuf_free(msg);
5747 		htt_htc_pkt_free(soc, pkt);
5748 	}
5749 
5750 	return status;
5751 }
5752 
5753 /**
5754  * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
5755  * @pdev: DP pdev handle
5756  * @fse_op_info: Flow entry parameters
5757  *
5758  * Return: Success when HTT message is sent, error on failure
5759  */
5760 QDF_STATUS
5761 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
5762 		      struct dp_htt_rx_fisa_cfg *fisa_config)
5763 {
5764 	struct htt_soc *soc = pdev->soc->htt_handle;
5765 	struct dp_htt_htc_pkt *pkt;
5766 	qdf_nbuf_t msg;
5767 	u_int32_t *msg_word;
5768 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
5769 	uint8_t *htt_logger_bufp;
5770 	uint32_t len;
5771 	QDF_STATUS status;
5772 
5773 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
5774 
5775 	msg = qdf_nbuf_alloc(soc->osdev,
5776 			     len,
5777 			     /* reserve room for the HTC header */
5778 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
5779 			     4,
5780 			     TRUE);
5781 	if (!msg)
5782 		return QDF_STATUS_E_NOMEM;
5783 
5784 	/*
5785 	 * Set the length of the message.
5786 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5787 	 * separately during the below call to qdf_nbuf_push_head.
5788 	 * The contribution from the HTC header is added separately inside HTC.
5789 	 */
5790 	if (!qdf_nbuf_put_tail(msg,
5791 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
5792 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
5793 		qdf_nbuf_free(msg);
5794 		return QDF_STATUS_E_FAILURE;
5795 	}
5796 
5797 	/* fill in the message contents */
5798 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5799 
5800 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
5801 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5802 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5803 	htt_logger_bufp = (uint8_t *)msg_word;
5804 
5805 	*msg_word = 0;
5806 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
5807 
5808 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
5809 
5810 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);
5811 
5812 	msg_word++;
5813 	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
5814 	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);
5815 
5816 	msg_word++;
5817 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
5818 
5819 	pkt = htt_htc_pkt_alloc(soc);
5820 	if (!pkt) {
5821 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
5822 		qdf_assert(0);
5823 		qdf_nbuf_free(msg);
5824 		return QDF_STATUS_E_RESOURCES; /* failure */
5825 	}
5826 
5827 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5828 
5829 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5830 			       dp_htt_h2t_send_complete_free_netbuf,
5831 			       qdf_nbuf_data(msg),
5832 			       qdf_nbuf_len(msg),
5833 			       soc->htc_endpoint,
5834 			       /* tag for no FW response msg */
5835 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
5836 
5837 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5838 
5839 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
5840 				     htt_logger_bufp);
5841 
5842 	if (status == QDF_STATUS_SUCCESS) {
5843 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
5844 			fisa_config->pdev_id);
5845 	} else {
5846 		qdf_nbuf_free(msg);
5847 		htt_htc_pkt_free(soc, pkt);
5848 	}
5849 
5850 	return status;
5851 }
5852