/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <htt.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include "dp_peer.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_rx.h"
#include "dp_rx_mon.h"
#include "htt_stats.h"
#include "htt_ppdu_stats.h"
#include "dp_htt.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include "cdp_txrx_cmn_struct.h"

#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE

#define HTT_HTC_PKT_POOL_INIT_SIZE 64

#define HTT_MSG_BUF_SIZE(msg_bytes) \
	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)

#define HTT_PID_BIT_MASK 0x3

#define DP_EXT_MSG_LENGTH 2048

#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16

#define HTT_SHIFT_UPPER_TIMESTAMP 32
#define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
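
/*
 * Illustrative sketch (not driver code): the two timestamp macros above
 * are intended for splitting and recombining a 64-bit TSF around a
 * 32-bit value received from the target. Assuming tsf64 holds the last
 * known 64-bit TSF and tsf_l32 is a fresh lower-32-bit value from a TLV:
 *
 *	uint64_t merged = (tsf64 & HTT_MASK_UPPER_TIMESTAMP) | tsf_l32;
 *	uint32_t upper = (uint32_t)(tsf64 >> HTT_SHIFT_UPPER_TIMESTAMP);
 */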

/*
 * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
 * bitmap for sniffer mode
 * @bitmap: received bitmap
 *
 * Return: expected bitmap value; returns zero if it matches neither the
 * 64-bit nor the 256-bit Tx window tlv bitmap
 */
int
dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
{
	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;

	return 0;
}
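
/*
 * Usage sketch (illustrative; the caller and its tlv_bitmap field are
 * assumptions, not code from this file):
 *
 *	if (dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(ppdu_info->tlv_bitmap))
 *		process_complete_sniffer_ppdu(ppdu_info);
 *	else
 *		drop_incomplete_ppdu(ppdu_info);
 *
 * where both helpers are hypothetical names used only for illustration.
 */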

#ifdef FEATURE_PERPKT_INFO
/*
 * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
 * @peer: Datapath peer handle
 * @ppdu: User PPDU Descriptor
 * @cur_ppdu_id: PPDU_ID
 *
 * Return: None
 *
 * On a Tx data frame we may get the delayed ba set in
 * htt_ppdu_stats_user_common_tlv, which means the Block Ack (BA) arrives
 * only after we send a Block Ack Request (BAR). The successful msdu count
 * is received only with that Block Ack, and to populate peer stats we need
 * the successful msdus of the data frame. So we hold the Tx data stats in
 * delayed_ba storage until the stats update.
 */
static void
dp_peer_copy_delay_stats(struct dp_peer *peer,
			 struct cdp_tx_completion_ppdu_user *ppdu,
			 uint32_t cur_ppdu_id)
{
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;

	if (peer->last_delayed_ba) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]",
			  peer->last_delayed_ba_ppduid, cur_ppdu_id);
		vdev = peer->vdev;
		if (vdev) {
			pdev = vdev->pdev;
			pdev->stats.cdp_delayed_ba_not_recev++;
		}
	}

	peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
	peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
	peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
	peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
	peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
	peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
	peer->delayed_ba_ppdu_stats.preamble = ppdu->preamble;
	peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
	peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
	peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = ppdu->mpdu_tried_ucast;
	peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = ppdu->mpdu_tried_mcast;
	peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
	peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;

	peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
	peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
	peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;

	peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
	peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;

	peer->last_delayed_ba = true;

	ppdu->debug_copied = true;
}

/*
 * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 *
 * For a Tx BAR, the PPDU stats TLV includes the Block Ack info. The PPDU
 * info from the Tx BAR frame itself is not needed to populate peer stats,
 * but we do need its successful MPDU and MSDU counts to update the
 * previously transmitted Tx data frame. So overwrite the BAR's ppdu stats
 * with the previously stored ppdu stats.
 */
static void
dp_peer_copy_stats_to_bar(struct dp_peer *peer,
			  struct cdp_tx_completion_ppdu_user *ppdu)
{
	ppdu->ltf_size = peer->delayed_ba_ppdu_stats.ltf_size;
	ppdu->stbc = peer->delayed_ba_ppdu_stats.stbc;
	ppdu->he_re = peer->delayed_ba_ppdu_stats.he_re;
	ppdu->txbf = peer->delayed_ba_ppdu_stats.txbf;
	ppdu->bw = peer->delayed_ba_ppdu_stats.bw;
	ppdu->nss = peer->delayed_ba_ppdu_stats.nss;
	ppdu->preamble = peer->delayed_ba_ppdu_stats.preamble;
	ppdu->gi = peer->delayed_ba_ppdu_stats.gi;
	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
	ppdu->ldpc = peer->delayed_ba_ppdu_stats.ldpc;
	ppdu->mpdu_tried_ucast = peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
	ppdu->mpdu_tried_mcast = peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
	ppdu->frame_ctrl = peer->delayed_ba_ppdu_stats.frame_ctrl;
	ppdu->qos_ctrl = peer->delayed_ba_ppdu_stats.qos_ctrl;

	ppdu->ru_start = peer->delayed_ba_ppdu_stats.ru_start;
	ppdu->ru_tones = peer->delayed_ba_ppdu_stats.ru_tones;
	ppdu->is_mcast = peer->delayed_ba_ppdu_stats.is_mcast;

	ppdu->user_pos = peer->delayed_ba_ppdu_stats.user_pos;
	ppdu->mu_group_id = peer->delayed_ba_ppdu_stats.mu_group_id;

	peer->last_delayed_ba = false;

	ppdu->debug_copied = true;
}
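
/*
 * Illustrative pairing of the two helpers above (a sketch of the flow,
 * not the actual TLV handler; the frame_type checks are assumptions
 * based on the cdp frame-type constants):
 *
 *	if (ppdu->frame_type == CDP_PPDU_FTYPE_DATA && ppdu->delayed_ba)
 *		dp_peer_copy_delay_stats(peer, user, ppdu_id);
 *	else if (ppdu->frame_type == CDP_PPDU_FTYPE_BAR &&
 *		 peer->last_delayed_ba)
 *		dp_peer_copy_stats_to_bar(peer, user);
 *
 * The data PPDU parks its user stats; the later BAR PPDU that carries
 * the Block Ack restores them, so completion is accounted against the
 * original data frame.
 */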

/*
 * dp_tx_rate_stats_update() - Update per-peer rate statistics
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 */
static void
dp_tx_rate_stats_update(struct dp_peer *peer,
			struct cdp_tx_completion_ppdu_user *ppdu)
{
	uint32_t ratekbps = 0;
	uint64_t ppdu_tx_rate = 0;
	uint32_t rix;
	uint16_t ratecode = 0;

	if (!peer || !ppdu)
		return;

	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
		return;

	ratekbps = dp_getrateindex(ppdu->gi,
				   ppdu->mcs,
				   ppdu->nss,
				   ppdu->preamble,
				   ppdu->bw,
				   &rix,
				   &ratecode);

	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);

	if (!ratekbps)
		return;

	/* Calculate goodput in the non-training period.
	 * In the training period, don't do anything, as the
	 * pending pkt is sent as goodput.
	 */
	if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
		ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
				(CDP_PERCENT_MACRO - ppdu->current_rate_per));
	}
	ppdu->rix = rix;
	ppdu->tx_ratekbps = ratekbps;
	ppdu->tx_ratecode = ratecode;
	peer->stats.tx.avg_tx_rate =
		dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);

	if (peer->vdev) {
		/*
		 * In STA mode:
		 *	We get ucast stats as BSS peer stats.
		 *
		 * In AP mode:
		 *	We get mcast stats as BSS peer stats.
		 *	We get ucast stats as assoc peer stats.
		 */
		if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
		} else {
			peer->vdev->stats.tx.last_tx_rate = ratekbps;
			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
		}
	}
}
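
/*
 * Worked example of the goodput formula above (illustrative numbers;
 * assumes CDP_NUM_KB_IN_MB == 1000 and CDP_PERCENT_MACRO == 100): with
 * ratekbps = 144000 (144 Mbps) and current_rate_per = 10 (10% PER),
 * sa_goodput = (144000 / 1000) * (100 - 10) = 144 * 90 = 12960,
 * i.e. 129.6 Mbps of effective goodput scaled by 100.
 */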

/*
 * dp_tx_stats_update() - Update per-peer statistics
 * @pdev: Datapath pdev handle
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 * @ack_rssi: RSSI of last ack received
 *
 * Return: None
 */
static void
dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
		   struct cdp_tx_completion_ppdu_user *ppdu,
		   uint32_t ack_rssi)
{
	uint8_t preamble, mcs;
	uint16_t num_msdu;
	uint16_t num_mpdu;
	uint16_t mpdu_tried;
	uint16_t mpdu_failed;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;
	num_mpdu = ppdu->mpdu_success;
	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
	mpdu_failed = mpdu_tried - num_mpdu;

	/* If the peer statistics are already processed as part of
	 * the per-MSDU completion handler, do not process them again
	 * in the per-PPDU indications.
	 */
	if (pdev->soc->process_tx_status)
		return;

	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
		/*
		 * All failed mpdus will be retried, so increment the
		 * retried-mpdu count by the number of failed mpdus. Even
		 * for an ack failure, i.e. for long retries, mpdu failed
		 * equals mpdu tried.
		 */
		DP_STATS_INC(peer, tx.retries, mpdu_failed);
		DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
		return;
	}

	if (ppdu->is_ppdu_cookie_valid)
		DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1);

	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
		if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "mu_group_id out of bound!!\n");
		else
			DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
				     (ppdu->user_pos + 1));
	}

	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
		DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
		DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
		switch (ppdu->ru_tones) {
		case RU_26:
			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_52:
			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_106:
			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_242:
			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_484:
			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_996:
			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		}
	}

	/*
	 * All failed mpdus will be retried, so increment the retried-mpdu
	 * count by the number of failed mpdus. Even for an ack failure,
	 * i.e. for long retries, mpdu failed equals mpdu tried.
	 */
	DP_STATS_INC(peer, tx.retries, mpdu_failed);
	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);

	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
		     num_msdu);
	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
		     num_mpdu);
	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
		     mpdu_tried);

	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			num_msdu, (ppdu->success_bytes +
				ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	if (ppdu->tid < CDP_DATA_TID_MAX)
		DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
			     num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
	DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));

	dp_peer_stats_notify(pdev, peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ppdu->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
#endif
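
/*
 * Worked example of the retry accounting in dp_tx_stats_update()
 * (illustrative numbers): with mpdu_tried_ucast = 10, mpdu_tried_mcast = 0
 * and mpdu_success = 7, mpdu_tried = 10 and mpdu_failed = 10 - 7 = 3, so
 * tx.retries is incremented by 3. On a complete ack failure (long retry),
 * mpdu_success is 0 and every tried MPDU counts as a retry.
 */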

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#else
static inline void
dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
					   void *data,
					   uint32_t ppdu_id,
					   uint32_t size)
{
}
#endif

/*
 * htt_htc_pkt_alloc() - Allocate HTC packet buffer
 * @htt_soc:	HTT SOC handle
 *
 * Return: Pointer to htc packet buffer
 */
static struct dp_htt_htc_pkt *
htt_htc_pkt_alloc(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt = NULL;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	if (soc->htt_htc_pkt_freelist) {
		pkt = soc->htt_htc_pkt_freelist;
		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);

	if (!pkt)
		pkt = qdf_mem_malloc(sizeof(*pkt));
	return &pkt->u.pkt; /* not actually a dereference */
}

/*
 * htt_htc_pkt_free() - Free HTC packet buffer
 * @htt_soc:	HTT SOC handle
 * @pkt:	pkt to be returned to the freelist
 */
static void
htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
{
	struct dp_htt_htc_pkt_union *u_pkt =
		(struct dp_htt_htc_pkt_union *)pkt;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	u_pkt->u.next = soc->htt_htc_pkt_freelist;
	soc->htt_htc_pkt_freelist = u_pkt;
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
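
/*
 * Usage sketch for the freelist pair above (illustrative, not a real
 * call site): allocation first recycles a packet from the freelist and
 * only falls back to qdf_mem_malloc() when the list is empty.
 *
 *	struct dp_htt_htc_pkt *pkt = htt_htc_pkt_alloc(soc);
 *
 *	if (!pkt)
 *		return QDF_STATUS_E_NOMEM;
 *	... fill pkt->htc_pkt and hand it to HTC ...
 *	htt_htc_pkt_free(soc, pkt);	(returns pkt to the freelist)
 */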

/*
 * htt_htc_pkt_pool_free() - Free HTC packet pool
 * @htt_soc:	HTT SOC handle
 */
void
htt_htc_pkt_pool_free(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt, *next;

	pkt = soc->htt_htc_pkt_freelist;
	while (pkt) {
		next = pkt->u.next;
		qdf_mem_free(pkt);
		pkt = next;
	}
	soc->htt_htc_pkt_freelist = NULL;
}

/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @htt_soc: HTT SOC handle
 * @level: max no. of pkts in list
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the outgrown list */
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}

/*
 * htt_htc_misc_pkt_list_add() - Add pkt to misc list
 * @htt_soc:	HTT SOC handle
 * @dp_htt_htc_pkt: pkt to be added to the list
 */
static void
htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
{
	struct dp_htt_htc_pkt_union *u_pkt =
				(struct dp_htt_htc_pkt_union *)pkt;
	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
							pkt->htc_pkt.Endpoint)
				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	if (soc->htt_htc_pkt_misclist) {
		u_pkt->u.next = soc->htt_htc_pkt_misclist;
		soc->htt_htc_pkt_misclist = u_pkt;
	} else {
		soc->htt_htc_pkt_misclist = u_pkt;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);

	/* Only CE pipe size + tx queue depth packets can possibly still
	 * be in use; free older packets in the misclist.
	 */
	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
}
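
/*
 * Example of the trim-level arithmetic above (illustrative numbers;
 * assumes DP_HTT_HTC_PKT_MISCLIST_SIZE == 256): with a tx queue depth
 * of 8 on the endpoint, at most 8 + 256 packets can still be
 * referenced, so the trim pass frees any misclist entries beyond the
 * first 264.
 */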

/**
 * DP_HTT_SEND_HTC_PKT() - Send an htt packet from the host
 * @soc: HTT SOC handle
 * @pkt: pkt to be sent
 * @cmd: command to be recorded in the dp htt logger
 * @buf: Pointer to the buffer to be recorded for the above cmd
 *
 * Return: QDF_STATUS of the send
 */
static inline QDF_STATUS DP_HTT_SEND_HTC_PKT(struct htt_soc *soc,
					     struct dp_htt_htc_pkt *pkt,
					     uint8_t cmd, uint8_t *buf)
{
	QDF_STATUS status;

	htt_command_record(soc->htt_logger_handle, cmd, buf);

	status = htc_send_pkt(soc->htc_soc, &pkt->htc_pkt);
	if (status == QDF_STATUS_SUCCESS)
		htt_htc_misc_pkt_list_add(soc, pkt);

	return status;
}
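
/*
 * Canonical send sequence built around DP_HTT_SEND_HTC_PKT() (a sketch
 * of the pattern the senders below follow; msg, cmd and htt_logger_bufp
 * stand in for the caller's locals):
 *
 *	pkt = htt_htc_pkt_alloc(soc);
 *	if (!pkt)
 *		goto fail;
 *
 *	pkt->soc_ctxt = NULL;
 *	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
 *			       dp_htt_h2t_send_complete_free_netbuf,
 *			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
 *			       soc->htc_endpoint, 1);
 *	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
 *
 *	status = DP_HTT_SEND_HTC_PKT(soc, pkt, cmd, htt_logger_bufp);
 *	if (status != QDF_STATUS_SUCCESS) {
 *		qdf_nbuf_free(msg);
 *		htt_htc_pkt_free(soc, pkt);
 *	}
 */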

/*
 * htt_htc_misc_pkt_pool_free() - free pkts in misc list
 * @htt_soc:	HTT SOC handle
 */
static void
htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt_union *pkt, *next;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;

	while (pkt) {
		next = pkt->u.next;
		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);

		soc->stats.htc_pkt_free++;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
			 "%s: Pkt free count %d",
			 __func__, soc->stats.htc_pkt_free);

		qdf_nbuf_free(netbuf);
		qdf_mem_free(pkt);
		pkt = next;
	}
	soc->htt_htc_pkt_misclist = NULL;
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}

/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differs
 * @tgt_mac_addr:	Target MAC
 * @buffer:		Output buffer
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness.
	 * To make u_int32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each u_int32_t element of the
	 * message.
	 * For byte-array message fields like the MAC address, this
	 * upload swizzling puts the bytes in the wrong order, and needs
	 * to be undone.
	 */
	buffer[0] = tgt_mac_addr[3];
	buffer[1] = tgt_mac_addr[2];
	buffer[2] = tgt_mac_addr[1];
	buffer[3] = tgt_mac_addr[0];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
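
/*
 * Worked example of the deswizzle above on a big-endian host: a target
 * MAC aa:bb:cc:dd:ee:ff arrives word-swizzled as
 * tgt_mac_addr[] = {dd, cc, bb, aa, X, X, ff, ee} (X being the two
 * bytes that share the second word), and the byte swap restores
 * buffer[] = {aa, bb, cc, dd, ee, ff}.
 */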

/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc:	SOC handle
 * @status:	Completion status
 * @netbuf:	HTT buffer
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}

/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context:	Opaque context (HTT SOC handle)
 * @htc_pkt:	HTC packet
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*send_complete_part2)(
		void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc = (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	send_complete_part2 = htc_pkt->pPktContext;

	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2) {
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}

/*
 * htt_h2t_ver_req_msg() - Send HTT version request message to target
 * @htt_soc:	HTT SOC handle
 *
 * Return: 0 on success; error code on failure
 */
static int htt_h2t_ver_req_msg(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
			__func__);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *) qdf_nbuf_data(msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}
	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
		qdf_nbuf_len(msg), soc->htc_endpoint,
		1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
				     NULL);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
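
/*
 * Buffer layout used by htt_h2t_ver_req_msg() and the other senders in
 * this file (illustrative): HTT_MSG_BUF_SIZE(payload) allocates
 * payload + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING bytes, with the
 * HTC portion reserved as headroom. qdf_nbuf_put_tail() then claims the
 * payload bytes, and qdf_nbuf_push_head() exposes only the alignment
 * padding, leaving room for HTC to prepend its header when the packet
 * is actually sent:
 *
 *	[ HTC header | alignment pad | HTT payload ]
 *	  (HTC fills)   (push_head)    (put_tail)
 */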

/*
 * htt_srng_setup() - Send SRNG setup message to target
 * @htt_soc:	HTT SOC handle
 * @mac_id:	MAC Id
 * @hal_ring_hdl:	Opaque HAL SRNG pointer
 * @hal_ring_type:	SRNG ring type
 *
 * Return: 0 on success; error code on failure
 */
int htt_srng_setup(struct htt_soc *soc, int mac_id,
		   hal_ring_handle_t hal_ring_hdl,
		   int hal_ring_type)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t htt_msg;
	uint32_t *msg_word;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr, tp_addr;
	uint32_t ring_entry_size =
		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
	int htt_ring_type, htt_ring_id;
	uint8_t *htt_logger_bufp;
	int target_pdev_id;
	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
	QDF_STATUS status;

	/* Sizes should be set in 4-byte words */
	ring_entry_size = ring_entry_size >> 2;

	htt_msg = qdf_nbuf_alloc(soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!htt_msg)
		goto fail0;

	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);

	switch (hal_ring_type) {
	case RXDMA_BUF:
#ifdef QCA_HOST2FW_RXBUF_RING
		if (srng_params.ring_id ==
		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
			htt_ring_type = HTT_SW_TO_SW_RING;
#ifdef IPA_OFFLOAD
		} else if (srng_params.ring_id ==
		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
			htt_ring_type = HTT_SW_TO_SW_RING;
#endif
#else
		if (srng_params.ring_id ==
			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			htt_ring_type = HTT_SW_TO_HW_RING;
#endif
		} else if (srng_params.ring_id ==
#ifdef IPA_OFFLOAD
			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
#else
			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
#endif
			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				   "%s: Ring %d currently not supported",
				   __func__, srng_params.ring_id);
			goto fail1;
		}

		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
			hal_ring_type, srng_params.ring_id, htt_ring_id,
			(uint64_t)hp_addr,
			(uint64_t)tp_addr);
		break;
	case RXDMA_MONITOR_BUF:
		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_STATUS:
		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_DST:
		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case RXDMA_MONITOR_DESC:
		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_DST:
		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Ring currently not supported", __func__);
		goto fail1;
	}

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Failed to expand head for SRING_SETUP msg",
			__func__);
		goto fail1;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0 */
	*msg_word = 0;
	htt_logger_bufp = (uint8_t *)msg_word;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
	target_pdev_id =
	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);

	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
			(htt_ring_type == HTT_HW_TO_SW_RING))
		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
	else
		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);

	dp_info("%s: mac_id %d", __func__, mac_id);
	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
	/* TODO: Discuss with FW on changing this to unique ID and using
	 * htt_ring_type to send the type of ring
	 */
	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
		srng_params.ring_base_paddr & 0xffffffff);

	/* word 2 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
		(uint64_t)srng_params.ring_base_paddr >> 32);

	/* word 3 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
		(ring_entry_size * srng_params.num_entries));
	dp_info("%s: entry_size %d", __func__, ring_entry_size);
	dp_info("%s: num_entries %d", __func__, srng_params.num_entries);
	dp_info("%s: ring_size %d", __func__,
		(ring_entry_size * srng_params.num_entries));
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
						*msg_word, 1);
	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));

	/* word 4 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
		hp_addr & 0xffffffff);

	/* word 5 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
		(uint64_t)hp_addr >> 32);

	/* word 6 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
		tp_addr & 0xffffffff);

	/* word 7 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
		(uint64_t)tp_addr >> 32);

	/* word 8 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
		srng_params.msi_addr & 0xffffffff);

	/* word 9 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
		(uint64_t)(srng_params.msi_addr) >> 32);

	/* word 10 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
		srng_params.msi_data);

	/* word 11 */
	msg_word++;
	*msg_word = 0;
	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
		srng_params.intr_batch_cntr_thres_entries *
		ring_entry_size);
	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
		srng_params.intr_timer_thres_us >> 3);

	/* word 12 */
	msg_word++;
	*msg_word = 0;
	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
		/* TODO: Setting low threshold to 1/8th of ring size - see
		 * if this needs to be configurable
		 */
		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
			srng_params.low_threshold);
	}
	/* The "response_required" field should be set if an HTT response
	 * message is required after setting up the ring.
	 */
	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt)
		goto fail1;

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(htt_msg),
		qdf_nbuf_len(htt_msg),
		soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(htt_msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;

fail1:
	qdf_nbuf_free(htt_msg);
fail0:
	return QDF_STATUS_E_FAILURE;
}
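
/*
 * Worked example of the word-11 thresholds above (illustrative numbers):
 * with ring_entry_size = 8 (a 32-byte descriptor in 4-byte words),
 * intr_batch_cntr_thres_entries = 4 and intr_timer_thres_us = 64, the
 * message carries a batch threshold of 4 * 8 = 32 words and a timer
 * threshold of 64 >> 3 = 8, i.e. the timer value is programmed in
 * 8 us units.
 */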

#ifdef QCA_SUPPORT_FULL_MON
/**
 * htt_h2t_full_mon_cfg() - Send the full monitor configuration msg to FW
 *
 * @htt_soc: HTT Soc handle
 * @pdev_id: Radio id
 * @config: enable/disable configuration
 *
 * Return: Success when the HTT message is sent, error on failure
 */
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t htt_msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;

	htt_msg = qdf_nbuf_alloc(soc->osdev,
				 HTT_MSG_BUF_SIZE(
				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
				 /* reserve room for the HTC header */
				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
				 4,
				 TRUE);
	if (!htt_msg)
		return QDF_STATUS_E_FAILURE;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to expand head for full monitor mode msg",
			  __func__);
		goto fail1;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0 */
	*msg_word = 0;
	htt_logger_bufp = (uint8_t *)msg_word;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
			*msg_word, DP_SW2HW_MACID(pdev_id));

	msg_word++;
	*msg_word = 0;
	/* word 1 */
	if (config == DP_FULL_MON_ENABLE) {
		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
	} else if (config == DP_FULL_MON_DISABLE) {
		/* As per the MAC team's suggestion, while disabling full
		 * monitor mode, set the 'en' bit to true in the full monitor
		 * mode register.
		 */
		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
	}

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("HTC packet allocation failed");
		goto fail1;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(htt_msg),
		qdf_nbuf_len(htt_msg),
		soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
	qdf_info("config: %d", config);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(htt_msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
fail1:
	qdf_nbuf_free(htt_msg);
	return QDF_STATUS_E_FAILURE;
}
#else
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config config)
{
	return 0;
}

#endif

/*
 * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
 * config message to target
 * @htt_soc:	HTT SOC handle
 * @pdev_id:	WIN- PDEV Id, MCL- mac id
 * @hal_ring_hdl:	Opaque HAL SRNG pointer
 * @hal_ring_type:	SRNG ring type
 * @ring_buf_size:	SRNG buffer size
 * @htt_tlv_filter:	Rx SRNG TLV and filter setting
 *
 * Return: 0 on success; error code on failure
 */
int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
			hal_ring_handle_t hal_ring_hdl,
			int hal_ring_type, int ring_buf_size,
			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t htt_msg;
	uint32_t *msg_word;
	struct hal_srng_params srng_params;
	uint32_t htt_ring_type, htt_ring_id;
	uint32_t tlv_filter;
	uint8_t *htt_logger_bufp;
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
	int target_pdev_id;
	QDF_STATUS status;

	htt_msg = qdf_nbuf_alloc(soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!htt_msg)
		goto fail0;

	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);

	switch (hal_ring_type) {
	case RXDMA_BUF:
		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_BUF:
		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_STATUS:
		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_DST:
		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case RXDMA_MONITOR_DESC:
		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_DST:
		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Ring currently not supported", __func__);
		goto fail1;
	}

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Failed to expand head for RX Ring Cfg msg",
			__func__);
		goto fail1;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0 */
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);

	/*
	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1;
	 * SW_TO_SW and SW_TO_HW rings are unaffected by this.
	 */
	target_pdev_id =
	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);

	if (htt_ring_type == HTT_SW_TO_SW_RING ||
			htt_ring_type == HTT_SW_TO_HW_RING)
		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
						      target_pdev_id);

	/* TODO: Discuss with FW on changing this to unique ID and using
	 * htt_ring_type to send the type of ring
	 */
	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);

	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));

	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));

	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
						htt_tlv_filter->offset_valid);

	if (mon_drop_th > 0)
		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
								   1);
	else
		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
								   0);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
		ring_buf_size);

	/* word 2 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0000,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0001,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0010,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0011,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0100,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0101,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0110,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
			MGMT, 0111,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 1000,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 1001,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0000,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0001,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0010,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0011,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0100,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0101,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0110,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
			MGMT, 0111,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 1000,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 1001,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0000,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0001,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0010,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0011,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0100,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0101,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0110,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
			MGMT, 0111,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 1000,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 1001,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	/* word 3 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1010,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1011,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1100,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1101,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1110,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
			MGMT, 1111,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_RESERVED_15) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1010,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1011,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1100,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1101,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1110,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1010,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1011,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1100,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1101,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1110,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
			MGMT, 1111,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_RESERVED_15) ? 1 : 0);
	}

	/* word 4 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0000,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0001,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0010,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0011,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0100,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0101,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0110,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0111,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 1000,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 1001,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0000,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0001,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0010,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0011,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0100,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0101,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0110,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0111,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 1000,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 1001,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0000,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0001,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0010,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0011,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0100,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0101,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0110,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0111,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 1000,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 1001,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	/* word 5 */
	msg_word++;
	*msg_word = 0;
	if (htt_tlv_filter->enable_fp) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1010,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1011,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1100,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1101,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1110,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1111,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, MCAST,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, UCAST,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, NULL,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1010,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1011,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1100,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1101,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1110,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1111,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, MCAST,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, UCAST,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, NULL,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
1736 	}
1737 
1738 	if (htt_tlv_filter->enable_mo) {
1739 		/* TYPE: CTRL */
1740 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1741 			CTRL, 1010,
1742 			(htt_tlv_filter->mo_ctrl_filter &
1743 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1744 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1745 			CTRL, 1011,
1746 			(htt_tlv_filter->mo_ctrl_filter &
1747 			FILTER_CTRL_RTS) ? 1 : 0);
1748 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1749 			CTRL, 1100,
1750 			(htt_tlv_filter->mo_ctrl_filter &
1751 			FILTER_CTRL_CTS) ? 1 : 0);
1752 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1753 			CTRL, 1101,
1754 			(htt_tlv_filter->mo_ctrl_filter &
1755 			FILTER_CTRL_ACK) ? 1 : 0);
1756 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1757 			CTRL, 1110,
1758 			(htt_tlv_filter->mo_ctrl_filter &
1759 			FILTER_CTRL_CFEND) ? 1 : 0);
1760 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1761 			CTRL, 1111,
1762 			(htt_tlv_filter->mo_ctrl_filter &
1763 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1764 		/* TYPE: DATA */
1765 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1766 			DATA, MCAST,
1767 			(htt_tlv_filter->mo_data_filter &
1768 			FILTER_DATA_MCAST) ? 1 : 0);
1769 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1770 			DATA, UCAST,
1771 			(htt_tlv_filter->mo_data_filter &
1772 			FILTER_DATA_UCAST) ? 1 : 0);
1773 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1774 			DATA, NULL,
1775 			(htt_tlv_filter->mo_data_filter &
1776 			FILTER_DATA_NULL) ? 1 : 0);
1777 	}
1778 
1779 	/* word 6 */
1780 	msg_word++;
1781 	*msg_word = 0;
1782 	tlv_filter = 0;
1783 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1784 		htt_tlv_filter->mpdu_start);
1785 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1786 		htt_tlv_filter->msdu_start);
1787 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1788 		htt_tlv_filter->packet);
1789 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1790 		htt_tlv_filter->msdu_end);
1791 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1792 		htt_tlv_filter->mpdu_end);
1793 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1794 		htt_tlv_filter->packet_header);
1795 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1796 		htt_tlv_filter->attention);
1797 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1798 		htt_tlv_filter->ppdu_start);
1799 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1800 		htt_tlv_filter->ppdu_end);
1801 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1802 		htt_tlv_filter->ppdu_end_user_stats);
1803 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1804 		PPDU_END_USER_STATS_EXT,
1805 		htt_tlv_filter->ppdu_end_user_stats_ext);
1806 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1807 		htt_tlv_filter->ppdu_end_status_done);
1808 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter */
1809 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1810 		htt_tlv_filter->header_per_msdu);
1811 
1812 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1813 
1814 	msg_word++;
1815 	*msg_word = 0;
1816 	if (htt_tlv_filter->offset_valid) {
1817 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1818 					htt_tlv_filter->rx_packet_offset);
1819 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1820 					htt_tlv_filter->rx_header_offset);
1821 
1822 		msg_word++;
1823 		*msg_word = 0;
1824 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1825 					htt_tlv_filter->rx_mpdu_end_offset);
1826 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1827 					htt_tlv_filter->rx_mpdu_start_offset);
1828 
1829 		msg_word++;
1830 		*msg_word = 0;
1831 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1832 					htt_tlv_filter->rx_msdu_end_offset);
1833 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1834 					htt_tlv_filter->rx_msdu_start_offset);
1835 
1836 		msg_word++;
1837 		*msg_word = 0;
1838 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1839 					htt_tlv_filter->rx_attn_offset);
1840 		msg_word++;
1841 		*msg_word = 0;
1842 	} else {
1843 		msg_word += 4;
1844 		*msg_word = 0;
1845 	}
1846 
1847 	if (mon_drop_th > 0)
1848 		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1849 								mon_drop_th);
1850 
1851 	/* The "response_required" field should be set if an HTT response
1852 	 * message is required after setting up the ring.
1853 	 */
1854 	pkt = htt_htc_pkt_alloc(soc);
1855 	if (!pkt)
1856 		goto fail1;
1857 
1858 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1859 
1860 	SET_HTC_PACKET_INFO_TX(
1861 		&pkt->htc_pkt,
1862 		dp_htt_h2t_send_complete_free_netbuf,
1863 		qdf_nbuf_data(htt_msg),
1864 		qdf_nbuf_len(htt_msg),
1865 		soc->htc_endpoint,
1866 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1867 
1868 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1869 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
1870 				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1871 				     htt_logger_bufp);
1872 
1873 	if (status != QDF_STATUS_SUCCESS) {
1874 		qdf_nbuf_free(htt_msg);
1875 		htt_htc_pkt_free(soc, pkt);
1876 	}
1877 
1878 	return status;
1879 
1880 fail1:
1881 	qdf_nbuf_free(htt_msg);
1882 fail0:
1883 	return QDF_STATUS_E_FAILURE;
1884 }
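
/*
 * Illustration only, not driver logic: each message word above is filled
 * by OR-ing a single enable bit per frame subtype into a fixed position
 * of a 32-bit word. A minimal sketch of that shift/mask pattern follows;
 * the SKETCH_* names and bit positions are made up for illustration and
 * do not reflect the real HTT bit layout. Types come from the headers
 * already included by this file.
 */
#if 0	/* illustrative sketch, intentionally not compiled */
#define SKETCH_SUBTYPE_SHIFT(subtype)	(subtype)	/* 1 bit/subtype */
#define SKETCH_SUBTYPE_SET(word, subtype, enable) \
	((word) |= ((uint32_t)((enable) & 0x1) << SKETCH_SUBTYPE_SHIFT(subtype)))

static uint32_t sketch_pack_subtype_enables(uint16_t filter_bitmap)
{
	uint32_t word = 0;
	uint8_t subtype;

	/* one enable bit for each of the 16 possible 802.11 subtypes */
	for (subtype = 0; subtype < 16; subtype++)
		SKETCH_SUBTYPE_SET(word, subtype,
				   (filter_bitmap >> subtype) & 0x1);
	return word;
}
#endif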
1885 
1886 #if defined(HTT_STATS_ENABLE)
1887 static inline QDF_STATUS
1888 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1889 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
1890 {
1891 	uint32_t pdev_id;
1892 	uint32_t *msg_word = NULL;
1893 	uint32_t msg_remain_len = 0;
1894 
1895 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1896 
1897 	/* pdev id is carried in the low bits of the cookie MSB (word 2) */
1898 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1899 
1900 	/* stats message length + 16-byte HTT header */
1901 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1902 				(uint32_t)DP_EXT_MSG_LENGTH);
1903 
1904 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1905 			msg_word,  msg_remain_len,
1906 			WDI_NO_VAL, pdev_id);
1907 
1908 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1909 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1910 	}
1911 	/* Needs to be freed here as the WDI handler will
1912 	 * make a copy of the pkt to send data to the application
1913 	 */
1914 	qdf_nbuf_free(htt_msg);
1915 	return QDF_STATUS_SUCCESS;
1916 }
1917 #else
1918 static inline QDF_STATUS
1919 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1920 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
1921 {
1922 	return QDF_STATUS_E_NOSUPPORT;
1923 }
1924 #endif
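
/*
 * Illustration only: dp_send_htt_stat_resp() above recovers the pdev id
 * from the low bits of the cookie MSB (word 2 of the T2H message). A
 * minimal sketch of that extraction; the 0x3 mask mirrors
 * HTT_PID_BIT_MASK, and the word layout (word 1 cookie LSB, word 2
 * cookie MSB) is as used above.
 */
#if 0	/* illustrative sketch, intentionally not compiled */
static uint32_t sketch_pdev_id_from_cookie(const uint32_t *msg_word)
{
	/* word 2 carries the cookie MSB; its low bits carry the pdev id */
	return msg_word[2] & 0x3;
}
#endif
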
1925 /**
1926  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1927  * @htt_stats: htt stats info
1928  * @soc: DP SOC handle
1929  *
1930  * The FW sends the HTT EXT STATS as a stream of T2H messages, each of which
1931  * contains sub-messages identified by a TLV header. This function processes
1932  * the stream of T2H messages and reads all the TLVs contained in them.
1933  *
1934  * The following cases have been taken care of:
1935  * Case 1: tlv_remain_length <= msg_remain_length of the HTT MSG buffer.
1936  *		In this case the buffer will contain multiple tlvs.
1937  * Case 2: tlv_remain_length > msg_remain_length of the HTT MSG buffer.
1938  *		Only one tlv is contained in the HTT message and this tag
1939  *		extends onto the next buffer.
1940  * Case 3: The buffer is the continuation of the previous message.
1941  * Case 4: The tlv length is 0, which indicates the end of the message.
1942  *
1943  * Return: void
1944  */
1945 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
1946 					struct dp_soc *soc)
1947 {
1948 	htt_tlv_tag_t tlv_type = 0xff;
1949 	qdf_nbuf_t htt_msg = NULL;
1950 	uint32_t *msg_word;
1951 	uint8_t *tlv_buf_head = NULL;
1952 	uint8_t *tlv_buf_tail = NULL;
1953 	uint32_t msg_remain_len = 0;
1954 	uint32_t tlv_remain_len = 0;
1955 	uint32_t *tlv_start;
1956 	int cookie_val;
1957 	int cookie_msb;
1958 	int pdev_id;
1959 	bool copy_stats = false;
1960 	struct dp_pdev *pdev;
1961 
1962 	/* Process node in the HTT message queue */
1963 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1964 		!= NULL) {
1965 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1966 		cookie_val = *(msg_word + 1);
1967 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
1968 					*(msg_word +
1969 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
1970 
1971 		if (cookie_val) {
1972 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
1973 					== QDF_STATUS_SUCCESS) {
1974 				continue;
1975 			}
1976 		}
1977 
1978 		cookie_msb = *(msg_word + 2);
1979 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1980 		pdev = soc->pdev_list[pdev_id];
1981 
1982 		if (cookie_msb >> 2) {
1983 			copy_stats = true;
1984 		}
1985 
1986 		/* read 5th word */
1987 		msg_word = msg_word + 4;
1988 		msg_remain_len = qdf_min(htt_stats->msg_len,
1989 				(uint32_t) DP_EXT_MSG_LENGTH);
1990 		/* Keep processing the node till node length is 0 */
1991 		while (msg_remain_len) {
1992 			/*
1993 			 * if message is not a continuation of previous message
1994 			 * read the tlv type and tlv length
1995 			 */
1996 			if (!tlv_buf_head) {
1997 				tlv_type = HTT_STATS_TLV_TAG_GET(
1998 						*msg_word);
1999 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
2000 						*msg_word);
2001 			}
2002 
2003 			if (tlv_remain_len == 0) {
2004 				msg_remain_len = 0;
2005 
2006 				if (tlv_buf_head) {
2007 					qdf_mem_free(tlv_buf_head);
2008 					tlv_buf_head = NULL;
2009 					tlv_buf_tail = NULL;
2010 				}
2011 
2012 				goto error;
2013 			}
2014 
2015 			if (!tlv_buf_head)
2016 				tlv_remain_len += HTT_TLV_HDR_LEN;
2017 
2018 			if (tlv_remain_len <= msg_remain_len) {
2019 				/* Case 3 */
2020 				if (tlv_buf_head) {
2021 					qdf_mem_copy(tlv_buf_tail,
2022 							(uint8_t *)msg_word,
2023 							tlv_remain_len);
2024 					tlv_start = (uint32_t *)tlv_buf_head;
2025 				} else {
2026 					/* Case 1 */
2027 					tlv_start = msg_word;
2028 				}
2029 
2030 				if (copy_stats)
2031 					dp_htt_stats_copy_tag(pdev,
2032 							      tlv_type,
2033 							      tlv_start);
2034 				else
2035 					dp_htt_stats_print_tag(pdev,
2036 							       tlv_type,
2037 							       tlv_start);
2038 
2039 				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
2040 				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
2041 					dp_peer_update_inactive_time(pdev,
2042 								     tlv_type,
2043 								     tlv_start);
2044 
2045 				msg_remain_len -= tlv_remain_len;
2046 
2047 				msg_word = (uint32_t *)
2048 					(((uint8_t *)msg_word) +
2049 					tlv_remain_len);
2050 
2051 				tlv_remain_len = 0;
2052 
2053 				if (tlv_buf_head) {
2054 					qdf_mem_free(tlv_buf_head);
2055 					tlv_buf_head = NULL;
2056 					tlv_buf_tail = NULL;
2057 				}
2058 
2059 			} else { /* tlv_remain_len > msg_remain_len */
2060 				/* Case 2 & 3 */
2061 				if (!tlv_buf_head) {
2062 					tlv_buf_head = qdf_mem_malloc(
2063 							tlv_remain_len);
2064 
2065 					if (!tlv_buf_head) {
2066 						QDF_TRACE(QDF_MODULE_ID_TXRX,
2067 								QDF_TRACE_LEVEL_ERROR,
2068 								"Alloc failed");
2069 						goto error;
2070 					}
2071 
2072 					tlv_buf_tail = tlv_buf_head;
2073 				}
2074 
2075 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
2076 						msg_remain_len);
2077 				tlv_remain_len -= msg_remain_len;
2078 				tlv_buf_tail += msg_remain_len;
2079 			}
2080 		}
2081 
2082 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
2083 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
2084 		}
2085 
2086 		qdf_nbuf_free(htt_msg);
2087 	}
2088 	return;
2089 
2090 error:
2091 	qdf_nbuf_free(htt_msg);
2092 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
2093 			!= NULL)
2094 		qdf_nbuf_free(htt_msg);
2095 }
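
/*
 * Illustration only: for case 1 above, TLVs are walked in place with a
 * stride of header length plus payload length; cases 2/3 fall back to
 * reassembly into a contiguous allocation. A standalone sketch of the
 * in-buffer walk, assuming a made-up header with the tag in the upper
 * 16 bits and the payload length in the lower 16 (not the HTT layout):
 */
#if 0	/* illustrative sketch, intentionally not compiled */
static void sketch_walk_tlvs(const uint8_t *buf, uint32_t buf_len)
{
	uint32_t offset = 0;

	while (offset + 4 <= buf_len) {
		/* assumes 4-byte aligned TLV headers */
		uint32_t hdr = *(const uint32_t *)(buf + offset);
		uint16_t tag = hdr >> 16;	/* made-up field layout */
		uint16_t len = hdr & 0xffff;	/* payload bytes */

		if (len == 0)			/* case 4: end of message */
			break;
		if (offset + 4 + len > buf_len)	/* case 2: spans buffers, */
			break;			/* reassembly needed      */

		(void)tag;			/* case 1: process in place */
		offset += 4 + len;		/* stride to the next TLV   */
	}
}
#endif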
2096 
2097 void htt_t2h_stats_handler(void *context)
2098 {
2099 	struct dp_soc *soc = (struct dp_soc *)context;
2100 	struct htt_stats_context htt_stats;
2101 	uint32_t *msg_word;
2102 	qdf_nbuf_t htt_msg = NULL;
2103 	uint8_t done;
2104 	uint32_t rem_stats;
2105 
2106 	if (!soc) {
2107 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2108 			  "soc is NULL");
2109 		return;
2110 	}
2111 
2112 	if (!qdf_atomic_read(&soc->cmn_init_done)) {
2113 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2114 			  "soc: 0x%pK, init_done: %d", soc,
2115 			  qdf_atomic_read(&soc->cmn_init_done));
2116 		return;
2117 	}
2118 
2119 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
2120 	qdf_nbuf_queue_init(&htt_stats.msg);
2121 
2122 	/* pull one completed stats from soc->htt_stats_msg and process */
2123 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2124 	if (!soc->htt_stats.num_stats) {
2125 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
2126 		return;
2127 	}
2128 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
2129 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
2130 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
2131 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2132 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
2133 		/*
2134 		 * Done bit signifies that this is the last T2H buffer in the
2135 		 * stream of HTT EXT STATS message
2136 		 */
2137 		if (done)
2138 			break;
2139 	}
2140 	rem_stats = --soc->htt_stats.num_stats;
2141 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2142 
2143 	/* If there are more stats to process, schedule stats work again.
2144 	 * Scheduling prior to processing htt_stats queues the work item
2145 	 * at an earlier index.
2146 	 */
2147 	if (rem_stats)
2148 		qdf_sched_work(0, &soc->htt_stats.work);
2149 
2150 	dp_process_htt_stat_msg(&htt_stats, soc);
2151 }
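
/*
 * Illustration only: the handler above dequeues buffers belonging to one
 * stats request, using the DONE bit of the confirmation TLV to find the
 * last buffer of the stream. A sketch of that drain over a plain array;
 * sketch_done_bit() is a hypothetical stand-in for
 * HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET().
 */
#if 0	/* illustrative sketch, intentionally not compiled */
static int sketch_drain_one_stream(uint32_t **bufs, int num_bufs,
				   int (*sketch_done_bit)(const uint32_t *))
{
	int i;

	for (i = 0; i < num_bufs; i++) {
		/* hand bufs[i] to the current request's collector here */
		if (sketch_done_bit(bufs[i]))
			return i + 1;	/* buffers consumed by this stream */
	}
	return num_bufs;		/* stream truncated, no DONE seen */
}
#endif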
2152 
2153 /*
2154  * dp_get_ppdu_info_user_index: Find the per-user descriptor for a PPDU,
2155  * allocating a new entry if a new peer id arrives in the PPDU
2156  * @pdev: DP pdev handle
2157  * @peer_id: peer unique identifier
2158  * @ppdu_info: per ppdu tlv structure
2159  *
2160  * Return: user index to be populated
2161  */
2162 #ifdef FEATURE_PERPKT_INFO
2163 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
2164 						uint16_t peer_id,
2165 						struct ppdu_info *ppdu_info)
2166 {
2167 	uint8_t user_index = 0;
2168 	struct cdp_tx_completion_ppdu *ppdu_desc;
2169 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2170 
2171 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2172 
2173 	while ((user_index + 1) <= ppdu_info->last_user) {
2174 		ppdu_user_desc = &ppdu_desc->user[user_index];
2175 		if (ppdu_user_desc->peer_id != peer_id) {
2176 			user_index++;
2177 			continue;
2178 		} else {
2179 			/* Max users possible is 8 so user array index should
2180 			 * not exceed 7
2181 			 */
2182 			qdf_assert_always(user_index <= (ppdu_desc->max_users - 1));
2183 			return user_index;
2184 		}
2185 	}
2186 
2187 	ppdu_info->last_user++;
2188 	/* Max users possible is 8 so last user should not exceed 8 */
2189 	qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users);
2190 	return ppdu_info->last_user - 1;
2191 }
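
/*
 * Illustration only: the lookup above is a linear find-or-allocate over
 * the per-PPDU user array keyed by peer_id. A standalone sketch of the
 * same pattern; the 8-entry cap mirrors the "max users possible is 8"
 * note, and the sketch_user type is hypothetical.
 */
#if 0	/* illustrative sketch, intentionally not compiled */
#define SKETCH_MAX_USERS 8

struct sketch_user {
	uint16_t peer_id;
};

static int sketch_user_index(struct sketch_user *users, uint8_t *last_user,
			     uint16_t peer_id)
{
	uint8_t i;

	for (i = 0; i < *last_user; i++)
		if (users[i].peer_id == peer_id)
			return i;			/* existing entry */

	if (*last_user >= SKETCH_MAX_USERS)
		return -1;				/* table is full */

	users[*last_user].peer_id = peer_id;		/* allocate a slot */
	return (*last_user)++;
}
#endif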
2192 
2193 /*
2194  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
2195  * @pdev: DP pdev handle
2196  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
2197  * @ppdu_info: per ppdu tlv structure
2198  *
2199  * Return: void
2200  */
2201 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
2202 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
2203 {
2204 	uint16_t frame_type;
2205 	uint16_t frame_ctrl;
2206 	uint16_t freq;
2207 	struct dp_soc *soc = NULL;
2208 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2209 	uint64_t ppdu_start_timestamp;
2210 	uint32_t *start_tag_buf;
2211 
2212 	start_tag_buf = tag_buf;
2213 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2214 
2215 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2216 
2217 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
2218 	ppdu_info->sched_cmdid =
2219 		HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
2220 	ppdu_desc->num_users =
2221 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
2222 
2223 	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
2224 
2225 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
2226 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
2227 	ppdu_desc->htt_frame_type = frame_type;
2228 
2229 	frame_ctrl = ppdu_desc->frame_ctrl;
2230 
2231 	ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
2232 
2233 	switch (frame_type) {
2234 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
2235 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
2236 	case HTT_STATS_FTYPE_SGEN_QOS_NULL:
2237 		/*
2238 		 * for management packets, the frame type comes as DATA_SU;
2239 		 * frame_ctrl needs to be checked before setting frame_type
2240 		 */
2241 		if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
2242 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2243 		else
2244 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
2245 	break;
2246 	case HTT_STATS_FTYPE_SGEN_MU_BAR:
2247 	case HTT_STATS_FTYPE_SGEN_BAR:
2248 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
2249 	break;
2250 	default:
2251 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2252 	break;
2253 	}
2254 
2255 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
2256 	ppdu_desc->tx_duration = *tag_buf;
2257 
2258 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
2259 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
2260 
2261 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
2262 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
2263 	if (freq != ppdu_desc->channel) {
2264 		soc = pdev->soc;
2265 		ppdu_desc->channel = freq;
2266 		pdev->operating_channel.freq = freq;
2267 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
2268 			pdev->operating_channel.num =
2269 			    soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
2270 								 pdev->pdev_id,
2271 								 freq);
2272 
2273 		if (soc && soc->cdp_soc.ol_ops->freq_to_band)
2274 			pdev->operating_channel.band =
2275 			       soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
2276 								 pdev->pdev_id,
2277 								 freq);
2278 	}
2279 
2280 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
2281 
2282 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
2283 	ppdu_desc->phy_ppdu_tx_time_us =
2284 		HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf);
2285 	ppdu_desc->beam_change =
2286 		HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
2287 	ppdu_desc->doppler =
2288 		HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf);
2289 	ppdu_desc->spatial_reuse =
2290 		HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf);
2291 
2292 	dp_tx_capture_htt_frame_counter(pdev, frame_type);
2293 
2294 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
2295 	ppdu_start_timestamp = *tag_buf;
2296 	ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
2297 					     HTT_SHIFT_UPPER_TIMESTAMP) &
2298 					    HTT_MASK_UPPER_TIMESTAMP);
2299 
2300 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2301 					ppdu_desc->tx_duration;
2302 	/* Ack timestamp is the same as the end timestamp */
2303 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
2304 
2305 	ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
2306 	ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
2307 	ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
2314 }
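
/*
 * Illustration only: the TLV above delivers the 64-bit PPDU start
 * timestamp as two 32-bit words, combined via HTT_SHIFT_UPPER_TIMESTAMP
 * and HTT_MASK_UPPER_TIMESTAMP. A minimal sketch of that assembly with
 * the shift and mask written out:
 */
#if 0	/* illustrative sketch, intentionally not compiled */
static uint64_t sketch_combine_tsf(uint32_t tsf_l32, uint32_t tsf_u32)
{
	uint64_t tsf = tsf_l32;

	/* place the upper word into bits 63..32 */
	tsf |= ((uint64_t)tsf_u32 << 32) & 0xFFFFFFFF00000000ULL;
	return tsf;
}
#endif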
2315 
2316 /*
2317  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
2318  * @pdev: DP pdev handle
2319  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
2320  * @ppdu_info: per ppdu tlv structure
2321  * Return: void
2322  */
2323 static void dp_process_ppdu_stats_user_common_tlv(
2324 		struct dp_pdev *pdev, uint32_t *tag_buf,
2325 		struct ppdu_info *ppdu_info)
2326 {
2327 	uint16_t peer_id;
2328 	struct cdp_tx_completion_ppdu *ppdu_desc;
2329 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2330 	uint8_t curr_user_index = 0;
2331 	struct dp_peer *peer;
2332 	struct dp_vdev *vdev;
2333 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2334 
2335 	ppdu_desc =
2336 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2337 
2338 	tag_buf++;
2339 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2340 
2341 	curr_user_index =
2342 		dp_get_ppdu_info_user_index(pdev,
2343 					    peer_id, ppdu_info);
2344 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2345 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2346 
2347 	ppdu_desc->vdev_id =
2348 		HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
2349 
2350 	ppdu_user_desc->peer_id = peer_id;
2351 
2352 	tag_buf++;
2353 
2354 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
2355 		ppdu_user_desc->delayed_ba = 1;
2356 		ppdu_desc->delayed_ba = 1;
2357 	}
2358 
2359 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
2360 		ppdu_user_desc->is_mcast = true;
2361 		ppdu_user_desc->mpdu_tried_mcast =
2362 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
2363 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
2364 	} else {
2365 		ppdu_user_desc->mpdu_tried_ucast =
2366 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
2367 	}
2368 
2369 	tag_buf++;
2370 
2371 	ppdu_user_desc->qos_ctrl =
2372 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
2373 	ppdu_user_desc->frame_ctrl =
2374 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
2375 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
2376 
2377 	if (ppdu_user_desc->delayed_ba)
2378 		ppdu_user_desc->mpdu_success = 0;
2379 
2380 	tag_buf += 3;
2381 
2382 	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
2383 		ppdu_user_desc->ppdu_cookie =
2384 			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
2385 		ppdu_user_desc->is_ppdu_cookie_valid = 1;
2386 	}
2387 
2388 	/* returning earlier causes other fields to be left unpopulated */
2389 	if (peer_id == DP_SCAN_PEER_ID) {
2390 		vdev =
2391 		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
2392 							  ppdu_desc->vdev_id);
2393 		if (!vdev)
2394 			return;
2395 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
2396 			     QDF_MAC_ADDR_SIZE);
2397 	} else {
2398 		peer = dp_peer_find_by_id(pdev->soc, peer_id);
2399 		if (!peer) {
2400 			/*
2401 			 * fw may send a peer_id that was about to be
2402 			 * removed and has already been removed in the host.
2403 			 * e.g. for disassoc, fw sends ppdu stats with a
2404 			 * peer id equal to the previously associated
2405 			 * peer's peer_id even though that peer was removed
2406 			 */
2407 			vdev =
2408 			dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
2409 							   ppdu_desc->vdev_id);
2410 			if (!vdev)
2411 				return;
2412 			qdf_mem_copy(ppdu_user_desc->mac_addr,
2413 				     vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2414 			return;
2415 		}
2416 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2417 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2418 		dp_peer_unref_del_find_by_id(peer);
2419 	}
2420 }
2421 
2422 
2423 /**
2424  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
2425  * @pdev: DP pdev handle
2426  * @tag_buf: T2H message buffer carrying the user rate TLV
2427  * @ppdu_info: per ppdu tlv structure
2428  *
2429  * Return: void
2430  */
2431 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
2432 		uint32_t *tag_buf,
2433 		struct ppdu_info *ppdu_info)
2434 {
2435 	uint16_t peer_id;
2436 	struct dp_peer *peer;
2437 	struct cdp_tx_completion_ppdu *ppdu_desc;
2438 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2439 	uint8_t curr_user_index = 0;
2440 	struct dp_vdev *vdev;
2441 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2442 
2443 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2444 
2445 	tag_buf++;
2446 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2447 
2448 	curr_user_index =
2449 		dp_get_ppdu_info_user_index(pdev,
2450 					    peer_id, ppdu_info);
2451 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2452 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2453 	if (peer_id == DP_SCAN_PEER_ID) {
2454 		vdev =
2455 		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
2456 							  ppdu_desc->vdev_id);
2457 		if (!vdev)
2458 			return;
2459 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
2460 			     QDF_MAC_ADDR_SIZE);
2461 	} else {
2462 		peer = dp_peer_find_by_id(pdev->soc, peer_id);
2463 
2464 		if (peer) {
2465 			ppdu_desc->vdev_id = peer->vdev->vdev_id;
2466 			qdf_mem_copy(ppdu_user_desc->mac_addr,
2467 				     peer->mac_addr.raw,
2468 				     QDF_MAC_ADDR_SIZE);
2469 			dp_peer_unref_del_find_by_id(peer);
2470 		}
2471 	}
2472 
2473 	ppdu_user_desc->peer_id = peer_id;
2474 
2475 	ppdu_user_desc->tid =
2476 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
2477 
2478 	tag_buf += 1;
2479 
2480 	ppdu_user_desc->user_pos =
2481 		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
2482 	ppdu_user_desc->mu_group_id =
2483 		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
2484 
2485 	tag_buf += 1;
2486 
2487 	ppdu_user_desc->ru_start =
2488 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
2489 	ppdu_user_desc->ru_tones =
2490 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
2491 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
2492 	ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones;
2493 
2494 	tag_buf += 2;
2495 
2496 	ppdu_user_desc->ppdu_type =
2497 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
2498 
2499 	tag_buf++;
2500 	ppdu_user_desc->tx_rate = *tag_buf;
2501 
2502 	ppdu_user_desc->ltf_size =
2503 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
2504 	ppdu_user_desc->stbc =
2505 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
2506 	ppdu_user_desc->he_re =
2507 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
2508 	ppdu_user_desc->txbf =
2509 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
2510 	ppdu_user_desc->bw =
2511 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
2512 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
2513 	ppdu_desc->usr_nss_sum += ppdu_user_desc->nss;
2514 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
2515 	ppdu_user_desc->preamble =
2516 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
2517 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
2518 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
2519 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
2520 }
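
/*
 * Illustration only: ru_tones above is derived from the RU start/end
 * indices as an inclusive span, so both endpoints count. A one-line
 * sketch of that arithmetic:
 */
#if 0	/* illustrative sketch, intentionally not compiled */
static uint16_t sketch_ru_tones(uint16_t ru_start, uint16_t ru_end)
{
	/* inclusive range: (end - start) + 1 */
	return (uint16_t)((ru_end - ru_start) + 1);
}
#endif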
2521 
2522 /*
2523  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
2524  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2525  * @pdev: DP PDEV handle
2526  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2527  * @ppdu_info: per ppdu tlv structure
2528  *
2529  * Return: void
2530  */
2531 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2532 		struct dp_pdev *pdev, uint32_t *tag_buf,
2533 		struct ppdu_info *ppdu_info)
2534 {
2535 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
2536 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
2537 
2538 	struct cdp_tx_completion_ppdu *ppdu_desc;
2539 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2540 	uint8_t curr_user_index = 0;
2541 	uint16_t peer_id;
2542 	struct dp_peer *peer;
2543 	uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
2544 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2545 
2546 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2547 
2548 	tag_buf++;
2549 
2550 	peer_id =
2551 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2552 
2553 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2554 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2555 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2556 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2557 	if (peer) {
2558 		ppdu_desc->vdev_id = peer->vdev->vdev_id;
2559 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2560 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2561 		dp_peer_unref_del_find_by_id(peer);
2562 	}
2563 	ppdu_user_desc->peer_id = peer_id;
2564 
2565 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2566 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2567 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2568 
2569 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2570 						   (void *)ppdu_user_desc,
2571 						   ppdu_info->ppdu_id,
2572 						   size);
2573 }
2574 
2575 /*
2576  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
2577  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2578  * @pdev: DP PDEV handle
2579  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2580  * @ppdu_info: per ppdu tlv structure
2581  *
2582  * Return: void
2583  */
2584 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2585 		struct dp_pdev *pdev, uint32_t *tag_buf,
2586 		struct ppdu_info *ppdu_info)
2587 {
2588 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
2589 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
2590 
2591 	struct cdp_tx_completion_ppdu *ppdu_desc;
2592 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2593 	uint8_t curr_user_index = 0;
2594 	uint16_t peer_id;
2595 	struct dp_peer *peer;
2596 	uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
2597 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2598 
2599 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2600 
2601 	tag_buf++;
2602 
2603 	peer_id =
2604 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2605 
2606 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2607 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2608 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2609 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2610 	if (peer) {
2611 		ppdu_desc->vdev_id = peer->vdev->vdev_id;
2612 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2613 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2614 		dp_peer_unref_del_find_by_id(peer);
2615 	}
2616 	ppdu_user_desc->peer_id = peer_id;
2617 
2618 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2619 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2620 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2621 
2622 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2623 						   (void *)ppdu_user_desc,
2624 						   ppdu_info->ppdu_id,
2625 						   size);
2626 }
2627 
2628 /*
2629  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
2630  * htt_ppdu_stats_user_cmpltn_common_tlv
2631  * @pdev: DP PDEV handle
2632  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
2633  * @ppdu_info: per ppdu tlv structure
2634  *
2635  * Return: void
2636  */
2637 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
2638 		struct dp_pdev *pdev, uint32_t *tag_buf,
2639 		struct ppdu_info *ppdu_info)
2640 {
2641 	uint16_t peer_id;
2642 	struct dp_peer *peer;
2643 	struct cdp_tx_completion_ppdu *ppdu_desc;
2644 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2645 	uint8_t curr_user_index = 0;
2646 	uint8_t bw_iter;
2647 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
2648 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
2649 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2650 
2651 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2652 
2653 	tag_buf++;
2654 	peer_id =
2655 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
2656 
2657 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2658 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2659 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2660 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2661 	if (peer) {
2662 		ppdu_desc->vdev_id = peer->vdev->vdev_id;
2663 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2664 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2665 		dp_peer_unref_del_find_by_id(peer);
2666 	}
2667 	ppdu_user_desc->peer_id = peer_id;
2668 
2669 	ppdu_user_desc->completion_status =
2670 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
2671 				*tag_buf);
2672 
2673 	ppdu_user_desc->tid =
2674 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
2675 
2676 
2677 	tag_buf++;
2678 	if (qdf_likely(ppdu_user_desc->completion_status ==
2679 			HTT_PPDU_STATS_USER_STATUS_OK)) {
2680 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
2681 		ppdu_user_desc->ack_rssi_valid = 1;
2682 	} else {
2683 		ppdu_user_desc->ack_rssi_valid = 0;
2684 	}
2685 
2686 	tag_buf++;
2687 
2688 	ppdu_user_desc->mpdu_success =
2689 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
2690 
2691 	ppdu_user_desc->mpdu_failed =
2692 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
2693 						ppdu_user_desc->mpdu_success;
2694 
2695 	tag_buf++;
2696 
2697 	ppdu_user_desc->long_retries =
2698 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2699 
2700 	ppdu_user_desc->short_retries =
2701 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
2702 	ppdu_user_desc->retry_msdus =
2703 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
2704 
2705 	ppdu_user_desc->is_ampdu =
2706 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
2707 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
2708 
2709 	ppdu_desc->resp_type =
2710 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
2711 	ppdu_desc->mprot_type =
2712 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
2713 	ppdu_desc->rts_success =
2714 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
2715 	ppdu_desc->rts_failure =
2716 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);
2717 
2718 	ppdu_info->compltn_common_tlv++;
2719 
2720 	/*
2721 	 * A MU BAR may send a request to n users, but acks may be received
2722 	 * from only m users. To count the number of users that responded, a
2723 	 * separate per-PPDU counter bar_num_users is incremented for every
2724 	 * htt_ppdu_stats_user_cmpltn_common_tlv
2725 	 */
2726 	ppdu_desc->bar_num_users++;
2727 
2728 	tag_buf++;
2729 	for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
2730 		ppdu_user_desc->rssi_chain[bw_iter] =
2731 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
2732 		tag_buf++;
2733 	}
2734 
2735 	ppdu_user_desc->sa_tx_antenna =
2736 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);
2737 
2738 	tag_buf++;
2739 	ppdu_user_desc->sa_is_training =
2740 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
2741 	if (ppdu_user_desc->sa_is_training) {
2742 		ppdu_user_desc->sa_goodput =
2743 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
2744 	}
2745 
2746 	tag_buf++;
2747 	for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
2748 		ppdu_user_desc->sa_max_rates[bw_iter] =
2749 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
2750 	}
2751 
2752 	tag_buf += CDP_NUM_SA_BW;
2753 	ppdu_user_desc->current_rate_per =
2754 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
2755 }
2756 
2757 /*
2758  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2759  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2760  * @pdev: DP PDEV handle
2761  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2762  * @ppdu_info: per ppdu tlv structure
2763  *
2764  * Return: void
2765  */
2766 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2767 		struct dp_pdev *pdev, uint32_t *tag_buf,
2768 		struct ppdu_info *ppdu_info)
2769 {
2770 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2771 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2772 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2773 	struct cdp_tx_completion_ppdu *ppdu_desc;
2774 	uint8_t curr_user_index = 0;
2775 	uint16_t peer_id;
2776 	struct dp_peer *peer;
2777 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2778 
2779 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2780 
2781 	tag_buf++;
2782 
2783 	peer_id =
2784 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2785 
2786 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2787 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2788 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2789 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2790 	if (peer) {
2791 		ppdu_desc->vdev_id = peer->vdev->vdev_id;
2792 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2793 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2794 		dp_peer_unref_del_find_by_id(peer);
2795 	}
2796 	ppdu_user_desc->peer_id = peer_id;
2797 
2798 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2799 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2800 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2801 	ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
2802 }
2803 
2804 /*
2805  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2806  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2807  * @pdev: DP PDEV handle
2808  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2809  * @ppdu_info: per ppdu tlv structure
2810  *
2811  * Return: void
2812  */
2813 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2814 		struct dp_pdev *pdev, uint32_t *tag_buf,
2815 		struct ppdu_info *ppdu_info)
2816 {
2817 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2818 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2819 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2820 	struct cdp_tx_completion_ppdu *ppdu_desc;
2821 	uint8_t curr_user_index = 0;
2822 	uint16_t peer_id;
2823 	struct dp_peer *peer;
2824 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2825 
2826 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2827 
2828 	tag_buf++;
2829 
2830 	peer_id =
2831 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2832 
2833 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2834 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2835 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2836 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2837 	if (peer) {
2838 		ppdu_desc->vdev_id = peer->vdev->vdev_id;
2839 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2840 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2841 		dp_peer_unref_del_find_by_id(peer);
2842 	}
2843 	ppdu_user_desc->peer_id = peer_id;
2844 
2845 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2846 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2847 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2848 	ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
2849 }
2850 
2851 /*
2852  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2853  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2854  * @pdev: DP PDEV handle
2855  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2856  * @ppdu_info: per ppdu tlv structure
2857  *
2858  * Return: void
2859  */
2860 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2861 		struct dp_pdev *pdev, uint32_t *tag_buf,
2862 		struct ppdu_info *ppdu_info)
2863 {
2864 	uint16_t peer_id;
2865 	struct dp_peer *peer;
2866 	struct cdp_tx_completion_ppdu *ppdu_desc;
2867 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2868 	uint8_t curr_user_index = 0;
2869 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2870 
2871 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2872 
2873 	tag_buf += 2;
2874 	peer_id =
2875 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2876 
2877 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2878 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2879 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2880 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2881 	if (peer) {
2882 		ppdu_desc->vdev_id = peer->vdev->vdev_id;
2883 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2884 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2885 		dp_peer_unref_del_find_by_id(peer);
2886 	}
2887 	if (!ppdu_user_desc->ack_ba_tlv) {
2888 		ppdu_user_desc->ack_ba_tlv = 1;
2889 	} else {
2890 		pdev->stats.ack_ba_comes_twice++;
2891 		dp_peer_unref_del_find_by_id(peer);
2892 		return;
2893 	}
2894 
2895 	ppdu_user_desc->peer_id = peer_id;
2896 
2897 	tag_buf++;
2898 	/* do not update ppdu_desc->tid from this TLV */
2899 	ppdu_user_desc->num_mpdu =
2900 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2901 
2902 	ppdu_user_desc->num_msdu =
2903 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2904 
2905 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2906 
2907 	tag_buf++;
2908 	ppdu_user_desc->start_seq =
2909 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
2910 			*tag_buf);
2911 
2912 	tag_buf++;
2913 	ppdu_user_desc->success_bytes = *tag_buf;
2914 
2915 	/* increase ack ba tlv counter on successful mpdu */
2916 	if (ppdu_user_desc->num_mpdu)
2917 		ppdu_info->ack_ba_tlv++;
2918 
2919 	if (ppdu_user_desc->ba_size == 0) {
2920 		ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
2921 		ppdu_user_desc->ba_bitmap[0] = 1;
2922 		ppdu_user_desc->ba_size = 1;
2923 	}
2924 }
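
/*
 * Illustration only: when no BA bitmap TLV was received (ba_size == 0),
 * the code above synthesizes a minimal one-frame bitmap anchored at
 * start_seq so later per-MPDU accounting has something to walk. A sketch
 * with a hypothetical state struct:
 */
#if 0	/* illustrative sketch, intentionally not compiled */
struct sketch_ba_state {
	uint32_t ba_seq_no;
	uint32_t ba_bitmap[1];
	uint32_t ba_size;
};

static void sketch_default_ba(struct sketch_ba_state *ba, uint32_t start_seq)
{
	if (ba->ba_size == 0) {
		ba->ba_seq_no = start_seq;	/* anchor at the start seq */
		ba->ba_bitmap[0] = 1;		/* single acked frame */
		ba->ba_size = 1;
	}
}
#endif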
2925 
2926 /*
2927  * dp_process_ppdu_stats_user_common_array_tlv: Process
2928  * htt_ppdu_stats_user_common_array_tlv
2929  * @pdev: DP PDEV handle
2930  * @tag_buf: buffer containing the htt_ppdu_stats_user_common_array_tlv
2931  * @ppdu_info: per ppdu tlv structure
2932  *
2933  * Return: void
2934  */
2935 static void dp_process_ppdu_stats_user_common_array_tlv(
2936 		struct dp_pdev *pdev, uint32_t *tag_buf,
2937 		struct ppdu_info *ppdu_info)
2938 {
2939 	uint32_t peer_id;
2940 	struct cdp_tx_completion_ppdu *ppdu_desc;
2941 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2942 	uint8_t curr_user_index = 0;
2943 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2944 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2945 
2946 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2947 
2948 	tag_buf++;
2949 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2950 	tag_buf += 3;
2951 	peer_id =
2952 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2953 
2954 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
2955 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2956 			"Invalid peer");
2957 		return;
2958 	}
2959 
2960 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2961 
2962 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2963 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2964 
2965 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2966 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2967 
2968 	tag_buf++;
2969 
2970 	ppdu_user_desc->success_msdus =
2971 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2972 	ppdu_user_desc->retry_msdus =
2973 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2974 	tag_buf++;
2975 	ppdu_user_desc->failed_msdus =
2976 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2977 }
2978 
2979 /*
2980  * dp_process_ppdu_stats_user_compltn_flush_tlv: Process
2981  * htt_ppdu_stats_flush_tlv
2982  * @pdev: DP PDEV handle
2983  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2984  * @ppdu_info: per ppdu tlv structure
2985  *
2986  * Return: void
2987  */
2988 static void
2989 dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2990 					     uint32_t *tag_buf,
2991 					     struct ppdu_info *ppdu_info)
2992 {
2993 	struct cdp_tx_completion_ppdu *ppdu_desc;
2994 	uint32_t peer_id;
2995 	uint8_t tid;
2996 	struct dp_peer *peer;
2997 
2998 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2999 				qdf_nbuf_data(ppdu_info->nbuf);
3000 	ppdu_desc->is_flush = 1;
3001 
3002 	tag_buf++;
3003 	ppdu_desc->drop_reason = *tag_buf;
3004 
3005 	tag_buf++;
3006 	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
3007 	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
3008 	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);
3009 
3010 	tag_buf++;
3011 	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
3012 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
3013 
3014 	ppdu_desc->num_users = 1;
3015 	ppdu_desc->user[0].peer_id = peer_id;
3016 	ppdu_desc->user[0].tid = tid;
3017 
3018 	ppdu_desc->queue_type =
3019 			HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);
3020 
3021 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
3022 	if (!peer)
3023 		goto add_ppdu_to_sched_list;
3024 
3025 	if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
3026 		DP_STATS_INC(peer,
3027 			     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
3028 			     ppdu_desc->num_msdu);
3029 	}
3030 
3031 	dp_peer_unref_del_find_by_id(peer);
3032 
3033 add_ppdu_to_sched_list:
3034 	ppdu_info->done = 1;
3035 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
3036 	pdev->list_depth--;
3037 	TAILQ_INSERT_TAIL(&pdev->sched_comp_ppdu_list, ppdu_info,
3038 			  ppdu_info_list_elem);
3039 	pdev->sched_comp_list_depth++;
3040 }
3041 
3042 /**
3043  * dp_process_ppdu_stats_sch_cmd_status_tlv: Process schedule command status tlv
3044  * The TLV buffer itself is not processed here.
3045  * @pdev: DP PDEV handle
3046  * @ppdu_info: per ppdu tlv structure
3047  *
3048  * Return: void
3049  */
3050 static void
3051 dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev,
3052 					 struct ppdu_info *ppdu_info)
3053 {
3054 	struct cdp_tx_completion_ppdu *ppdu_desc;
3055 	struct dp_peer *peer;
3056 	uint8_t num_users;
3057 	uint8_t i;
3058 
3059 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3060 				qdf_nbuf_data(ppdu_info->nbuf);
3061 
3062 	num_users = ppdu_desc->bar_num_users;
3063 
3064 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
3065 		for (i = 0; i < num_users; i++) {
3066 			if (ppdu_desc->user[i].user_pos == 0) {
3067 				/* update phy mode for bar frame */
3068 				ppdu_desc->phy_mode =
3069 					ppdu_desc->user[i].preamble;
3070 				ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs;
3071 				break;
3072 			}
3073 		}
3074 	}
3075 
3076 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
3077 	    ppdu_desc->delayed_ba) {
3078 		qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
3079 
3080 		for (i = 0; i < ppdu_desc->num_users; i++) {
3081 			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
3082 			uint64_t start_tsf;
3083 			uint64_t end_tsf;
3084 			uint32_t ppdu_id;
3085 
3086 			ppdu_id = ppdu_desc->ppdu_id;
3087 			peer = dp_peer_find_by_id(pdev->soc,
3088 						  ppdu_desc->user[i].peer_id);
3089 			/*
3090 			 * This check is to make sure peer is not deleted
3091 			 * after processing the TLVs.
3092 			 */
3093 			if (!peer)
3094 				continue;
3095 
3096 			delay_ppdu = &peer->delayed_ba_ppdu_stats;
3097 			start_tsf = ppdu_desc->ppdu_start_timestamp;
3098 			end_tsf = ppdu_desc->ppdu_end_timestamp;
3099 			/*
3100 			 * save delayed ba user info
3101 			 */
3102 			if (ppdu_desc->user[i].delayed_ba) {
3103 				dp_peer_copy_delay_stats(peer,
3104 							 &ppdu_desc->user[i],
3105 							 ppdu_id);
3106 				peer->last_delayed_ba_ppduid = ppdu_id;
3107 				delay_ppdu->ppdu_start_timestamp = start_tsf;
3108 				delay_ppdu->ppdu_end_timestamp = end_tsf;
3109 			}
3110 			ppdu_desc->user[i].peer_last_delayed_ba =
3111 				peer->last_delayed_ba;
3112 
3113 			dp_peer_unref_del_find_by_id(peer);
3114 
3115 			if (ppdu_desc->user[i].delayed_ba &&
3116 			    !ppdu_desc->user[i].debug_copied) {
3117 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3118 					  QDF_TRACE_LEVEL_INFO_MED,
3119 					  "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n",
3120 					  __func__, __LINE__,
3121 					  ppdu_desc->ppdu_id,
3122 					  ppdu_desc->bar_ppdu_id,
3123 					  ppdu_desc->num_users,
3124 					  i,
3125 					  ppdu_desc->htt_frame_type);
3126 			}
3127 		}
3128 	}
3129 
3130 	/*
3131 	 * when the frame type is BAR and STATS_COMMON_TLV is set,
3132 	 * copy the stored peer delayed info to the BAR status
3133 	 */
3134 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
3135 		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
3136 			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
3137 			uint64_t start_tsf;
3138 			uint64_t end_tsf;
3139 
3140 			peer = dp_peer_find_by_id(pdev->soc,
3141 						  ppdu_desc->user[i].peer_id);
3142 			/*
3143 			 * This check is to make sure peer is not deleted
3144 			 * after processing the TLVs.
3145 			 */
3146 			if (!peer)
3147 				continue;
3148 
3149 			if (ppdu_desc->user[i].completion_status !=
3150 			    HTT_PPDU_STATS_USER_STATUS_OK) {
3151 				dp_peer_unref_del_find_by_id(peer);
3152 				continue;
3153 			}
3154 
3155 			delay_ppdu = &peer->delayed_ba_ppdu_stats;
3156 			start_tsf = delay_ppdu->ppdu_start_timestamp;
3157 			end_tsf = delay_ppdu->ppdu_end_timestamp;
3158 
3159 			if (peer->last_delayed_ba) {
3160 				dp_peer_copy_stats_to_bar(peer,
3161 							  &ppdu_desc->user[i]);
3162 				ppdu_desc->ppdu_id =
3163 					peer->last_delayed_ba_ppduid;
3164 				ppdu_desc->ppdu_start_timestamp = start_tsf;
3165 				ppdu_desc->ppdu_end_timestamp = end_tsf;
3166 			}
3167 			ppdu_desc->user[i].peer_last_delayed_ba =
3168 				peer->last_delayed_ba;
3169 			dp_peer_unref_del_find_by_id(peer);
3170 		}
3171 	}
3172 
3173 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
3174 	pdev->list_depth--;
3175 	TAILQ_INSERT_TAIL(&pdev->sched_comp_ppdu_list, ppdu_info,
3176 			  ppdu_info_list_elem);
3177 	pdev->sched_comp_list_depth++;
3178 }
3179 
3180 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3181 /*
3182  * dp_deliver_mgmt_frm: Deliver a management frame to the upper layers
3183  * @pdev: DP PDEV handle
3184  * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3185  *
3186  * Return: void
3187  */
3188 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
3189 {
3190 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3191 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
3192 				     nbuf, HTT_INVALID_PEER,
3193 				     WDI_NO_VAL, pdev->pdev_id);
3194 	} else {
3195 		if (!pdev->bpr_enable)
3196 			qdf_nbuf_free(nbuf);
3197 	}
3198 }
3199 #endif
3200 
3201 /*
3202  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
3203  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3204  * @pdev: DP PDEV handle
3205  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3206  * @ppdu_id: PPDU id
3207  *
3208  * Return: QDF_STATUS_SUCCESS if the nbuf is to be freed by the caller
3209  */
3210 static QDF_STATUS
3211 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
3212 					      qdf_nbuf_t tag_buf,
3213 					      uint32_t ppdu_id)
3214 {
3215 	uint32_t *nbuf_ptr;
3216 	uint8_t trim_size;
3217 	size_t head_size;
3218 	struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
3219 	uint32_t *msg_word;
3220 	uint32_t tsf_hdr;
3221 
3222 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
3223 	    (!pdev->bpr_enable) && (!pdev->tx_capture_enabled))
3224 		return QDF_STATUS_SUCCESS;
3225 
3226 	/*
3227 	 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
3228 	 */
3229 	msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
3230 	msg_word = msg_word + 2;
3231 	tsf_hdr = *msg_word;
3232 
3233 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
3234 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
3235 		      qdf_nbuf_data(tag_buf));
3236 
3237 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
3238 		return QDF_STATUS_SUCCESS;
3239 
3240 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
3241 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
3242 
3243 	if (pdev->tx_capture_enabled) {
3244 		head_size = sizeof(struct cdp_tx_mgmt_comp_info);
3245 		if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
3246 			qdf_err("Failed to get headroom, h_sz %zu h_avail %d\n",
3247 				head_size, qdf_nbuf_headroom(tag_buf));
3248 			qdf_assert_always(0);
3249 			return QDF_STATUS_E_NOMEM;
3250 		}
3251 		ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
3252 					qdf_nbuf_push_head(tag_buf, head_size);
3253 		qdf_assert_always(ptr_mgmt_comp_info);
3254 		ptr_mgmt_comp_info->ppdu_id = ppdu_id;
3255 		ptr_mgmt_comp_info->is_sgen_pkt = true;
3256 		ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
3257 	} else {
3258 		head_size = sizeof(ppdu_id);
3259 		nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
3260 		*nbuf_ptr = ppdu_id;
3261 	}
3262 
3263 	if (pdev->bpr_enable) {
3264 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
3265 				     tag_buf, HTT_INVALID_PEER,
3266 				     WDI_NO_VAL, pdev->pdev_id);
3267 	}
3268 
3269 	dp_deliver_mgmt_frm(pdev, tag_buf);
3270 
3271 	return QDF_STATUS_E_ALREADY;
3272 }
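
/*
 * Minimal sketch of the non-capture branch above (hypothetical helper,
 * assuming the nbuf has at least 4 bytes of headroom): prepend the
 * 32-bit ppdu_id so the consumer can match the payload to its PPDU.
 *
 *	static inline void example_prepend_ppdu_id(qdf_nbuf_t nbuf,
 *						   uint32_t ppdu_id)
 *	{
 *		uint32_t *hdr;
 *
 *		hdr = (uint32_t *)qdf_nbuf_push_head(nbuf, sizeof(ppdu_id));
 *		if (hdr)
 *			*hdr = ppdu_id;
 *	}
 */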
3273 
3274 /**
3275  * dp_validate_fix_ppdu_tlv(): Function to validate the length of a PPDU TLV
3276  *
3277  * If the TLV length sent as part of the PPDU TLV is less than the expected
3278  * size, i.e. the size of the corresponding data structure, pad the
3279  * remaining bytes with zeros and continue processing the TLVs
3280  *
3281  * @pdev: DP pdev handle
3282  * @tag_buf: TLV buffer
3283  * @tlv_expected_size: Expected size of Tag
3284  * @tlv_len: TLV length received from FW
3285  *
3286  * Return: Pointer to updated TLV
3287  */
3288 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
3289 						 uint32_t *tag_buf,
3290 						 uint16_t tlv_expected_size,
3291 						 uint16_t tlv_len)
3292 {
3293 	uint32_t *tlv_desc = tag_buf;
3294 
3295 	qdf_assert_always(tlv_len != 0);
3296 
3297 	if (tlv_len < tlv_expected_size) {
3298 		qdf_mem_zero(pdev->ppdu_tlv_buf, tlv_expected_size);
3299 		qdf_mem_copy(pdev->ppdu_tlv_buf, tag_buf, tlv_len);
3300 		tlv_desc = pdev->ppdu_tlv_buf;
3301 	}
3302 
3303 	return tlv_desc;
3304 }
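
/*
 * Worked example for dp_validate_fix_ppdu_tlv() (sizes hypothetical):
 * if the FW reports tlv_len = 16 while the host structure is 20 bytes,
 * the 16 received bytes are copied into pdev->ppdu_tlv_buf and the
 * trailing 4 bytes stay zeroed, so every TLV handler can safely read
 * the full structure:
 *
 *	tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 20, 16);
 *	// tlv_desc == pdev->ppdu_tlv_buf; bytes 16..19 read as zero
 */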
3305 
3306 /**
3307  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
3308  * @pdev: DP pdev handle
3309  * @tag_buf: TLV buffer
3310  * @tlv_len: length of tlv
3311  * @ppdu_info: per ppdu tlv structure
3312  *
3313  * return: void
3314  */
3315 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
3316 		uint32_t tlv_len, struct ppdu_info *ppdu_info)
3317 {
3318 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3319 	uint16_t tlv_expected_size;
3320 	uint32_t *tlv_desc;
3321 
3322 	switch (tlv_type) {
3323 	case HTT_PPDU_STATS_COMMON_TLV:
3324 		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
3325 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3326 						    tlv_expected_size, tlv_len);
3327 		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
3328 		break;
3329 	case HTT_PPDU_STATS_USR_COMMON_TLV:
3330 		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
3331 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3332 						    tlv_expected_size, tlv_len);
3333 		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
3334 						      ppdu_info);
3335 		break;
3336 	case HTT_PPDU_STATS_USR_RATE_TLV:
3337 		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
3338 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3339 						    tlv_expected_size, tlv_len);
3340 		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
3341 						    ppdu_info);
3342 		break;
3343 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
3344 		tlv_expected_size =
3345 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
3346 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3347 						    tlv_expected_size, tlv_len);
3348 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
3349 				pdev, tlv_desc, ppdu_info);
3350 		break;
3351 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
3352 		tlv_expected_size =
3353 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
3354 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3355 						    tlv_expected_size, tlv_len);
3356 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
3357 				pdev, tlv_desc, ppdu_info);
3358 		break;
3359 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
3360 		tlv_expected_size =
3361 			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
3362 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3363 						    tlv_expected_size, tlv_len);
3364 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
3365 				pdev, tlv_desc, ppdu_info);
3366 		break;
3367 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
3368 		tlv_expected_size =
3369 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
3370 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3371 						    tlv_expected_size, tlv_len);
3372 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
3373 				pdev, tlv_desc, ppdu_info);
3374 		break;
3375 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
3376 		tlv_expected_size =
3377 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
3378 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3379 						    tlv_expected_size, tlv_len);
3380 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
3381 				pdev, tlv_desc, ppdu_info);
3382 		break;
3383 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
3384 		tlv_expected_size =
3385 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
3386 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3387 						    tlv_expected_size, tlv_len);
3388 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
3389 				pdev, tlv_desc, ppdu_info);
3390 		break;
3391 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
3392 		tlv_expected_size =
3393 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
3394 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3395 						    tlv_expected_size, tlv_len);
3396 		dp_process_ppdu_stats_user_common_array_tlv(
3397 				pdev, tlv_desc, ppdu_info);
3398 		break;
3399 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
3400 		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
3401 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3402 						    tlv_expected_size, tlv_len);
3403 		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
3404 							     ppdu_info);
3405 		break;
3406 	case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV:
3407 		dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info);
3408 		break;
3409 	default:
3410 		break;
3411 	}
3412 }
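
/*
 * TLV header decode used by the dispatcher above (illustrative; the
 * exact bit layout is defined by the HTT_STATS_TLV_* macros):
 *
 *	uint32_t hdr = *tag_buf;
 *	uint32_t tag = HTT_STATS_TLV_TAG_GET(hdr);	// selects the case
 *	uint32_t len = HTT_STATS_TLV_LENGTH_GET(hdr);	// payload bytes
 */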
3413 
3414 #ifdef WLAN_ATF_ENABLE
3415 static void
3416 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
3417 				     struct cdp_tx_completion_ppdu *ppdu_desc,
3418 				     struct cdp_tx_completion_ppdu_user *user)
3419 {
3420 	uint32_t nss_ru_width_sum = 0;
3421 
3422 	if (!pdev || !ppdu_desc || !user)
3423 		return;
3424 
3425 	if (!pdev->dp_atf_stats_enable)
3426 		return;
3427 
3428 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA)
3429 		return;
3430 
3431 	nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
3432 	if (!nss_ru_width_sum)
3433 		nss_ru_width_sum = 1;
3434 
3435 	/**
3436 	 * For a SU-MIMO PPDU the phy Tx time is the same for the single user.
3437 	 * For MU-MIMO the phy Tx time is calculated per user as below:
3438 	 *     user phy tx time =
3439 	 *           Entire PPDU duration * MU Ratio * OFDMA Ratio
3440 	 *     MU Ratio = usr_nss / Sum_of_nss_of_all_users
3441 	 *     OFDMA Ratio = usr_ru_width / Sum_of_ru_width_of_all_users
3442 	 *     usr_ru_width = ru_end - ru_start + 1
3443 	 */
3444 	if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
3445 		user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us;
3446 	} else {
3447 		user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us *
3448 				user->nss * user->ru_tones) / nss_ru_width_sum;
3449 	}
3450 }
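
/*
 * Worked example for the MU branch above (all numbers hypothetical):
 * with phy_ppdu_tx_time_us = 1000, user->nss = 2, user->ru_tones = 106,
 * usr_nss_sum = 4 and usr_ru_tones_sum = 212, nss_ru_width_sum is
 * 4 * 212 = 848, so
 *
 *	user->phy_tx_time_us = (1000 * 2 * 106) / 848 = 250
 */
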
3451 #else
3452 static void
3453 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
3454 				     struct cdp_tx_completion_ppdu *ppdu_desc,
3455 				     struct cdp_tx_completion_ppdu_user *user)
3456 {
3457 }
3458 #endif
3459 
3460 /**
3461  * dp_ppdu_desc_user_stats_update(): Function to update TX user stats
3462  * @pdev: DP pdev handle
3463  * @ppdu_info: per PPDU TLV descriptor
3464  *
3465  * return: void
3466  */
3467 void
3468 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
3469 			       struct ppdu_info *ppdu_info)
3470 {
3471 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3472 	struct dp_peer *peer = NULL;
3473 	uint32_t tlv_bitmap_expected;
3474 	uint32_t tlv_bitmap_default;
3475 	uint16_t i;
3476 	uint32_t num_users;
3477 
3478 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3479 		qdf_nbuf_data(ppdu_info->nbuf);
3480 
3481 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR)
3482 		ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
3483 
3484 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
3485 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
3486 	    pdev->tx_capture_enabled) {
3487 		if (ppdu_info->is_ampdu)
3488 			tlv_bitmap_expected =
3489 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
3490 					ppdu_info->tlv_bitmap);
3491 	}
3492 
3493 	tlv_bitmap_default = tlv_bitmap_expected;
3494 
3495 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
3496 		num_users = ppdu_desc->bar_num_users;
3497 		ppdu_desc->num_users = ppdu_desc->bar_num_users;
3498 	} else {
3499 		num_users = ppdu_desc->num_users;
3500 	}
3501 	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
3502 
3503 	for (i = 0; i < num_users; i++) {
3504 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
3505 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
3506 
3507 		peer = dp_peer_find_hash_find(pdev->soc,
3508 					      ppdu_desc->user[i].mac_addr,
3509 					      0, ppdu_desc->vdev_id);
3510 		/**
3511 		 * This check makes sure the peer was not deleted
3512 		 * after the TLVs were processed.
3513 		 */
3514 		if (!peer)
3515 			continue;
3516 
3517 		ppdu_desc->user[i].cookie = (void *)peer->wlanstats_ctx;
3518 
3519 		/*
3520 		 * Different frame types (DATA, BAR, CTRL) have different
3521 		 * expected tlv bitmaps. Apart from the ACK_BA_STATUS TLV,
3522 		 * all other TLVs arrive in order/sequentially from the FW.
3523 		 * Since the ACK_BA_STATUS TLV comes from hardware, it is
3524 		 * asynchronous, so we need some TLV to confirm that all
3525 		 * TLVs for a ppdu have been received.
3526 		 * Hence we depend on both the SCHED_CMD_STATUS_TLV and the
3527 		 * ACK_BA_STATUS_TLV; for failed packets we won't get an
3528 		 * ACK_BA_STATUS_TLV.
3529 		 */
3530 		if (!(ppdu_info->tlv_bitmap &
3531 		      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) ||
3532 		    (!(ppdu_info->tlv_bitmap &
3533 		       (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
3534 		     (ppdu_desc->user[i].completion_status ==
3535 		      HTT_PPDU_STATS_USER_STATUS_OK))) {
3536 			dp_peer_unref_delete(peer);
3537 			continue;
3538 		}
3539 
3540 		/**
3541 		 * Update tx stats for data frames with a QoS as well as a
3542 		 * non-QoS data tid
3543 		 */
3544 
3545 		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
3546 		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
3547 		     (ppdu_desc->htt_frame_type ==
3548 		      HTT_STATS_FTYPE_SGEN_QOS_NULL) ||
3549 		     ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) &&
3550 		      (ppdu_desc->num_mpdu > 1))) &&
3551 		      (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {
3552 
3553 			dp_tx_stats_update(pdev, peer,
3554 					   &ppdu_desc->user[i],
3555 					   ppdu_desc->ack_rssi);
3556 			dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
3557 		}
3558 
3559 		dp_ppdu_desc_user_phy_tx_time_update(pdev, ppdu_desc,
3560 						     &ppdu_desc->user[i]);
3561 
3562 		dp_peer_unref_delete(peer);
3563 		tlv_bitmap_expected = tlv_bitmap_default;
3564 	}
3565 }
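
/*
 * The skip condition in the loop above can be restated as the
 * following predicate (editorial restatement, not driver code):
 *
 *	bool sched_done = ppdu_info->tlv_bitmap &
 *			  (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV);
 *	bool ba_done = ppdu_info->tlv_bitmap &
 *		(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV);
 *	// skip the user when !sched_done, or when it completed OK
 *	// but its BA status has not arrived yet
 */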
3566 
3567 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3568 
3569 /**
3570  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
3571  * to upper layer
3572  * @pdev: DP pdev handle
3573  * @ppdu_info: per PPDU TLV descriptor
3574  *
3575  * return: void
3576  */
3577 static
3578 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
3579 			  struct ppdu_info *ppdu_info)
3580 {
3581 	struct ppdu_info *s_ppdu_info = NULL;
3582 	struct ppdu_info *ppdu_info_next = NULL;
3583 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3584 	qdf_nbuf_t nbuf;
3585 	uint32_t time_delta = 0;
3586 	bool starved = 0;
3587 	bool matched = 0;
3588 	bool recv_ack_ba_done = 0;
3589 
3590 	if (ppdu_info->tlv_bitmap &
3591 	    (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
3592 	    ppdu_info->done)
3593 		recv_ack_ba_done = 1;
3594 
3595 	pdev->last_sched_cmdid = ppdu_info->sched_cmdid;
3596 
3597 	s_ppdu_info = TAILQ_FIRST(&pdev->sched_comp_ppdu_list);
3598 
3599 	TAILQ_FOREACH_SAFE(s_ppdu_info, &pdev->sched_comp_ppdu_list,
3600 			   ppdu_info_list_elem, ppdu_info_next) {
3601 		if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32)
3602 			time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) +
3603 					ppdu_info->tsf_l32;
3604 		else
3605 			time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32;
3606 
3607 		if (!s_ppdu_info->done && !recv_ack_ba_done) {
3608 			if (time_delta < MAX_SCHED_STARVE) {
3609 				qdf_err("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]",
3610 					pdev->pdev_id,
3611 					s_ppdu_info->ppdu_id,
3612 					s_ppdu_info->sched_cmdid,
3613 					s_ppdu_info->tlv_bitmap,
3614 					s_ppdu_info->tsf_l32,
3615 					s_ppdu_info->done);
3616 				break;
3617 			}
3618 			starved = 1;
3619 		}
3620 
3621 		pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid;
3622 		TAILQ_REMOVE(&pdev->sched_comp_ppdu_list, s_ppdu_info,
3623 			     ppdu_info_list_elem);
3624 		pdev->sched_comp_list_depth--;
3625 
3626 		nbuf = s_ppdu_info->nbuf;
3627 		qdf_assert_always(nbuf);
3628 		ppdu_desc = (struct cdp_tx_completion_ppdu *)
3629 				qdf_nbuf_data(nbuf);
3630 		ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap;
3631 
3632 		if (starved) {
3633 			qdf_err("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n",
3634 				ppdu_desc->frame_ctrl,
3635 				ppdu_desc->htt_frame_type,
3636 				ppdu_desc->tlv_bitmap,
3637 				ppdu_desc->user[0].completion_status);
3638 			starved = 0;
3639 		}
3640 
3641 		if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id &&
3642 		    ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid)
3643 			matched = 1;
3644 
3645 		dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info);
3646 
3647 		qdf_mem_free(s_ppdu_info);
3648 
3649 		/**
3650 		 * Deliver PPDU stats only for valid (acked) data
3651 		 * frames if sniffer mode is not enabled.
3652 		 * If sniffer mode is enabled, PPDU stats
3653 		 * for all frames including mgmt/control
3654 		 * frames should be delivered to upper layer
3655 		 */
3656 		if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3657 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3658 					     pdev->soc,
3659 					     nbuf, HTT_INVALID_PEER,
3660 					     WDI_NO_VAL,
3661 					     pdev->pdev_id);
3662 		} else {
3663 			if (ppdu_desc->num_mpdu != 0 &&
3664 			    ppdu_desc->num_users != 0 &&
3665 			    ppdu_desc->frame_ctrl &
3666 			    HTT_FRAMECTRL_DATATYPE) {
3667 				dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3668 						     pdev->soc,
3669 						     nbuf, HTT_INVALID_PEER,
3670 						     WDI_NO_VAL,
3671 						     pdev->pdev_id);
3672 			} else {
3673 				qdf_nbuf_free(nbuf);
3674 			}
3675 		}
3676 
3677 		if (matched)
3678 			break;
3679 	}
3680 	return;
3681 }
3682 
3683 #endif
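
/*
 * The TSF delta computed in dp_ppdu_desc_deliver() handles 32-bit
 * wraparound; a standalone sketch (hypothetical helper):
 *
 *	static inline uint32_t example_tsf_delta(uint32_t old_tsf,
 *						 uint32_t new_tsf)
 *	{
 *		if (old_tsf > new_tsf)
 *			return (MAX_TSF_32 - old_tsf) + new_tsf;
 *		return new_tsf - old_tsf;
 *	}
 */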
3684 
3685 /**
3686  * dp_get_ppdu_desc(): Function to allocate new PPDU status
3687  * desc for new ppdu id
3688  * @pdev: DP pdev handle
3689  * @ppdu_id: PPDU unique identifier
3690  * @tlv_type: TLV type received
3691  * @tsf_l32: timestamp received along with ppdu stats indication header
3692  * @max_users: Maximum user for that particular ppdu
3693  *
3694  * return: ppdu_info per ppdu tlv structure
3695  */
3696 static
3697 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
3698 				   uint8_t tlv_type, uint32_t tsf_l32,
3699 				   uint8_t max_users)
3700 {
3701 	struct ppdu_info *ppdu_info = NULL;
3702 	struct ppdu_info *s_ppdu_info = NULL;
3703 	struct ppdu_info *ppdu_info_next = NULL;
3704 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3705 	uint32_t size = 0;
3706 	struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL;
3707 	struct cdp_tx_completion_ppdu_user *tmp_user;
3708 	uint32_t time_delta;
3709 
3710 	/*
3711 	 * Check whether a node for this ppdu_id already exists
3712 	 */
3713 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3714 			   ppdu_info_list_elem, ppdu_info_next) {
3715 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
3716 			if (ppdu_info->tsf_l32 > tsf_l32)
3717 				time_delta  = (MAX_TSF_32 -
3718 					       ppdu_info->tsf_l32) + tsf_l32;
3719 			else
3720 				time_delta  = tsf_l32 - ppdu_info->tsf_l32;
3721 
3722 			if (time_delta > WRAP_DROP_TSF_DELTA) {
3723 				TAILQ_REMOVE(&pdev->ppdu_info_list,
3724 					     ppdu_info, ppdu_info_list_elem);
3725 				pdev->list_depth--;
3726 				pdev->stats.ppdu_wrap_drop++;
3727 				tmp_ppdu_desc =
3728 					(struct cdp_tx_completion_ppdu *)
3729 					qdf_nbuf_data(ppdu_info->nbuf);
3730 				tmp_user = &tmp_ppdu_desc->user[0];
3731 				QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
3732 					  QDF_TRACE_LEVEL_INFO_MED,
3733 					  "S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n",
3734 					  ppdu_info->ppdu_id,
3735 					  ppdu_info->tsf_l32,
3736 					  ppdu_info->tlv_bitmap,
3737 					  tmp_user->completion_status,
3738 					  ppdu_info->compltn_common_tlv,
3739 					  ppdu_info->ack_ba_tlv,
3740 					  ppdu_id, tsf_l32, tlv_type);
3741 				qdf_nbuf_free(ppdu_info->nbuf);
3742 				ppdu_info->nbuf = NULL;
3743 				qdf_mem_free(ppdu_info);
3744 			} else {
3745 				break;
3746 			}
3747 		}
3748 	}
3749 
3750 	/*
3751 	 * Check if this is the ack ba tlv; if its ppdu is not in the ppdu
3752 	 * info list, look it up in the sched completion ppdu list
3753 	 */
3754 	if (!ppdu_info &&
3755 	    tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) {
3756 		TAILQ_FOREACH(s_ppdu_info,
3757 			      &pdev->sched_comp_ppdu_list,
3758 			      ppdu_info_list_elem) {
3759 			if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) {
3760 				if (s_ppdu_info->tsf_l32 > tsf_l32)
3761 					time_delta  = (MAX_TSF_32 -
3762 						       s_ppdu_info->tsf_l32) +
3763 							tsf_l32;
3764 				else
3765 					time_delta  = tsf_l32 -
3766 						s_ppdu_info->tsf_l32;
3767 				if (time_delta < WRAP_DROP_TSF_DELTA) {
3768 					ppdu_info = s_ppdu_info;
3769 					break;
3770 				}
3771 			} else {
3772 				/*
3773 				 * ACK BA STATUS TLVs arrive in sequential
3774 				 * order. If we received the tlv for a later
3775 				 * ppdu while an earlier ppdu is still waiting
3776 				 * for its ACK BA STATUS TLV then, per the FW,
3777 				 * that tlv will never arrive, so the ppdu
3778 				 * info can be marked done.
3779 				 */
3780 				if (s_ppdu_info)
3781 					s_ppdu_info->done = 1;
3782 			}
3783 		}
3784 	}
3785 
3786 	if (ppdu_info) {
3787 		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
3788 			/**
3789 			 * If we get a tlv_type that has already been
3790 			 * processed for this ppdu, a new ppdu arrived with
3791 			 * the same ppdu id, so flush the older ppdu.
3792 			 * For MU-MIMO and OFDMA a PPDU carries multiple
3793 			 * users with the same tlv types; the tlv bitmap is
3794 			 * used to tell SU from MU_MIMO/OFDMA.
3795 			 */
3796 			if (!(ppdu_info->tlv_bitmap &
3797 			    (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
3798 				return ppdu_info;
3799 
3800 			ppdu_desc = (struct cdp_tx_completion_ppdu *)
3801 				qdf_nbuf_data(ppdu_info->nbuf);
3802 
3803 			/**
3804 			 * Apart from the ACK BA STATUS TLV all TLVs come in
3805 			 * order, so unless this is the ACK BA STATUS TLV of
3806 			 * a MU BAR frame we can deliver the ppdu_info.
3807 			 */
3808 			if ((tlv_type ==
3809 			     HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
3810 			    (ppdu_desc->htt_frame_type ==
3811 			     HTT_STATS_FTYPE_SGEN_MU_BAR))
3812 				return ppdu_info;
3813 
3814 			dp_ppdu_desc_deliver(pdev, ppdu_info);
3815 		} else {
3816 			return ppdu_info;
3817 		}
3818 	}
3819 
3820 	/**
3821 	 * Flush the head ppdu descriptor if the ppdu desc list reaches
3822 	 * its maximum threshold
3823 	 */
3824 	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
3825 		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
3826 		TAILQ_REMOVE(&pdev->ppdu_info_list,
3827 			     ppdu_info, ppdu_info_list_elem);
3828 		pdev->list_depth--;
3829 		pdev->stats.ppdu_drop++;
3830 		qdf_nbuf_free(ppdu_info->nbuf);
3831 		ppdu_info->nbuf = NULL;
3832 		qdf_mem_free(ppdu_info);
3833 	}
3834 
3835 	size = sizeof(struct cdp_tx_completion_ppdu) +
3836 			(max_users * sizeof(struct cdp_tx_completion_ppdu_user));
3837 
3838 	/*
3839 	 * Allocate new ppdu_info node
3840 	 */
3841 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
3842 	if (!ppdu_info)
3843 		return NULL;
3844 
3845 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size,
3846 					 0, 4, TRUE);
3847 	if (!ppdu_info->nbuf) {
3848 		qdf_mem_free(ppdu_info);
3849 		return NULL;
3850 	}
3851 
3852 	ppdu_info->ppdu_desc =
3853 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3854 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size);
3855 
3856 	if (qdf_nbuf_put_tail(ppdu_info->nbuf, size) == NULL) {
3857 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3858 				"No tailroom for HTT PPDU");
3859 		qdf_nbuf_free(ppdu_info->nbuf);
3860 		ppdu_info->nbuf = NULL;
3861 		ppdu_info->last_user = 0;
3862 		qdf_mem_free(ppdu_info);
3863 		return NULL;
3864 	}
3865 
3866 	ppdu_info->ppdu_desc->max_users = max_users;
3867 	ppdu_info->tsf_l32 = tsf_l32;
3868 	/**
3869 	 * No lock is needed because all PPDU TLVs are processed in the
3870 	 * same context, and this list is updated in that same context
3871 	 */
3872 	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
3873 			ppdu_info_list_elem);
3874 	pdev->list_depth++;
3875 	return ppdu_info;
3876 }
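
/*
 * Allocation sizing example for dp_get_ppdu_desc() (structure sizes
 * hypothetical): with a 256-byte cdp_tx_completion_ppdu and 128-byte
 * per-user records, an 8-user PPDU needs
 *
 *	size = 256 + 8 * 128 = 1280 bytes
 *
 * of nbuf data, all zeroed before the TLV handlers fill it in.
 */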
3877 
3878 /**
3879  * dp_htt_process_tlv(): Function to process each PPDU TLVs
3880  * @pdev: DP pdev handle
3881  * @htt_t2h_msg: HTT target to host message
3882  *
3883  * return: ppdu_info per ppdu tlv structure
3884  */
3885 
3886 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
3887 		qdf_nbuf_t htt_t2h_msg)
3888 {
3889 	uint32_t length;
3890 	uint32_t ppdu_id;
3891 	uint8_t tlv_type;
3892 	uint32_t tlv_length, tlv_bitmap_expected;
3893 	uint8_t *tlv_buf;
3894 	struct ppdu_info *ppdu_info = NULL;
3895 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3896 	uint8_t max_users = CDP_MU_MAX_USERS;
3897 	uint32_t tsf_l32;
3898 
3899 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
3900 
3901 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
3902 
3903 	msg_word = msg_word + 1;
3904 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
3905 
3906 	msg_word = msg_word + 1;
3907 	tsf_l32 = (uint32_t)(*msg_word);
3908 
3909 	msg_word = msg_word + 2;
3910 	while (length > 0) {
3911 		tlv_buf = (uint8_t *)msg_word;
3912 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
3913 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
3914 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
3915 			pdev->stats.ppdu_stats_counter[tlv_type]++;
3916 
3917 		if (tlv_length == 0)
3918 			break;
3919 
3920 		tlv_length += HTT_TLV_HDR_LEN;
3921 
3922 		/**
3923 		 * No separate ppdu descriptor is allocated for the MGMT
3924 		 * payload TLV, as it is sent as a separate WDI indication
3925 		 * and doesn't contain any ppdu information
3926 		 */
3927 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
3928 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
3929 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
3930 			pdev->mgmtctrl_frm_info.mgmt_buf_len =
3931 				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
3932 						(*(msg_word + 1));
3933 			msg_word =
3934 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
3935 			length -= (tlv_length);
3936 			continue;
3937 		}
3938 
3939 		/*
3940 		 * retrieve max_users if it's USERS_INFO,
3941 		 * else, it's 1 for COMPLTN_FLUSH,
3942 		 * else, use CDP_MU_MAX_USERS
3943 		 */
3944 		if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) {
3945 			max_users =
3946 				HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1));
3947 		} else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) {
3948 			max_users = 1;
3949 		}
3950 
3951 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type,
3952 					     tsf_l32, max_users);
3953 		if (!ppdu_info)
3954 			return NULL;
3955 
3956 		ppdu_info->ppdu_desc->bss_color =
3957 			pdev->rx_mon_recv_status.bsscolor;
3958 
3959 		ppdu_info->ppdu_id = ppdu_id;
3960 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
3961 
3962 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
3963 
3964 		/**
3965 		 * Increment pdev level tlv count to monitor
3966 		 * missing TLVs
3967 		 */
3968 		pdev->tlv_count++;
3969 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
3970 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
3971 		length -= (tlv_length);
3972 	}
3973 
3974 	if (!ppdu_info)
3975 		return NULL;
3976 
3977 	pdev->last_ppdu_id = ppdu_id;
3978 
3979 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
3980 
3981 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
3982 	    pdev->tx_capture_enabled) {
3983 		if (ppdu_info->is_ampdu)
3984 			tlv_bitmap_expected =
3985 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
3986 					ppdu_info->tlv_bitmap);
3987 	}
3988 
3989 	ppdu_desc = ppdu_info->ppdu_desc;
3990 
3991 	if (!ppdu_desc)
3992 		return NULL;
3993 
3994 	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
3995 	    HTT_PPDU_STATS_USER_STATUS_OK) {
3996 		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
3997 	}
3998 
3999 	/*
4000 	 * For frame types DATA and BAR we update stats per MSDU.
4001 	 * Successful msdu and mpdu counts are populated from the ACK BA
4002 	 * STATUS TLV, which comes out of order; the successful mpdu count
4003 	 * is also populated from the COMPLTN COMMON TLV, which comes in
4004 	 * order. For every ppdu_info we store the successful mpdu count
4005 	 * from both TLVs and compare them before delivering, to make sure
4006 	 * the ACK BA STATUS TLV was received. Some self-generated frames
4007 	 * never get an ack ba status tlv, so there is no need to wait.
4008 	 */
4009 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
4010 	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
4011 		/*
4012 		 * Most of the time a BAR frame will carry a duplicate ack
4013 		 * ba status tlv
4014 		 */
4015 		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
4016 		    (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))
4017 			return NULL;
4018 		/*
4019 		 * For data frames the compltn common tlv must match the ack ba
4020 		 * status tlv and the completion status. Only the first user is
4021 		 * checked: for OFDMA, completion is seen at the next MU BAR
4022 		 * frame; for MIMO, only the first user's completion is immediate.
4023 		 */
4024 		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
4025 		    (ppdu_desc->user[0].completion_status == 0 &&
4026 		     (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)))
4027 			return NULL;
4028 	}
4029 
4030 	/**
4031 	 * Once all the TLVs for a given PPDU have been processed,
4032 	 * return the PPDU status to be delivered to the higher layer.
4033 	 * tlv_bitmap_expected differs per frame type, but the SCHED CMD
4034 	 * STATUS TLV is the last TLV from the FW for a ppdu and, apart
4035 	 * from the ACK BA TLV, the FW sends TLVs in sequential order.
4036 	 * The flush tlv comes separately.
4037 	 */
4038 	if ((ppdu_info->tlv_bitmap != 0 &&
4039 	     (ppdu_info->tlv_bitmap &
4040 	      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) ||
4041 	    (ppdu_info->tlv_bitmap &
4042 	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) {
4043 		ppdu_info->done = 1;
4044 		return ppdu_info;
4045 	}
4046 
4047 	return NULL;
4048 }
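
/*
 * TLV stream walk performed above (editorial restatement): each
 * iteration advances by the payload length plus the TLV header:
 *
 *	tlv_len = HTT_STATS_TLV_LENGTH_GET(*msg_word) + HTT_TLV_HDR_LEN;
 *	msg_word = (uint32_t *)((uint8_t *)msg_word + tlv_len);
 *	length -= tlv_len;
 */
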
4049 #endif /* FEATURE_PERPKT_INFO */
4050 
4051 /**
4052  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
4053  * @soc: DP SOC handle
4054  * @pdev_id: pdev id
4055  * @htt_t2h_msg: HTT message nbuf
4056  *
4057  * return:void
4058  */
4059 #if defined(WDI_EVENT_ENABLE)
4060 #ifdef FEATURE_PERPKT_INFO
4061 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
4062 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
4063 {
4064 	struct dp_pdev *pdev;
4065 	struct ppdu_info *ppdu_info = NULL;
4066 	bool free_buf = true;
4067 
4068 	if (pdev_id >= MAX_PDEV_CNT)
4069 		return true;
4070 
4071 	pdev = soc->pdev_list[pdev_id];
4072 	if (!pdev)
4073 		return true;
4074 
4075 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
4076 	    !pdev->mcopy_mode && !pdev->bpr_enable)
4077 		return free_buf;
4078 
4079 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
4080 
4081 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
4082 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
4083 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
4084 		    QDF_STATUS_SUCCESS)
4085 			free_buf = false;
4086 	}
4087 
4088 	if (ppdu_info)
4089 		dp_ppdu_desc_deliver(pdev, ppdu_info);
4090 
4091 	pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
4092 	pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
4093 	pdev->mgmtctrl_frm_info.ppdu_id = 0;
4094 
4095 	return free_buf;
4096 }
4097 #else
4098 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
4099 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
4100 {
4101 	return true;
4102 }
4103 #endif
4104 #endif
4105 
4106 /**
4107  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
4108  * @soc: DP SOC handle
4109  * @htt_t2h_msg: HTT message nbuf
4110  *
4111  * return:void
4112  */
4113 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
4114 		qdf_nbuf_t htt_t2h_msg)
4115 {
4116 	uint8_t done;
4117 	qdf_nbuf_t msg_copy;
4118 	uint32_t *msg_word;
4119 
4120 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
4121 	msg_word = msg_word + 3;
4122 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
4123 
4124 	/*
4125 	 * HTT EXT stats response comes as stream of TLVs which span over
4126 	 * multiple T2H messages.
4127 	 * The first message will carry length of the response.
4128 	 * For rest of the messages length will be zero.
4129 	 *
4130 	 * Clone the T2H message buffer and store it in a list to process
4131 	 * it later.
4132 	 *
4133 	 * The original T2H message buffers get freed in the T2H HTT event
4134 	 * handler
4135 	 */
4136 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
4137 
4138 	if (!msg_copy) {
4139 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
4140 				"T2H message clone failed for HTT EXT STATS");
4141 		goto error;
4142 	}
4143 
4144 	qdf_spin_lock_bh(&soc->htt_stats.lock);
4145 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
4146 	/*
4147 	 * Done bit signifies that this is the last T2H buffer in the stream of
4148 	 * HTT EXT STATS message
4149 	 */
4150 	if (done) {
4151 		soc->htt_stats.num_stats++;
4152 		qdf_sched_work(0, &soc->htt_stats.work);
4153 	}
4154 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
4155 
4156 	return;
4157 
4158 error:
4159 	qdf_spin_lock_bh(&soc->htt_stats.lock);
4160 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
4161 			!= NULL) {
4162 		qdf_nbuf_free(msg_copy);
4163 	}
4164 	soc->htt_stats.num_stats = 0;
4165 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
4166 	return;
4167 
4168 }
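
/*
 * Streaming example for the EXT stats path above (message counts
 * hypothetical): a response spanning three T2H buffers is queued as
 * three clones, and only the last one carries the done bit:
 *
 *	msg 1: length = N, done = 0  -> cloned and queued
 *	msg 2: length = 0, done = 0  -> cloned and queued
 *	msg 3: length = 0, done = 1  -> queued; num_stats++, work scheduled
 */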
4169 
4170 /*
4171  * htt_soc_attach_target() - SOC level HTT setup
4172  * @htt_soc:	HTT SOC handle
4173  *
4174  * Return: 0 on success; error code on failure
4175  */
4176 int htt_soc_attach_target(struct htt_soc *htt_soc)
4177 {
4178 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
4179 
4180 	return htt_h2t_ver_req_msg(soc);
4181 }
4182 
4183 void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
4184 {
4185 	htt_soc->htc_soc = htc_soc;
4186 }
4187 
4188 HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
4189 {
4190 	return htt_soc->htc_soc;
4191 }
4192 
4193 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
4194 {
4195 	int i;
4196 	int j;
4197 	int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
4198 	struct htt_soc *htt_soc = NULL;
4199 
4200 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
4201 	if (!htt_soc) {
4202 		dp_err("HTT attach failed");
4203 		return NULL;
4204 	}
4205 
4206 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4207 		htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
4208 		if (!htt_soc->pdevid_tt[i].umac_ttt)
4209 			break;
4210 		qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
4211 		htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
4212 		if (!htt_soc->pdevid_tt[i].lmac_ttt) {
4213 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
4214 			break;
4215 		}
4216 		qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
4217 	}
4218 	if (i != MAX_PDEV_CNT) {
4219 		for (j = 0; j < i; j++) {
4220 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt);
4221 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt);
4222 		}
4223 		qdf_mem_free(htt_soc);
4224 		return NULL;
4225 	}
4226 
4227 	htt_soc->dp_soc = soc;
4228 	htt_soc->htc_soc = htc_handle;
4229 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
4230 
4231 	return htt_soc;
4232 }
4233 
4234 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
4235 /*
4236  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
4237  * @htt_soc:	 HTT SOC handle
4238  * @msg_word:    Pointer to payload
4239  * @htt_t2h_msg: HTT msg nbuf
4240  *
4241  * Return: True if buffer should be freed by caller.
4242  */
4243 static bool
4244 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
4245 				uint32_t *msg_word,
4246 				qdf_nbuf_t htt_t2h_msg)
4247 {
4248 	u_int8_t pdev_id;
4249 	u_int8_t target_pdev_id;
4250 	bool free_buf;
4251 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
4252 	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
4253 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4254 							 target_pdev_id);
4255 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
4256 			     htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
4257 			     pdev_id);
4258 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
4259 					      htt_t2h_msg);
4260 	return free_buf;
4261 }
4262 #else
4263 static bool
4264 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
4265 				uint32_t *msg_word,
4266 				qdf_nbuf_t htt_t2h_msg)
4267 {
4268 	return true;
4269 }
4270 #endif
4271 
4272 #if defined(WDI_EVENT_ENABLE) && \
4273 	!defined(REMOVE_PKT_LOG)
4274 /*
4275  * dp_pktlog_msg_handler() - Pktlog msg handler
4276  * @htt_soc:	 HTT SOC handle
4277  * @msg_word:    Pointer to payload
4278  *
4279  * Return: None
4280  */
4281 static void
4282 dp_pktlog_msg_handler(struct htt_soc *soc,
4283 		      uint32_t *msg_word)
4284 {
4285 	uint8_t pdev_id;
4286 	uint8_t target_pdev_id;
4287 	uint32_t *pl_hdr;
4288 
4289 	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
4290 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4291 							 target_pdev_id);
4292 	pl_hdr = (msg_word + 1);
4293 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
4294 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
4295 		pdev_id);
4296 }
4297 #else
4298 static void
4299 dp_pktlog_msg_handler(struct htt_soc *soc,
4300 		      uint32_t *msg_word)
4301 {
4302 }
4303 #endif
4304 
4305 /*
4306  * time_allow_print() - time allow print
4307  * @htt_ring_tt:	ring_id array of timestamps
4308  * @ring_id:		ring_id (index)
4309  *
4310  * Return: 1 for successfully saving timestamp in array
4311  *	and 0 for timestamp falling within 2 seconds after last one
4312  */
4313 static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
4314 {
4315 	unsigned long tstamp;
4316 	unsigned long delta;
4317 
4318 	tstamp = qdf_get_system_timestamp();
4319 
4320 	if (!htt_ring_tt)
4321 		return 0; //unable to print backpressure messages
4322 
4323 	if (htt_ring_tt[ring_id] == -1) {
4324 		htt_ring_tt[ring_id] = tstamp;
4325 		return 1;
4326 	}
4327 	delta = tstamp - htt_ring_tt[ring_id];
4328 	if (delta >= 2000) {
4329 		htt_ring_tt[ring_id] = tstamp;
4330 		return 1;
4331 	}
4332 
4333 	return 0;
4334 }
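
/*
 * Rate-limit example for time_allow_print() (timestamps hypothetical,
 * in ms): a first call at t=1000 stores the stamp and returns 1; calls
 * at t=1500 and t=2900 return 0 (delta < 2000); a call at t=3100
 * (delta 2100 >= 2000) stores the new stamp and returns 1 again.
 */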
4335 
4336 static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
4337 			       u_int8_t pdev_id, u_int8_t ring_id,
4338 			       u_int16_t hp_idx, u_int16_t tp_idx,
4339 			       u_int32_t bkp_time, char *ring_stype)
4340 {
4341 	dp_alert("msg_type: %d pdev_id: %d ring_type: %s ",
4342 		 msg_type, pdev_id, ring_stype);
4343 	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
4344 		 ring_id, hp_idx, tp_idx, bkp_time);
4345 }
4346 
4347 /*
4348  * dp_htt_bkp_event_alert() - htt backpressure event alert
4349  * @msg_word:	htt packet context
4350  * @htt_soc:	HTT SOC handle
4351  *
4352  * Return: after attempting to print stats
4353  */
4354 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
4355 {
4356 	u_int8_t ring_type;
4357 	u_int8_t pdev_id;
4358 	uint8_t target_pdev_id;
4359 	u_int8_t ring_id;
4360 	u_int16_t hp_idx;
4361 	u_int16_t tp_idx;
4362 	u_int32_t bkp_time;
4363 	enum htt_t2h_msg_type msg_type;
4364 	struct dp_soc *dpsoc;
4365 	struct dp_pdev *pdev;
4366 	struct dp_htt_timestamp *radio_tt;
4367 
4368 	if (!soc)
4369 		return;
4370 
4371 	dpsoc = (struct dp_soc *)soc->dp_soc;
4372 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
4373 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
4374 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
4375 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4376 							 target_pdev_id);
4377 	if (pdev_id >= MAX_PDEV_CNT) {
4378 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
4379 			  "pdev id %d is invalid", pdev_id);
4380 		return;
4381 	}
4382 
4383 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
4384 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
4385 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
4386 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
4387 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
4388 	radio_tt = &soc->pdevid_tt[pdev_id];
4389 
4390 	switch (ring_type) {
4391 	case HTT_SW_RING_TYPE_UMAC:
4392 		if (!time_allow_print(radio_tt->umac_ttt, ring_id))
4393 			return;
4394 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4395 				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
4396 	break;
4397 	case HTT_SW_RING_TYPE_LMAC:
4398 		if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
4399 			return;
4400 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4401 				   bkp_time, "HTT_SW_RING_TYPE_LMAC");
4402 	break;
4403 	default:
4404 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4405 				   bkp_time, "UNKNOWN");
4406 	break;
4407 	}
4408 
4409 	dp_print_ring_stats(pdev);
4410 	dp_print_napi_stats(pdev->soc);
4411 }
4412 
4413 /*
4414  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
4415  * @context:	Opaque context (HTT SOC handle)
4416  * @pkt:	HTC packet
4417  */
4418 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
4419 {
4420 	struct htt_soc *soc = (struct htt_soc *) context;
4421 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
4422 	u_int32_t *msg_word;
4423 	enum htt_t2h_msg_type msg_type;
4424 	bool free_buf = true;
4425 
4426 	/* check for successful message reception */
4427 	if (pkt->Status != QDF_STATUS_SUCCESS) {
4428 		if (pkt->Status != QDF_STATUS_E_CANCELED)
4429 			soc->stats.htc_err_cnt++;
4430 
4431 		qdf_nbuf_free(htt_t2h_msg);
4432 		return;
4433 	}
4434 
4435 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
4436 
4437 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
4438 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
4439 	htt_event_record(soc->htt_logger_handle,
4440 			 msg_type, (uint8_t *)msg_word);
4441 	switch (msg_type) {
4442 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
4443 	{
4444 		dp_htt_bkp_event_alert(msg_word, soc);
4445 		break;
4446 	}
4447 	case HTT_T2H_MSG_TYPE_PEER_MAP:
4448 		{
4449 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4450 			u_int8_t *peer_mac_addr;
4451 			u_int16_t peer_id;
4452 			u_int16_t hw_peer_id;
4453 			u_int8_t vdev_id;
4454 			u_int8_t is_wds;
4455 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
4456 
4457 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
4458 			hw_peer_id =
4459 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
4460 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
4461 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
4462 				(u_int8_t *) (msg_word+1),
4463 				&mac_addr_deswizzle_buf[0]);
4464 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4465 				QDF_TRACE_LEVEL_INFO,
4466 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d",
4467 				peer_id, vdev_id);
4468 
4469 			/*
4470 			 * Check if a peer already exists for this peer_id; if
4471 			 * so, this peer map event is in response to a wds peer
4472 			 * add wmi command sent during wds source port learning.
4473 			 * In that case just add the ast entry to the existing
4474 			 * peer ast_list.
4475 			 */
4476 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
4477 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
4478 					       vdev_id, peer_mac_addr, 0,
4479 					       is_wds);
4480 			break;
4481 		}
4482 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
4483 		{
4484 			u_int16_t peer_id;
4485 			u_int8_t vdev_id;
4486 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
4487 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
4488 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
4489 
4490 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
4491 						 vdev_id, mac_addr, 0,
4492 						 DP_PEER_WDS_COUNT_INVALID);
4493 			break;
4494 		}
4495 	case HTT_T2H_MSG_TYPE_SEC_IND:
4496 		{
4497 			u_int16_t peer_id;
4498 			enum cdp_sec_type sec_type;
4499 			int is_unicast;
4500 
4501 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
4502 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
4503 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
4504 			/* point to the first part of the Michael key */
4505 			msg_word++;
4506 			dp_rx_sec_ind_handler(
4507 				soc->dp_soc, peer_id, sec_type, is_unicast,
4508 				msg_word, msg_word + 2);
4509 			break;
4510 		}
4511 
4512 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
4513 		{
4514 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
4515 							     htt_t2h_msg);
4516 			break;
4517 		}
4518 
4519 	case HTT_T2H_MSG_TYPE_PKTLOG:
4520 		{
4521 			dp_pktlog_msg_handler(soc, msg_word);
4522 			break;
4523 		}
4524 
4525 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
4526 		{
4527 			htc_pm_runtime_put(soc->htc_soc);
4528 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
4529 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
4530 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4531 				"target uses HTT version %d.%d; host uses %d.%d",
4532 				soc->tgt_ver.major, soc->tgt_ver.minor,
4533 				HTT_CURRENT_VERSION_MAJOR,
4534 				HTT_CURRENT_VERSION_MINOR);
4535 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
4536 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4537 					QDF_TRACE_LEVEL_WARN,
4538 					"*** Incompatible host/target HTT versions!");
4539 			}
4540 			/* abort if the target is incompatible with the host */
4541 			qdf_assert(soc->tgt_ver.major ==
4542 				HTT_CURRENT_VERSION_MAJOR);
4543 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
4544 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4545 					QDF_TRACE_LEVEL_INFO_LOW,
4546 					"*** Warning: host/target HTT versions"
4547 					" are different, though compatible!");
4548 			}
4549 			break;
4550 		}
4551 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
4552 		{
4553 			uint16_t peer_id;
4554 			uint8_t tid;
4555 			uint8_t win_sz;
4556 			uint16_t status;
4557 			struct dp_peer *peer;
4558 
4559 			/*
4560 			 * Update REO Queue Desc with new values
4561 			 */
4562 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
4563 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
4564 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
4565 			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);
4566 
4567 			/*
4568 			 * Window size needs to be incremented by 1
4569 			 * since fw needs to represent a value of 256
4570 			 * using just 8 bits
4571 			 */
4572 			if (peer) {
4573 				status = dp_addba_requestprocess_wifi3(
4574 					(struct cdp_soc_t *)soc->dp_soc,
4575 					peer->mac_addr.raw, peer->vdev->vdev_id,
4576 					0, tid, 0, win_sz + 1, 0xffff);
4577 
4578 				/*
4579 				 * If PEER_LOCK_REF_PROTECT is enabled, drop
4580 				 * the ref taken by dp_peer_find_by_id
4581 				 */
4582 				dp_peer_unref_del_find_by_id(peer);
4583 
4584 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4585 					QDF_TRACE_LEVEL_INFO,
4586 					FL("PeerID %d BAW %d TID %d stat %d"),
4587 					peer_id, win_sz, tid, status);
4588 
4589 			} else {
4590 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4591 					QDF_TRACE_LEVEL_ERROR,
4592 					FL("Peer not found peer id %d"),
4593 					peer_id);
4594 			}
4595 			break;
4596 		}
4597 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
4598 		{
4599 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
4600 			break;
4601 		}
4602 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
4603 		{
4604 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4605 			u_int8_t *peer_mac_addr;
4606 			u_int16_t peer_id;
4607 			u_int16_t hw_peer_id;
4608 			u_int8_t vdev_id;
4609 			bool is_wds;
4610 			u_int16_t ast_hash;
4611 			struct dp_ast_flow_override_info ast_flow_info;
4612 
4613 			qdf_mem_set(&ast_flow_info, 0,
4614 					    sizeof(struct dp_ast_flow_override_info));
4615 
4616 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
4617 			hw_peer_id =
4618 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
4619 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
4620 			peer_mac_addr =
4621 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
4622 						   &mac_addr_deswizzle_buf[0]);
4623 			is_wds =
4624 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
4625 			ast_hash =
4626 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
4627 			/*
4628 			 * Update 4 ast_index per peer, ast valid mask
4629 			 * and TID flow valid mask.
4630 			 * The AST valid mask is a 3 bit field corresponding
4631 			 * to ast_index[3:1]; ast_index 0 is always valid.
4632 			 */
4633 			ast_flow_info.ast_valid_mask =
4634 			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
4635 			ast_flow_info.ast_idx[0] = hw_peer_id;
4636 			ast_flow_info.ast_flow_mask[0] =
4637 			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
4638 			ast_flow_info.ast_idx[1] =
4639 			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
4640 			ast_flow_info.ast_flow_mask[1] =
4641 			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
4642 			ast_flow_info.ast_idx[2] =
4643 			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
4644 			ast_flow_info.ast_flow_mask[2] =
4645 			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
4646 			ast_flow_info.ast_idx[3] =
4647 			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
4648 			ast_flow_info.ast_flow_mask[3] =
4649 			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
4650 			/*
4651 			 * TID valid mask is applicable only
4652 			 * for HI and LOW priority flows.
4653 			 * tid_valid_mask is an 8 bit field corresponding
4654 			 * to TID[7:0]
4655 			 */
4656 			ast_flow_info.tid_valid_low_pri_mask =
4657 			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
4658 			ast_flow_info.tid_valid_hi_pri_mask =
4659 			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));
4660 
4661 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4662 				  QDF_TRACE_LEVEL_INFO,
4663 				  "HTT_T2H_MSG_TYPE_PEER_MAP_V2 msg for peer id %d vdev id %d",
4664 				  peer_id, vdev_id);
4665 
4666 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
4667 					       hw_peer_id, vdev_id,
4668 					       peer_mac_addr, ast_hash,
4669 					       is_wds);
4670 
4671 			/*
4672 			 * Update ast indexes for flow override support
4673 			 * Applicable only for non wds peers
4674 			 */
4675 			dp_peer_ast_index_flow_queue_map_create(
4676 					    soc->dp_soc, is_wds,
4677 					    peer_id, peer_mac_addr,
4678 					    &ast_flow_info);
4679 
4680 			break;
4681 		}
4682 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
4683 		{
4684 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4685 			u_int8_t *mac_addr;
4686 			u_int16_t peer_id;
4687 			u_int8_t vdev_id;
4688 			u_int8_t is_wds;
4689 			u_int32_t free_wds_count;
4690 
4691 			peer_id =
4692 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
4693 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
4694 			mac_addr =
4695 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
4696 						   &mac_addr_deswizzle_buf[0]);
4697 			is_wds =
4698 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
4699 			free_wds_count =
4700 			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));
4701 
4702 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4703 				  QDF_TRACE_LEVEL_INFO,
4704 				  "HTT_T2H_MSG_TYPE_PEER_UNMAP_V2 msg for peer id %d vdev id %d",
4705 				  peer_id, vdev_id);
4706 
4707 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
4708 						 vdev_id, mac_addr,
4709 						 is_wds, free_wds_count);
4710 			break;
4711 		}
4712 	case HTT_T2H_MSG_TYPE_RX_DELBA:
4713 		{
4714 			uint16_t peer_id;
4715 			uint8_t tid;
4716 			uint8_t win_sz;
4717 			QDF_STATUS status;
4718 
4719 			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
4720 			tid = HTT_RX_DELBA_TID_GET(*msg_word);
4721 			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);
4722 
4723 			status = dp_rx_delba_ind_handler(
4724 				soc->dp_soc,
4725 				peer_id, tid, win_sz);
4726 
4727 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4728 				  QDF_TRACE_LEVEL_INFO,
4729 				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
4730 				  peer_id, win_sz, tid, status);
4731 			break;
4732 		}
4733 	default:
4734 		break;
4735 	}
4736 
4737 	/* Free the indication buffer */
4738 	if (free_buf)
4739 		qdf_nbuf_free(htt_t2h_msg);
4740 }
4741 
4742 /*
4743  * dp_htt_h2t_full() - Send full handler (called from HTC)
4744  * @context:	Opaque context (HTT SOC handle)
4745  * @pkt:	HTC packet
4746  *
4747  * Return: enum htc_send_full_action
4748  */
4749 static enum htc_send_full_action
4750 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
4751 {
4752 	return HTC_SEND_FULL_KEEP;
4753 }
4754 
4755 /*
4756  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
4757  * @context:	Opaque context (HTT SOC handle)
4758  * @nbuf:	nbuf containing T2H message
4759  * @pipe_id:	HIF pipe ID
4760  *
4761  * Return: QDF_STATUS
4762  *
4763  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
4764  * will be used for packet log and other high-priority HTT messages. Proper
4765  * HTC connection to be added later once required FW changes are available
4766  */
4767 static QDF_STATUS
4768 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
4769 {
4770 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
4771 	HTC_PACKET htc_pkt;
4772 
4773 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
4774 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
4775 	htc_pkt.Status = QDF_STATUS_SUCCESS;
4776 	htc_pkt.pPktContext = (void *)nbuf;
4777 	dp_htt_t2h_msg_handler(context, &htc_pkt);
4778 
4779 	return rc;
4780 }
4781 
4782 /*
4783  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
4784  * @htt_soc:	HTT SOC handle
4785  *
4786  * Return: QDF_STATUS
4787  */
4788 static QDF_STATUS
4789 htt_htc_soc_attach(struct htt_soc *soc)
4790 {
4791 	struct htc_service_connect_req connect;
4792 	struct htc_service_connect_resp response;
4793 	QDF_STATUS status;
4794 	struct dp_soc *dpsoc = soc->dp_soc;
4795 
4796 	qdf_mem_zero(&connect, sizeof(connect));
4797 	qdf_mem_zero(&response, sizeof(response));
4798 
4799 	connect.pMetaData = NULL;
4800 	connect.MetaDataLength = 0;
4801 	connect.EpCallbacks.pContext = soc;
4802 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
4803 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
4804 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
4805 
4806 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
4807 	connect.EpCallbacks.EpRecvRefill = NULL;
4808 
4809 	/* N/A, fill is done by HIF */
4810 	connect.EpCallbacks.RecvRefillWaterMark = 1;
4811 
4812 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
4813 	/*
4814 	 * Specify how deep to let a queue get before htc_send_pkt will
4815 	 * call the EpSendFull function due to excessive send queue depth.
4816 	 */
4817 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
4818 
4819 	/* disable flow control for HTT data message service */
4820 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
4821 
4822 	/* connect to control service */
4823 	connect.service_id = HTT_DATA_MSG_SVC;
4824 
4825 	status = htc_connect_service(soc->htc_soc, &connect, &response);
4826 
4827 	if (status != QDF_STATUS_SUCCESS)
4828 		return status;
4829 
4830 	soc->htc_endpoint = response.Endpoint;
4831 
4832 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
4833 
4834 	htt_interface_logging_init(&soc->htt_logger_handle);
4835 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
4836 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
4837 
4838 	return QDF_STATUS_SUCCESS; /* success */
4839 }
4840 
4841 /*
4842  * htt_soc_initialize() - SOC level HTT initialization
4843  * @htt_soc: Opaque htt SOC handle
4844  * @ctrl_psoc: Opaque ctrl SOC handle
4845  * @htc_soc: SOC level HTC handle
4846  * @hal_soc: Opaque HAL SOC handle
4847  * @osdev: QDF device
4848  *
4849  * Return: HTT handle on success; NULL on failure
4850  */
4851 void *
4852 htt_soc_initialize(struct htt_soc *htt_soc,
4853 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
4854 		   HTC_HANDLE htc_soc,
4855 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
4856 {
4857 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
4858 
4859 	soc->osdev = osdev;
4860 	soc->ctrl_psoc = ctrl_psoc;
4861 	soc->htc_soc = htc_soc;
4862 	soc->hal_soc = hal_soc_hdl;
4863 
4864 	if (htt_htc_soc_attach(soc))
4865 		goto fail2;
4866 
4867 	return soc;
4868 
4869 fail2:
4870 	return NULL;
4871 }
4872 
4873 void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
4874 {
4875 	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
4876 	htt_htc_misc_pkt_pool_free(htt_handle);
4877 	htt_htc_pkt_pool_free(htt_handle);
4878 }
4879 
4880 /*
4881  * htt_soc_htc_prealloc() - HTC memory prealloc
4882  * @htt_soc: SOC level HTT handle
4883  *
4884  * Return: QDF_STATUS_SUCCESS on Success or
4885  * QDF_STATUS_E_NOMEM on allocation failure
4886  */
4887 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
4888 {
4889 	int i;
4890 
4891 	soc->htt_htc_pkt_freelist = NULL;
4892 	/* pre-allocate some HTC_PACKET objects */
4893 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
4894 		struct dp_htt_htc_pkt_union *pkt;
4895 		pkt = qdf_mem_malloc(sizeof(*pkt));
4896 		if (!pkt)
4897 			return QDF_STATUS_E_NOMEM;
4898 
4899 		htt_htc_pkt_free(soc, &pkt->u.pkt);
4900 	}
4901 	return QDF_STATUS_SUCCESS;
4902 }
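
/*
 * Prealloc pattern above (illustrative): freshly allocated packets are
 * immediately released into the freelist, so later allocations can pop
 * them without touching the heap:
 *
 *	struct dp_htt_htc_pkt *pkt = htt_htc_pkt_alloc(soc);
 *	...
 *	htt_htc_pkt_free(soc, pkt);	// returns it to the freelist
 */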
4903 
4904 /*
4905  * htt_soc_detach() - Free SOC level HTT handle
4906  * @htt_hdl: HTT SOC handle
4907  */
4908 void htt_soc_detach(struct htt_soc *htt_hdl)
4909 {
4910 	int i;
4911 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
4912 
4913 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4914 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
4915 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
4916 	}
4917 
4918 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
4919 	qdf_mem_free(htt_handle);
4920 
4921 }
4922 
4923 /**
4924  * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
4925  * @pdev: DP PDEV handle
4926  * @stats_type_upload_mask: stats type requested by user
4927  * @config_param_0: extra configuration parameters
4928  * @config_param_1: extra configuration parameters
4929  * @config_param_2: extra configuration parameters
4930  * @config_param_3: extra configuration parameters
4931  * @mac_id: mac number
4932  *
4933  * return: QDF STATUS
4934  */
4935 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
4936 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
4937 		uint32_t config_param_1, uint32_t config_param_2,
4938 		uint32_t config_param_3, int cookie_val, int cookie_msb,
4939 		uint8_t mac_id)
4940 {
4941 	struct htt_soc *soc = pdev->soc->htt_handle;
4942 	struct dp_htt_htc_pkt *pkt;
4943 	qdf_nbuf_t msg;
4944 	uint32_t *msg_word;
4945 	uint8_t pdev_mask = 0;
4946 	uint8_t *htt_logger_bufp;
4947 	int mac_for_pdev;
4948 	int target_pdev_id;
4949 	QDF_STATUS status;
4950 
4951 	msg = qdf_nbuf_alloc(
4952 			soc->osdev,
4953 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
4954 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4955 
4956 	if (!msg)
4957 		return QDF_STATUS_E_NOMEM;
4958 
4959 	/*TODO:Add support for SOC stats
4960 	 * Bit 0: SOC Stats
4961 	 * Bit 1: Pdev stats for pdev id 0
4962 	 * Bit 2: Pdev stats for pdev id 1
4963 	 * Bit 3: Pdev stats for pdev id 2
4964 	 */
4965 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4966 	target_pdev_id =
4967 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
4968 
4969 	pdev_mask = 1 << target_pdev_id;
4970 
4971 	/*
4972 	 * Set the length of the message.
4973 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4974 	 * separately during the below call to qdf_nbuf_push_head.
4975 	 * The contribution from the HTC header is added separately inside HTC.
4976 	 */
4977 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
4978 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4979 				"Failed to expand head for HTT_EXT_STATS");
4980 		qdf_nbuf_free(msg);
4981 		return QDF_STATUS_E_FAILURE;
4982 	}
4983 
4984 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4985 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
4986 		"config_param_1 %u\n config_param_2 %u\n"
		"config_param_3 %u\n -------------",
		__func__, __LINE__, cookie_val, config_param_0,
		config_param_1, config_param_2, config_param_3);
4990 
4991 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
4992 
4993 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4994 	htt_logger_bufp = (uint8_t *)msg_word;
4995 	*msg_word = 0;
4996 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
4997 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
4998 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
4999 
5000 	/* word 1 */
5001 	msg_word++;
5002 	*msg_word = 0;
5003 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
5004 
5005 	/* word 2 */
5006 	msg_word++;
5007 	*msg_word = 0;
5008 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
5009 
5010 	/* word 3 */
5011 	msg_word++;
5012 	*msg_word = 0;
5013 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
5014 
5015 	/* word 4 */
5016 	msg_word++;
5017 	*msg_word = 0;
5018 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
5019 
5022 	/* word 5 */
5023 	msg_word++;
5024 
5025 	/* word 6 */
5026 	msg_word++;
5027 	*msg_word = 0;
5028 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
5029 
5030 	/* word 7 */
5031 	msg_word++;
5032 	*msg_word = 0;
	/*
	 * Use the lower 2 bits of cookie_msb to carry the pdev_id, so the
	 * stats response can be mapped back to the requesting pdev; the
	 * original cookie_msb is recoverable as (cookie_msb >> 2).
	 */
	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
5035 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
5036 
5037 	pkt = htt_htc_pkt_alloc(soc);
5038 	if (!pkt) {
5039 		qdf_nbuf_free(msg);
5040 		return QDF_STATUS_E_NOMEM;
5041 	}
5042 
5043 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5044 
5045 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5046 			dp_htt_h2t_send_complete_free_netbuf,
5047 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
5048 			soc->htc_endpoint,
5049 			/* tag for FW response msg not guaranteed */
5050 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
5051 
5052 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5053 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
5054 				     htt_logger_bufp);
5055 
5056 	if (status != QDF_STATUS_SUCCESS) {
5057 		qdf_nbuf_free(msg);
5058 		htt_htc_pkt_free(soc, pkt);
5059 	}
5060 
5061 	return status;
5062 }
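
/*
 * Illustrative sketch (compiled out): how a caller might request an
 * extended-stats upload through dp_h2t_ext_stats_msg_send(). The stats
 * type id and the zero cookie/config values are assumptions for
 * illustration; real callers derive them from the cdp stats request.
 */
#if 0
static QDF_STATUS example_request_ext_stats(struct dp_pdev *pdev)
{
	return dp_h2t_ext_stats_msg_send(pdev,
					 1,	/* stats_type_upload_mask */
					 0,	/* config_param_0 */
					 0,	/* config_param_1 */
					 0,	/* config_param_2 */
					 0,	/* config_param_3 */
					 0,	/* cookie_val */
					 0,	/* cookie_msb */
					 0);	/* mac_id */
}
#endif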
5063 
5064 /**
 * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration
 * HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @tuple_mask: tuple configuration to report 3 tuple hash value in either
 * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
 * @mac_id: mac id number
5070  *
5071  * tuple_mask[1:0]:
5072  *   00 - Do not report 3 tuple hash value
5073  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
5074  *   01 - Report 3 tuple hash value in flow_id_toeplitz
5075  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
5076  *
5077  * return: QDF STATUS
5078  */
5079 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
5080 				     uint32_t tuple_mask, uint8_t mac_id)
5081 {
5082 	struct htt_soc *soc = pdev->soc->htt_handle;
5083 	struct dp_htt_htc_pkt *pkt;
5084 	qdf_nbuf_t msg;
5085 	uint32_t *msg_word;
5086 	uint8_t *htt_logger_bufp;
5087 	int mac_for_pdev;
	int target_pdev_id;
	QDF_STATUS status;
5089 
5090 	msg = qdf_nbuf_alloc(
5091 			soc->osdev,
5092 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
5093 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5094 
5095 	if (!msg)
5096 		return QDF_STATUS_E_NOMEM;
5097 
5098 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
5099 	target_pdev_id =
5100 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
5101 
5102 	/*
5103 	 * Set the length of the message.
5104 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5105 	 * separately during the below call to qdf_nbuf_push_head.
5106 	 * The contribution from the HTC header is added separately inside HTC.
5107 	 */
5108 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
5109 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5110 			  "Failed to expand head for HTT_3TUPLE_CONFIG");
5111 		qdf_nbuf_free(msg);
5112 		return QDF_STATUS_E_FAILURE;
5113 	}
5114 
5115 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5116 		  "config_param_sent %s:%d 0x%x for target_pdev %d\n -------------",
5117 		  __func__, __LINE__, tuple_mask, target_pdev_id);
5118 
5119 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
5120 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5121 	htt_logger_bufp = (uint8_t *)msg_word;
5122 
5123 	*msg_word = 0;
5124 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
5125 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
5126 
5127 	msg_word++;
5128 	*msg_word = 0;
5129 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
5130 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
5131 
5132 	pkt = htt_htc_pkt_alloc(soc);
5133 	if (!pkt) {
5134 		qdf_nbuf_free(msg);
5135 		return QDF_STATUS_E_NOMEM;
5136 	}
5137 
5138 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5139 
5140 	SET_HTC_PACKET_INFO_TX(
5141 			&pkt->htc_pkt,
5142 			dp_htt_h2t_send_complete_free_netbuf,
5143 			qdf_nbuf_data(msg),
5144 			qdf_nbuf_len(msg),
5145 			soc->htc_endpoint,
5146 			/* tag for no FW response msg */
5147 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
5148 
5149 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
5154 }
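
/*
 * Illustrative sketch (compiled out): enabling 3-tuple hash reporting in
 * both rx TLV fields. Per the tuple_mask encoding documented above, 0x3
 * selects both toeplitz_2_or_4 and flow_id_toeplitz; mac id 0 is an
 * assumption for illustration.
 */
#if 0
static QDF_STATUS example_enable_3tuple_hash(struct dp_pdev *pdev)
{
	return dp_h2t_3tuple_config_send(pdev, 0x3, 0);
}
#endif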
5155 
/* This guard can be removed once HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is
 * properly defined in the HTT header file (htt.h).
 */
5159 #if defined(WDI_EVENT_ENABLE)
5160 /**
5161  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
5162  * @pdev: DP PDEV handle
5163  * @stats_type_upload_mask: stats type requested by user
5164  * @mac_id: Mac id number
5165  *
5166  * return: QDF STATUS
5167  */
5168 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
5169 		uint32_t stats_type_upload_mask, uint8_t mac_id)
5170 {
5171 	struct htt_soc *soc = pdev->soc->htt_handle;
5172 	struct dp_htt_htc_pkt *pkt;
5173 	qdf_nbuf_t msg;
5174 	uint32_t *msg_word;
5175 	uint8_t pdev_mask;
5176 	QDF_STATUS status;
5177 
5178 	msg = qdf_nbuf_alloc(
5179 			soc->osdev,
5180 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
5181 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
5182 
5183 	if (!msg) {
5184 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Failed to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
5186 		qdf_assert(0);
5187 		return QDF_STATUS_E_NOMEM;
5188 	}
5189 
	/* TODO: Add support for SOC stats
5191 	 * Bit 0: SOC Stats
5192 	 * Bit 1: Pdev stats for pdev id 0
5193 	 * Bit 2: Pdev stats for pdev id 1
5194 	 * Bit 3: Pdev stats for pdev id 2
5195 	 */
5196 	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
5197 								mac_id);
5198 
5199 	/*
5200 	 * Set the length of the message.
5201 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5202 	 * separately during the below call to qdf_nbuf_push_head.
5203 	 * The contribution from the HTC header is added separately inside HTC.
5204 	 */
5205 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
5206 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5207 				"Failed to expand head for HTT_CFG_STATS");
5208 		qdf_nbuf_free(msg);
5209 		return QDF_STATUS_E_FAILURE;
5210 	}
5211 
5212 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
5213 
5214 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5215 	*msg_word = 0;
5216 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
5217 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
5218 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
5219 			stats_type_upload_mask);
5220 
5221 	pkt = htt_htc_pkt_alloc(soc);
5222 	if (!pkt) {
5223 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Failed to allocate dp_htt_htc_pkt buffer");
5225 		qdf_assert(0);
5226 		qdf_nbuf_free(msg);
5227 		return QDF_STATUS_E_NOMEM;
5228 	}
5229 
5230 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5231 
5232 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5233 			dp_htt_h2t_send_complete_free_netbuf,
5234 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
5235 			soc->htc_endpoint,
5236 			/* tag for no FW response msg */
5237 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
5238 
5239 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5240 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
5241 				     (uint8_t *)msg_word);
5242 
5243 	if (status != QDF_STATUS_SUCCESS) {
5244 		qdf_nbuf_free(msg);
5245 		htt_htc_pkt_free(soc, pkt);
5246 	}
5247 
5248 	return status;
5249 }
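
/*
 * Illustrative sketch (compiled out): requesting PPDU stats TLVs on mac 0.
 * The 0xffff bitmask is a hypothetical "request everything" value, not a
 * defined constant; real callers pass the TLV bitmap they care about.
 */
#if 0
static QDF_STATUS example_enable_ppdu_stats(struct dp_pdev *pdev)
{
	return dp_h2t_cfg_stats_msg_send(pdev, 0xffff, 0);
}
#endif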
5250 #endif
5251 
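/**
 * dp_peer_update_inactive_time() - update peer inactive time from FW stats
 * @pdev: DP PDEV handle
 * @tag_type: HTT TLV tag type
 * @tag_buf: TLV buffer received from FW
 *
 * The peer details TLV carries the sw_peer_id, which is cached on the pdev
 * and then used to look up the peer when the subsequent peer stats TLV
 * arrives with the inactive time.
 *
 * Return: None
 */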
5252 void
5253 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
5254 			     uint32_t *tag_buf)
5255 {
5256 	switch (tag_type) {
5257 	case HTT_STATS_PEER_DETAILS_TAG:
5258 	{
5259 		htt_peer_details_tlv *dp_stats_buf =
5260 			(htt_peer_details_tlv *)tag_buf;
5261 
5262 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
5263 	}
5264 	break;
5265 	case HTT_STATS_PEER_STATS_CMN_TAG:
5266 	{
5267 		htt_peer_stats_cmn_tlv *dp_stats_buf =
5268 			(htt_peer_stats_cmn_tlv *)tag_buf;
5269 
5270 		struct dp_peer *peer = dp_peer_find_by_id(pdev->soc,
5271 						pdev->fw_stats_peer_id);
5272 
5273 		if (peer && !peer->bss_peer) {
5274 			peer->stats.tx.inactive_time =
5275 				dp_stats_buf->inactive_time;
5276 			qdf_event_set(&pdev->fw_peer_stats_event);
5277 		}
5278 		if (peer)
5279 			dp_peer_unref_del_find_by_id(peer);
5280 	}
5281 	break;
5282 	default:
		qdf_err("Invalid tag_type: %u", tag_type);
5284 	}
5285 }
5286 
5287 /**
5288  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
5289  * @pdev: DP pdev handle
5290  * @fse_setup_info: FST setup parameters
5291  *
5292  * Return: Success when HTT message is sent, error on failure
5293  */
5294 QDF_STATUS
5295 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
5296 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
5297 {
5298 	struct htt_soc *soc = pdev->soc->htt_handle;
5299 	struct dp_htt_htc_pkt *pkt;
5300 	qdf_nbuf_t msg;
5301 	u_int32_t *msg_word;
5302 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
5303 	uint8_t *htt_logger_bufp;
5304 	u_int32_t *key;
5305 	QDF_STATUS status;
5306 
5307 	msg = qdf_nbuf_alloc(
5308 		soc->osdev,
5309 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
5310 		/* reserve room for the HTC header */
5311 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5312 
5313 	if (!msg)
5314 		return QDF_STATUS_E_NOMEM;
5315 
5316 	/*
5317 	 * Set the length of the message.
5318 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5319 	 * separately during the below call to qdf_nbuf_push_head.
5320 	 * The contribution from the HTC header is added separately inside HTC.
5321 	 */
5322 	if (!qdf_nbuf_put_tail(msg,
5323 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
		/* free the nbuf before bailing out to avoid a leak */
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
5326 	}
5327 
5328 	/* fill in the message contents */
5329 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5330 
5331 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
5332 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5333 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5334 	htt_logger_bufp = (uint8_t *)msg_word;
5335 
5336 	*msg_word = 0;
5337 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
5338 
5339 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
5340 
5341 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
5342 
5343 	msg_word++;
5344 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
5345 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
5346 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
5347 					     fse_setup_info->ip_da_sa_prefix);
5348 
5349 	msg_word++;
5350 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
5351 					  fse_setup_info->base_addr_lo);
5352 	msg_word++;
5353 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
5354 					  fse_setup_info->base_addr_hi);
5355 
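	/* copy the caller's Toeplitz hash key into the message 32 bits at
	 * a time; the field names give the key bit ranges
	 */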
5356 	key = (u_int32_t *)fse_setup_info->hash_key;
5357 	fse_setup->toeplitz31_0 = *key++;
5358 	fse_setup->toeplitz63_32 = *key++;
5359 	fse_setup->toeplitz95_64 = *key++;
5360 	fse_setup->toeplitz127_96 = *key++;
5361 	fse_setup->toeplitz159_128 = *key++;
5362 	fse_setup->toeplitz191_160 = *key++;
5363 	fse_setup->toeplitz223_192 = *key++;
5364 	fse_setup->toeplitz255_224 = *key++;
5365 	fse_setup->toeplitz287_256 = *key++;
5366 	fse_setup->toeplitz314_288 = *key;
5367 
5368 	msg_word++;
5369 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
5370 	msg_word++;
5371 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
5372 	msg_word++;
5373 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
5374 	msg_word++;
5375 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
5376 	msg_word++;
5377 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
5378 	msg_word++;
5379 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
5380 	msg_word++;
5381 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
5382 	msg_word++;
5383 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
5384 	msg_word++;
5385 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
5386 	msg_word++;
5387 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
5388 					  fse_setup->toeplitz314_288);
5389 
5390 	pkt = htt_htc_pkt_alloc(soc);
5391 	if (!pkt) {
		qdf_err("Failed to allocate dp_htt_htc_pkt buffer");
5393 		qdf_assert(0);
5394 		qdf_nbuf_free(msg);
5395 		return QDF_STATUS_E_RESOURCES; /* failure */
5396 	}
5397 
5398 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5399 
5400 	SET_HTC_PACKET_INFO_TX(
5401 		&pkt->htc_pkt,
5402 		dp_htt_h2t_send_complete_free_netbuf,
5403 		qdf_nbuf_data(msg),
5404 		qdf_nbuf_len(msg),
5405 		soc->htc_endpoint,
5406 		/* tag for no FW response msg */
5407 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
5408 
5409 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5410 
5411 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
5412 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
5413 				     htt_logger_bufp);
5414 
5415 	if (status == QDF_STATUS_SUCCESS) {
5416 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
5417 			fse_setup_info->pdev_id);
5418 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
5419 				   (void *)fse_setup_info->hash_key,
5420 				   fse_setup_info->hash_key_len);
5421 	} else {
5422 		qdf_nbuf_free(msg);
5423 		htt_htc_pkt_free(soc, pkt);
5424 	}
5425 
5426 	return status;
5427 }
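
/*
 * Illustrative sketch (compiled out): populating the FST setup request.
 * The table geometry, DMA address handling and key length below are
 * assumptions for illustration (hash_key is assumed to be a pointer
 * member); a real caller fills these from its rx FST instance.
 */
#if 0
static QDF_STATUS example_fst_setup(struct dp_pdev *pdev,
				    uint64_t fst_base,
				    uint8_t *toeplitz_key)
{
	struct dp_htt_rx_flow_fst_setup info = {0};

	info.pdev_id = pdev->pdev_id;
	info.max_entries = 1024;		/* assumed table size */
	info.max_search = 8;			/* assumed search depth */
	info.base_addr_lo = fst_base & 0xffffffff;
	info.base_addr_hi = fst_base >> 32;
	info.ip_da_sa_prefix = 0;		/* assumed prefix config */
	info.hash_key = toeplitz_key;		/* Toeplitz key bytes */
	info.hash_key_len = 40;			/* assumed key length */

	return dp_htt_rx_flow_fst_setup(pdev, &info);
}
#endif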
5428 
5429 /**
5430  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
5431  * add/del a flow in HW
5432  * @pdev: DP pdev handle
5433  * @fse_op_info: Flow entry parameters
5434  *
5435  * Return: Success when HTT message is sent, error on failure
5436  */
5437 QDF_STATUS
5438 dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
5439 			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
5440 {
5441 	struct htt_soc *soc = pdev->soc->htt_handle;
5442 	struct dp_htt_htc_pkt *pkt;
5443 	qdf_nbuf_t msg;
5444 	u_int32_t *msg_word;
5445 	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
5446 	uint8_t *htt_logger_bufp;
5447 	QDF_STATUS status;
5448 
5449 	msg = qdf_nbuf_alloc(
5450 		soc->osdev,
5451 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
5452 		/* reserve room for the HTC header */
5453 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5454 	if (!msg)
5455 		return QDF_STATUS_E_NOMEM;
5456 
5457 	/*
5458 	 * Set the length of the message.
5459 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5460 	 * separately during the below call to qdf_nbuf_push_head.
5461 	 * The contribution from the HTC header is added separately inside HTC.
5462 	 */
5463 	if (!qdf_nbuf_put_tail(msg,
5464 			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
5465 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
5466 		qdf_nbuf_free(msg);
5467 		return QDF_STATUS_E_FAILURE;
5468 	}
5469 
5470 	/* fill in the message contents */
5471 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5472 
5473 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
5474 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5475 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5476 	htt_logger_bufp = (uint8_t *)msg_word;
5477 
5478 	*msg_word = 0;
5479 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
5480 
5481 	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
5482 
5483 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
5484 	msg_word++;
5485 	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
5486 	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
5487 		HTT_RX_FSE_OPERATION_SET(*msg_word,
5488 					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
5489 		msg_word++;
5490 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5491 		*msg_word,
5492 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
5493 		msg_word++;
5494 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5495 		*msg_word,
5496 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
5497 		msg_word++;
5498 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5499 		*msg_word,
5500 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
5501 		msg_word++;
5502 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5503 		*msg_word,
5504 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
5505 		msg_word++;
5506 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5507 		*msg_word,
5508 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
5509 		msg_word++;
5510 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5511 		*msg_word,
5512 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
5513 		msg_word++;
5514 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5515 		*msg_word,
5516 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
5517 		msg_word++;
5518 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5519 		*msg_word,
5520 		qdf_htonl(
5521 		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
5522 		msg_word++;
5523 		HTT_RX_FSE_SOURCEPORT_SET(
5524 			*msg_word,
5525 			fse_op_info->rx_flow->flow_tuple_info.src_port);
5526 		HTT_RX_FSE_DESTPORT_SET(
5527 			*msg_word,
5528 			fse_op_info->rx_flow->flow_tuple_info.dest_port);
5529 		msg_word++;
5530 		HTT_RX_FSE_L4_PROTO_SET(
5531 			*msg_word,
5532 			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
5533 	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
5534 		HTT_RX_FSE_OPERATION_SET(*msg_word,
5535 					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
5536 	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
5537 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
5538 	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
5539 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
5540 	}
5541 
5542 	pkt = htt_htc_pkt_alloc(soc);
5543 	if (!pkt) {
		qdf_err("Failed to allocate dp_htt_htc_pkt buffer");
5545 		qdf_assert(0);
5546 		qdf_nbuf_free(msg);
5547 		return QDF_STATUS_E_RESOURCES; /* failure */
5548 	}
5549 
5550 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5551 
5552 	SET_HTC_PACKET_INFO_TX(
5553 		&pkt->htc_pkt,
5554 		dp_htt_h2t_send_complete_free_netbuf,
5555 		qdf_nbuf_data(msg),
5556 		qdf_nbuf_len(msg),
5557 		soc->htc_endpoint,
5558 		/* tag for no FW response msg */
5559 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
5560 
5561 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5562 
5563 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
5564 				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
5565 				     htt_logger_bufp);
5566 
5567 	if (status == QDF_STATUS_SUCCESS) {
5568 		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
5569 			fse_op_info->pdev_id);
5570 	} else {
5571 		qdf_nbuf_free(msg);
5572 		htt_htc_pkt_free(soc, pkt);
5573 	}
5574 
5575 	return status;
5576 }
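
/*
 * Illustrative sketch (compiled out): requesting a full FSE cache
 * invalidate. For this opcode only op_code and pdev_id are consumed by
 * the sender above; rx_flow is left NULL.
 */
#if 0
static QDF_STATUS example_fse_cache_flush(struct dp_pdev *pdev)
{
	struct dp_htt_rx_flow_fst_operation op = {0};

	op.pdev_id = pdev->pdev_id;
	op.op_code = DP_HTT_FST_CACHE_INVALIDATE_FULL;
	op.rx_flow = NULL;

	return dp_htt_rx_flow_fse_operation(pdev, &op);
}
#endif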
5577 
5578 /**
5579  * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
5580  * @pdev: DP pdev handle
 * @fisa_config: FISA configuration parameters
5582  *
5583  * Return: Success when HTT message is sent, error on failure
5584  */
5585 QDF_STATUS
5586 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
5587 		      struct dp_htt_rx_fisa_cfg *fisa_config)
5588 {
5589 	struct htt_soc *soc = pdev->soc->htt_handle;
5590 	struct dp_htt_htc_pkt *pkt;
5591 	qdf_nbuf_t msg;
5592 	u_int32_t *msg_word;
5593 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
5594 	uint8_t *htt_logger_bufp;
5595 	uint32_t len;
5596 	QDF_STATUS status;
5597 
5598 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
5599 
5600 	msg = qdf_nbuf_alloc(soc->osdev,
5601 			     len,
5602 			     /* reserve room for the HTC header */
5603 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
5604 			     4,
5605 			     TRUE);
5606 	if (!msg)
5607 		return QDF_STATUS_E_NOMEM;
5608 
5609 	/*
5610 	 * Set the length of the message.
5611 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5612 	 * separately during the below call to qdf_nbuf_push_head.
5613 	 * The contribution from the HTC header is added separately inside HTC.
5614 	 */
5615 	if (!qdf_nbuf_put_tail(msg,
5616 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
		qdf_err("Failed to expand head for HTT_RX_FISA_CFG msg");
5618 		qdf_nbuf_free(msg);
5619 		return QDF_STATUS_E_FAILURE;
5620 	}
5621 
5622 	/* fill in the message contents */
5623 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5624 
5625 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
5626 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5627 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5628 	htt_logger_bufp = (uint8_t *)msg_word;
5629 
5630 	*msg_word = 0;
5631 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
5632 
5633 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
5634 
	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fisa_config->pdev_id);
5636 
5637 	msg_word++;
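	/* word 1: enable FISA v2 aggregation; 0xf programs the
	 * aggregation-limit field
	 */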
5638 	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
5639 	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);
5640 
5641 	msg_word++;
5642 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
5643 
5644 	pkt = htt_htc_pkt_alloc(soc);
5645 	if (!pkt) {
		qdf_err("Failed to allocate dp_htt_htc_pkt buffer");
5647 		qdf_assert(0);
5648 		qdf_nbuf_free(msg);
5649 		return QDF_STATUS_E_RESOURCES; /* failure */
5650 	}
5651 
5652 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5653 
5654 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5655 			       dp_htt_h2t_send_complete_free_netbuf,
5656 			       qdf_nbuf_data(msg),
5657 			       qdf_nbuf_len(msg),
5658 			       soc->htc_endpoint,
5659 			       /* tag for no FW response msg */
5660 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
5661 
5662 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5663 
5664 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
5665 				     htt_logger_bufp);
5666 
5667 	if (status == QDF_STATUS_SUCCESS) {
5668 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
5669 			fisa_config->pdev_id);
5670 	} else {
5671 		qdf_nbuf_free(msg);
5672 		htt_htc_pkt_free(soc, pkt);
5673 	}
5674 
5675 	return status;
5676 }
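
/*
 * Illustrative sketch (compiled out): enabling FISA on a pdev. The
 * timeout value is an assumption for illustration; real callers take it
 * from their wlan config context.
 */
#if 0
static QDF_STATUS example_fisa_enable(struct dp_pdev *pdev)
{
	struct dp_htt_rx_fisa_cfg cfg = {0};

	cfg.pdev_id = pdev->pdev_id;
	cfg.fisa_timeout = 1000;	/* assumed aggregation timeout */

	return dp_htt_rx_fisa_config(pdev, &cfg);
}
#endif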
5677