xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision a86b23ee68a2491aede2e03991f3fb37046f4e41)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_rx.h"
26 #include "dp_rx_mon.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "dp_htt.h"
30 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
31 #include "cdp_txrx_cmn_struct.h"
32 
33 #ifdef FEATURE_PERPKT_INFO
34 #include "dp_ratetable.h"
35 #endif
36 
37 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
38 
39 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
40 
41 #define HTT_MSG_BUF_SIZE(msg_bytes) \
42 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
43 
44 #define HTT_PID_BIT_MASK 0x3
45 
46 #define DP_EXT_MSG_LENGTH 2048
47 
48 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
49 
50 #define HTT_SHIFT_UPPER_TIMESTAMP 32
51 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
52 
53 /*
54  * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
55  * bitmap for sniffer mode
56  * @bitmap: received bitmap
57  *
58  * Return: expected bitmap value, returns zero if doesn't match with
59  * either 64-bit Tx window or 256-bit window tlv bitmap
60  */
61 int
62 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
63 {
64 	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
65 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
66 	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
67 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
68 
69 	return 0;
70 }
71 
72 #ifdef FEATURE_PERPKT_INFO
73 /*
74  * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
75  * @peer: Datapath peer handle
76  * @ppdu: User PPDU Descriptor
77  * @cur_ppdu_id: PPDU_ID
78  *
79  * Return: None
80  *
81  * on Tx data frame, we may get delayed ba set
82  * in htt_ppdu_stats_user_common_tlv. which mean we get Block Ack(BA) after we
83  * request Block Ack Request(BAR). Successful msdu is received only after Block
84  * Ack. To populate peer stats we need successful msdu(data frame).
85  * So we hold the Tx data stats on delayed_ba for stats update.
86  */
87 static void
88 dp_peer_copy_delay_stats(struct dp_peer *peer,
89 			 struct cdp_tx_completion_ppdu_user *ppdu,
90 			 uint32_t cur_ppdu_id)
91 {
92 	struct dp_pdev *pdev;
93 	struct dp_vdev *vdev;
94 
95 	if (!peer->last_delayed_ba_ppduid || !cur_ppdu_id)
96 		return;
97 
98 	if (peer->last_delayed_ba) {
99 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
100 			  "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]",
101 			  peer->last_delayed_ba_ppduid, cur_ppdu_id);
102 		vdev = peer->vdev;
103 		if (vdev) {
104 			pdev = vdev->pdev;
105 			pdev->stats.cdp_delayed_ba_not_recev++;
106 		}
107 	}
108 
109 	peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
110 	peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
111 	peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
112 	peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
113 	peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
114 	peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
115 	peer->delayed_ba_ppdu_stats.preamble = ppdu->preamble;
116 	peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
117 	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
118 	peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
119 	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
120 	peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = ppdu->mpdu_tried_ucast;
121 	peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = ppdu->mpdu_tried_mcast;
122 	peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
123 	peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
124 	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
125 
126 	peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
127 	peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
128 	peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
129 
130 	peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
131 	peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
132 
133 	peer->last_delayed_ba = true;
134 }
135 
136 /*
137  * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
138  * @peer: Datapath peer handle
139  * @ppdu: PPDU Descriptor
140  *
141  * Return: None
142  *
143  * For Tx BAR, PPDU stats TLV include Block Ack info. PPDU info
144  * from Tx BAR frame not required to populate peer stats.
145  * But we need successful MPDU and MSDU to update previous
146  * transmitted Tx data frame. Overwrite ppdu stats with the previous
147  * stored ppdu stats.
148  */
149 static void
150 dp_peer_copy_stats_to_bar(struct dp_peer *peer,
151 			  struct cdp_tx_completion_ppdu_user *ppdu)
152 {
153 	ppdu->ltf_size = peer->delayed_ba_ppdu_stats.ltf_size;
154 	ppdu->stbc = peer->delayed_ba_ppdu_stats.stbc;
155 	ppdu->he_re = peer->delayed_ba_ppdu_stats.he_re;
156 	ppdu->txbf = peer->delayed_ba_ppdu_stats.txbf;
157 	ppdu->bw = peer->delayed_ba_ppdu_stats.bw;
158 	ppdu->nss = peer->delayed_ba_ppdu_stats.nss;
159 	ppdu->preamble = peer->delayed_ba_ppdu_stats.preamble;
160 	ppdu->gi = peer->delayed_ba_ppdu_stats.gi;
161 	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
162 	ppdu->ldpc = peer->delayed_ba_ppdu_stats.ldpc;
163 	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
164 	ppdu->mpdu_tried_ucast = peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
165 	ppdu->mpdu_tried_mcast = peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
166 	ppdu->frame_ctrl = peer->delayed_ba_ppdu_stats.frame_ctrl;
167 	ppdu->qos_ctrl = peer->delayed_ba_ppdu_stats.qos_ctrl;
168 	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
169 
170 	ppdu->ru_start = peer->delayed_ba_ppdu_stats.ru_start;
171 	ppdu->ru_tones = peer->delayed_ba_ppdu_stats.ru_tones;
172 	ppdu->is_mcast = peer->delayed_ba_ppdu_stats.is_mcast;
173 
174 	ppdu->user_pos = peer->delayed_ba_ppdu_stats.user_pos;
175 	ppdu->mu_group_id = peer->delayed_ba_ppdu_stats.mu_group_id;
176 
177 	peer->last_delayed_ba = false;
178 }
179 
/*
 * dp_tx_rate_stats_update() - Update rate per-peer statistics
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 */
static void
dp_tx_rate_stats_update(struct dp_peer *peer,
			struct cdp_tx_completion_ppdu_user *ppdu)
{
	uint32_t ratekbps = 0;
	uint64_t ppdu_tx_rate = 0;
	uint32_t rix;
	uint16_t ratecode = 0;

	if (!peer || !ppdu)
		return;

	/* Only rate-account PPDUs the target marked as completed OK */
	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
		return;

	/* Convert (gi, mcs, nss, preamble, bw) into a kbps rate;
	 * rix and ratecode are filled in as side outputs.
	 */
	ratekbps = dp_getrateindex(ppdu->gi,
				   ppdu->mcs,
				   ppdu->nss,
				   ppdu->preamble,
				   ppdu->bw,
				   &rix,
				   &ratecode);

	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);

	/* Rate lookup failed - nothing more to derive from it */
	if (!ratekbps)
		return;

	/* Calculate goodput in non-training period
	 * In training period, don't do anything as
	 * pending pkt is send as goodput.
	 */
	if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
		ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
				(CDP_PERCENT_MACRO - ppdu->current_rate_per));
	}
	ppdu->rix = rix;
	ppdu->tx_ratekbps = ratekbps;
	ppdu->tx_ratecode = ratecode;
	/* Fold the new sample into the low-pass-filtered average, then
	 * round it for the reported rnd_avg_tx_rate.
	 */
	peer->stats.tx.avg_tx_rate =
		dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);

	if (peer->vdev) {
		/*
		 * In STA mode:
		 *	We get ucast stats as BSS peer stats.
		 *
		 * In AP mode:
		 *	We get mcast stats as BSS peer stats.
		 *	We get ucast stats as assoc peer stats.
		 */
		if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
		} else {
			peer->vdev->stats.tx.last_tx_rate = ratekbps;
			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
		}
	}
}
249 
250 /*
251  * dp_tx_stats_update() - Update per-peer statistics
252  * @pdev: Datapath pdev handle
253  * @peer: Datapath peer handle
254  * @ppdu: PPDU Descriptor
255  * @ack_rssi: RSSI of last ack received
256  *
257  * Return: None
258  */
259 static void
260 dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
261 		   struct cdp_tx_completion_ppdu_user *ppdu,
262 		   uint32_t ack_rssi)
263 {
264 	uint8_t preamble, mcs;
265 	uint16_t num_msdu;
266 	uint16_t num_mpdu;
267 	uint16_t mpdu_tried;
268 	uint16_t mpdu_failed;
269 
270 	preamble = ppdu->preamble;
271 	mcs = ppdu->mcs;
272 	num_msdu = ppdu->num_msdu;
273 	num_mpdu = ppdu->mpdu_success;
274 	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
275 	mpdu_failed = mpdu_tried - num_mpdu;
276 
277 	/* If the peer statistics are already processed as part of
278 	 * per-MSDU completion handler, do not process these again in per-PPDU
279 	 * indications */
280 	if (pdev->soc->process_tx_status)
281 		return;
282 
283 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
284 		/*
285 		 * All failed mpdu will be retried, so incrementing
286 		 * retries mpdu based on mpdu failed. Even for
287 		 * ack failure i.e for long retries we get
288 		 * mpdu failed equal mpdu tried.
289 		 */
290 		DP_STATS_INC(peer, tx.retries, mpdu_failed);
291 		DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
292 		return;
293 	}
294 
295 	if (ppdu->is_ppdu_cookie_valid)
296 		DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1);
297 
298 	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
299 	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
300 		if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
301 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
302 				  "mu_group_id out of bound!!\n");
303 		else
304 			DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
305 				     (ppdu->user_pos + 1));
306 	}
307 
308 	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
309 	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
310 		DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
311 		DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
312 		switch (ppdu->ru_tones) {
313 		case RU_26:
314 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu,
315 				     num_msdu);
316 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu,
317 				     num_mpdu);
318 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried,
319 				     mpdu_tried);
320 		break;
321 		case RU_52:
322 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu,
323 				     num_msdu);
324 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu,
325 				     num_mpdu);
326 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried,
327 				     mpdu_tried);
328 		break;
329 		case RU_106:
330 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu,
331 				     num_msdu);
332 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu,
333 				     num_mpdu);
334 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried,
335 				     mpdu_tried);
336 		break;
337 		case RU_242:
338 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu,
339 				     num_msdu);
340 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu,
341 				     num_mpdu);
342 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried,
343 				     mpdu_tried);
344 		break;
345 		case RU_484:
346 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu,
347 				     num_msdu);
348 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu,
349 				     num_mpdu);
350 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried,
351 				     mpdu_tried);
352 		break;
353 		case RU_996:
354 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu,
355 				     num_msdu);
356 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu,
357 				     num_mpdu);
358 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried,
359 				     mpdu_tried);
360 		break;
361 		}
362 	}
363 
364 	/*
365 	 * All failed mpdu will be retried, so incrementing
366 	 * retries mpdu based on mpdu failed. Even for
367 	 * ack failure i.e for long retries we get
368 	 * mpdu failed equal mpdu tried.
369 	 */
370 	DP_STATS_INC(peer, tx.retries, mpdu_failed);
371 	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
372 
373 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
374 		     num_msdu);
375 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
376 		     num_mpdu);
377 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
378 		     mpdu_tried);
379 
380 	DP_STATS_INC_PKT(peer, tx.comp_pkt,
381 			num_msdu, (ppdu->success_bytes +
382 				ppdu->retry_bytes + ppdu->failed_bytes));
383 	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
384 	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
385 	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
386 	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
387 	if (ppdu->tid < CDP_DATA_TID_MAX)
388 		DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
389 			     num_msdu);
390 	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
391 	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
392 	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
393 		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);
394 
395 	DP_STATS_INCC(peer,
396 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
397 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
398 	DP_STATS_INCC(peer,
399 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
400 			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
401 	DP_STATS_INCC(peer,
402 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
403 			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
404 	DP_STATS_INCC(peer,
405 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
406 			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
407 	DP_STATS_INCC(peer,
408 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
409 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
410 	DP_STATS_INCC(peer,
411 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
412 			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
413 	DP_STATS_INCC(peer,
414 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
415 			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
416 	DP_STATS_INCC(peer,
417 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
418 			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
419 	DP_STATS_INCC(peer,
420 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
421 			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
422 	DP_STATS_INCC(peer,
423 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
424 			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
425 	DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
426 	DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
427 
428 	dp_peer_stats_notify(pdev, peer);
429 
430 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
431 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
432 			     &peer->stats, ppdu->peer_id,
433 			     UPDATE_PEER_STATS, pdev->pdev_id);
434 #endif
435 }
436 #endif
437 
438 #ifdef WLAN_TX_PKT_CAPTURE_ENH
439 #include "dp_tx_capture.h"
440 #else
/*
 * dp_process_ppdu_stats_update_failed_bitmap() - no-op stub used when
 * WLAN_TX_PKT_CAPTURE_ENH is disabled; the real implementation is
 * provided by dp_tx_capture.h in the #ifdef branch above.
 */
static inline void
dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
					   void *data,
					   uint32_t ppdu_id,
					   uint32_t size)
{
}
448 #endif
449 
450 /*
451  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
452  * @htt_soc:	HTT SOC handle
453  *
454  * Return: Pointer to htc packet buffer
455  */
456 static struct dp_htt_htc_pkt *
457 htt_htc_pkt_alloc(struct htt_soc *soc)
458 {
459 	struct dp_htt_htc_pkt_union *pkt = NULL;
460 
461 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
462 	if (soc->htt_htc_pkt_freelist) {
463 		pkt = soc->htt_htc_pkt_freelist;
464 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
465 	}
466 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
467 
468 	if (!pkt)
469 		pkt = qdf_mem_malloc(sizeof(*pkt));
470 	return &pkt->u.pkt; /* not actually a dereference */
471 }
472 
473 /*
474  * htt_htc_pkt_free() - Free HTC packet buffer
475  * @htt_soc:	HTT SOC handle
476  */
477 static void
478 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
479 {
480 	struct dp_htt_htc_pkt_union *u_pkt =
481 		(struct dp_htt_htc_pkt_union *)pkt;
482 
483 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
484 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
485 	soc->htt_htc_pkt_freelist = u_pkt;
486 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
487 }
488 
489 /*
490  * htt_htc_pkt_pool_free() - Free HTC packet pool
491  * @htt_soc:	HTT SOC handle
492  */
493 void
494 htt_htc_pkt_pool_free(struct htt_soc *soc)
495 {
496 	struct dp_htt_htc_pkt_union *pkt, *next;
497 	pkt = soc->htt_htc_pkt_freelist;
498 	while (pkt) {
499 		next = pkt->u.next;
500 		qdf_mem_free(pkt);
501 		pkt = next;
502 	}
503 	soc->htt_htc_pkt_freelist = NULL;
504 }
505 
/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no. of pkts in list
 *
 * Walks the misclist; every node beyond the first @level entries is
 * freed, along with its attached (DMA-mapped) network buffer, and the
 * kept portion of the list is NULL-terminated.
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			/* pkt is reset so prev stays NULL from here on;
			 * the terminator below is only written once, at
			 * the boundary between kept and trimmed nodes.
			 */
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
538 
539 /*
540  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
541  * @htt_soc:	HTT SOC handle
542  * @dp_htt_htc_pkt: pkt to be added to list
543  */
544 static void
545 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
546 {
547 	struct dp_htt_htc_pkt_union *u_pkt =
548 				(struct dp_htt_htc_pkt_union *)pkt;
549 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
550 							pkt->htc_pkt.Endpoint)
551 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
552 
553 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
554 	if (soc->htt_htc_pkt_misclist) {
555 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
556 		soc->htt_htc_pkt_misclist = u_pkt;
557 	} else {
558 		soc->htt_htc_pkt_misclist = u_pkt;
559 	}
560 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
561 
562 	/* only ce pipe size + tx_queue_depth could possibly be in use
563 	 * free older packets in the misclist
564 	 */
565 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
566 }
567 
568 /**
569  * DP_HTT_SEND_HTC_PKT() - Send htt packet from host
570  * @soc : HTT SOC handle
571  * @pkt: pkt to be send
572  * @cmd : command to be recorded in dp htt logger
573  * @buf : Pointer to buffer needs to be recored for above cmd
574  *
575  * Return: None
576  */
577 static inline QDF_STATUS DP_HTT_SEND_HTC_PKT(struct htt_soc *soc,
578 					     struct dp_htt_htc_pkt *pkt,
579 					     uint8_t cmd, uint8_t *buf)
580 {
581 	QDF_STATUS status;
582 
583 	htt_command_record(soc->htt_logger_handle, cmd, buf);
584 
585 	status = htc_send_pkt(soc->htc_soc, &pkt->htc_pkt);
586 	if (status == QDF_STATUS_SUCCESS)
587 		htt_htc_misc_pkt_list_add(soc, pkt);
588 
589 	return status;
590 }
591 
592 /*
593  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
594  * @htt_soc:	HTT SOC handle
595  */
596 static void
597 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
598 {
599 	struct dp_htt_htc_pkt_union *pkt, *next;
600 	qdf_nbuf_t netbuf;
601 
602 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
603 	pkt = soc->htt_htc_pkt_misclist;
604 
605 	while (pkt) {
606 		next = pkt->u.next;
607 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
608 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
609 
610 		soc->stats.htc_pkt_free++;
611 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
612 			 "%s: Pkt free count %d",
613 			 __func__, soc->stats.htc_pkt_free);
614 
615 		qdf_nbuf_free(netbuf);
616 		qdf_mem_free(pkt);
617 		pkt = next;
618 	}
619 	soc->htt_htc_pkt_misclist = NULL;
620 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
621 }
622 
/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
 * @tgt_mac_addr:	Target MAC
 * @buffer:		Output buffer
 *
 * Return: @buffer with the deswizzled MAC on big-endian hosts,
 * otherwise @tgt_mac_addr unchanged.
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	int i;

	/*
	 * The target->host upload byte-swaps each u_int32_t word of the
	 * message, which scrambles byte-array fields like the MAC address.
	 * XOR-ing the index with 3 reverses the byte order within each
	 * 4-byte word (0..3 -> 3..0 and 4,5 -> 7,6), undoing the swizzle
	 * for all 6 MAC bytes.
	 */
	for (i = 0; i < 6; i++)
		buffer[i] = tgt_mac_addr[i ^ 3];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
656 
/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc:	SOC handle (unused; present to match the callback signature)
 * @status:	Completion status (unused; buffer is freed regardless)
 * @netbuf:	HTT buffer to release
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
669 
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context:	Opaque context (HTT SOC handle)
 * @htc_pkt:	HTC packet
 *
 * Called by HTC when a host->target message completes: runs the
 * optional second-stage completion callback stored in pPktContext,
 * then recycles the dp_htt_htc_pkt wrapper back to the freelist.
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*send_complete_part2)(
		void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	send_complete_part2 = htc_pkt->pPktContext;

	/* recover the wrapper object that embeds this HTC_PACKET */
	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2) {
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}
701 
702 /*
703  * htt_h2t_ver_req_msg() - Send HTT version request message to target
704  * @htt_soc:	HTT SOC handle
705  *
706  * Return: 0 on success; error code on failure
707  */
708 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
709 {
710 	struct dp_htt_htc_pkt *pkt;
711 	qdf_nbuf_t msg;
712 	uint32_t *msg_word;
713 	QDF_STATUS status;
714 
715 	msg = qdf_nbuf_alloc(
716 		soc->osdev,
717 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
718 		/* reserve room for the HTC header */
719 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
720 	if (!msg)
721 		return QDF_STATUS_E_NOMEM;
722 
723 	/*
724 	 * Set the length of the message.
725 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
726 	 * separately during the below call to qdf_nbuf_push_head.
727 	 * The contribution from the HTC header is added separately inside HTC.
728 	 */
729 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
730 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
731 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
732 			__func__);
733 		return QDF_STATUS_E_FAILURE;
734 	}
735 
736 	/* fill in the message contents */
737 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
738 
739 	/* rewind beyond alignment pad to get to the HTC header reserved area */
740 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
741 
742 	*msg_word = 0;
743 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
744 
745 	pkt = htt_htc_pkt_alloc(soc);
746 	if (!pkt) {
747 		qdf_nbuf_free(msg);
748 		return QDF_STATUS_E_FAILURE;
749 	}
750 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
751 
752 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
753 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
754 		qdf_nbuf_len(msg), soc->htc_endpoint,
755 		1); /* tag - not relevant here */
756 
757 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
758 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
759 				     NULL);
760 
761 	if (status != QDF_STATUS_SUCCESS) {
762 		qdf_nbuf_free(msg);
763 		htt_htc_pkt_free(soc, pkt);
764 	}
765 
766 	return status;
767 }
768 
769 /*
770  * htt_srng_setup() - Send SRNG setup message to target
771  * @htt_soc:	HTT SOC handle
772  * @mac_id:	MAC Id
773  * @hal_srng:	Opaque HAL SRNG pointer
774  * @hal_ring_type:	SRNG ring type
775  *
776  * Return: 0 on success; error code on failure
777  */
778 int htt_srng_setup(struct htt_soc *soc, int mac_id,
779 		   hal_ring_handle_t hal_ring_hdl,
780 		   int hal_ring_type)
781 {
782 	struct dp_htt_htc_pkt *pkt;
783 	qdf_nbuf_t htt_msg;
784 	uint32_t *msg_word;
785 	struct hal_srng_params srng_params;
786 	qdf_dma_addr_t hp_addr, tp_addr;
787 	uint32_t ring_entry_size =
788 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
789 	int htt_ring_type, htt_ring_id;
790 	uint8_t *htt_logger_bufp;
791 	int target_pdev_id;
792 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
793 	QDF_STATUS status;
794 
795 	/* Sizes should be set in 4-byte words */
796 	ring_entry_size = ring_entry_size >> 2;
797 
798 	htt_msg = qdf_nbuf_alloc(soc->osdev,
799 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
800 		/* reserve room for the HTC header */
801 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
802 	if (!htt_msg)
803 		goto fail0;
804 
805 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
806 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
807 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
808 
809 	switch (hal_ring_type) {
810 	case RXDMA_BUF:
811 #ifdef QCA_HOST2FW_RXBUF_RING
812 		if (srng_params.ring_id ==
813 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
814 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
815 			htt_ring_type = HTT_SW_TO_SW_RING;
816 #ifdef IPA_OFFLOAD
817 		} else if (srng_params.ring_id ==
818 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
819 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
820 			htt_ring_type = HTT_SW_TO_SW_RING;
821 #endif
822 #else
823 		if (srng_params.ring_id ==
824 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
825 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
826 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
827 			htt_ring_type = HTT_SW_TO_HW_RING;
828 #endif
829 		} else if (srng_params.ring_id ==
830 #ifdef IPA_OFFLOAD
831 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
832 #else
833 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
834 #endif
835 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
836 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
837 			htt_ring_type = HTT_SW_TO_HW_RING;
838 		} else {
839 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
840 				   "%s: Ring %d currently not supported",
841 				   __func__, srng_params.ring_id);
842 			goto fail1;
843 		}
844 
845 		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
846 			hal_ring_type, srng_params.ring_id, htt_ring_id,
847 			(uint64_t)hp_addr,
848 			(uint64_t)tp_addr);
849 		break;
850 	case RXDMA_MONITOR_BUF:
851 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
852 		htt_ring_type = HTT_SW_TO_HW_RING;
853 		break;
854 	case RXDMA_MONITOR_STATUS:
855 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
856 		htt_ring_type = HTT_SW_TO_HW_RING;
857 		break;
858 	case RXDMA_MONITOR_DST:
859 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
860 		htt_ring_type = HTT_HW_TO_SW_RING;
861 		break;
862 	case RXDMA_MONITOR_DESC:
863 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
864 		htt_ring_type = HTT_SW_TO_HW_RING;
865 		break;
866 	case RXDMA_DST:
867 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
868 		htt_ring_type = HTT_HW_TO_SW_RING;
869 		break;
870 
871 	default:
872 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
873 			"%s: Ring currently not supported", __func__);
874 			goto fail1;
875 	}
876 
877 	/*
878 	 * Set the length of the message.
879 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
880 	 * separately during the below call to qdf_nbuf_push_head.
881 	 * The contribution from the HTC header is added separately inside HTC.
882 	 */
883 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
884 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
885 			"%s: Failed to expand head for SRING_SETUP msg",
886 			__func__);
887 		return QDF_STATUS_E_FAILURE;
888 	}
889 
890 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
891 
892 	/* rewind beyond alignment pad to get to the HTC header reserved area */
893 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
894 
895 	/* word 0 */
896 	*msg_word = 0;
897 	htt_logger_bufp = (uint8_t *)msg_word;
898 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
899 	target_pdev_id =
900 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
901 
902 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
903 			(htt_ring_type == HTT_HW_TO_SW_RING))
904 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
905 	else
906 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
907 
908 	dp_info("%s: mac_id %d", __func__, mac_id);
909 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
910 	/* TODO: Discuss with FW on changing this to unique ID and using
911 	 * htt_ring_type to send the type of ring
912 	 */
913 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
914 
915 	/* word 1 */
916 	msg_word++;
917 	*msg_word = 0;
918 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
919 		srng_params.ring_base_paddr & 0xffffffff);
920 
921 	/* word 2 */
922 	msg_word++;
923 	*msg_word = 0;
924 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
925 		(uint64_t)srng_params.ring_base_paddr >> 32);
926 
927 	/* word 3 */
928 	msg_word++;
929 	*msg_word = 0;
930 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
931 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
932 		(ring_entry_size * srng_params.num_entries));
933 	dp_info("%s: entry_size %d", __func__, ring_entry_size);
934 	dp_info("%s: num_entries %d", __func__, srng_params.num_entries);
935 	dp_info("%s: ring_size %d", __func__,
936 		(ring_entry_size * srng_params.num_entries));
937 	if (htt_ring_type == HTT_SW_TO_HW_RING)
938 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
939 						*msg_word, 1);
940 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
941 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
942 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
943 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
944 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
945 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
946 
947 	/* word 4 */
948 	msg_word++;
949 	*msg_word = 0;
950 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
951 		hp_addr & 0xffffffff);
952 
953 	/* word 5 */
954 	msg_word++;
955 	*msg_word = 0;
956 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
957 		(uint64_t)hp_addr >> 32);
958 
959 	/* word 6 */
960 	msg_word++;
961 	*msg_word = 0;
962 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
963 		tp_addr & 0xffffffff);
964 
965 	/* word 7 */
966 	msg_word++;
967 	*msg_word = 0;
968 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
969 		(uint64_t)tp_addr >> 32);
970 
971 	/* word 8 */
972 	msg_word++;
973 	*msg_word = 0;
974 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
975 		srng_params.msi_addr & 0xffffffff);
976 
977 	/* word 9 */
978 	msg_word++;
979 	*msg_word = 0;
980 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
981 		(uint64_t)(srng_params.msi_addr) >> 32);
982 
983 	/* word 10 */
984 	msg_word++;
985 	*msg_word = 0;
986 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
987 		srng_params.msi_data);
988 
989 	/* word 11 */
990 	msg_word++;
991 	*msg_word = 0;
992 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
993 		srng_params.intr_batch_cntr_thres_entries *
994 		ring_entry_size);
995 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
996 		srng_params.intr_timer_thres_us >> 3);
997 
998 	/* word 12 */
999 	msg_word++;
1000 	*msg_word = 0;
1001 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
1002 		/* TODO: Setting low threshold to 1/8th of ring size - see
1003 		 * if this needs to be configurable
1004 		 */
1005 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
1006 			srng_params.low_threshold);
1007 	}
1008 	/* "response_required" field should be set if a HTT response message is
1009 	 * required after setting up the ring.
1010 	 */
1011 	pkt = htt_htc_pkt_alloc(soc);
1012 	if (!pkt)
1013 		goto fail1;
1014 
1015 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1016 
1017 	SET_HTC_PACKET_INFO_TX(
1018 		&pkt->htc_pkt,
1019 		dp_htt_h2t_send_complete_free_netbuf,
1020 		qdf_nbuf_data(htt_msg),
1021 		qdf_nbuf_len(htt_msg),
1022 		soc->htc_endpoint,
1023 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1024 
1025 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1026 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
1027 				     htt_logger_bufp);
1028 
1029 	if (status != QDF_STATUS_SUCCESS) {
1030 		qdf_nbuf_free(htt_msg);
1031 		htt_htc_pkt_free(soc, pkt);
1032 	}
1033 
1034 	return status;
1035 
1036 fail1:
1037 	qdf_nbuf_free(htt_msg);
1038 fail0:
1039 	return QDF_STATUS_E_FAILURE;
1040 }
1041 
1042 #ifdef QCA_SUPPORT_FULL_MON
1043 /**
1044  * htt_h2t_full_mon_cfg() - Send full monitor configuarion msg to FW
1045  *
1046  * @htt_soc: HTT Soc handle
1047  * @pdev_id: Radio id
1048  * @dp_full_mon_config: enabled/disable configuration
1049  *
1050  * Return: Success when HTT message is sent, error on failure
1051  */
1052 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
1053 			 uint8_t pdev_id,
1054 			 enum dp_full_mon_config config)
1055 {
1056 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
1057 	struct dp_htt_htc_pkt *pkt;
1058 	qdf_nbuf_t htt_msg;
1059 	uint32_t *msg_word;
1060 	uint8_t *htt_logger_bufp;
1061 
1062 	htt_msg = qdf_nbuf_alloc(soc->osdev,
1063 				 HTT_MSG_BUF_SIZE(
1064 				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
1065 				 /* reserve room for the HTC header */
1066 				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
1067 				 4,
1068 				 TRUE);
1069 	if (!htt_msg)
1070 		return QDF_STATUS_E_FAILURE;
1071 
1072 	/*
1073 	 * Set the length of the message.
1074 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1075 	 * separately during the below call to qdf_nbuf_push_head.
1076 	 * The contribution from the HTC header is added separately inside HTC.
1077 	 */
1078 	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ)) {
1079 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1080 			  "%s: Failed to expand head for RX Ring Cfg msg",
1081 			  __func__);
1082 		goto fail1;
1083 	}
1084 
1085 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1086 
1087 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1088 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1089 
1090 	/* word 0 */
1091 	*msg_word = 0;
1092 	htt_logger_bufp = (uint8_t *)msg_word;
1093 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
1094 	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
1095 			*msg_word, DP_SW2HW_MACID(pdev_id));
1096 
1097 	msg_word++;
1098 	*msg_word = 0;
1099 	/* word 1 */
1100 	if (config == DP_FULL_MON_ENABLE) {
1101 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
1102 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
1103 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
1104 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
1105 	} else if (config == DP_FULL_MON_DISABLE) {
1106 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, false);
1107 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
1108 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
1109 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
1110 	}
1111 
1112 	pkt = htt_htc_pkt_alloc(soc);
1113 	if (!pkt) {
1114 		qdf_err("HTC packet allocation failed");
1115 		goto fail1;
1116 	}
1117 
1118 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1119 
1120 	SET_HTC_PACKET_INFO_TX(
1121 		&pkt->htc_pkt,
1122 		dp_htt_h2t_send_complete_free_netbuf,
1123 		qdf_nbuf_data(htt_msg),
1124 		qdf_nbuf_len(htt_msg),
1125 		soc->htc_endpoint,
1126 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1127 
1128 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1129 	qdf_info("config: %d", config);
1130 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
1131 			    htt_logger_bufp);
1132 	return QDF_STATUS_SUCCESS;
1133 fail1:
1134 	qdf_nbuf_free(htt_msg);
1135 	return QDF_STATUS_E_FAILURE;
1136 }
1137 #else
1138 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
1139 			 uint8_t pdev_id,
1140 			 enum dp_full_mon_config config)
1141 {
1142 	return 0;
1143 }
1144 
1145 #endif
1146 
1147 /*
1148  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
1149  * config message to target
1150  * @htt_soc:	HTT SOC handle
1151  * @pdev_id:	WIN- PDEV Id, MCL- mac id
1152  * @hal_srng:	Opaque HAL SRNG pointer
1153  * @hal_ring_type:	SRNG ring type
1154  * @ring_buf_size:	SRNG buffer size
1155  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
1156  * Return: 0 on success; error code on failure
1157  */
int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
			hal_ring_handle_t hal_ring_hdl,
			int hal_ring_type, int ring_buf_size,
			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t htt_msg;
	uint32_t *msg_word;
	struct hal_srng_params srng_params;
	uint32_t htt_ring_type, htt_ring_id;
	uint32_t tlv_filter;
	uint8_t *htt_logger_bufp;
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
	int target_pdev_id;
	QDF_STATUS status;

	htt_msg = qdf_nbuf_alloc(soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
	/* reserve room for the HTC header */
	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!htt_msg)
		goto fail0;

	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);

	/* Map the HAL ring type to the HTT ring id / ring direction pair
	 * that the firmware message format expects.
	 */
	switch (hal_ring_type) {
	case RXDMA_BUF:
		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_BUF:
		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_STATUS:
		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_DST:
		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case RXDMA_MONITOR_DESC:
		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_DST:
		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Ring currently not supported", __func__);
		goto fail1;
	}

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Failed to expand head for RX Ring Cfg msg",
			__func__);
		goto fail1; /* failure */
	}

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0: message type, pdev id, ring id, swap flags, valid bits */
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);

	/*
	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
	 */
	target_pdev_id =
	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);

	if (htt_ring_type == HTT_SW_TO_SW_RING ||
			htt_ring_type == HTT_SW_TO_HW_RING)
		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
						      target_pdev_id);

	/* TODO: Discuss with FW on changing this to unique ID and using
	 * htt_ring_type to send the type of ring
	 */
	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);

	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));

	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));

	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
						htt_tlv_filter->offset_valid);

	/* drop-threshold valid bit gates the threshold value set in the
	 * last message word below
	 */
	if (mon_drop_th > 0)
		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
								   1);
	else
		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
								   0);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
		ring_buf_size);

	/* word 2: per-subtype MGMT frame filter bits (subtypes 0000-1001)
	 * for the FP/MD/MO filter categories
	 * NOTE(review): FP/MD/MO presumably mean filter-pass, monitor-direct
	 * and monitor-other as elsewhere in this driver — confirm in htt.h.
	 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0000,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0001,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0010,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0011,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0100,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0101,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0110,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
			MGMT, 0111,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 1000,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 1001,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0000,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0001,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0010,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0011,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0100,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0101,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0110,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
			MGMT, 0111,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 1000,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 1001,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0000,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0001,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0010,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0011,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0100,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0101,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0110,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
			MGMT, 0111,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 1000,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 1001,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	/* word 3: MGMT frame filter bits for subtypes 1010-1111 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1010,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1011,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1100,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1101,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1110,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* reserved*/
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
			MGMT, 1111,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_RESERVED_15) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1010,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1011,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1100,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1101,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1110,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* NOTE(review): unlike the FP and MO branches, MD does not
		 * program the reserved MGMT subtype 1111 here — confirm this
		 * asymmetry is intentional.
		 */
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1010,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1011,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1100,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1101,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1110,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* reserved*/
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
			MGMT, 1111,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_RESERVED_15) ? 1 : 0);
	}

	/* word 4: CTRL frame filter bits for subtypes 0000-1001 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0000,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0001,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0010,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0011,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0100,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0101,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0110,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0111,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 1000,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 1001,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0000,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0001,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0010,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0011,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0100,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0101,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0110,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0111,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 1000,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 1001,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0000,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0001,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0010,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0011,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0100,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0101,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0110,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0111,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 1000,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 1001,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	/* word 5: CTRL subtypes 1010-1111 plus DATA mcast/ucast/null bits */
	msg_word++;
	*msg_word = 0;
	if (htt_tlv_filter->enable_fp) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1010,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1011,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1100,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1101,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1110,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1111,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, MCAST,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, UCAST,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, NULL,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1010,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1011,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1100,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1101,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1110,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1111,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, MCAST,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, UCAST,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, NULL,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1010,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1011,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1100,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1101,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1110,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1111,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, MCAST,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, UCAST,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, NULL,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	/* word 6: per-TLV subscription flags, accumulated in tlv_filter
	 * and written into the message word in one shot below
	 */
	msg_word++;
	*msg_word = 0;
	tlv_filter = 0;
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
		htt_tlv_filter->mpdu_start);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
		htt_tlv_filter->msdu_start);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
		htt_tlv_filter->packet);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
		htt_tlv_filter->msdu_end);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
		htt_tlv_filter->mpdu_end);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
		htt_tlv_filter->packet_header);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
		htt_tlv_filter->attention);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
		htt_tlv_filter->ppdu_start);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
		htt_tlv_filter->ppdu_end);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
		htt_tlv_filter->ppdu_end_user_stats);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
		PPDU_END_USER_STATS_EXT,
		htt_tlv_filter->ppdu_end_user_stats_ext);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
		htt_tlv_filter->ppdu_end_status_done);
	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
		 htt_tlv_filter->header_per_msdu);

	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);

	/* words 7-11: packet/TLV offsets (only meaningful when
	 * offset_valid was set in word 0); the final word also carries the
	 * monitor drop threshold when configured
	 */
	msg_word++;
	*msg_word = 0;
	if (htt_tlv_filter->offset_valid) {
		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_packet_offset);
		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_header_offset);

		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_mpdu_end_offset);
		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_mpdu_start_offset);

		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_msdu_end_offset);
		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_msdu_start_offset);

		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_attn_offset);
		msg_word++;
		*msg_word = 0;
	} else {
		/* skip over the four offset words, leaving them zero, and
		 * land on the word that holds the drop threshold
		 */
		msg_word += 4;
		*msg_word = 0;
	}

	if (mon_drop_th > 0)
		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
								mon_drop_th);

	/* "response_required" field should be set if a HTT response message is
	 * required after setting up the ring.
	 */
	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt)
		goto fail1;

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(htt_msg),
		qdf_nbuf_len(htt_msg),
		soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
				     htt_logger_bufp);

	/* on send failure the nbuf and HTC pkt are still owned here */
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(htt_msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;

fail1:
	qdf_nbuf_free(htt_msg);
fail0:
	return QDF_STATUS_E_FAILURE;
}
1881 
1882 #if defined(HTT_STATS_ENABLE)
1883 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1884 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1885 
1886 {
1887 	uint32_t pdev_id;
1888 	uint32_t *msg_word = NULL;
1889 	uint32_t msg_remain_len = 0;
1890 
1891 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1892 
1893 	/*COOKIE MSB*/
1894 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1895 
1896 	/* stats message length + 16 size of HTT header*/
1897 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1898 				(uint32_t)DP_EXT_MSG_LENGTH);
1899 
1900 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1901 			msg_word,  msg_remain_len,
1902 			WDI_NO_VAL, pdev_id);
1903 
1904 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1905 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1906 	}
1907 	/* Need to be freed here as WDI handler will
1908 	 * make a copy of pkt to send data to application
1909 	 */
1910 	qdf_nbuf_free(htt_msg);
1911 	return QDF_STATUS_SUCCESS;
1912 }
1913 #else
1914 static inline QDF_STATUS
1915 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1916 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
1917 {
1918 	return QDF_STATUS_E_NOSUPPORT;
1919 }
1920 #endif
1921 /**
1922  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1923  * @htt_stats: htt stats info
1924  *
1925  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1926  * contains sub messages which are identified by a TLV header.
1927  * In this function we will process the stream of T2H messages and read all the
1928  * TLV contained in the message.
1929  *
 * The following cases have been taken care of:
1931  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1932  *		In this case the buffer will contain multiple tlvs.
1933  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1934  *		Only one tlv will be contained in the HTT message and this tag
1935  *		will extend onto the next buffer.
1936  * Case 3: When the buffer is the continuation of the previous message
1937  * Case 4: tlv length is 0. which will indicate the end of message
1938  *
1939  * return: void
1940  */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	/* scratch buffer used only when a TLV spans two message buffers */
	uint8_t *tlv_buf_head = NULL;
	/* write cursor inside tlv_buf_head */
	uint8_t *tlv_buf_tail = NULL;
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val;
	int cookie_msb;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		/* A non-zero cookie means the requester asked for the raw
		 * stats to be forwarded over WDI; on success the buffer is
		 * consumed by dp_send_htt_stat_resp(), so skip local parsing.
		 */
		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		/* bits above the pdev id select copy mode (vs print mode) */
		if (cookie_msb >> 2) {
			copy_stats = true;
		}

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero-length TLV terminates the stream */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* TLV length excludes its own header; add it once
			 * when starting a fresh TLV
			 */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3: tail of a spanning TLV completes in
				 * this buffer; append and process the
				 * reassembled copy
				 */
				if (tlv_buf_head) {
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev,
							      tlv_type,
							      tlv_start);
				else
					dp_htt_stats_print_tag(pdev,
							       tlv_type,
							       tlv_start);

				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
					dp_peer_update_inactive_time(pdev,
								     tlv_type,
								     tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				/* release the reassembly buffer once the
				 * spanning TLV has been handled
				 */
				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2: TLV extends past this buffer; stash
				 * what we have and continue in the next one
				 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* drop the current buffer and everything still queued */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
2092 
2093 void htt_t2h_stats_handler(void *context)
2094 {
2095 	struct dp_soc *soc = (struct dp_soc *)context;
2096 	struct htt_stats_context htt_stats;
2097 	uint32_t *msg_word;
2098 	qdf_nbuf_t htt_msg = NULL;
2099 	uint8_t done;
2100 	uint32_t rem_stats;
2101 
2102 	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
2103 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2104 			"soc: 0x%pK, init_done: %d", soc,
2105 			qdf_atomic_read(&soc->cmn_init_done));
2106 		return;
2107 	}
2108 
2109 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
2110 	qdf_nbuf_queue_init(&htt_stats.msg);
2111 
2112 	/* pull one completed stats from soc->htt_stats_msg and process */
2113 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2114 	if (!soc->htt_stats.num_stats) {
2115 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
2116 		return;
2117 	}
2118 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
2119 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
2120 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
2121 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2122 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
2123 		/*
2124 		 * Done bit signifies that this is the last T2H buffer in the
2125 		 * stream of HTT EXT STATS message
2126 		 */
2127 		if (done)
2128 			break;
2129 	}
2130 	rem_stats = --soc->htt_stats.num_stats;
2131 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2132 
2133 	/* If there are more stats to process, schedule stats work again.
2134 	 * Scheduling prior to processing ht_stats to queue with early
2135 	 * index
2136 	 */
2137 	if (rem_stats)
2138 		qdf_sched_work(0, &soc->htt_stats.work);
2139 
2140 	dp_process_htt_stat_msg(&htt_stats, soc);
2141 }
2142 
2143 /*
2144  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
2145  * if a new peer id arrives in a PPDU
2146  * pdev: DP pdev handle
2147  * @peer_id : peer unique identifier
2148  * @ppdu_info: per ppdu tlv structure
2149  *
2150  * return:user index to be populated
2151  */
2152 #ifdef FEATURE_PERPKT_INFO
2153 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
2154 						uint16_t peer_id,
2155 						struct ppdu_info *ppdu_info)
2156 {
2157 	uint8_t user_index = 0;
2158 	struct cdp_tx_completion_ppdu *ppdu_desc;
2159 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2160 
2161 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2162 
2163 	while ((user_index + 1) <= ppdu_info->last_user) {
2164 		ppdu_user_desc = &ppdu_desc->user[user_index];
2165 		if (ppdu_user_desc->peer_id != peer_id) {
2166 			user_index++;
2167 			continue;
2168 		} else {
2169 			/* Max users possible is 8 so user array index should
2170 			 * not exceed 7
2171 			 */
2172 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
2173 			return user_index;
2174 		}
2175 	}
2176 
2177 	ppdu_info->last_user++;
2178 	/* Max users possible is 8 so last user should not exceed 8 */
2179 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
2180 	return ppdu_info->last_user - 1;
2181 }
2182 
2183 /*
2184  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
2185  * pdev: DP pdev handle
2186  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
2187  * @ppdu_info: per ppdu tlv structure
2188  *
2189  * return:void
2190  */
2191 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
2192 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
2193 {
2194 	uint16_t frame_type;
2195 	uint16_t frame_ctrl;
2196 	uint16_t freq;
2197 	struct dp_soc *soc = NULL;
2198 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2199 	uint64_t ppdu_start_timestamp;
2200 	uint32_t *start_tag_buf;
2201 
2202 	start_tag_buf = tag_buf;
2203 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2204 
2205 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2206 
2207 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
2208 	ppdu_info->sched_cmdid =
2209 		HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
2210 	ppdu_desc->num_users =
2211 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
2212 
2213 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
2214 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
2215 	ppdu_desc->htt_frame_type = frame_type;
2216 
2217 	frame_ctrl = ppdu_desc->frame_ctrl;
2218 
2219 	ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
2220 
2221 	switch (frame_type) {
2222 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
2223 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
2224 	case HTT_STATS_FTYPE_SGEN_QOS_NULL:
2225 		/*
2226 		 * for management packet, frame type come as DATA_SU
2227 		 * need to check frame_ctrl before setting frame_type
2228 		 */
2229 		if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
2230 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2231 		else
2232 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
2233 	break;
2234 	case HTT_STATS_FTYPE_SGEN_MU_BAR:
2235 	case HTT_STATS_FTYPE_SGEN_BAR:
2236 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
2237 	break;
2238 	default:
2239 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2240 	break;
2241 	}
2242 
2243 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
2244 	ppdu_desc->tx_duration = *tag_buf;
2245 
2246 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
2247 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
2248 
2249 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
2250 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
2251 	if (freq != ppdu_desc->channel) {
2252 		soc = pdev->soc;
2253 		ppdu_desc->channel = freq;
2254 		pdev->operating_channel.freq = freq;
2255 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
2256 			pdev->operating_channel.num =
2257 			    soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
2258 								 pdev->pdev_id,
2259 								 freq);
2260 
2261 		if (soc && soc->cdp_soc.ol_ops->freq_to_band)
2262 			pdev->operating_channel.band =
2263 			       soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
2264 								 pdev->pdev_id,
2265 								 freq);
2266 	}
2267 
2268 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
2269 
2270 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
2271 	ppdu_desc->beam_change =
2272 		HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
2273 	ppdu_desc->doppler =
2274 		HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf);
2275 	ppdu_desc->spatial_reuse =
2276 		HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf);
2277 
2278 	dp_tx_capture_htt_frame_counter(pdev, frame_type);
2279 
2280 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
2281 	ppdu_start_timestamp = *tag_buf;
2282 	ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
2283 					     HTT_SHIFT_UPPER_TIMESTAMP) &
2284 					    HTT_MASK_UPPER_TIMESTAMP);
2285 
2286 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2287 					ppdu_desc->tx_duration;
2288 	/* Ack time stamp is same as end time stamp*/
2289 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
2290 
2291 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2292 					ppdu_desc->tx_duration;
2293 
2294 	ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
2295 	ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
2296 	ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
2297 
2298 	/* Ack time stamp is same as end time stamp*/
2299 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
2300 }
2301 
2302 /*
2303  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
2304  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
2305  * @ppdu_info: per ppdu tlv structure
2306  *
2307  * return:void
2308  */
static void dp_process_ppdu_stats_user_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct dp_peer *peer;
	struct dp_vdev *vdev;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	/* NOTE(review): peer id is read with the USER_RATE accessor;
	 * presumably sw_peer_id occupies the same bit position in both
	 * TLVs — confirm against the htt_ppdu_stats definitions.
	 */
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);

	ppdu_desc->vdev_id =
		HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);

	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;

	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
		ppdu_user_desc->delayed_ba = 1;
		ppdu_desc->delayed_ba = 1;
	}

	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
		ppdu_user_desc->is_mcast = true;
		ppdu_user_desc->mpdu_tried_mcast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
	} else {
		ppdu_user_desc->mpdu_tried_ucast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
	}

	tag_buf++;

	ppdu_user_desc->qos_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
	ppdu_user_desc->frame_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;

	/* success count for a delayed-BA user is filled in later when the
	 * BA arrives, so clear it here
	 */
	if (ppdu_user_desc->delayed_ba)
		ppdu_user_desc->mpdu_success = 0;

	tag_buf += 3;

	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
		ppdu_user_desc->ppdu_cookie =
			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
		ppdu_user_desc->is_ppdu_cookie_valid = 1;
	}

	/* returning earlier causes other fields to stay unpopulated */
	if (peer_id == DP_SCAN_PEER_ID) {
		/* scan-peer PPDUs have no real peer entry; fall back to the
		 * vdev MAC address
		 */
		vdev =
		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
							  ppdu_desc->vdev_id);
		if (!vdev)
			return;
		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
	} else {
		peer = dp_peer_find_by_id(pdev->soc, peer_id);
		if (!peer) {
			/*
			 * fw sends peer_id which is about to removed but
			 * it was already removed in host.
			 * eg: for disassoc, fw send ppdu stats
			 * with peer id equal to previously associated
			 * peer's peer_id but it was removed
			 */
			vdev =
			dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
							   ppdu_desc->vdev_id);
			if (!vdev)
				return;
			qdf_mem_copy(ppdu_user_desc->mac_addr,
				     vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
			return;
		}
		qdf_mem_copy(ppdu_user_desc->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		dp_peer_unref_del_find_by_id(peer);
	}
}
2407 
2408 
2409 /**
2410  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
2411  * @pdev: DP pdev handle
2412  * @tag_buf: T2H message buffer carrying the user rate TLV
2413  * @ppdu_info: per ppdu tlv structure
2414  *
2415  * return:void
2416  */
static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
		uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct dp_peer *peer;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct dp_vdev *vdev;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	if (peer_id == DP_SCAN_PEER_ID) {
		/* scan-peer PPDU: no peer entry exists, use the vdev MAC */
		vdev =
		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
							  ppdu_desc->vdev_id);
		if (!vdev)
			return;
		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
	} else {
		peer = dp_peer_find_by_id(pdev->soc, peer_id);

		if (peer) {
			ppdu_desc->vdev_id = peer->vdev->vdev_id;
			qdf_mem_copy(ppdu_user_desc->mac_addr,
				     peer->mac_addr.raw,
				     QDF_MAC_ADDR_SIZE);
			dp_peer_unref_del_find_by_id(peer);
		}
	}

	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);

	/* step through the TLV one 32-bit word at a time; the offsets below
	 * must match the htt_ppdu_stats_user_rate_tlv layout
	 */
	tag_buf += 1;

	ppdu_user_desc->user_pos =
		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
	ppdu_user_desc->mu_group_id =
		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);

	tag_buf += 1;

	ppdu_user_desc->ru_start =
		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
	/* RU end is inclusive, hence the +1 for the tone count */
	ppdu_user_desc->ru_tones =
		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;

	tag_buf += 2;

	ppdu_user_desc->ppdu_type =
		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);

	tag_buf++;
	ppdu_user_desc->tx_rate = *tag_buf;

	ppdu_user_desc->ltf_size =
		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
	ppdu_user_desc->stbc =
		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
	ppdu_user_desc->he_re =
		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
	ppdu_user_desc->txbf =
		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
	/* NOTE(review): firmware bw encoding appears offset by 2 from the
	 * CDP bandwidth enum — confirm against the HTT interface definition
	 */
	ppdu_user_desc->bw =
		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
	ppdu_user_desc->preamble =
		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
}
2505 
2506 /*
2507  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
2508  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2509  * pdev: DP PDEV handle
2510  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2511  * @ppdu_info: per ppdu tlv structure
2512  *
2513  * return:void
2514  */
2515 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2516 		struct dp_pdev *pdev, uint32_t *tag_buf,
2517 		struct ppdu_info *ppdu_info)
2518 {
2519 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
2520 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
2521 
2522 	struct cdp_tx_completion_ppdu *ppdu_desc;
2523 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2524 	uint8_t curr_user_index = 0;
2525 	uint16_t peer_id;
2526 	struct dp_peer *peer;
2527 	uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
2528 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2529 
2530 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2531 
2532 	tag_buf++;
2533 
2534 	peer_id =
2535 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2536 
2537 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2538 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2539 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2540 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2541 	if (peer) {
2542 		ppdu_desc->vdev_id = peer->vdev->vdev_id;
2543 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2544 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2545 		dp_peer_unref_del_find_by_id(peer);
2546 	}
2547 	ppdu_user_desc->peer_id = peer_id;
2548 
2549 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2550 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2551 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2552 
2553 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2554 						   (void *)ppdu_user_desc,
2555 						   ppdu_info->ppdu_id,
2556 						   size);
2557 }
2558 
2559 /*
2560  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
2561  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * @pdev: DP PDEV handle
2563  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2564  * @ppdu_info: per ppdu tlv structure
2565  *
2566  * return:void
2567  */
2568 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2569 		struct dp_pdev *pdev, uint32_t *tag_buf,
2570 		struct ppdu_info *ppdu_info)
2571 {
2572 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
2573 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
2574 
2575 	struct cdp_tx_completion_ppdu *ppdu_desc;
2576 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2577 	uint8_t curr_user_index = 0;
2578 	uint16_t peer_id;
2579 	struct dp_peer *peer;
2580 	uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
2581 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2582 
2583 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2584 
2585 	tag_buf++;
2586 
2587 	peer_id =
2588 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2589 
2590 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2591 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2592 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2593 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2594 	if (peer) {
2595 		ppdu_desc->vdev_id = peer->vdev->vdev_id;
2596 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2597 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2598 		dp_peer_unref_del_find_by_id(peer);
2599 	}
2600 	ppdu_user_desc->peer_id = peer_id;
2601 
2602 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2603 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2604 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2605 
2606 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2607 						   (void *)ppdu_user_desc,
2608 						   ppdu_info->ppdu_id,
2609 						   size);
2610 }
2611 
2612 /*
2613  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
2614  * htt_ppdu_stats_user_cmpltn_common_tlv
 * @pdev: DP PDEV handle
2616  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
2617  * @ppdu_info: per ppdu tlv structure
2618  *
2619  * return:void
2620  */
static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct dp_peer *peer;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	uint8_t bw_iter;
	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	peer_id =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);

	peer = dp_peer_find_by_id(pdev->soc, peer_id);
	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	if (peer) {
		ppdu_desc->vdev_id = peer->vdev->vdev_id;
		qdf_mem_copy(ppdu_user_desc->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		dp_peer_unref_del_find_by_id(peer);
	}
	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->completion_status =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
				*tag_buf);

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);


	tag_buf++;
	/* ack RSSI is only meaningful when the user completed successfully */
	if (qdf_likely(ppdu_user_desc->completion_status ==
			HTT_PPDU_STATS_USER_STATUS_OK)) {
		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
		ppdu_user_desc->ack_rssi_valid = 1;
	} else {
		ppdu_user_desc->ack_rssi_valid = 0;
	}

	tag_buf++;

	ppdu_user_desc->mpdu_success =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);

	/* failed = tried - success; firmware reports only tried and success */
	ppdu_user_desc->mpdu_failed =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
						ppdu_user_desc->mpdu_success;

	tag_buf++;

	ppdu_user_desc->long_retries =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);

	ppdu_user_desc->short_retries =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
	ppdu_user_desc->retry_msdus =
		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;

	ppdu_user_desc->is_ampdu =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;

	ppdu_desc->resp_type =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
	ppdu_desc->mprot_type =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
	ppdu_desc->rts_success =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
	ppdu_desc->rts_failure =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);

	/*
	 * on mpdu success, increase compltn_common_tlv counter
	 */
	if (ppdu_user_desc->mpdu_success)
		ppdu_info->compltn_common_tlv++;

	/*
	 * MU BAR may send request to n users but we may received ack only from
	 * m users. To have count of number of users respond back, we have a
	 * separate counter bar_num_users per PPDU that get increment for every
	 * htt_ppdu_stats_user_cmpltn_common_tlv
	 */
	ppdu_desc->bar_num_users++;

	/* per-chain RSSI values occupy one word each */
	tag_buf++;
	for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
		ppdu_user_desc->rssi_chain[bw_iter] =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
		tag_buf++;
	}

	ppdu_user_desc->sa_tx_antenna =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);

	tag_buf++;
	ppdu_user_desc->sa_is_training =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
	/* goodput is only reported while smart-antenna training is active */
	if (ppdu_user_desc->sa_is_training) {
		ppdu_user_desc->sa_goodput =
			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
	}

	/* per-bandwidth max rates, one word per bandwidth */
	tag_buf++;
	for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
		ppdu_user_desc->sa_max_rates[bw_iter] =
			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
	}

	tag_buf += CDP_NUM_SA_BW;
	ppdu_user_desc->current_rate_per =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
}
2744 
2745 /*
2746  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2747  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2748  * pdev: DP PDEV handle
2749  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2750  * @ppdu_info: per ppdu tlv structure
2751  *
2752  * return:void
2753  */
2754 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2755 		struct dp_pdev *pdev, uint32_t *tag_buf,
2756 		struct ppdu_info *ppdu_info)
2757 {
2758 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2759 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2760 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2761 	struct cdp_tx_completion_ppdu *ppdu_desc;
2762 	uint8_t curr_user_index = 0;
2763 	uint16_t peer_id;
2764 	struct dp_peer *peer;
2765 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2766 
2767 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2768 
2769 	tag_buf++;
2770 
2771 	peer_id =
2772 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2773 
2774 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2775 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2776 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2777 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2778 	if (peer) {
2779 		ppdu_desc->vdev_id = peer->vdev->vdev_id;
2780 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2781 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2782 		dp_peer_unref_del_find_by_id(peer);
2783 	}
2784 	ppdu_user_desc->peer_id = peer_id;
2785 
2786 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2787 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2788 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2789 	ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
2790 }
2791 
2792 /*
2793  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2794  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2795  * pdev: DP PDEV handle
2796  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2797  * @ppdu_info: per ppdu tlv structure
2798  *
2799  * return:void
2800  */
2801 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2802 		struct dp_pdev *pdev, uint32_t *tag_buf,
2803 		struct ppdu_info *ppdu_info)
2804 {
2805 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2806 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2807 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2808 	struct cdp_tx_completion_ppdu *ppdu_desc;
2809 	uint8_t curr_user_index = 0;
2810 	uint16_t peer_id;
2811 	struct dp_peer *peer;
2812 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2813 
2814 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2815 
2816 	tag_buf++;
2817 
2818 	peer_id =
2819 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2820 
2821 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2822 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2823 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2824 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2825 	if (peer) {
2826 		ppdu_desc->vdev_id = peer->vdev->vdev_id;
2827 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2828 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2829 		dp_peer_unref_del_find_by_id(peer);
2830 	}
2831 	ppdu_user_desc->peer_id = peer_id;
2832 
2833 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2834 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2835 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2836 	ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
2837 }
2838 
2839 /*
2840  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2841  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @pdev: DP PDEV handle
2843  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2844  * @ppdu_info: per ppdu tlv structure
2845  *
2846  * return:void
2847  */
2848 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2849 		struct dp_pdev *pdev, uint32_t *tag_buf,
2850 		struct ppdu_info *ppdu_info)
2851 {
2852 	uint16_t peer_id;
2853 	struct dp_peer *peer;
2854 	struct cdp_tx_completion_ppdu *ppdu_desc;
2855 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2856 	uint8_t curr_user_index = 0;
2857 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2858 
2859 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2860 
2861 	tag_buf += 2;
2862 	peer_id =
2863 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2864 
2865 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2866 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2867 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2868 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2869 	if (peer) {
2870 		ppdu_desc->vdev_id = peer->vdev->vdev_id;
2871 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2872 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2873 		dp_peer_unref_del_find_by_id(peer);
2874 	}
2875 	if (!ppdu_user_desc->ack_ba_tlv) {
2876 		ppdu_user_desc->ack_ba_tlv = 1;
2877 	} else {
2878 		pdev->stats.ack_ba_comes_twice++;
2879 		dp_peer_unref_del_find_by_id(peer);
2880 		return;
2881 	}
2882 
2883 	ppdu_user_desc->peer_id = peer_id;
2884 
2885 	tag_buf++;
2886 	/* not to update ppdu_desc->tid from this TLV */
2887 	ppdu_user_desc->num_mpdu =
2888 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2889 
2890 	ppdu_user_desc->num_msdu =
2891 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2892 
2893 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2894 
2895 	tag_buf++;
2896 	ppdu_user_desc->start_seq =
2897 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
2898 			*tag_buf);
2899 
2900 	tag_buf++;
2901 	ppdu_user_desc->success_bytes = *tag_buf;
2902 
2903 	/* increase ack ba tlv counter on successful mpdu */
2904 	if (ppdu_user_desc->num_mpdu)
2905 		ppdu_info->ack_ba_tlv++;
2906 
2907 	if (ppdu_user_desc->ba_size == 0) {
2908 		ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
2909 		ppdu_user_desc->ba_bitmap[0] = 1;
2910 		ppdu_user_desc->ba_size = 1;
2911 	}
2912 }
2913 
2914 /*
2915  * dp_process_ppdu_stats_user_common_array_tlv: Process
2916  * htt_ppdu_stats_user_common_array_tlv
2917  * pdev: DP PDEV handle
2918  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2919  * @ppdu_info: per ppdu tlv structure
2920  *
2921  * return:void
2922  */
2923 static void dp_process_ppdu_stats_user_common_array_tlv(
2924 		struct dp_pdev *pdev, uint32_t *tag_buf,
2925 		struct ppdu_info *ppdu_info)
2926 {
2927 	uint32_t peer_id;
2928 	struct cdp_tx_completion_ppdu *ppdu_desc;
2929 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2930 	uint8_t curr_user_index = 0;
2931 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2932 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2933 
2934 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2935 
2936 	tag_buf++;
2937 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2938 	tag_buf += 3;
2939 	peer_id =
2940 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2941 
2942 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
2943 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2944 			"Invalid peer");
2945 		return;
2946 	}
2947 
2948 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2949 
2950 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2951 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2952 
2953 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2954 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2955 
2956 	tag_buf++;
2957 
2958 	ppdu_user_desc->success_msdus =
2959 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2960 	ppdu_user_desc->retry_bytes =
2961 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2962 	tag_buf++;
2963 	ppdu_user_desc->failed_msdus =
2964 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2965 }
2966 
2967 /*
2968  * dp_process_ppdu_stats_flush_tlv: Process
2969  * htt_ppdu_stats_flush_tlv
2970  * @pdev: DP PDEV handle
2971  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2972  * @ppdu_info: per ppdu tlv structure
2973  *
2974  * return:void
2975  */
2976 static void
2977 dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2978 					     uint32_t *tag_buf,
2979 					     struct ppdu_info *ppdu_info)
2980 {
2981 	struct cdp_tx_completion_ppdu *ppdu_desc;
2982 	uint32_t peer_id;
2983 	uint8_t tid;
2984 	struct dp_peer *peer;
2985 
2986 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2987 				qdf_nbuf_data(ppdu_info->nbuf);
2988 	ppdu_desc->is_flush = 1;
2989 
2990 	tag_buf++;
2991 	ppdu_desc->drop_reason = *tag_buf;
2992 
2993 	tag_buf++;
2994 	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2995 	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
2996 	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);
2997 
2998 	tag_buf++;
2999 	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
3000 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
3001 
3002 	ppdu_desc->num_users = 1;
3003 	ppdu_desc->user[0].peer_id = peer_id;
3004 	ppdu_desc->user[0].tid = tid;
3005 
3006 	ppdu_desc->queue_type =
3007 			HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);
3008 
3009 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
3010 	if (!peer)
3011 		return;
3012 
3013 	if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
3014 		DP_STATS_INC(peer,
3015 			     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
3016 			     ppdu_desc->num_msdu);
3017 	}
3018 
3019 	dp_peer_unref_del_find_by_id(peer);
3020 }
3021 
3022 /**
3023  * dp_process_ppdu_stats_sch_cmd_status_tlv: Process schedule command status tlv
3024  * Here we are not going to process the buffer.
3025  * @pdev: DP PDEV handle
3026  * @ppdu_info: per ppdu tlv structure
3027  *
3028  * return:void
3029  */
3030 static void
3031 dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev,
3032 					 struct ppdu_info *ppdu_info)
3033 {
3034 	struct cdp_tx_completion_ppdu *ppdu_desc;
3035 	uint8_t num_users;
3036 	uint8_t i;
3037 
3038 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3039 				qdf_nbuf_data(ppdu_info->nbuf);
3040 
3041 	num_users = ppdu_desc->bar_num_users;
3042 
3043 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
3044 		for (i = 0; i < num_users; i++) {
3045 			if (ppdu_desc->user[i].user_pos == 0) {
3046 				/* update phy mode for bar frame */
3047 				ppdu_desc->phy_mode =
3048 					ppdu_desc->user[i].preamble;
3049 				ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs;
3050 				break;
3051 			}
3052 		}
3053 	}
3054 }
3055 
3056 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3057 /*
3058  * dp_deliver_mgmt_frm: Process
3059  * @pdev: DP PDEV handle
3060  * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3061  *
3062  * return: void
3063  */
3064 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
3065 {
3066 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3067 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
3068 				     nbuf, HTT_INVALID_PEER,
3069 				     WDI_NO_VAL, pdev->pdev_id);
3070 	} else {
3071 		if (!pdev->bpr_enable)
3072 			qdf_nbuf_free(nbuf);
3073 	}
3074 }
3075 #endif
3076 
3077 /*
3078  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
3079  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3080  * @pdev: DP PDEV handle
3081  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3082  * @length: tlv_length
3083  *
 * return: QDF_STATUS_SUCCESS if the nbuf is to be freed by the caller
3085  */
static QDF_STATUS
dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
					      qdf_nbuf_t tag_buf,
					      uint32_t ppdu_id)
{
	uint32_t *nbuf_ptr;
	uint8_t trim_size;
	size_t head_size;
	struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
	uint32_t *msg_word;
	uint32_t tsf_hdr;

	/* no consumer of mgmt frames enabled: let the caller free the nbuf */
	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
	    (!pdev->bpr_enable) && (!pdev->tx_capture_enabled))
		return QDF_STATUS_SUCCESS;

	/*
	 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
	 * (third word of the message header)
	 */
	msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
	msg_word = msg_word + 2;
	tsf_hdr = *msg_word;

	/*
	 * strip everything before the mgmt payload: the gap between the
	 * nbuf data pointer and mgmt_buf plus the reserved TLV header area
	 */
	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
		      qdf_nbuf_data(tag_buf));

	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
		return QDF_STATUS_SUCCESS;

	/* drop any bytes past the reported mgmt frame length */
	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
			    pdev->mgmtctrl_frm_info.mgmt_buf_len);

	if (pdev->tx_capture_enabled) {
		/* tx-capture path: prepend a full completion-info header */
		head_size = sizeof(struct cdp_tx_mgmt_comp_info);
		if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
			qdf_err("Fail to get headroom h_sz %d h_avail %d\n",
				head_size, qdf_nbuf_headroom(tag_buf));
			qdf_assert_always(0);
			return QDF_STATUS_E_NOMEM;
		}
		ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
					qdf_nbuf_push_head(tag_buf, head_size);
		qdf_assert_always(ptr_mgmt_comp_info);
		ptr_mgmt_comp_info->ppdu_id = ppdu_id;
		ptr_mgmt_comp_info->is_sgen_pkt = true;
		ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
	} else {
		/* otherwise only the 32-bit ppdu_id is prepended */
		head_size = sizeof(ppdu_id);
		nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
		*nbuf_ptr = ppdu_id;
	}

	if (pdev->bpr_enable) {
		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
				     tag_buf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	}

	dp_deliver_mgmt_frm(pdev, tag_buf);

	/* E_ALREADY: nbuf has been handed off, caller must not free it */
	return QDF_STATUS_E_ALREADY;
}
3149 
3150 /**
3151  * dp_validate_fix_ppdu_tlv(): Function to validate the length of PPDU
3152  *
3153  * If the TLV length sent as part of PPDU TLV is less that expected size i.e
3154  * size of corresponding data structure, pad the remaining bytes with zeros
3155  * and continue processing the TLVs
3156  *
3157  * @pdev: DP pdev handle
3158  * @tag_buf: TLV buffer
3159  * @tlv_expected_size: Expected size of Tag
3160  * @tlv_len: TLV length received from FW
3161  *
3162  * Return: Pointer to updated TLV
3163  */
3164 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
3165 						 uint32_t *tag_buf,
3166 						 uint16_t tlv_expected_size,
3167 						 uint16_t tlv_len)
3168 {
3169 	uint32_t *tlv_desc = tag_buf;
3170 
3171 	qdf_assert_always(tlv_len != 0);
3172 
3173 	if (tlv_len < tlv_expected_size) {
3174 		qdf_mem_zero(pdev->ppdu_tlv_buf, tlv_expected_size);
3175 		qdf_mem_copy(pdev->ppdu_tlv_buf, tag_buf, tlv_len);
3176 		tlv_desc = pdev->ppdu_tlv_buf;
3177 	}
3178 
3179 	return tlv_desc;
3180 }
3181 
3182 /**
3183  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
3184  * @pdev: DP pdev handle
3185  * @tag_buf: TLV buffer
3186  * @tlv_len: length of tlv
3187  * @ppdu_info: per ppdu tlv structure
3188  *
3189  * return: void
3190  */
static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
		uint32_t tlv_len, struct ppdu_info *ppdu_info)
{
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
	uint16_t tlv_expected_size;
	uint32_t *tlv_desc;

	/*
	 * Each fixed-size TLV is passed through dp_validate_fix_ppdu_tlv()
	 * first: short TLVs are zero-padded into a scratch buffer so the
	 * per-type handler can always read the full structure.
	 */
	switch (tlv_type) {
	case HTT_PPDU_STATS_COMMON_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
						      ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_RATE_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
						    ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_cmpltn_common_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_common_array_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
							     ppdu_info);
		break;
	/* payload is not parsed for this tag, so no size fix-up needed */
	case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV:
		dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info);
		break;
	default:
		/* unknown/unhandled TLV types are silently skipped */
		break;
	}
}
3289 
3290 /**
3291  * dp_ppdu_desc_user_stats_update(): Function to update TX user stats
3292  * @pdev: DP pdev handle
3293  * @ppdu_info: per PPDU TLV descriptor
3294  *
3295  * return: void
3296  */
3297 void
3298 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
3299 			       struct ppdu_info *ppdu_info)
3300 {
3301 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3302 	struct dp_peer *peer = NULL;
3303 	uint32_t tlv_bitmap_expected;
3304 	uint32_t tlv_bitmap_default;
3305 	uint16_t i;
3306 	uint32_t num_users;
3307 
3308 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3309 		qdf_nbuf_data(ppdu_info->nbuf);
3310 
3311 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR)
3312 		ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
3313 
3314 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
3315 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
3316 	    pdev->tx_capture_enabled) {
3317 		if (ppdu_info->is_ampdu)
3318 			tlv_bitmap_expected =
3319 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
3320 					ppdu_info->tlv_bitmap);
3321 	}
3322 
3323 	tlv_bitmap_default = tlv_bitmap_expected;
3324 
3325 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
3326 		num_users = ppdu_desc->bar_num_users;
3327 		ppdu_desc->num_users = ppdu_desc->bar_num_users;
3328 	} else {
3329 		num_users = ppdu_desc->num_users;
3330 	}
3331 
3332 	for (i = 0; i < num_users; i++) {
3333 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
3334 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
3335 
3336 		peer = dp_peer_find_hash_find(pdev->soc,
3337 					      ppdu_desc->user[i].mac_addr,
3338 					      0, ppdu_desc->vdev_id);
3339 		/**
3340 		 * This check is to make sure peer is not deleted
3341 		 * after processing the TLVs.
3342 		 */
3343 		if (!peer)
3344 			continue;
3345 
3346 		ppdu_desc->user[i].cookie = (void *)peer->wlanstats_ctx;
3347 
3348 		/*
3349 		 * different frame like DATA, BAR or CTRL has different
3350 		 * tlv bitmap expected. Apart from ACK_BA_STATUS TLV, we
3351 		 * receive other tlv in-order/sequential from fw.
3352 		 * Since ACK_BA_STATUS TLV come from Hardware it is
3353 		 * asynchronous So we need to depend on some tlv to confirm
3354 		 * all tlv is received for a ppdu.
3355 		 * So we depend on both SCHED_CMD_STATUS_TLV and
3356 		 * ACK_BA_STATUS_TLV. for failure packet we won't get
3357 		 * ACK_BA_STATUS_TLV.
3358 		 */
3359 		if (!(ppdu_info->tlv_bitmap &
3360 		      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) ||
3361 		    (!(ppdu_info->tlv_bitmap &
3362 		       (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
3363 		     (ppdu_desc->user[i].completion_status ==
3364 		      HTT_PPDU_STATS_USER_STATUS_OK))) {
3365 			dp_peer_unref_delete(peer);
3366 			continue;
3367 		}
3368 
3369 		/**
3370 		 * Update tx stats for data frames having Qos as well as
3371 		 * non-Qos data tid
3372 		 */
3373 
3374 		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
3375 		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
3376 		     (ppdu_desc->htt_frame_type ==
3377 		      HTT_STATS_FTYPE_SGEN_QOS_NULL) ||
3378 		     ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) &&
3379 		      (ppdu_desc->num_mpdu > 1))) &&
3380 		      (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {
3381 
3382 			dp_tx_stats_update(pdev, peer,
3383 					   &ppdu_desc->user[i],
3384 					   ppdu_desc->ack_rssi);
3385 			dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
3386 		}
3387 
3388 		dp_peer_unref_delete(peer);
3389 		tlv_bitmap_expected = tlv_bitmap_default;
3390 	}
3391 }
3392 
3393 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3394 
3395 /**
3396  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
3397  * to upper layer
3398  * @pdev: DP pdev handle
3399  * @ppdu_info: per PPDU TLV descriptor
3400  *
3401  * return: void
3402  */
3403 static
3404 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
3405 			  struct ppdu_info *ppdu_info)
3406 {
3407 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3408 	qdf_nbuf_t nbuf;
3409 
3410 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3411 		qdf_nbuf_data(ppdu_info->nbuf);
3412 
3413 	dp_ppdu_desc_user_stats_update(pdev, ppdu_info);
3414 
3415 	/*
3416 	 * Remove from the list
3417 	 */
3418 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
3419 	nbuf = ppdu_info->nbuf;
3420 	pdev->list_depth--;
3421 	qdf_mem_free(ppdu_info);
3422 
3423 	qdf_assert_always(nbuf);
3424 
3425 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3426 		qdf_nbuf_data(nbuf);
3427 
3428 	/**
3429 	 * Deliver PPDU stats only for valid (acked) data frames if
3430 	 * sniffer mode is not enabled.
3431 	 * If sniffer mode is enabled, PPDU stats for all frames
3432 	 * including mgmt/control frames should be delivered to upper layer
3433 	 */
3434 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3435 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
3436 				nbuf, HTT_INVALID_PEER,
3437 				WDI_NO_VAL, pdev->pdev_id);
3438 	} else {
3439 		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
3440 				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
3441 
3442 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3443 					pdev->soc, nbuf, HTT_INVALID_PEER,
3444 					WDI_NO_VAL, pdev->pdev_id);
3445 		} else
3446 			qdf_nbuf_free(nbuf);
3447 	}
3448 	return;
3449 }
3450 
3451 #endif
3452 
3453 /**
3454  * dp_get_ppdu_desc(): Function to allocate new PPDU status
3455  * desc for new ppdu id
3456  * @pdev: DP pdev handle
3457  * @ppdu_id: PPDU unique identifier
3458  * @tlv_type: TLV type received
3459  *
3460  * return: ppdu_info per ppdu tlv structure
3461  */
static
struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
			uint8_t tlv_type)
{
	struct ppdu_info *ppdu_info = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;

	/*
	 * Find ppdu_id node exists or not
	 */
	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {

		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
			break;
		}
	}

	if (ppdu_info) {
		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
			/**
			 * if we get tlv_type that is already been processed
			 * for ppdu, that means we got a new ppdu with same
			 * ppdu id. Hence Flush the older ppdu
			 * for MUMIMO and OFDMA, In a PPDU we have
			 * multiple user with same tlv types. tlv bitmap is
			 * used to check whether SU or MU_MIMO/OFDMA
			 */
			if (!(ppdu_info->tlv_bitmap &
			    (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
				return ppdu_info;

			ppdu_desc = (struct cdp_tx_completion_ppdu *)
				qdf_nbuf_data(ppdu_info->nbuf);

			/**
			 * apart from ACK BA STATUS TLV rest all comes in order
			 * so if tlv type not ACK BA STATUS TLV we can deliver
			 * ppdu_info
			 */
			if ((tlv_type ==
			     HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
			    (ppdu_desc->htt_frame_type ==
			     HTT_STATS_FTYPE_SGEN_MU_BAR))
				return ppdu_info;

			/* older ppdu with the same id: push it up now */
			dp_ppdu_desc_deliver(pdev, ppdu_info);
		} else {
			/* first time this tlv_type is seen for the ppdu */
			return ppdu_info;
		}
	}

	/**
	 * Flush the head ppdu descriptor if ppdu desc list reaches max
	 * threshold
	 */
	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
		TAILQ_REMOVE(&pdev->ppdu_info_list,
			     ppdu_info, ppdu_info_list_elem);
		pdev->list_depth--;
		pdev->stats.ppdu_drop++;
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		qdf_mem_free(ppdu_info);
	}

	/*
	 * Allocate new ppdu_info node
	 */
	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
	if (!ppdu_info)
		return NULL;

	/* nbuf carries the cdp_tx_completion_ppdu descriptor as its data */
	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
			TRUE);
	if (!ppdu_info->nbuf) {
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	ppdu_info->ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
			sizeof(struct cdp_tx_completion_ppdu));

	/* reserve the descriptor area in the nbuf */
	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"No tailroom for HTT PPDU");
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		ppdu_info->last_user = 0;
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	/**
	 * No lock is needed because all PPDU TLVs are processed in
	 * same context and this list is updated in same context
	 */
	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
			ppdu_info_list_elem);
	pdev->list_depth++;
	return ppdu_info;
}
3568 
3569 /**
3570  * dp_htt_process_tlv(): Function to process each PPDU TLVs
3571  * @pdev: DP pdev handle
3572  * @htt_t2h_msg: HTT target to host message
3573  *
3574  * return: ppdu_info per ppdu tlv structure
3575  */
3576 
3577 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
3578 		qdf_nbuf_t htt_t2h_msg)
3579 {
3580 	uint32_t length;
3581 	uint32_t ppdu_id;
3582 	uint8_t tlv_type;
3583 	uint32_t tlv_length, tlv_bitmap_expected;
3584 	uint8_t *tlv_buf;
3585 	struct ppdu_info *ppdu_info = NULL;
3586 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3587 	struct dp_peer *peer;
3588 	uint32_t i = 0;
3589 
3590 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
3591 
3592 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
3593 
3594 	msg_word = msg_word + 1;
3595 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
3596 
3597 
3598 	msg_word = msg_word + 3;
3599 	while (length > 0) {
3600 		tlv_buf = (uint8_t *)msg_word;
3601 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
3602 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
3603 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
3604 			pdev->stats.ppdu_stats_counter[tlv_type]++;
3605 
3606 		if (tlv_length == 0)
3607 			break;
3608 
3609 		tlv_length += HTT_TLV_HDR_LEN;
3610 
3611 		/**
3612 		 * Not allocating separate ppdu descriptor for MGMT Payload
3613 		 * TLV as this is sent as separate WDI indication and it
3614 		 * doesn't contain any ppdu information
3615 		 */
3616 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
3617 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
3618 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
3619 			pdev->mgmtctrl_frm_info.mgmt_buf_len =
3620 				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
3621 						(*(msg_word + 1));
3622 			msg_word =
3623 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
3624 			length -= (tlv_length);
3625 			continue;
3626 		}
3627 
3628 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
3629 		if (!ppdu_info)
3630 			return NULL;
3631 		ppdu_info->ppdu_desc->bss_color =
3632 			pdev->rx_mon_recv_status.bsscolor;
3633 
3634 		ppdu_info->ppdu_id = ppdu_id;
3635 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
3636 
3637 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
3638 
3639 		/**
3640 		 * Increment pdev level tlv count to monitor
3641 		 * missing TLVs
3642 		 */
3643 		pdev->tlv_count++;
3644 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
3645 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
3646 		length -= (tlv_length);
3647 	}
3648 
3649 	if (!ppdu_info)
3650 		return NULL;
3651 
3652 	pdev->last_ppdu_id = ppdu_id;
3653 
3654 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
3655 
3656 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
3657 	    pdev->tx_capture_enabled) {
3658 		if (ppdu_info->is_ampdu)
3659 			tlv_bitmap_expected =
3660 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
3661 					ppdu_info->tlv_bitmap);
3662 	}
3663 
3664 	ppdu_desc = ppdu_info->ppdu_desc;
3665 
3666 	if (!ppdu_desc)
3667 		return NULL;
3668 
3669 	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
3670 	    HTT_PPDU_STATS_USER_STATUS_OK) {
3671 		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
3672 	}
3673 
3674 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
3675 	    (ppdu_info->tlv_bitmap &
3676 	     (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) &&
3677 	    ppdu_desc->delayed_ba) {
3678 		for (i = 0; i < ppdu_desc->num_users; i++) {
3679 			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
3680 			uint64_t start_tsf;
3681 			uint64_t end_tsf;
3682 			uint32_t ppdu_id;
3683 
3684 			ppdu_id = ppdu_desc->ppdu_id;
3685 			peer = dp_peer_find_by_id(pdev->soc,
3686 						  ppdu_desc->user[i].peer_id);
3687 			/**
3688 			 * This check is to make sure peer is not deleted
3689 			 * after processing the TLVs.
3690 			 */
3691 			if (!peer)
3692 				continue;
3693 
3694 			delay_ppdu = &peer->delayed_ba_ppdu_stats;
3695 			start_tsf = ppdu_desc->ppdu_start_timestamp;
3696 			end_tsf = ppdu_desc->ppdu_end_timestamp;
3697 			/**
3698 			 * save delayed ba user info
3699 			 */
3700 			if (ppdu_desc->user[i].delayed_ba) {
3701 				dp_peer_copy_delay_stats(peer,
3702 							 &ppdu_desc->user[i],
3703 							 ppdu_id);
3704 				peer->last_delayed_ba_ppduid = ppdu_id;
3705 				delay_ppdu->ppdu_start_timestamp = start_tsf;
3706 				delay_ppdu->ppdu_end_timestamp = end_tsf;
3707 			}
3708 			dp_peer_unref_del_find_by_id(peer);
3709 		}
3710 	}
3711 
3712 	/*
3713 	 * when frame type is BAR and STATS_COMMON_TLV is set
3714 	 * copy the store peer delayed info to BAR status
3715 	 */
3716 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
3717 	    (ppdu_info->tlv_bitmap &
3718 	     (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) {
3719 		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
3720 			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
3721 			uint64_t start_tsf;
3722 			uint64_t end_tsf;
3723 			peer = dp_peer_find_by_id(pdev->soc,
3724 						  ppdu_desc->user[i].peer_id);
3725 			/**
3726 			 * This check is to make sure peer is not deleted
3727 			 * after processing the TLVs.
3728 			 */
3729 			if (!peer)
3730 				continue;
3731 
3732 			delay_ppdu = &peer->delayed_ba_ppdu_stats;
3733 			start_tsf = delay_ppdu->ppdu_start_timestamp;
3734 			end_tsf = delay_ppdu->ppdu_end_timestamp;
3735 
3736 			if (peer->last_delayed_ba) {
3737 				dp_peer_copy_stats_to_bar(peer,
3738 							  &ppdu_desc->user[i]);
3739 				ppdu_desc->ppdu_id =
3740 					peer->last_delayed_ba_ppduid;
3741 				ppdu_desc->ppdu_start_timestamp = start_tsf;
3742 				ppdu_desc->ppdu_end_timestamp = end_tsf;
3743 			}
3744 			dp_peer_unref_del_find_by_id(peer);
3745 		}
3746 	}
3747 
3748 	/*
3749 	 * for frame type DATA and BAR, we update stats based on MSDU,
3750 	 * successful msdu and mpdu are populate from ACK BA STATUS TLV
3751 	 * which comes out of order. successful mpdu also populated from
3752 	 * COMPLTN COMMON TLV which comes in order. for every ppdu_info
3753 	 * we store successful mpdu from both tlv and compare before delivering
3754 	 * to make sure we received ACK BA STATUS TLV. For some self generated
3755 	 * frame we won't get ack ba status tlv so no need to wait for
3756 	 * ack ba status tlv.
3757 	 */
3758 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
3759 	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
3760 		/*
3761 		 * successful mpdu count should match with both tlv
3762 		 */
3763 		if (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)
3764 			return NULL;
3765 	}
3766 
3767 	/**
3768 	 * Once all the TLVs for a given PPDU has been processed,
3769 	 * return PPDU status to be delivered to higher layer.
3770 	 * tlv_bitmap_expected can't be available for different frame type.
3771 	 * But SCHED CMD STATS TLV is the last TLV from the FW for a ppdu.
3772 	 * apart from ACK BA TLV, FW sends other TLV in sequential order.
3773 	 * flush tlv comes separate.
3774 	 */
3775 	if ((ppdu_info->tlv_bitmap != 0 &&
3776 	     (ppdu_info->tlv_bitmap &
3777 	      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) ||
3778 	    (ppdu_info->tlv_bitmap &
3779 	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV)))
3780 		return ppdu_info;
3781 
3782 	return NULL;
3783 }
3784 #endif /* FEATURE_PERPKT_INFO */
3785 
3786 /**
3787  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
3788  * @soc: DP SOC handle
3789  * @pdev_id: pdev id
3790  * @htt_t2h_msg: HTT message nbuf
3791  *
 * return: True if the caller should free htt_t2h_msg, false otherwise
3793  */
3794 #if defined(WDI_EVENT_ENABLE)
3795 #ifdef FEATURE_PERPKT_INFO
3796 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
3797 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
3798 {
3799 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
3800 	struct ppdu_info *ppdu_info = NULL;
3801 	bool free_buf = true;
3802 
3803 	if (pdev_id >= MAX_PDEV_CNT)
3804 		return true;
3805 
3806 	pdev = soc->pdev_list[pdev_id];
3807 	if (!pdev)
3808 		return true;
3809 
3810 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
3811 	    !pdev->mcopy_mode && !pdev->bpr_enable)
3812 		return free_buf;
3813 
3814 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
3815 
3816 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
3817 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
3818 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
3819 		    QDF_STATUS_SUCCESS)
3820 			free_buf = false;
3821 	}
3822 
3823 	if (ppdu_info)
3824 		dp_ppdu_desc_deliver(pdev, ppdu_info);
3825 
3826 	pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
3827 	pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
3828 	pdev->mgmtctrl_frm_info.ppdu_id = 0;
3829 
3830 	return free_buf;
3831 }
3832 #else
3833 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
3834 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
3835 {
3836 	return true;
3837 }
3838 #endif
3839 #endif
3840 
3841 /**
3842  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
3843  * @soc: DP SOC handle
3844  * @htt_t2h_msg: HTT message nbuf
3845  *
3846  * return:void
3847  */
3848 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
3849 		qdf_nbuf_t htt_t2h_msg)
3850 {
3851 	uint8_t done;
3852 	qdf_nbuf_t msg_copy;
3853 	uint32_t *msg_word;
3854 
3855 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
3856 	msg_word = msg_word + 3;
3857 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
3858 
3859 	/*
3860 	 * HTT EXT stats response comes as stream of TLVs which span over
3861 	 * multiple T2H messages.
3862 	 * The first message will carry length of the response.
3863 	 * For rest of the messages length will be zero.
3864 	 *
3865 	 * Clone the T2H message buffer and store it in a list to process
3866 	 * it later.
3867 	 *
3868 	 * The original T2H message buffers gets freed in the T2H HTT event
3869 	 * handler
3870 	 */
3871 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
3872 
3873 	if (!msg_copy) {
3874 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3875 				"T2H messge clone failed for HTT EXT STATS");
3876 		goto error;
3877 	}
3878 
3879 	qdf_spin_lock_bh(&soc->htt_stats.lock);
3880 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
3881 	/*
3882 	 * Done bit signifies that this is the last T2H buffer in the stream of
3883 	 * HTT EXT STATS message
3884 	 */
3885 	if (done) {
3886 		soc->htt_stats.num_stats++;
3887 		qdf_sched_work(0, &soc->htt_stats.work);
3888 	}
3889 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
3890 
3891 	return;
3892 
3893 error:
3894 	qdf_spin_lock_bh(&soc->htt_stats.lock);
3895 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
3896 			!= NULL) {
3897 		qdf_nbuf_free(msg_copy);
3898 	}
3899 	soc->htt_stats.num_stats = 0;
3900 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
3901 	return;
3902 
3903 }
3904 
3905 /*
3906  * htt_soc_attach_target() - SOC level HTT setup
3907  * @htt_soc:	HTT SOC handle
3908  *
3909  * Return: 0 on success; error code on failure
3910  */
int htt_soc_attach_target(struct htt_soc *htt_soc)
{
	/*
	 * Original code cast htt_soc to its own type through a local;
	 * SOC-level target setup is just the HTT version negotiation.
	 */
	return htt_h2t_ver_req_msg(htt_soc);
}
3917 
/*
 * htt_set_htc_handle() - store the HTC handle in the HTT SOC context
 * @htt_soc: HTT SOC handle
 * @htc_soc: HTC handle to store
 */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}
3922 
/*
 * htt_get_htc_handle() - retrieve the HTC handle from the HTT SOC context
 * @htt_soc: HTT SOC handle
 *
 * Return: HTC handle previously stored via htt_set_htc_handle()/attach
 */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
3927 
3928 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
3929 {
3930 	int i;
3931 	int j;
3932 	int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
3933 	struct htt_soc *htt_soc = NULL;
3934 
3935 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
3936 	if (!htt_soc) {
3937 		dp_err("HTT attach failed");
3938 		return NULL;
3939 	}
3940 
3941 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3942 		htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
3943 		if (!htt_soc->pdevid_tt[i].umac_ttt)
3944 			break;
3945 		qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
3946 		htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
3947 		if (!htt_soc->pdevid_tt[i].lmac_ttt) {
3948 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
3949 			break;
3950 		}
3951 		qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
3952 	}
3953 	if (i != MAX_PDEV_CNT) {
3954 		for (j = 0; j < i; j++) {
3955 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt);
3956 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt);
3957 		}
3958 		qdf_mem_free(htt_soc);
3959 		return NULL;
3960 	}
3961 
3962 	htt_soc->dp_soc = soc;
3963 	htt_soc->htc_soc = htc_handle;
3964 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
3965 
3966 	return htt_soc;
3967 }
3968 
3969 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
3970 /*
3971  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
3972  * @htt_soc:	 HTT SOC handle
3973  * @msg_word:    Pointer to payload
3974  * @htt_t2h_msg: HTT msg nbuf
3975  *
3976  * Return: True if buffer should be freed by caller.
3977  */
3978 static bool
3979 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
3980 				uint32_t *msg_word,
3981 				qdf_nbuf_t htt_t2h_msg)
3982 {
3983 	u_int8_t pdev_id;
3984 	u_int8_t target_pdev_id;
3985 	bool free_buf;
3986 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
3987 	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
3988 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
3989 							 target_pdev_id);
3990 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
3991 					      htt_t2h_msg);
3992 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
3993 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
3994 		pdev_id);
3995 	return free_buf;
3996 }
3997 #else
3998 static bool
3999 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
4000 				uint32_t *msg_word,
4001 				qdf_nbuf_t htt_t2h_msg)
4002 {
4003 	return true;
4004 }
4005 #endif
4006 
4007 #if defined(WDI_EVENT_ENABLE) && \
4008 	!defined(REMOVE_PKT_LOG)
4009 /*
4010  * dp_pktlog_msg_handler() - Pktlog msg handler
4011  * @htt_soc:	 HTT SOC handle
4012  * @msg_word:    Pointer to payload
4013  *
4014  * Return: None
4015  */
4016 static void
4017 dp_pktlog_msg_handler(struct htt_soc *soc,
4018 		      uint32_t *msg_word)
4019 {
4020 	uint8_t pdev_id;
4021 	uint8_t target_pdev_id;
4022 	uint32_t *pl_hdr;
4023 
4024 	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
4025 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4026 							 target_pdev_id);
4027 	pl_hdr = (msg_word + 1);
4028 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
4029 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
4030 		pdev_id);
4031 }
4032 #else
4033 static void
4034 dp_pktlog_msg_handler(struct htt_soc *soc,
4035 		      uint32_t *msg_word)
4036 {
4037 }
4038 #endif
4039 
4040 /*
4041  * time_allow_print() - time allow print
4042  * @htt_ring_tt:	ringi_id array of timestamps
4043  * @ring_id:		ring_id (index)
4044  *
4045  * Return: 1 for successfully saving timestamp in array
4046  *	and 0 for timestamp falling within 2 seconds after last one
4047  */
4048 static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
4049 {
4050 	unsigned long tstamp;
4051 	unsigned long delta;
4052 
4053 	tstamp = qdf_get_system_timestamp();
4054 
4055 	if (!htt_ring_tt)
4056 		return 0; //unable to print backpressure messages
4057 
4058 	if (htt_ring_tt[ring_id] == -1) {
4059 		htt_ring_tt[ring_id] = tstamp;
4060 		return 1;
4061 	}
4062 	delta = tstamp - htt_ring_tt[ring_id];
4063 	if (delta >= 2000) {
4064 		htt_ring_tt[ring_id] = tstamp;
4065 		return 1;
4066 	}
4067 
4068 	return 0;
4069 }
4070 
/*
 * dp_htt_alert_print() - log one FW ring backpressure alert
 * @msg_type:   HTT T2H message type that carried the alert
 * @pdev_id:    host pdev id owning the ring
 * @ring_id:    ring index within the ring type
 * @hp_idx:     head pointer index reported by FW
 * @tp_idx:     tail pointer index reported by FW
 * @bkp_time:   backpressure duration in ms reported by FW
 * @ring_stype: printable ring-type name
 */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       u_int8_t pdev_id, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time, char *ring_stype)
{
	/* emitted as two dp_alert calls — presumably to keep each log
	 * line short; NOTE(review): confirm against log-line limits
	 */
	dp_alert("msg_type: %d pdev_id: %d ring_type: %s ",
		 msg_type, pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
}
4081 
4082 /*
4083  * dp_htt_bkp_event_alert() - htt backpressure event alert
4084  * @msg_word:	htt packet context
4085  * @htt_soc:	HTT SOC handle
4086  *
4087  * Return: after attempting to print stats
4088  */
4089 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
4090 {
4091 	u_int8_t ring_type;
4092 	u_int8_t pdev_id;
4093 	uint8_t target_pdev_id;
4094 	u_int8_t ring_id;
4095 	u_int16_t hp_idx;
4096 	u_int16_t tp_idx;
4097 	u_int32_t bkp_time;
4098 	enum htt_t2h_msg_type msg_type;
4099 	struct dp_soc *dpsoc;
4100 	struct dp_pdev *pdev;
4101 	struct dp_htt_timestamp *radio_tt;
4102 
4103 	if (!soc)
4104 		return;
4105 
4106 	dpsoc = (struct dp_soc *)soc->dp_soc;
4107 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
4108 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
4109 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
4110 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4111 							 target_pdev_id);
4112 	if (pdev_id >= MAX_PDEV_CNT) {
4113 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
4114 			  "pdev id %d is invalid", pdev_id);
4115 		return;
4116 	}
4117 
4118 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
4119 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
4120 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
4121 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
4122 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
4123 	radio_tt = &soc->pdevid_tt[pdev_id];
4124 
4125 	switch (ring_type) {
4126 	case HTT_SW_RING_TYPE_UMAC:
4127 		if (!time_allow_print(radio_tt->umac_ttt, ring_id))
4128 			return;
4129 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4130 				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
4131 	break;
4132 	case HTT_SW_RING_TYPE_LMAC:
4133 		if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
4134 			return;
4135 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4136 				   bkp_time, "HTT_SW_RING_TYPE_LMAC");
4137 	break;
4138 	default:
4139 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4140 				   bkp_time, "UNKNOWN");
4141 	break;
4142 	}
4143 
4144 	dp_print_ring_stats(pdev);
4145 	dp_print_napi_stats(pdev->soc);
4146 }
4147 
4148 /*
4149  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
4150  * @context:	Opaque context (HTT SOC handle)
4151  * @pkt:	HTC packet
4152  */
static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct htt_soc *soc = (struct htt_soc *) context;
	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	u_int32_t *msg_word;
	enum htt_t2h_msg_type msg_type;
	/* most messages are fully consumed here; handlers that keep a
	 * reference to the nbuf clear this flag
	 */
	bool free_buf = true;

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		/* cancelled packets are expected (e.g. teardown) and are
		 * not counted as HTC errors
		 */
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			soc->stats.htc_err_cnt++;

		qdf_nbuf_free(htt_t2h_msg);
		return;
	}

	/* TODO: Check if we should pop the HTC/HTT header alignment padding */

	/* message type lives in the first dword of every T2H message */
	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
	htt_event_record(soc->htt_logger_handle,
			 msg_type, (uint8_t *)msg_word);
	switch (msg_type) {
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
	{
		dp_htt_bkp_event_alert(msg_word, soc);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *peer_mac_addr;
			u_int16_t peer_id;
			u_int16_t hw_peer_id;
			u_int8_t vdev_id;
			u_int8_t is_wds;
			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;

			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
			hw_peer_id =
				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
				(u_int8_t *) (msg_word+1),
				&mac_addr_deswizzle_buf[0]);
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_INFO,
				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
				peer_id, vdev_id);

			/*
			 * check if peer already exists for this peer_id, if so
			 * this peer map event is in response for a wds peer add
			 * wmi command sent during wds source port learning.
			 * in this case just add the ast entry to the existing
			 * peer ast_list.
			 */
			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
					       vdev_id, peer_mac_addr, 0,
					       is_wds);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
		{
			u_int16_t peer_id;
			u_int8_t vdev_id;
			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);

			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
						 vdev_id, mac_addr, 0,
						 DP_PEER_WDS_COUNT_INVALID);
			break;
		}
	case HTT_T2H_MSG_TYPE_SEC_IND:
		{
			u_int16_t peer_id;
			enum cdp_sec_type sec_type;
			int is_unicast;

			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
			/* point to the first part of the Michael key */
			msg_word++;
			dp_rx_sec_ind_handler(
				soc->dp_soc, peer_id, sec_type, is_unicast,
				msg_word, msg_word + 2);
			break;
		}

	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		{
			/* handler may retain the nbuf for WDI consumers;
			 * honor its free/keep decision
			 */
			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
							     htt_t2h_msg);
			break;
		}

	case HTT_T2H_MSG_TYPE_PKTLOG:
		{
			dp_pktlog_msg_handler(soc, msg_word);
			break;
		}

	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		{
			/* release the runtime-PM reference taken when the
			 * version request was sent
			 */
			htc_pm_runtime_put(soc->htc_soc);
			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
				"target uses HTT version %d.%d; host uses %d.%d",
				soc->tgt_ver.major, soc->tgt_ver.minor,
				HTT_CURRENT_VERSION_MAJOR,
				HTT_CURRENT_VERSION_MINOR);
			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"*** Incompatible host/target HTT versions!");
			}
			/* abort if the target is incompatible with the host */
			qdf_assert(soc->tgt_ver.major ==
				HTT_CURRENT_VERSION_MAJOR);
			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_WARN,
					"*** Warning: host/target HTT versions"
					" are different, though compatible!");
			}
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint8_t win_sz;
			uint16_t status;
			struct dp_peer *peer;

			/*
			 * Update REO Queue Desc with new values
			 */
			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);

			/*
			 * Window size needs to be incremented by 1
			 * since fw needs to represent a value of 256
			 * using just 8 bits
			 */
			if (peer) {
				status = dp_addba_requestprocess_wifi3(
					(struct cdp_soc_t *)soc->dp_soc,
					peer->mac_addr.raw, peer->vdev->vdev_id,
					0, tid, 0, win_sz + 1, 0xffff);

				/*
				 * If PEER_LOCK_REF_PROTECT enbled dec ref
				 * which is inc by dp_peer_find_by_id
				 */
				dp_peer_unref_del_find_by_id(peer);

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_INFO,
					FL("PeerID %d BAW %d TID %d stat %d"),
					peer_id, win_sz, tid, status);

			} else {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					FL("Peer not found peer id %d"),
					peer_id);
			}
			break;
		}
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		{
			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *peer_mac_addr;
			u_int16_t peer_id;
			u_int16_t hw_peer_id;
			u_int8_t vdev_id;
			bool is_wds;
			u_int16_t ast_hash;
			struct dp_ast_flow_override_info ast_flow_info;

			qdf_mem_set(&ast_flow_info, 0,
					    sizeof(struct dp_ast_flow_override_info));

			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
			hw_peer_id =
			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
			peer_mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
			is_wds =
			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
			ast_hash =
			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
			/*
			 * Update 4 ast_index per peer, ast valid mask
			 * and TID flow valid mask.
			 * AST valid mask is 3 bit field corresponds to
			 * ast_index[3:1]. ast_index 0 is always valid.
			 */
			ast_flow_info.ast_valid_mask =
			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
			ast_flow_info.ast_idx[0] = hw_peer_id;
			ast_flow_info.ast_flow_mask[0] =
			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
			ast_flow_info.ast_idx[1] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
			ast_flow_info.ast_flow_mask[1] =
			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
			ast_flow_info.ast_idx[2] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
			ast_flow_info.ast_flow_mask[2] =
			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
			ast_flow_info.ast_idx[3] =
			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
			ast_flow_info.ast_flow_mask[3] =
			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
			/*
			 * TID valid mask is applicable only
			 * for HI and LOW priority flows.
			 * tid_valid_mas is 8 bit field corresponds
			 * to TID[7:0]
			 */
			ast_flow_info.tid_valid_low_pri_mask =
			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
			ast_flow_info.tid_valid_hi_pri_mask =
			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
				  peer_id, vdev_id);

			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
					       hw_peer_id, vdev_id,
					       peer_mac_addr, ast_hash,
					       is_wds);

			/*
			 * Update ast indexes for flow override support
			 * Applicable only for non wds peers
			 */
			dp_peer_ast_index_flow_queue_map_create(
					    soc->dp_soc, is_wds,
					    peer_id, peer_mac_addr,
					    &ast_flow_info);

			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *mac_addr;
			u_int16_t peer_id;
			u_int8_t vdev_id;
			u_int8_t is_wds;
			u_int32_t free_wds_count;

			peer_id =
			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
			mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
			is_wds =
			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
			free_wds_count =
			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
				  peer_id, vdev_id);

			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
						 vdev_id, mac_addr,
						 is_wds, free_wds_count);
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint8_t win_sz;
			QDF_STATUS status;

			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_DELBA_TID_GET(*msg_word);
			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);

			status = dp_rx_delba_ind_handler(
				soc->dp_soc,
				peer_id, tid, win_sz);

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
				  peer_id, win_sz, tid, status);
			break;
		}
	default:
		break;
	};

	/* Free the indication buffer */
	if (free_buf)
		qdf_nbuf_free(htt_t2h_msg);
}
4476 
4477 /*
4478  * dp_htt_h2t_full() - Send full handler (called from HTC)
4479  * @context:	Opaque context (HTT SOC handle)
4480  * @pkt:	HTC packet
4481  *
4482  * Return: enum htc_send_full_action
4483  */
4484 static enum htc_send_full_action
4485 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
4486 {
4487 	return HTC_SEND_FULL_KEEP;
4488 }
4489 
4490 /*
4491  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
4492  * @context:	Opaque context (HTT SOC handle)
4493  * @nbuf:	nbuf containing T2H message
4494  * @pipe_id:	HIF pipe ID
4495  *
4496  * Return: QDF_STATUS
4497  *
4498  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
4499  * will be used for packet log and other high-priority HTT messages. Proper
4500  * HTC connection to be added later once required FW changes are available
4501  */
4502 static QDF_STATUS
4503 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
4504 {
4505 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
4506 	HTC_PACKET htc_pkt;
4507 
4508 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
4509 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
4510 	htc_pkt.Status = QDF_STATUS_SUCCESS;
4511 	htc_pkt.pPktContext = (void *)nbuf;
4512 	dp_htt_t2h_msg_handler(context, &htc_pkt);
4513 
4514 	return rc;
4515 }
4516 
4517 /*
4518  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
4519  * @htt_soc:	HTT SOC handle
4520  *
4521  * Return: QDF_STATUS
4522  */
4523 static QDF_STATUS
4524 htt_htc_soc_attach(struct htt_soc *soc)
4525 {
4526 	struct htc_service_connect_req connect;
4527 	struct htc_service_connect_resp response;
4528 	QDF_STATUS status;
4529 	struct dp_soc *dpsoc = soc->dp_soc;
4530 
4531 	qdf_mem_zero(&connect, sizeof(connect));
4532 	qdf_mem_zero(&response, sizeof(response));
4533 
4534 	connect.pMetaData = NULL;
4535 	connect.MetaDataLength = 0;
4536 	connect.EpCallbacks.pContext = soc;
4537 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
4538 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
4539 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
4540 
4541 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
4542 	connect.EpCallbacks.EpRecvRefill = NULL;
4543 
4544 	/* N/A, fill is done by HIF */
4545 	connect.EpCallbacks.RecvRefillWaterMark = 1;
4546 
4547 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
4548 	/*
4549 	 * Specify how deep to let a queue get before htc_send_pkt will
4550 	 * call the EpSendFull function due to excessive send queue depth.
4551 	 */
4552 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
4553 
4554 	/* disable flow control for HTT data message service */
4555 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
4556 
4557 	/* connect to control service */
4558 	connect.service_id = HTT_DATA_MSG_SVC;
4559 
4560 	status = htc_connect_service(soc->htc_soc, &connect, &response);
4561 
4562 	if (status != QDF_STATUS_SUCCESS)
4563 		return status;
4564 
4565 	soc->htc_endpoint = response.Endpoint;
4566 
4567 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
4568 
4569 	htt_interface_logging_init(&soc->htt_logger_handle);
4570 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
4571 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
4572 
4573 	return QDF_STATUS_SUCCESS; /* success */
4574 }
4575 
4576 /*
4577  * htt_soc_initialize() - SOC level HTT initialization
4578  * @htt_soc: Opaque htt SOC handle
4579  * @ctrl_psoc: Opaque ctrl SOC handle
4580  * @htc_soc: SOC level HTC handle
4581  * @hal_soc: Opaque HAL SOC handle
4582  * @osdev: QDF device
4583  *
4584  * Return: HTT handle on success; NULL on failure
4585  */
4586 void *
4587 htt_soc_initialize(struct htt_soc *htt_soc,
4588 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
4589 		   HTC_HANDLE htc_soc,
4590 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
4591 {
4592 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
4593 
4594 	soc->osdev = osdev;
4595 	soc->ctrl_psoc = ctrl_psoc;
4596 	soc->htc_soc = htc_soc;
4597 	soc->hal_soc = hal_soc_hdl;
4598 
4599 	if (htt_htc_soc_attach(soc))
4600 		goto fail2;
4601 
4602 	return soc;
4603 
4604 fail2:
4605 	return NULL;
4606 }
4607 
/*
 * htt_soc_htc_dealloc() - free HTC-related resources of the HTT SOC
 * @htt_handle: HTT SOC handle
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	/* tear down event logging first, then both HTC packet pools */
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
4614 
4615 /*
4616  * htt_soc_htc_prealloc() - HTC memory prealloc
4617  * @htt_soc: SOC level HTT handle
4618  *
4619  * Return: QDF_STATUS_SUCCESS on Success or
4620  * QDF_STATUS_E_NOMEM on allocation failure
4621  */
4622 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
4623 {
4624 	int i;
4625 
4626 	soc->htt_htc_pkt_freelist = NULL;
4627 	/* pre-allocate some HTC_PACKET objects */
4628 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
4629 		struct dp_htt_htc_pkt_union *pkt;
4630 		pkt = qdf_mem_malloc(sizeof(*pkt));
4631 		if (!pkt)
4632 			return QDF_STATUS_E_NOMEM;
4633 
4634 		htt_htc_pkt_free(soc, &pkt->u.pkt);
4635 	}
4636 	return QDF_STATUS_SUCCESS;
4637 }
4638 
4639 /*
4640  * htt_soc_detach() - Free SOC level HTT handle
4641  * @htt_hdl: HTT SOC handle
4642  */
4643 void htt_soc_detach(struct htt_soc *htt_hdl)
4644 {
4645 	int i;
4646 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
4647 
4648 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4649 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
4650 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
4651 	}
4652 
4653 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
4654 	qdf_mem_free(htt_handle);
4655 
4656 }
4657 
4658 /**
 * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
4660  * @pdev: DP PDEV handle
4661  * @stats_type_upload_mask: stats type requested by user
4662  * @config_param_0: extra configuration parameters
4663  * @config_param_1: extra configuration parameters
4664  * @config_param_2: extra configuration parameters
4665  * @config_param_3: extra configuration parameters
4666  * @mac_id: mac number
4667  *
4668  * return: QDF STATUS
4669  */
4670 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
4671 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
4672 		uint32_t config_param_1, uint32_t config_param_2,
4673 		uint32_t config_param_3, int cookie_val, int cookie_msb,
4674 		uint8_t mac_id)
4675 {
4676 	struct htt_soc *soc = pdev->soc->htt_handle;
4677 	struct dp_htt_htc_pkt *pkt;
4678 	qdf_nbuf_t msg;
4679 	uint32_t *msg_word;
4680 	uint8_t pdev_mask = 0;
4681 	uint8_t *htt_logger_bufp;
4682 	int mac_for_pdev;
4683 	int target_pdev_id;
4684 	QDF_STATUS status;
4685 
4686 	msg = qdf_nbuf_alloc(
4687 			soc->osdev,
4688 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
4689 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4690 
4691 	if (!msg)
4692 		return QDF_STATUS_E_NOMEM;
4693 
4694 	/*TODO:Add support for SOC stats
4695 	 * Bit 0: SOC Stats
4696 	 * Bit 1: Pdev stats for pdev id 0
4697 	 * Bit 2: Pdev stats for pdev id 1
4698 	 * Bit 3: Pdev stats for pdev id 2
4699 	 */
4700 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4701 	target_pdev_id =
4702 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
4703 
4704 	pdev_mask = 1 << target_pdev_id;
4705 
4706 	/*
4707 	 * Set the length of the message.
4708 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4709 	 * separately during the below call to qdf_nbuf_push_head.
4710 	 * The contribution from the HTC header is added separately inside HTC.
4711 	 */
4712 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
4713 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4714 				"Failed to expand head for HTT_EXT_STATS");
4715 		qdf_nbuf_free(msg);
4716 		return QDF_STATUS_E_FAILURE;
4717 	}
4718 
4719 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4720 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
4721 		"config_param_1 %u\n config_param_2 %u\n"
4722 		"config_param_4 %u\n -------------",
4723 		__func__, __LINE__, cookie_val, config_param_0,
4724 		config_param_1, config_param_2,	config_param_3);
4725 
4726 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
4727 
4728 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4729 	htt_logger_bufp = (uint8_t *)msg_word;
4730 	*msg_word = 0;
4731 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
4732 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
4733 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
4734 
4735 	/* word 1 */
4736 	msg_word++;
4737 	*msg_word = 0;
4738 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
4739 
4740 	/* word 2 */
4741 	msg_word++;
4742 	*msg_word = 0;
4743 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
4744 
4745 	/* word 3 */
4746 	msg_word++;
4747 	*msg_word = 0;
4748 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
4749 
4750 	/* word 4 */
4751 	msg_word++;
4752 	*msg_word = 0;
4753 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
4754 
4755 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
4756 
4757 	/* word 5 */
4758 	msg_word++;
4759 
4760 	/* word 6 */
4761 	msg_word++;
4762 	*msg_word = 0;
4763 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
4764 
4765 	/* word 7 */
4766 	msg_word++;
4767 	*msg_word = 0;
4768 	/*Using last 2 bits for pdev_id */
4769 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
4770 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
4771 
4772 	pkt = htt_htc_pkt_alloc(soc);
4773 	if (!pkt) {
4774 		qdf_nbuf_free(msg);
4775 		return QDF_STATUS_E_NOMEM;
4776 	}
4777 
4778 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4779 
4780 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4781 			dp_htt_h2t_send_complete_free_netbuf,
4782 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4783 			soc->htc_endpoint,
4784 			/* tag for FW response msg not guaranteed */
4785 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4786 
4787 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4788 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
4789 				     htt_logger_bufp);
4790 
4791 	if (status != QDF_STATUS_SUCCESS) {
4792 		qdf_nbuf_free(msg);
4793 		htt_htc_pkt_free(soc, pkt);
4794 	}
4795 
4796 	return status;
4797 }
4798 
4799 /**
4800  * dp_h2t_3tuple_config_send(): function to contruct 3 tuple configuration
4801  * HTT message to pass to FW
4802  * @pdev: DP PDEV handle
4803  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
4804  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
4805  *
4806  * tuple_mask[1:0]:
4807  *   00 - Do not report 3 tuple hash value
4808  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
4809  *   01 - Report 3 tuple hash value in flow_id_toeplitz
4810  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
4811  *
4812  * return: QDF STATUS
4813  */
4814 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
4815 				     uint32_t tuple_mask, uint8_t mac_id)
4816 {
4817 	struct htt_soc *soc = pdev->soc->htt_handle;
4818 	struct dp_htt_htc_pkt *pkt;
4819 	qdf_nbuf_t msg;
4820 	uint32_t *msg_word;
4821 	uint8_t *htt_logger_bufp;
4822 	int mac_for_pdev;
4823 	int target_pdev_id;
4824 
4825 	msg = qdf_nbuf_alloc(
4826 			soc->osdev,
4827 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
4828 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4829 
4830 	if (!msg)
4831 		return QDF_STATUS_E_NOMEM;
4832 
4833 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4834 	target_pdev_id =
4835 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
4836 
4837 	/*
4838 	 * Set the length of the message.
4839 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4840 	 * separately during the below call to qdf_nbuf_push_head.
4841 	 * The contribution from the HTC header is added separately inside HTC.
4842 	 */
4843 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
4844 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4845 			  "Failed to expand head for HTT_3TUPLE_CONFIG");
4846 		qdf_nbuf_free(msg);
4847 		return QDF_STATUS_E_FAILURE;
4848 	}
4849 
4850 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4851 		  "config_param_sent %s:%d 0x%x for target_pdev %d\n -------------",
4852 		  __func__, __LINE__, tuple_mask, target_pdev_id);
4853 
4854 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
4855 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4856 	htt_logger_bufp = (uint8_t *)msg_word;
4857 
4858 	*msg_word = 0;
4859 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
4860 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
4861 
4862 	msg_word++;
4863 	*msg_word = 0;
4864 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4865 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
4866 
4867 	pkt = htt_htc_pkt_alloc(soc);
4868 	if (!pkt) {
4869 		qdf_nbuf_free(msg);
4870 		return QDF_STATUS_E_NOMEM;
4871 	}
4872 
4873 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4874 
4875 	SET_HTC_PACKET_INFO_TX(
4876 			&pkt->htc_pkt,
4877 			dp_htt_h2t_send_complete_free_netbuf,
4878 			qdf_nbuf_data(msg),
4879 			qdf_nbuf_len(msg),
4880 			soc->htc_endpoint,
4881 			/* tag for no FW response msg */
4882 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4883 
4884 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4885 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
4886 			    htt_logger_bufp);
4887 
4888 	return QDF_STATUS_SUCCESS;
4889 }
4890 
/* This guard will be removed once a proper HTT header defines
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the htt.h file
 */
4894 #if defined(WDI_EVENT_ENABLE)
4895 /**
4896  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
4897  * @pdev: DP PDEV handle
4898  * @stats_type_upload_mask: stats type requested by user
4899  * @mac_id: Mac id number
4900  *
4901  * return: QDF STATUS
4902  */
4903 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
4904 		uint32_t stats_type_upload_mask, uint8_t mac_id)
4905 {
4906 	struct htt_soc *soc = pdev->soc->htt_handle;
4907 	struct dp_htt_htc_pkt *pkt;
4908 	qdf_nbuf_t msg;
4909 	uint32_t *msg_word;
4910 	uint8_t pdev_mask;
4911 	QDF_STATUS status;
4912 
4913 	msg = qdf_nbuf_alloc(
4914 			soc->osdev,
4915 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
4916 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
4917 
4918 	if (!msg) {
4919 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4920 		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
4921 		qdf_assert(0);
4922 		return QDF_STATUS_E_NOMEM;
4923 	}
4924 
4925 	/*TODO:Add support for SOC stats
4926 	 * Bit 0: SOC Stats
4927 	 * Bit 1: Pdev stats for pdev id 0
4928 	 * Bit 2: Pdev stats for pdev id 1
4929 	 * Bit 3: Pdev stats for pdev id 2
4930 	 */
4931 	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
4932 								mac_id);
4933 
4934 	/*
4935 	 * Set the length of the message.
4936 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4937 	 * separately during the below call to qdf_nbuf_push_head.
4938 	 * The contribution from the HTC header is added separately inside HTC.
4939 	 */
4940 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
4941 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4942 				"Failed to expand head for HTT_CFG_STATS");
4943 		qdf_nbuf_free(msg);
4944 		return QDF_STATUS_E_FAILURE;
4945 	}
4946 
4947 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
4948 
4949 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4950 	*msg_word = 0;
4951 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
4952 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
4953 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
4954 			stats_type_upload_mask);
4955 
4956 	pkt = htt_htc_pkt_alloc(soc);
4957 	if (!pkt) {
4958 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4959 				"Fail to allocate dp_htt_htc_pkt buffer");
4960 		qdf_assert(0);
4961 		qdf_nbuf_free(msg);
4962 		return QDF_STATUS_E_NOMEM;
4963 	}
4964 
4965 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4966 
4967 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4968 			dp_htt_h2t_send_complete_free_netbuf,
4969 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4970 			soc->htc_endpoint,
4971 			/* tag for no FW response msg */
4972 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4973 
4974 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4975 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
4976 				     (uint8_t *)msg_word);
4977 
4978 	if (status != QDF_STATUS_SUCCESS) {
4979 		qdf_nbuf_free(msg);
4980 		htt_htc_pkt_free(soc, pkt);
4981 	}
4982 
4983 	return status;
4984 }
4985 #endif
4986 
4987 void
4988 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
4989 			     uint32_t *tag_buf)
4990 {
4991 	switch (tag_type) {
4992 	case HTT_STATS_PEER_DETAILS_TAG:
4993 	{
4994 		htt_peer_details_tlv *dp_stats_buf =
4995 			(htt_peer_details_tlv *)tag_buf;
4996 
4997 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
4998 	}
4999 	break;
5000 	case HTT_STATS_PEER_STATS_CMN_TAG:
5001 	{
5002 		htt_peer_stats_cmn_tlv *dp_stats_buf =
5003 			(htt_peer_stats_cmn_tlv *)tag_buf;
5004 
5005 		struct dp_peer *peer = dp_peer_find_by_id(pdev->soc,
5006 						pdev->fw_stats_peer_id);
5007 
5008 		if (peer && !peer->bss_peer) {
5009 			peer->stats.tx.inactive_time =
5010 				dp_stats_buf->inactive_time;
5011 			qdf_event_set(&pdev->fw_peer_stats_event);
5012 		}
5013 		if (peer)
5014 			dp_peer_unref_del_find_by_id(peer);
5015 	}
5016 	break;
5017 	default:
5018 		qdf_err("Invalid tag_type");
5019 	}
5020 }
5021 
5022 /**
5023  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
5024  * @pdev: DP pdev handle
5025  * @fse_setup_info: FST setup parameters
5026  *
5027  * Return: Success when HTT message is sent, error on failure
5028  */
5029 QDF_STATUS
5030 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
5031 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
5032 {
5033 	struct htt_soc *soc = pdev->soc->htt_handle;
5034 	struct dp_htt_htc_pkt *pkt;
5035 	qdf_nbuf_t msg;
5036 	u_int32_t *msg_word;
5037 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
5038 	uint8_t *htt_logger_bufp;
5039 	u_int32_t *key;
5040 	QDF_STATUS status;
5041 
5042 	msg = qdf_nbuf_alloc(
5043 		soc->osdev,
5044 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
5045 		/* reserve room for the HTC header */
5046 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5047 
5048 	if (!msg)
5049 		return QDF_STATUS_E_NOMEM;
5050 
5051 	/*
5052 	 * Set the length of the message.
5053 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5054 	 * separately during the below call to qdf_nbuf_push_head.
5055 	 * The contribution from the HTC header is added separately inside HTC.
5056 	 */
5057 	if (!qdf_nbuf_put_tail(msg,
5058 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
5059 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
5060 		return QDF_STATUS_E_FAILURE;
5061 	}
5062 
5063 	/* fill in the message contents */
5064 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5065 
5066 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
5067 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5068 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5069 	htt_logger_bufp = (uint8_t *)msg_word;
5070 
5071 	*msg_word = 0;
5072 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
5073 
5074 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
5075 
5076 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
5077 
5078 	msg_word++;
5079 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
5080 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
5081 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
5082 					     fse_setup_info->ip_da_sa_prefix);
5083 
5084 	msg_word++;
5085 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
5086 					  fse_setup_info->base_addr_lo);
5087 	msg_word++;
5088 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
5089 					  fse_setup_info->base_addr_hi);
5090 
5091 	key = (u_int32_t *)fse_setup_info->hash_key;
5092 	fse_setup->toeplitz31_0 = *key++;
5093 	fse_setup->toeplitz63_32 = *key++;
5094 	fse_setup->toeplitz95_64 = *key++;
5095 	fse_setup->toeplitz127_96 = *key++;
5096 	fse_setup->toeplitz159_128 = *key++;
5097 	fse_setup->toeplitz191_160 = *key++;
5098 	fse_setup->toeplitz223_192 = *key++;
5099 	fse_setup->toeplitz255_224 = *key++;
5100 	fse_setup->toeplitz287_256 = *key++;
5101 	fse_setup->toeplitz314_288 = *key;
5102 
5103 	msg_word++;
5104 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
5105 	msg_word++;
5106 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
5107 	msg_word++;
5108 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
5109 	msg_word++;
5110 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
5111 	msg_word++;
5112 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
5113 	msg_word++;
5114 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
5115 	msg_word++;
5116 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
5117 	msg_word++;
5118 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
5119 	msg_word++;
5120 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
5121 	msg_word++;
5122 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
5123 					  fse_setup->toeplitz314_288);
5124 
5125 	pkt = htt_htc_pkt_alloc(soc);
5126 	if (!pkt) {
5127 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
5128 		qdf_assert(0);
5129 		qdf_nbuf_free(msg);
5130 		return QDF_STATUS_E_RESOURCES; /* failure */
5131 	}
5132 
5133 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5134 
5135 	SET_HTC_PACKET_INFO_TX(
5136 		&pkt->htc_pkt,
5137 		dp_htt_h2t_send_complete_free_netbuf,
5138 		qdf_nbuf_data(msg),
5139 		qdf_nbuf_len(msg),
5140 		soc->htc_endpoint,
5141 		/* tag for no FW response msg */
5142 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
5143 
5144 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5145 
5146 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
5147 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
5148 				     htt_logger_bufp);
5149 
5150 	if (status == QDF_STATUS_SUCCESS) {
5151 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
5152 			fse_setup_info->pdev_id);
5153 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
5154 				   (void *)fse_setup_info->hash_key,
5155 				   fse_setup_info->hash_key_len);
5156 	} else {
5157 		qdf_nbuf_free(msg);
5158 		htt_htc_pkt_free(soc, pkt);
5159 	}
5160 
5161 	return status;
5162 }
5163 
/**
 * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
 * add/del a flow in HW
 * @pdev: DP pdev handle
 * @fse_op_info: Flow entry parameters; op_code selects cache-invalidate
 *               (per-entry or full), FSE disable, or FSE enable
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	/* zero the whole payload so all unset fields go out as 0 */
	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);

	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;

	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
	msg_word++;
	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
		/* per-entry invalidation: the full 5-tuple (src/dst IP in
		 * network byte order, ports, L4 protocol) identifies the
		 * cached entry to drop
		 */
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(
		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
		msg_word++;
		HTT_RX_FSE_SOURCEPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.src_port);
		HTT_RX_FSE_DESTPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.dest_port);
		msg_word++;
		HTT_RX_FSE_L4_PROTO_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
		/* drop the entire FSE cache */
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
	}

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(msg),
		qdf_nbuf_len(msg),
		soc->htc_endpoint,
		/* tag for no FW response msg */
		HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
				     htt_logger_bufp);

	/* on send failure, reclaim both the nbuf and the HTC packet */
	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
			fse_op_info->pdev_id);
	} else {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
5312 
5313 /**
5314  * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
5315  * @pdev: DP pdev handle
5316  * @fse_op_info: Flow entry parameters
5317  *
5318  * Return: Success when HTT message is sent, error on failure
5319  */
5320 QDF_STATUS
5321 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
5322 		      struct dp_htt_rx_fisa_cfg *fisa_config)
5323 {
5324 	struct htt_soc *soc = pdev->soc->htt_handle;
5325 	struct dp_htt_htc_pkt *pkt;
5326 	qdf_nbuf_t msg;
5327 	u_int32_t *msg_word;
5328 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
5329 	uint8_t *htt_logger_bufp;
5330 	uint32_t len;
5331 	QDF_STATUS status;
5332 
5333 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
5334 
5335 	msg = qdf_nbuf_alloc(soc->osdev,
5336 			     len,
5337 			     /* reserve room for the HTC header */
5338 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
5339 			     4,
5340 			     TRUE);
5341 	if (!msg)
5342 		return QDF_STATUS_E_NOMEM;
5343 
5344 	/*
5345 	 * Set the length of the message.
5346 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5347 	 * separately during the below call to qdf_nbuf_push_head.
5348 	 * The contribution from the HTC header is added separately inside HTC.
5349 	 */
5350 	if (!qdf_nbuf_put_tail(msg,
5351 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
5352 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
5353 		qdf_nbuf_free(msg);
5354 		return QDF_STATUS_E_FAILURE;
5355 	}
5356 
5357 	/* fill in the message contents */
5358 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5359 
5360 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
5361 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5362 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5363 	htt_logger_bufp = (uint8_t *)msg_word;
5364 
5365 	*msg_word = 0;
5366 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
5367 
5368 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
5369 
5370 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);
5371 
5372 	msg_word++;
5373 	HTT_RX_FISA_CONFIG_FISA_ENABLE_SET(*msg_word, 1);
5374 	HTT_RX_FISA_CONFIG_IPSEC_SKIP_SEARCH_SET(*msg_word, 1);
5375 	HTT_RX_FISA_CONFIG_NON_TCP_SKIP_SEARCH_SET(*msg_word, 0);
5376 	HTT_RX_FISA_CONFIG_ADD_IPV4_FIXED_HDR_LEN_SET(*msg_word, 0);
5377 	HTT_RX_FISA_CONFIG_ADD_IPV6_FIXED_HDR_LEN_SET(*msg_word, 0);
5378 	HTT_RX_FISA_CONFIG_ADD_TCP_FIXED_HDR_LEN_SET(*msg_word, 0);
5379 	HTT_RX_FISA_CONFIG_ADD_UDP_HDR_LEN_SET(*msg_word, 0);
5380 	HTT_RX_FISA_CONFIG_CHKSUM_CUM_IP_LEN_EN_SET(*msg_word, 1);
5381 	HTT_RX_FISA_CONFIG_DISABLE_TID_CHECK_SET(*msg_word, 1);
5382 	HTT_RX_FISA_CONFIG_DISABLE_TA_CHECK_SET(*msg_word, 1);
5383 	HTT_RX_FISA_CONFIG_DISABLE_QOS_CHECK_SET(*msg_word, 1);
5384 	HTT_RX_FISA_CONFIG_DISABLE_RAW_CHECK_SET(*msg_word, 1);
5385 	HTT_RX_FISA_CONFIG_DISABLE_DECRYPT_ERR_CHECK_SET(*msg_word, 1);
5386 	HTT_RX_FISA_CONFIG_DISABLE_MSDU_DROP_CHECK_SET(*msg_word, 1);
5387 	HTT_RX_FISA_CONFIG_FISA_AGGR_LIMIT_SET(*msg_word, 0xf);
5388 
5389 	msg_word++;
5390 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
5391 
5392 	pkt = htt_htc_pkt_alloc(soc);
5393 	if (!pkt) {
5394 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
5395 		qdf_assert(0);
5396 		qdf_nbuf_free(msg);
5397 		return QDF_STATUS_E_RESOURCES; /* failure */
5398 	}
5399 
5400 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5401 
5402 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5403 			       dp_htt_h2t_send_complete_free_netbuf,
5404 			       qdf_nbuf_data(msg),
5405 			       qdf_nbuf_len(msg),
5406 			       soc->htc_endpoint,
5407 			       /* tag for no FW response msg */
5408 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
5409 
5410 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5411 
5412 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
5413 				     htt_logger_bufp);
5414 
5415 	if (status == QDF_STATUS_SUCCESS) {
5416 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
5417 			fisa_config->pdev_id);
5418 	} else {
5419 		qdf_nbuf_free(msg);
5420 		htt_htc_pkt_free(soc, pkt);
5421 	}
5422 
5423 	return status;
5424 }
5425