xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision bea437e2293c3d4fb1b5704fcf633aedac996962)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_rx_mon.h"
26 #include "htt_stats.h"
27 #include "htt_ppdu_stats.h"
28 #include "dp_htt.h"
29 #include "dp_rx.h"
30 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
31 #include "cdp_txrx_cmn_struct.h"
32 
33 #ifdef FEATURE_PERPKT_INFO
34 #include "dp_ratetable.h"
35 #endif
36 
37 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
38 
39 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
40 
41 #define HTT_MSG_BUF_SIZE(msg_bytes) \
42 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
43 
44 #define HTT_PID_BIT_MASK 0x3
45 
46 #define DP_EXT_MSG_LENGTH 2048
47 
48 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
49 
50 #define HTT_SHIFT_UPPER_TIMESTAMP 32
51 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
52 
53 /*
54  * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
55  * bitmap for sniffer mode
56  * @bitmap: received bitmap
57  *
58  * Return: expected bitmap value, returns zero if doesn't match with
59  * either 64-bit Tx window or 256-bit window tlv bitmap
60  */
61 int
62 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
63 {
64 	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
65 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
66 	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
67 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
68 
69 	return 0;
70 }
71 
72 #ifdef FEATURE_PERPKT_INFO
73 /*
74  * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
75  * @peer: Datapath peer handle
76  * @ppdu: PPDU Descriptor
77  *
78  * Return: None
79  *
80  * on Tx data frame, we may get delayed ba set
81  * in htt_ppdu_stats_user_common_tlv. which mean we get Block Ack(BA) after we
82  * request Block Ack Request(BAR). Successful msdu is received only after Block
83  * Ack. To populate peer stats we need successful msdu(data frame).
84  * So we hold the Tx data stats on delayed_ba for stats update.
85  */
86 static inline void
87 dp_peer_copy_delay_stats(struct dp_peer *peer,
88 			 struct cdp_tx_completion_ppdu_user *ppdu)
89 {
90 	struct dp_pdev *pdev;
91 	struct dp_vdev *vdev;
92 
93 	if (peer->last_delayed_ba) {
94 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
95 			  "BA not yet recv for prev delayed ppdu[%d]\n",
96 			  peer->last_delayed_ba_ppduid);
97 		vdev = peer->vdev;
98 		if (vdev) {
99 			pdev = vdev->pdev;
100 			pdev->stats.cdp_delayed_ba_not_recev++;
101 		}
102 	}
103 
104 	peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
105 	peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
106 	peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
107 	peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
108 	peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
109 	peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
110 	peer->delayed_ba_ppdu_stats.preamble = ppdu->preamble;
111 	peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
112 	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
113 	peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
114 	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
115 	peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = ppdu->mpdu_tried_ucast;
116 	peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = ppdu->mpdu_tried_mcast;
117 	peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
118 	peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
119 	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
120 
121 	peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
122 	peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
123 	peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
124 
125 	peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
126 	peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
127 
128 	peer->last_delayed_ba = true;
129 }
130 
131 /*
132  * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
133  * @peer: Datapath peer handle
134  * @ppdu: PPDU Descriptor
135  *
136  * Return: None
137  *
138  * For Tx BAR, PPDU stats TLV include Block Ack info. PPDU info
139  * from Tx BAR frame not required to populate peer stats.
140  * But we need successful MPDU and MSDU to update previous
141  * transmitted Tx data frame. Overwrite ppdu stats with the previous
142  * stored ppdu stats.
143  */
144 static void
145 dp_peer_copy_stats_to_bar(struct dp_peer *peer,
146 			  struct cdp_tx_completion_ppdu_user *ppdu)
147 {
148 	ppdu->ltf_size = peer->delayed_ba_ppdu_stats.ltf_size;
149 	ppdu->stbc = peer->delayed_ba_ppdu_stats.stbc;
150 	ppdu->he_re = peer->delayed_ba_ppdu_stats.he_re;
151 	ppdu->txbf = peer->delayed_ba_ppdu_stats.txbf;
152 	ppdu->bw = peer->delayed_ba_ppdu_stats.bw;
153 	ppdu->nss = peer->delayed_ba_ppdu_stats.nss;
154 	ppdu->preamble = peer->delayed_ba_ppdu_stats.preamble;
155 	ppdu->gi = peer->delayed_ba_ppdu_stats.gi;
156 	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
157 	ppdu->ldpc = peer->delayed_ba_ppdu_stats.ldpc;
158 	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
159 	ppdu->mpdu_tried_ucast = peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
160 	ppdu->mpdu_tried_mcast = peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
161 	ppdu->frame_ctrl = peer->delayed_ba_ppdu_stats.frame_ctrl;
162 	ppdu->qos_ctrl = peer->delayed_ba_ppdu_stats.qos_ctrl;
163 	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
164 
165 	ppdu->ru_start = peer->delayed_ba_ppdu_stats.ru_start;
166 	ppdu->ru_tones = peer->delayed_ba_ppdu_stats.ru_tones;
167 	ppdu->is_mcast = peer->delayed_ba_ppdu_stats.is_mcast;
168 
169 	ppdu->user_pos = peer->delayed_ba_ppdu_stats.user_pos;
170 	ppdu->mu_group_id = peer->delayed_ba_ppdu_stats.mu_group_id;
171 
172 	peer->last_delayed_ba = false;
173 }
174 
175 /*
176  * dp_tx_rate_stats_update() - Update rate per-peer statistics
177  * @peer: Datapath peer handle
178  * @ppdu: PPDU Descriptor
179  *
180  * Return: None
181  */
182 static void
183 dp_tx_rate_stats_update(struct dp_peer *peer,
184 			struct cdp_tx_completion_ppdu_user *ppdu)
185 {
186 	uint32_t ratekbps = 0;
187 	uint64_t ppdu_tx_rate = 0;
188 	uint32_t rix;
189 	uint16_t ratecode = 0;
190 
191 	if (!peer || !ppdu)
192 		return;
193 
194 	ratekbps = dp_getrateindex(ppdu->gi,
195 				   ppdu->mcs,
196 				   ppdu->nss,
197 				   ppdu->preamble,
198 				   ppdu->bw,
199 				   &rix,
200 				   &ratecode);
201 
202 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
203 
204 	if (!ratekbps)
205 		return;
206 
207 	/* Calculate goodput in non-training period
208 	 * In training period, don't do anything as
209 	 * pending pkt is send as goodput.
210 	 */
211 	if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
212 		ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
213 				(CDP_PERCENT_MACRO - ppdu->current_rate_per));
214 	}
215 	ppdu->rix = rix;
216 	ppdu->tx_ratekbps = ratekbps;
217 	ppdu->tx_ratecode = ratecode;
218 	peer->stats.tx.avg_tx_rate =
219 		dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
220 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
221 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
222 
223 	if (peer->vdev) {
224 		/*
225 		 * In STA mode:
226 		 *	We get ucast stats as BSS peer stats.
227 		 *
228 		 * In AP mode:
229 		 *	We get mcast stats as BSS peer stats.
230 		 *	We get ucast stats as assoc peer stats.
231 		 */
232 		if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
233 			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
234 			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
235 		} else {
236 			peer->vdev->stats.tx.last_tx_rate = ratekbps;
237 			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
238 		}
239 	}
240 }
241 
242 /*
243  * dp_tx_stats_update() - Update per-peer statistics
244  * @pdev: Datapath pdev handle
245  * @peer: Datapath peer handle
246  * @ppdu: PPDU Descriptor
247  * @ack_rssi: RSSI of last ack received
248  *
249  * Return: None
250  */
251 static void
252 dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
253 		   struct cdp_tx_completion_ppdu_user *ppdu,
254 		   uint32_t ack_rssi)
255 {
256 	uint8_t preamble, mcs;
257 	uint16_t num_msdu;
258 	uint16_t num_mpdu;
259 	uint16_t mpdu_tried;
260 	uint16_t mpdu_failed;
261 
262 	preamble = ppdu->preamble;
263 	mcs = ppdu->mcs;
264 	num_msdu = ppdu->num_msdu;
265 	num_mpdu = ppdu->mpdu_success;
266 	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
267 	mpdu_failed = mpdu_tried - num_mpdu;
268 
269 	/* If the peer statistics are already processed as part of
270 	 * per-MSDU completion handler, do not process these again in per-PPDU
271 	 * indications */
272 	if (pdev->soc->process_tx_status)
273 		return;
274 
275 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
276 		/*
277 		 * All failed mpdu will be retried, so incrementing
278 		 * retries mpdu based on mpdu failed. Even for
279 		 * ack failure i.e for long retries we get
280 		 * mpdu failed equal mpdu tried.
281 		 */
282 		DP_STATS_INC(peer, tx.retries, mpdu_failed);
283 		DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
284 		return;
285 	}
286 
287 	if (ppdu->is_ppdu_cookie_valid)
288 		DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1);
289 
290 	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
291 	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
292 		if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
293 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
294 				  "mu_group_id out of bound!!\n");
295 		else
296 			DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
297 				     (ppdu->user_pos + 1));
298 	}
299 
300 	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
301 	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
302 		DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
303 		DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
304 		switch (ppdu->ru_tones) {
305 		case RU_26:
306 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu,
307 				     num_msdu);
308 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu,
309 				     num_mpdu);
310 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried,
311 				     mpdu_tried);
312 		break;
313 		case RU_52:
314 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu,
315 				     num_msdu);
316 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu,
317 				     num_mpdu);
318 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried,
319 				     mpdu_tried);
320 		break;
321 		case RU_106:
322 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu,
323 				     num_msdu);
324 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu,
325 				     num_mpdu);
326 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried,
327 				     mpdu_tried);
328 		break;
329 		case RU_242:
330 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu,
331 				     num_msdu);
332 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu,
333 				     num_mpdu);
334 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried,
335 				     mpdu_tried);
336 		break;
337 		case RU_484:
338 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu,
339 				     num_msdu);
340 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu,
341 				     num_mpdu);
342 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried,
343 				     mpdu_tried);
344 		break;
345 		case RU_996:
346 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu,
347 				     num_msdu);
348 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu,
349 				     num_mpdu);
350 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried,
351 				     mpdu_tried);
352 		break;
353 		}
354 	}
355 
356 	/*
357 	 * All failed mpdu will be retried, so incrementing
358 	 * retries mpdu based on mpdu failed. Even for
359 	 * ack failure i.e for long retries we get
360 	 * mpdu failed equal mpdu tried.
361 	 */
362 	DP_STATS_INC(peer, tx.retries, mpdu_failed);
363 	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
364 
365 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
366 		     num_msdu);
367 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
368 		     num_mpdu);
369 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
370 		     mpdu_tried);
371 
372 	DP_STATS_INC_PKT(peer, tx.comp_pkt,
373 			num_msdu, (ppdu->success_bytes +
374 				ppdu->retry_bytes + ppdu->failed_bytes));
375 	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
376 	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
377 	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
378 	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
379 	if (ppdu->tid < CDP_DATA_TID_MAX)
380 		DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
381 			     num_msdu);
382 	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
383 	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
384 	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
385 		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);
386 
387 	DP_STATS_INCC(peer,
388 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
389 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
390 	DP_STATS_INCC(peer,
391 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
392 			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
393 	DP_STATS_INCC(peer,
394 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
395 			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
396 	DP_STATS_INCC(peer,
397 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
398 			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
399 	DP_STATS_INCC(peer,
400 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
401 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
402 	DP_STATS_INCC(peer,
403 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
404 			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
405 	DP_STATS_INCC(peer,
406 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
407 			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
408 	DP_STATS_INCC(peer,
409 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
410 			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
411 	DP_STATS_INCC(peer,
412 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
413 			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
414 	DP_STATS_INCC(peer,
415 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
416 			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
417 	DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
418 	DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
419 
420 	dp_peer_stats_notify(pdev, peer);
421 
422 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
423 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
424 			     &peer->stats, ppdu->peer_id,
425 			     UPDATE_PEER_STATS, pdev->pdev_id);
426 #endif
427 }
428 #endif
429 
430 #ifdef WLAN_TX_PKT_CAPTURE_ENH
431 #include "dp_tx_capture.h"
432 #else
/*
 * dp_process_ppdu_stats_update_failed_bitmap() - no-op stub used when
 * WLAN_TX_PKT_CAPTURE_ENH is not defined (the real implementation comes
 * from dp_tx_capture.h in the #ifdef branch above).
 * @pdev: Datapath pdev handle (unused)
 * @data: bitmap data pointer (unused)
 * @ppdu_id: PPDU id (unused)
 * @size: bitmap size (unused)
 *
 * Return: None
 */
static inline void
dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
					   void *data,
					   uint32_t ppdu_id,
					   uint32_t size)
{
}
440 #endif
441 
442 /*
443  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
444  * @htt_soc:	HTT SOC handle
445  *
446  * Return: Pointer to htc packet buffer
447  */
448 static struct dp_htt_htc_pkt *
449 htt_htc_pkt_alloc(struct htt_soc *soc)
450 {
451 	struct dp_htt_htc_pkt_union *pkt = NULL;
452 
453 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
454 	if (soc->htt_htc_pkt_freelist) {
455 		pkt = soc->htt_htc_pkt_freelist;
456 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
457 	}
458 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
459 
460 	if (!pkt)
461 		pkt = qdf_mem_malloc(sizeof(*pkt));
462 	return &pkt->u.pkt; /* not actually a dereference */
463 }
464 
465 /*
466  * htt_htc_pkt_free() - Free HTC packet buffer
467  * @htt_soc:	HTT SOC handle
468  */
469 static void
470 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
471 {
472 	struct dp_htt_htc_pkt_union *u_pkt =
473 		(struct dp_htt_htc_pkt_union *)pkt;
474 
475 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
476 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
477 	soc->htt_htc_pkt_freelist = u_pkt;
478 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
479 }
480 
481 /*
482  * htt_htc_pkt_pool_free() - Free HTC packet pool
483  * @htt_soc:	HTT SOC handle
484  */
485 static void
486 htt_htc_pkt_pool_free(struct htt_soc *soc)
487 {
488 	struct dp_htt_htc_pkt_union *pkt, *next;
489 	pkt = soc->htt_htc_pkt_freelist;
490 	while (pkt) {
491 		next = pkt->u.next;
492 		qdf_mem_free(pkt);
493 		pkt = next;
494 	}
495 	soc->htt_htc_pkt_freelist = NULL;
496 }
497 
/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no. of pkts in list
 *
 * Walks the misclist under the tx mutex; every node past the first
 * @level entries is freed, including an unmap+free of the network
 * buffer attached to it.
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			/* pkt is dead; setting it NULL makes prev NULL on the
			 * next iteration, so the list is terminated only once,
			 * at the last kept node
			 */
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
530 
531 /*
532  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
533  * @htt_soc:	HTT SOC handle
534  * @dp_htt_htc_pkt: pkt to be added to list
535  */
536 static void
537 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
538 {
539 	struct dp_htt_htc_pkt_union *u_pkt =
540 				(struct dp_htt_htc_pkt_union *)pkt;
541 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
542 							pkt->htc_pkt.Endpoint)
543 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
544 
545 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
546 	if (soc->htt_htc_pkt_misclist) {
547 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
548 		soc->htt_htc_pkt_misclist = u_pkt;
549 	} else {
550 		soc->htt_htc_pkt_misclist = u_pkt;
551 	}
552 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
553 
554 	/* only ce pipe size + tx_queue_depth could possibly be in use
555 	 * free older packets in the misclist
556 	 */
557 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
558 }
559 
560 /**
561  * DP_HTT_SEND_HTC_PKT() - Send htt packet from host
562  * @soc : HTT SOC handle
563  * @pkt: pkt to be send
564  * @cmd : command to be recorded in dp htt logger
565  * @buf : Pointer to buffer needs to be recored for above cmd
566  *
567  * Return: None
568  */
569 static inline void DP_HTT_SEND_HTC_PKT(struct htt_soc *soc,
570 				       struct dp_htt_htc_pkt *pkt, uint8_t cmd,
571 				       uint8_t *buf)
572 {
573 	htt_command_record(soc->htt_logger_handle, cmd, buf);
574 	if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==
575 	    QDF_STATUS_SUCCESS)
576 		htt_htc_misc_pkt_list_add(soc, pkt);
577 }
578 
579 /*
580  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
581  * @htt_soc:	HTT SOC handle
582  */
583 static void
584 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
585 {
586 	struct dp_htt_htc_pkt_union *pkt, *next;
587 	qdf_nbuf_t netbuf;
588 
589 	pkt = soc->htt_htc_pkt_misclist;
590 
591 	while (pkt) {
592 		next = pkt->u.next;
593 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
594 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
595 
596 		soc->stats.htc_pkt_free++;
597 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
598 			 "%s: Pkt free count %d",
599 			 __func__, soc->stats.htc_pkt_free);
600 
601 		qdf_nbuf_free(netbuf);
602 		qdf_mem_free(pkt);
603 		pkt = next;
604 	}
605 	soc->htt_htc_pkt_misclist = NULL;
606 }
607 
/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
 * @tgt_mac_addr: Target MAC (6 bytes embedded in two 32-bit message words)
 * @buffer: Output buffer, must hold at least 6 bytes (used only on
 *          big-endian hosts)
 *
 * Return: pointer to the deswizzled 6-byte MAC address; this is @buffer on
 * big-endian hosts, or @tgt_mac_addr itself otherwise.
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness.
	 * To make u_int32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each u_int32_t element of the
	 * message.
	 * For byte-array message fields like the MAC address, this
	 * upload swizzling puts the bytes in the wrong order, and needs
	 * to be undone.
	 */
	/* word 0 held MAC bytes 0-3, reversed by the swizzle */
	buffer[0] = tgt_mac_addr[3];
	buffer[1] = tgt_mac_addr[2];
	buffer[2] = tgt_mac_addr[1];
	buffer[3] = tgt_mac_addr[0];
	/* word 1 held MAC bytes 4-5 in its low half; after the per-word
	 * reversal they sit at offsets 7 and 6
	 */
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
641 
642 /*
643  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
644  * @soc:	SOC handle
645  * @status:	Completion status
646  * @netbuf:	HTT buffer
647  */
648 static void
649 dp_htt_h2t_send_complete_free_netbuf(
650 	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
651 {
652 	qdf_nbuf_free(netbuf);
653 }
654 
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context: Opaque context (HTT SOC handle)
 * @htc_pkt: HTC packet
 *
 * Invoked by HTC when a host-to-target message has been sent. Runs the
 * optional part-2 completion callback stashed in pPktContext (typically
 * dp_htt_h2t_send_complete_free_netbuf, which frees the nbuf), then
 * recycles the dp_htt_htc_pkt wrapper back to the freelist.
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*send_complete_part2)(
		void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	/* per-packet completion callback stored at send time; may be NULL */
	send_complete_part2 = htc_pkt->pPktContext;

	/* recover the wrapper that embeds this HTC_PACKET */
	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2) {
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}
686 
687 /*
688  * htt_h2t_ver_req_msg() - Send HTT version request message to target
689  * @htt_soc:	HTT SOC handle
690  *
691  * Return: 0 on success; error code on failure
692  */
693 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
694 {
695 	struct dp_htt_htc_pkt *pkt;
696 	qdf_nbuf_t msg;
697 	uint32_t *msg_word;
698 
699 	msg = qdf_nbuf_alloc(
700 		soc->osdev,
701 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
702 		/* reserve room for the HTC header */
703 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
704 	if (!msg)
705 		return QDF_STATUS_E_NOMEM;
706 
707 	/*
708 	 * Set the length of the message.
709 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
710 	 * separately during the below call to qdf_nbuf_push_head.
711 	 * The contribution from the HTC header is added separately inside HTC.
712 	 */
713 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
714 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
715 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
716 			__func__);
717 		return QDF_STATUS_E_FAILURE;
718 	}
719 
720 	/* fill in the message contents */
721 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
722 
723 	/* rewind beyond alignment pad to get to the HTC header reserved area */
724 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
725 
726 	*msg_word = 0;
727 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
728 
729 	pkt = htt_htc_pkt_alloc(soc);
730 	if (!pkt) {
731 		qdf_nbuf_free(msg);
732 		return QDF_STATUS_E_FAILURE;
733 	}
734 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
735 
736 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
737 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
738 		qdf_nbuf_len(msg), soc->htc_endpoint,
739 		1); /* tag - not relevant here */
740 
741 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
742 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ, NULL);
743 	return 0;
744 }
745 
746 /*
747  * htt_srng_setup() - Send SRNG setup message to target
748  * @htt_soc:	HTT SOC handle
749  * @mac_id:	MAC Id
750  * @hal_srng:	Opaque HAL SRNG pointer
751  * @hal_ring_type:	SRNG ring type
752  *
753  * Return: 0 on success; error code on failure
754  */
755 int htt_srng_setup(struct htt_soc *soc, int mac_id,
756 		   hal_ring_handle_t hal_ring_hdl,
757 		   int hal_ring_type)
758 {
759 	struct dp_htt_htc_pkt *pkt;
760 	qdf_nbuf_t htt_msg;
761 	uint32_t *msg_word;
762 	struct hal_srng_params srng_params;
763 	qdf_dma_addr_t hp_addr, tp_addr;
764 	uint32_t ring_entry_size =
765 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
766 	int htt_ring_type, htt_ring_id;
767 	uint8_t *htt_logger_bufp;
768 
769 	/* Sizes should be set in 4-byte words */
770 	ring_entry_size = ring_entry_size >> 2;
771 
772 	htt_msg = qdf_nbuf_alloc(soc->osdev,
773 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
774 		/* reserve room for the HTC header */
775 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
776 	if (!htt_msg)
777 		goto fail0;
778 
779 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
780 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
781 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
782 
783 	switch (hal_ring_type) {
784 	case RXDMA_BUF:
785 #ifdef QCA_HOST2FW_RXBUF_RING
786 		if (srng_params.ring_id ==
787 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
788 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
789 			htt_ring_type = HTT_SW_TO_SW_RING;
790 #ifdef IPA_OFFLOAD
791 		} else if (srng_params.ring_id ==
792 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
793 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
794 			htt_ring_type = HTT_SW_TO_SW_RING;
795 #endif
796 #else
797 		if (srng_params.ring_id ==
798 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
799 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
800 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
801 			htt_ring_type = HTT_SW_TO_HW_RING;
802 #endif
803 		} else if (srng_params.ring_id ==
804 #ifdef IPA_OFFLOAD
805 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
806 #else
807 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
808 #endif
809 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
810 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
811 			htt_ring_type = HTT_SW_TO_HW_RING;
812 		} else {
813 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
814 				   "%s: Ring %d currently not supported",
815 				   __func__, srng_params.ring_id);
816 			goto fail1;
817 		}
818 
819 		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
820 			hal_ring_type, srng_params.ring_id, htt_ring_id,
821 			(uint64_t)hp_addr,
822 			(uint64_t)tp_addr);
823 		break;
824 	case RXDMA_MONITOR_BUF:
825 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
826 		htt_ring_type = HTT_SW_TO_HW_RING;
827 		break;
828 	case RXDMA_MONITOR_STATUS:
829 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
830 		htt_ring_type = HTT_SW_TO_HW_RING;
831 		break;
832 	case RXDMA_MONITOR_DST:
833 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
834 		htt_ring_type = HTT_HW_TO_SW_RING;
835 		break;
836 	case RXDMA_MONITOR_DESC:
837 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
838 		htt_ring_type = HTT_SW_TO_HW_RING;
839 		break;
840 	case RXDMA_DST:
841 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
842 		htt_ring_type = HTT_HW_TO_SW_RING;
843 		break;
844 
845 	default:
846 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
847 			"%s: Ring currently not supported", __func__);
848 			goto fail1;
849 	}
850 
851 	/*
852 	 * Set the length of the message.
853 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
854 	 * separately during the below call to qdf_nbuf_push_head.
855 	 * The contribution from the HTC header is added separately inside HTC.
856 	 */
857 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
858 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
859 			"%s: Failed to expand head for SRING_SETUP msg",
860 			__func__);
861 		return QDF_STATUS_E_FAILURE;
862 	}
863 
864 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
865 
866 	/* rewind beyond alignment pad to get to the HTC header reserved area */
867 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
868 
869 	/* word 0 */
870 	*msg_word = 0;
871 	htt_logger_bufp = (uint8_t *)msg_word;
872 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
873 
874 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
875 			(htt_ring_type == HTT_HW_TO_SW_RING))
876 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
877 			 DP_SW2HW_MACID(mac_id));
878 	else
879 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
880 
881 	dp_info("%s: mac_id %d", __func__, mac_id);
882 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
883 	/* TODO: Discuss with FW on changing this to unique ID and using
884 	 * htt_ring_type to send the type of ring
885 	 */
886 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
887 
888 	/* word 1 */
889 	msg_word++;
890 	*msg_word = 0;
891 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
892 		srng_params.ring_base_paddr & 0xffffffff);
893 
894 	/* word 2 */
895 	msg_word++;
896 	*msg_word = 0;
897 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
898 		(uint64_t)srng_params.ring_base_paddr >> 32);
899 
900 	/* word 3 */
901 	msg_word++;
902 	*msg_word = 0;
903 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
904 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
905 		(ring_entry_size * srng_params.num_entries));
906 	dp_info("%s: entry_size %d", __func__, ring_entry_size);
907 	dp_info("%s: num_entries %d", __func__, srng_params.num_entries);
908 	dp_info("%s: ring_size %d", __func__,
909 		(ring_entry_size * srng_params.num_entries));
910 	if (htt_ring_type == HTT_SW_TO_HW_RING)
911 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
912 						*msg_word, 1);
913 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
914 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
915 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
916 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
917 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
918 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
919 
920 	/* word 4 */
921 	msg_word++;
922 	*msg_word = 0;
923 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
924 		hp_addr & 0xffffffff);
925 
926 	/* word 5 */
927 	msg_word++;
928 	*msg_word = 0;
929 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
930 		(uint64_t)hp_addr >> 32);
931 
932 	/* word 6 */
933 	msg_word++;
934 	*msg_word = 0;
935 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
936 		tp_addr & 0xffffffff);
937 
938 	/* word 7 */
939 	msg_word++;
940 	*msg_word = 0;
941 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
942 		(uint64_t)tp_addr >> 32);
943 
944 	/* word 8 */
945 	msg_word++;
946 	*msg_word = 0;
947 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
948 		srng_params.msi_addr & 0xffffffff);
949 
950 	/* word 9 */
951 	msg_word++;
952 	*msg_word = 0;
953 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
954 		(uint64_t)(srng_params.msi_addr) >> 32);
955 
956 	/* word 10 */
957 	msg_word++;
958 	*msg_word = 0;
959 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
960 		srng_params.msi_data);
961 
962 	/* word 11 */
963 	msg_word++;
964 	*msg_word = 0;
965 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
966 		srng_params.intr_batch_cntr_thres_entries *
967 		ring_entry_size);
968 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
969 		srng_params.intr_timer_thres_us >> 3);
970 
971 	/* word 12 */
972 	msg_word++;
973 	*msg_word = 0;
974 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
975 		/* TODO: Setting low threshold to 1/8th of ring size - see
976 		 * if this needs to be configurable
977 		 */
978 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
979 			srng_params.low_threshold);
980 	}
981 	/* "response_required" field should be set if a HTT response message is
982 	 * required after setting up the ring.
983 	 */
984 	pkt = htt_htc_pkt_alloc(soc);
985 	if (!pkt)
986 		goto fail1;
987 
988 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
989 
990 	SET_HTC_PACKET_INFO_TX(
991 		&pkt->htc_pkt,
992 		dp_htt_h2t_send_complete_free_netbuf,
993 		qdf_nbuf_data(htt_msg),
994 		qdf_nbuf_len(htt_msg),
995 		soc->htc_endpoint,
996 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
997 
998 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
999 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
1000 			    htt_logger_bufp);
1001 
1002 	return QDF_STATUS_SUCCESS;
1003 
1004 fail1:
1005 	qdf_nbuf_free(htt_msg);
1006 fail0:
1007 	return QDF_STATUS_E_FAILURE;
1008 }
1009 
/*
 * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
 * config message to target
 * @htt_soc:	HTT SOC handle
 * @pdev_id:	PDEV Id
 * @hal_ring_hdl:	Opaque HAL SRNG pointer
 * @hal_ring_type:	SRNG ring type
 * @ring_buf_size:	SRNG buffer size
 * @htt_tlv_filter:	Rx SRNG TLV and filter setting
 * Return: 0 on success; error code on failure
 */
int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
			hal_ring_handle_t hal_ring_hdl,
			int hal_ring_type, int ring_buf_size,
			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t htt_msg;
	uint32_t *msg_word;
	struct hal_srng_params srng_params;
	uint32_t htt_ring_type, htt_ring_id;
	uint32_t tlv_filter;
	uint8_t *htt_logger_bufp;
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);

	htt_msg = qdf_nbuf_alloc(soc->osdev,
		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
	/* reserve room for the HTC header */
	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!htt_msg)
		goto fail0;

	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);

	/* Map the HAL ring type to the HTT ring id and transfer direction
	 * the firmware expects in the message header.
	 */
	switch (hal_ring_type) {
	case RXDMA_BUF:
		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_BUF:
		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_STATUS:
		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_MONITOR_DST:
		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case RXDMA_MONITOR_DESC:
		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case RXDMA_DST:
		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		htt_ring_type = HTT_HW_TO_SW_RING;
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Ring currently not supported", __func__);
		goto fail1;
	}

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Failed to expand head for RX Ring Cfg msg",
			__func__);
		goto fail1; /* failure */
	}

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0 */
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);

	/*
	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
	 */
	if (htt_ring_type == HTT_SW_TO_SW_RING ||
			htt_ring_type == HTT_SW_TO_HW_RING)
		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
						DP_SW2HW_MACID(pdev_id));

	/* TODO: Discuss with FW on changing this to unique ID and using
	 * htt_ring_type to send the type of ring
	 */
	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);

	/* NOTE(review): STATUS_TLV/PKT_TLV bits are driven by the SRNG byte
	 * swap flags here - confirm this mapping against the HTT interface
	 * definition in htt.h.
	 */
	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));

	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));

	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
						htt_tlv_filter->offset_valid);

	if (mon_drop_th > 0)
		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
								   1);
	else
		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
								   0);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
		ring_buf_size);

	/* word 2 */
	msg_word++;
	*msg_word = 0;

	/* Words 2-5 carry per-subtype enable bits for three filter modes
	 * (FP/MD/MO macro tokens; presumably filter-pass, monitor-direct and
	 * monitor-other categories - confirm against htt.h).  The 4-bit
	 * literals (0000..1111) are 802.11 frame subtype values consumed by
	 * the htt_rx_ring_pkt_enable_subtype_set() token-pasting macro.
	 */
	if (htt_tlv_filter->enable_fp) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0000,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0001,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0010,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0011,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0100,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0101,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 0110,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
			MGMT, 0111,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 1000,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			FP, MGMT, 1001,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
			/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0000,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0001,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0010,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0011,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0100,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0101,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 0110,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
			MGMT, 0111,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 1000,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MD, MGMT, 1001,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0000,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0001,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0010,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0011,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0100,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0101,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_PROBE_RES) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 0110,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
			MGMT, 0111,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_RESERVED_7) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 1000,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_BEACON) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
			MO, MGMT, 1001,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ATIM) ? 1 : 0);
	}

	/* word 3 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1010,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1011,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1100,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1101,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			FP, MGMT, 1110,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* reserved*/
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
			MGMT, 1111,
			(htt_tlv_filter->fp_mgmt_filter &
			FILTER_MGMT_RESERVED_15) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
			/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1010,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1011,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1100,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1101,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MD, MGMT, 1110,
			(htt_tlv_filter->md_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* NOTE(review): unlike the FP and MO blocks, MD does not set
		 * the reserved 1111 subtype here - confirm this is intended.
		 */
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: MGMT */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1010,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_DISASSOC) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1011,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_AUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1100,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_DEAUTH) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1101,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ACTION) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
			MO, MGMT, 1110,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
		/* reserved*/
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
			MGMT, 1111,
			(htt_tlv_filter->mo_mgmt_filter &
			FILTER_MGMT_RESERVED_15) ? 1 : 0);
	}

	/* word 4 */
	msg_word++;
	*msg_word = 0;

	if (htt_tlv_filter->enable_fp) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0000,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0001,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0010,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0011,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0100,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0101,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0110,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 0111,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 1000,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
			CTRL, 1001,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0000,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0001,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0010,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0011,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0100,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0101,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0110,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 0111,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 1000,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
			CTRL, 1001,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: CTRL */
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0000,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_1) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0001,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_2) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0010,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_TRIGGER) ? 1 : 0);
		/* reserved */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0011,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RESERVED_4) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0100,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0101,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_VHT_NDP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0110,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 0111,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 1000,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BA_REQ) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
			CTRL, 1001,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_BA) ? 1 : 0);
	}

	/* word 5 */
	msg_word++;
	*msg_word = 0;
	if (htt_tlv_filter->enable_fp) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1010,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1011,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1100,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1101,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1110,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			CTRL, 1111,
			(htt_tlv_filter->fp_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, MCAST,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, UCAST,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
			DATA, NULL,
			(htt_tlv_filter->fp_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_md) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1010,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1011,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1100,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1101,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1110,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			CTRL, 1111,
			(htt_tlv_filter->md_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, MCAST,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, UCAST,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
			DATA, NULL,
			(htt_tlv_filter->md_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	if (htt_tlv_filter->enable_mo) {
		/* TYPE: CTRL */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1010,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_PSPOLL) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1011,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_RTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1100,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CTS) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1101,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_ACK) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1110,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CFEND) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			CTRL, 1111,
			(htt_tlv_filter->mo_ctrl_filter &
			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
		/* TYPE: DATA */
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, MCAST,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_MCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, UCAST,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_UCAST) ? 1 : 0);
		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
			DATA, NULL,
			(htt_tlv_filter->mo_data_filter &
			FILTER_DATA_NULL) ? 1 : 0);
	}

	/* word 6: select which rx TLVs the hardware should deposit for this
	 * ring (assembled in tlv_filter, then written in one shot)
	 */
	msg_word++;
	*msg_word = 0;
	tlv_filter = 0;
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
		htt_tlv_filter->mpdu_start);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
		htt_tlv_filter->msdu_start);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
		htt_tlv_filter->packet);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
		htt_tlv_filter->msdu_end);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
		htt_tlv_filter->mpdu_end);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
		htt_tlv_filter->packet_header);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
		htt_tlv_filter->attention);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
		htt_tlv_filter->ppdu_start);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
		htt_tlv_filter->ppdu_end);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
		PPDU_END_USER_STATS,
		htt_tlv_filter->ppdu_end_user_stats);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
		PPDU_END_USER_STATS_EXT,
		htt_tlv_filter->ppdu_end_user_stats_ext);
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
		htt_tlv_filter->ppdu_end_status_done);
	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
		 htt_tlv_filter->header_per_msdu);

	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);

	/* words 7-11: per-TLV packet offsets, valid only when the caller
	 * supplied them (offset_valid)
	 */
	msg_word++;
	*msg_word = 0;
	if (htt_tlv_filter->offset_valid) {
		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_packet_offset);
		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_header_offset);

		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_mpdu_end_offset);
		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_mpdu_start_offset);

		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_msdu_end_offset);
		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_msdu_start_offset);

		msg_word++;
		*msg_word = 0;
		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
					htt_tlv_filter->rx_attn_offset);
		msg_word++;
		*msg_word = 0;
	} else {
		/* offsets not supplied: jump over the four offset words to
		 * the drop-threshold word and zero it
		 */
		msg_word += 4;
		*msg_word = 0;
	}

	if (mon_drop_th > 0)
		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
								mon_drop_th);

	/* "response_required" field should be set if a HTT response message is
	 * required after setting up the ring.
	 */
	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt)
		goto fail1;

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(htt_msg),
		qdf_nbuf_len(htt_msg),
		soc->htc_endpoint,
		1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
			    htt_logger_bufp);
	return QDF_STATUS_SUCCESS;

fail1:
	qdf_nbuf_free(htt_msg);
fail0:
	return QDF_STATUS_E_FAILURE;
}
1732 
1733 #if defined(HTT_STATS_ENABLE)
1734 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1735 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1736 
1737 {
1738 	uint32_t pdev_id;
1739 	uint32_t *msg_word = NULL;
1740 	uint32_t msg_remain_len = 0;
1741 
1742 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1743 
1744 	/*COOKIE MSB*/
1745 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1746 
1747 	/* stats message length + 16 size of HTT header*/
1748 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1749 				(uint32_t)DP_EXT_MSG_LENGTH);
1750 
1751 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1752 			msg_word,  msg_remain_len,
1753 			WDI_NO_VAL, pdev_id);
1754 
1755 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1756 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1757 	}
1758 	/* Need to be freed here as WDI handler will
1759 	 * make a copy of pkt to send data to application
1760 	 */
1761 	qdf_nbuf_free(htt_msg);
1762 	return QDF_STATUS_SUCCESS;
1763 }
1764 #else
1765 static inline QDF_STATUS
1766 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1767 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
1768 {
1769 	return QDF_STATUS_E_NOSUPPORT;
1770 }
1771 #endif
1772 /**
1773  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1774  * @htt_stats: htt stats info
1775  *
1776  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1777  * contains sub messages which are identified by a TLV header.
1778  * In this function we will process the stream of T2H messages and read all the
1779  * TLV contained in the message.
1780  *
 * The following cases have been taken care of
1782  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1783  *		In this case the buffer will contain multiple tlvs.
1784  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1785  *		Only one tlv will be contained in the HTT message and this tag
1786  *		will extend onto the next buffer.
1787  * Case 3: When the buffer is the continuation of the previous message
1788  * Case 4: tlv length is 0. which will indicate the end of message
1789  *
1790  * return: void
1791  */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	/* Scratch buffer used to reassemble a TLV that spans two nbufs */
	uint8_t *tlv_buf_head = NULL;
	/* Write cursor into tlv_buf_head while reassembling */
	uint8_t *tlv_buf_tail = NULL;
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val;
	int cookie_msb;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		/* Word 1 of the T2H header carries the requester cookie */
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		/*
		 * Non-zero cookie: try to hand the buffer back to the
		 * requester. On success the response path owns htt_msg, so
		 * skip local processing (presumably it frees/forwards the
		 * nbuf — confirm in dp_send_htt_stat_resp).
		 */
		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		/* Low 2 bits of cookie MSB encode the pdev id */
		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		/*
		 * Any bit above the pdev-id field requests copy (accumulate)
		 * rather than print.
		 * NOTE(review): copy_stats is never reset to false, so once
		 * set it is sticky for all remaining queued nbufs — confirm
		 * this is intended (all nbufs of one stream share a cookie).
		 */
		if (cookie_msb >> 2) {
			copy_stats = true;
		}

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero-length TLV terminates the stream */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* TLV length field excludes its own header */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3 */
				if (tlv_buf_head) {
					/* Tail of a split TLV: finish the copy */
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev,
							      tlv_type,
							      tlv_start);
				else
					dp_htt_stats_print_tag(pdev,
							       tlv_type,
							       tlv_start);

				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
					dp_peer_update_inactive_time(pdev,
								     tlv_type,
								     tlv_start);

				/* Advance past the consumed TLV */
				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				/* Reassembly buffer no longer needed */
				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				/* Copy the partial TLV; rest comes in next nbuf */
				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		/* Track how much of the overall message is still pending */
		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* On malformed stream drop the current and all remaining buffers */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
1943 
1944 void htt_t2h_stats_handler(void *context)
1945 {
1946 	struct dp_soc *soc = (struct dp_soc *)context;
1947 	struct htt_stats_context htt_stats;
1948 	uint32_t *msg_word;
1949 	qdf_nbuf_t htt_msg = NULL;
1950 	uint8_t done;
1951 	uint32_t rem_stats;
1952 
1953 	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
1954 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1955 			"soc: 0x%pK, init_done: %d", soc,
1956 			qdf_atomic_read(&soc->cmn_init_done));
1957 		return;
1958 	}
1959 
1960 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1961 	qdf_nbuf_queue_init(&htt_stats.msg);
1962 
1963 	/* pull one completed stats from soc->htt_stats_msg and process */
1964 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1965 	if (!soc->htt_stats.num_stats) {
1966 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1967 		return;
1968 	}
1969 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1970 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1971 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1972 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1973 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1974 		/*
1975 		 * Done bit signifies that this is the last T2H buffer in the
1976 		 * stream of HTT EXT STATS message
1977 		 */
1978 		if (done)
1979 			break;
1980 	}
1981 	rem_stats = --soc->htt_stats.num_stats;
1982 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1983 
1984 	/* If there are more stats to process, schedule stats work again.
1985 	 * Scheduling prior to processing ht_stats to queue with early
1986 	 * index
1987 	 */
1988 	if (rem_stats)
1989 		qdf_sched_work(0, &soc->htt_stats.work);
1990 
1991 	dp_process_htt_stat_msg(&htt_stats, soc);
1992 }
1993 
1994 /*
1995  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1996  * if a new peer id arrives in a PPDU
1997  * pdev: DP pdev handle
1998  * @peer_id : peer unique identifier
1999  * @ppdu_info: per ppdu tlv structure
2000  *
2001  * return:user index to be populated
2002  */
2003 #ifdef FEATURE_PERPKT_INFO
2004 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
2005 						uint16_t peer_id,
2006 						struct ppdu_info *ppdu_info)
2007 {
2008 	uint8_t user_index = 0;
2009 	struct cdp_tx_completion_ppdu *ppdu_desc;
2010 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2011 
2012 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2013 
2014 	while ((user_index + 1) <= ppdu_info->last_user) {
2015 		ppdu_user_desc = &ppdu_desc->user[user_index];
2016 		if (ppdu_user_desc->peer_id != peer_id) {
2017 			user_index++;
2018 			continue;
2019 		} else {
2020 			/* Max users possible is 8 so user array index should
2021 			 * not exceed 7
2022 			 */
2023 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
2024 			return user_index;
2025 		}
2026 	}
2027 
2028 	ppdu_info->last_user++;
2029 	/* Max users possible is 8 so last user should not exceed 8 */
2030 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
2031 	return ppdu_info->last_user - 1;
2032 }
2033 
2034 /*
2035  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
2036  * pdev: DP pdev handle
2037  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
2038  * @ppdu_info: per ppdu tlv structure
2039  *
2040  * return:void
2041  */
2042 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
2043 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
2044 {
2045 	uint16_t frame_type;
2046 	uint16_t frame_ctrl;
2047 	uint16_t freq;
2048 	struct dp_soc *soc = NULL;
2049 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2050 	uint64_t ppdu_start_timestamp;
2051 	uint32_t *start_tag_buf;
2052 
2053 	start_tag_buf = tag_buf;
2054 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2055 
2056 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
2057 	ppdu_info->sched_cmdid =
2058 		HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
2059 	ppdu_desc->num_users =
2060 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
2061 
2062 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
2063 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
2064 	ppdu_desc->htt_frame_type = frame_type;
2065 
2066 	frame_ctrl = ppdu_desc->frame_ctrl;
2067 
2068 	switch (frame_type) {
2069 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
2070 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
2071 	case HTT_STATS_FTYPE_SGEN_QOS_NULL:
2072 		/*
2073 		 * for management packet, frame type come as DATA_SU
2074 		 * need to check frame_ctrl before setting frame_type
2075 		 */
2076 		if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
2077 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2078 		else
2079 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
2080 	break;
2081 	case HTT_STATS_FTYPE_SGEN_MU_BAR:
2082 	case HTT_STATS_FTYPE_SGEN_BAR:
2083 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
2084 		ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
2085 	break;
2086 	default:
2087 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2088 	break;
2089 	}
2090 
2091 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
2092 	ppdu_desc->tx_duration = *tag_buf;
2093 
2094 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
2095 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
2096 
2097 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
2098 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
2099 	if (freq != ppdu_desc->channel) {
2100 		soc = pdev->soc;
2101 		ppdu_desc->channel = freq;
2102 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
2103 			pdev->operating_channel =
2104 		soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
2105 						     pdev->pdev_id, freq);
2106 	}
2107 
2108 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
2109 
2110 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
2111 	ppdu_desc->beam_change =
2112 		HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
2113 
2114 	dp_tx_capture_htt_frame_counter(pdev, frame_type);
2115 
2116 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
2117 	ppdu_start_timestamp = *tag_buf;
2118 	ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
2119 					     HTT_SHIFT_UPPER_TIMESTAMP) &
2120 					    HTT_MASK_UPPER_TIMESTAMP);
2121 
2122 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2123 					ppdu_desc->tx_duration;
2124 	/* Ack time stamp is same as end time stamp*/
2125 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
2126 
2127 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2128 					ppdu_desc->tx_duration;
2129 
2130 	ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
2131 	ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
2132 	ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
2133 
2134 	/* Ack time stamp is same as end time stamp*/
2135 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
2136 }
2137 
2138 /*
2139  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
2140  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
2141  * @ppdu_info: per ppdu tlv structure
2142  *
2143  * return:void
2144  */
static void dp_process_ppdu_stats_user_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct dp_peer *peer;
	struct dp_vdev *vdev;

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	/*
	 * NOTE(review): the USER_RATE TLV accessor is used here on a
	 * user-common TLV; presumably both TLVs place sw_peer_id in the same
	 * bit position — confirm against the HTT TLV definitions.
	 */
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	/* Find (or allocate) the per-user slot for this peer in the PPDU */
	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];

	if (peer_id == DP_SCAN_PEER_ID) {
		/* Scan peer has no dp_peer entry: take the MAC from the vdev */
		ppdu_desc->vdev_id =
			HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
		vdev =
		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
							  ppdu_desc->vdev_id);
		if (!vdev)
			return;
		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
	} else {
		peer = dp_peer_find_by_id(pdev->soc, peer_id);
		if (!peer)
			return;
		qdf_mem_copy(ppdu_user_desc->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		/* Release the reference taken by dp_peer_find_by_id */
		dp_peer_unref_del_find_by_id(peer);
	}

	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;

	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
		ppdu_user_desc->delayed_ba = 1;
		ppdu_desc->delayed_ba = 1;
	}

	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
		ppdu_user_desc->is_mcast = true;
		ppdu_user_desc->mpdu_tried_mcast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
	} else {
		ppdu_user_desc->mpdu_tried_ucast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
	}

	tag_buf++;

	ppdu_user_desc->qos_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
	ppdu_user_desc->frame_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;

	/* With delayed BA, mpdu_success is filled in when the BA arrives */
	if (ppdu_user_desc->delayed_ba)
		ppdu_user_desc->mpdu_success = 0;

	/* Skip to the host-opaque-cookie word of the TLV */
	tag_buf += 3;

	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
		ppdu_user_desc->ppdu_cookie =
			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
		ppdu_user_desc->is_ppdu_cookie_valid = 1;
	}
}
2224 
2225 
2226 /**
2227  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
2228  * @pdev: DP pdev handle
2229  * @tag_buf: T2H message buffer carrying the user rate TLV
2230  * @ppdu_info: per ppdu tlv structure
2231  *
2232  * return:void
2233  */
static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
		uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct dp_peer *peer;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct dp_vdev *vdev;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	/* Find (or allocate) the per-user slot for this peer in the PPDU */
	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	if (peer_id == DP_SCAN_PEER_ID) {
		/* Scan peer: only validate that the vdev exists */
		vdev =
		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
							  ppdu_desc->vdev_id);
		if (!vdev)
			return;
	} else {
		/* Validate the peer id; reference is dropped immediately */
		peer = dp_peer_find_by_id(pdev->soc, peer_id);
		if (!peer)
			return;
		dp_peer_unref_del_find_by_id(peer);
	}

	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);

	tag_buf += 1;

	ppdu_user_desc->user_pos =
		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
	ppdu_user_desc->mu_group_id =
		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);

	tag_buf += 1;

	ppdu_user_desc->ru_start =
		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
	/* RU width in tones: end and start indices are inclusive */
	ppdu_user_desc->ru_tones =
		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;

	tag_buf += 2;

	ppdu_user_desc->ppdu_type =
		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);

	tag_buf++;
	/* Whole rate word is kept raw; individual fields decoded below */
	ppdu_user_desc->tx_rate = *tag_buf;

	ppdu_user_desc->ltf_size =
		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
	ppdu_user_desc->stbc =
		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
	ppdu_user_desc->he_re =
		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
	ppdu_user_desc->txbf =
		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
	/*
	 * NOTE(review): "- 2" presumably maps the HTT bandwidth enum onto the
	 * CDP bandwidth enum — confirm the offset against both enums.
	 */
	ppdu_user_desc->bw =
		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
	ppdu_user_desc->preamble =
		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
}
2313 
2314 /*
2315  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
2316  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2317  * pdev: DP PDEV handle
2318  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2319  * @ppdu_info: per ppdu tlv structure
2320  *
2321  * return:void
2322  */
2323 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2324 		struct dp_pdev *pdev, uint32_t *tag_buf,
2325 		struct ppdu_info *ppdu_info)
2326 {
2327 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
2328 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
2329 
2330 	struct cdp_tx_completion_ppdu *ppdu_desc;
2331 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2332 	uint8_t curr_user_index = 0;
2333 	uint16_t peer_id;
2334 	uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
2335 
2336 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2337 
2338 	tag_buf++;
2339 
2340 	peer_id =
2341 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2342 
2343 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2344 		return;
2345 
2346 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2347 
2348 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2349 	ppdu_user_desc->peer_id = peer_id;
2350 
2351 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2352 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2353 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2354 
2355 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2356 						   (void *)ppdu_user_desc,
2357 						   ppdu_info->ppdu_id,
2358 						   size);
2359 }
2360 
2361 /*
2362  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
2363  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2364  * soc: DP SOC handle
2365  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2366  * @ppdu_info: per ppdu tlv structure
2367  *
2368  * return:void
2369  */
2370 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2371 		struct dp_pdev *pdev, uint32_t *tag_buf,
2372 		struct ppdu_info *ppdu_info)
2373 {
2374 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
2375 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
2376 
2377 	struct cdp_tx_completion_ppdu *ppdu_desc;
2378 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2379 	uint8_t curr_user_index = 0;
2380 	uint16_t peer_id;
2381 	uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
2382 
2383 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2384 
2385 	tag_buf++;
2386 
2387 	peer_id =
2388 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2389 
2390 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2391 		return;
2392 
2393 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2394 
2395 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2396 	ppdu_user_desc->peer_id = peer_id;
2397 
2398 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2399 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2400 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2401 
2402 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2403 						   (void *)ppdu_user_desc,
2404 						   ppdu_info->ppdu_id,
2405 						   size);
2406 }
2407 
2408 /*
2409  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
2410  * htt_ppdu_stats_user_cmpltn_common_tlv
2411  * soc: DP SOC handle
2412  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
2413  * @ppdu_info: per ppdu tlv structure
2414  *
2415  * return:void
2416  */
static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	uint8_t bw_iter;
	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	peer_id =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);

	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
		return;

	/* Find (or allocate) the per-user slot for this peer in the PPDU */
	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->peer_id = peer_id;
	ppdu_desc->last_usr_index = curr_user_index;

	ppdu_user_desc->completion_status =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
				*tag_buf);

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);


	tag_buf++;
	/* Ack RSSI is only meaningful when the completion succeeded */
	if (qdf_likely(ppdu_user_desc->completion_status ==
			HTT_PPDU_STATS_USER_STATUS_OK)) {
		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
		ppdu_user_desc->ack_rssi_valid = 1;
	} else {
		ppdu_user_desc->ack_rssi_valid = 0;
	}

	tag_buf++;

	ppdu_user_desc->mpdu_success =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);

	/* failed = tried - success; both fields come from the same word */
	ppdu_user_desc->mpdu_failed =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
						ppdu_user_desc->mpdu_success;

	tag_buf++;

	ppdu_user_desc->long_retries =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);

	ppdu_user_desc->short_retries =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
	ppdu_user_desc->retry_msdus =
		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;

	ppdu_user_desc->is_ampdu =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;

	ppdu_desc->resp_type =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
	ppdu_desc->mprot_type =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
	ppdu_desc->rts_success =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
	ppdu_desc->rts_failure =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);

	/*
	 * increase successful mpdu counter from
	 * htt_ppdu_stats_user_cmpltn_common_tlv
	 */
	ppdu_info->mpdu_compltn_common_tlv += ppdu_user_desc->mpdu_success;

	/*
	 * MU BAR may send request to n users but we may received ack only from
	 * m users. To have count of number of users respond back, we have a
	 * separate counter bar_num_users per PPDU that get increment for every
	 * htt_ppdu_stats_user_cmpltn_common_tlv
	 */
	ppdu_desc->bar_num_users++;

	/* One per-chain RSSI word per iteration */
	tag_buf++;
	for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
		ppdu_user_desc->rssi_chain[bw_iter] =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
		tag_buf++;
	}

	ppdu_user_desc->sa_tx_antenna =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);

	tag_buf++;
	/* Smart-antenna training fields */
	ppdu_user_desc->sa_is_training =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
	if (ppdu_user_desc->sa_is_training) {
		ppdu_user_desc->sa_goodput =
			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
	}

	tag_buf++;
	/* One max-rate word per smart-antenna bandwidth */
	for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
		ppdu_user_desc->sa_max_rates[bw_iter] =
			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
	}

	tag_buf += CDP_NUM_SA_BW;
	ppdu_user_desc->current_rate_per =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
}
2534 
2535 /*
2536  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2537  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2538  * pdev: DP PDEV handle
2539  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2540  * @ppdu_info: per ppdu tlv structure
2541  *
2542  * return:void
2543  */
2544 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2545 		struct dp_pdev *pdev, uint32_t *tag_buf,
2546 		struct ppdu_info *ppdu_info)
2547 {
2548 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2549 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2550 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2551 	struct cdp_tx_completion_ppdu *ppdu_desc;
2552 	uint8_t curr_user_index = 0;
2553 	uint16_t peer_id;
2554 
2555 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2556 
2557 	tag_buf++;
2558 
2559 	peer_id =
2560 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2561 
2562 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2563 		return;
2564 
2565 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2566 
2567 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2568 	ppdu_user_desc->peer_id = peer_id;
2569 
2570 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2571 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2572 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2573 	ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
2574 }
2575 
2576 /*
2577  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2578  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2579  * pdev: DP PDEV handle
2580  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2581  * @ppdu_info: per ppdu tlv structure
2582  *
2583  * return:void
2584  */
2585 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2586 		struct dp_pdev *pdev, uint32_t *tag_buf,
2587 		struct ppdu_info *ppdu_info)
2588 {
2589 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2590 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2591 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2592 	struct cdp_tx_completion_ppdu *ppdu_desc;
2593 	uint8_t curr_user_index = 0;
2594 	uint16_t peer_id;
2595 
2596 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2597 
2598 	tag_buf++;
2599 
2600 	peer_id =
2601 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2602 
2603 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2604 		return;
2605 
2606 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2607 
2608 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2609 	ppdu_user_desc->peer_id = peer_id;
2610 
2611 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2612 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2613 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2614 	ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
2615 }
2616 
2617 /*
2618  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2619  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2620  * pdev: DP PDE handle
2621  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2622  * @ppdu_info: per ppdu tlv structure
2623  *
2624  * return:void
2625  */
static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* Skip TLV header + first word to reach the sw_peer_id word */
	tag_buf += 2;
	peer_id =
	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);

	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
		return;

	/* Find (or allocate) the per-user slot for this peer in the PPDU */
	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);

	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;
	/* not to update ppdu_desc->tid from this TLV */
	ppdu_user_desc->num_mpdu =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);

	ppdu_user_desc->num_msdu =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);

	/* All MSDUs reported in this TLV were acknowledged */
	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;

	tag_buf++;
	ppdu_user_desc->start_seq =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
			*tag_buf);

	tag_buf++;
	/* Whole word is the acknowledged byte count */
	ppdu_user_desc->success_bytes = *tag_buf;

	/* increase successful mpdu counter */
	ppdu_info->mpdu_ack_ba_tlv += ppdu_user_desc->num_mpdu;
}
2670 
2671 /*
2672  * dp_process_ppdu_stats_user_common_array_tlv: Process
2673  * htt_ppdu_stats_user_common_array_tlv
2674  * pdev: DP PDEV handle
2675  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2676  * @ppdu_info: per ppdu tlv structure
2677  *
2678  * return:void
2679  */
2680 static void dp_process_ppdu_stats_user_common_array_tlv(
2681 		struct dp_pdev *pdev, uint32_t *tag_buf,
2682 		struct ppdu_info *ppdu_info)
2683 {
2684 	uint32_t peer_id;
2685 	struct cdp_tx_completion_ppdu *ppdu_desc;
2686 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2687 	uint8_t curr_user_index = 0;
2688 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2689 
2690 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2691 
2692 	tag_buf++;
2693 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2694 	tag_buf += 3;
2695 	peer_id =
2696 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2697 
2698 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
2699 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2700 			"Invalid peer");
2701 		return;
2702 	}
2703 
2704 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2705 
2706 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2707 
2708 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2709 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2710 
2711 	tag_buf++;
2712 
2713 	ppdu_user_desc->success_msdus =
2714 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2715 	ppdu_user_desc->retry_bytes =
2716 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2717 	tag_buf++;
2718 	ppdu_user_desc->failed_msdus =
2719 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2720 }
2721 
2722 /*
2723  * dp_process_ppdu_stats_flush_tlv: Process
2724  * htt_ppdu_stats_flush_tlv
2725  * @pdev: DP PDEV handle
2726  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2727  * @ppdu_info: per ppdu tlv structure
2728  *
2729  * return:void
2730  */
2731 static void
2732 dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2733 					     uint32_t *tag_buf,
2734 					     struct ppdu_info *ppdu_info)
2735 {
2736 	struct cdp_tx_completion_ppdu *ppdu_desc;
2737 	uint32_t peer_id;
2738 	uint8_t tid;
2739 	struct dp_peer *peer;
2740 
2741 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2742 				qdf_nbuf_data(ppdu_info->nbuf);
2743 	ppdu_desc->is_flush = 1;
2744 
2745 	tag_buf++;
2746 	ppdu_desc->drop_reason = *tag_buf;
2747 
2748 	tag_buf++;
2749 	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2750 	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
2751 	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);
2752 
2753 	tag_buf++;
2754 	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2755 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2756 
2757 	ppdu_desc->user[0].peer_id = peer_id;
2758 	ppdu_desc->user[0].tid = tid;
2759 
2760 	ppdu_desc->queue_type =
2761 			HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);
2762 
2763 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2764 	if (!peer)
2765 		return;
2766 
2767 	if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2768 		DP_STATS_INC(peer,
2769 			     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2770 			     ppdu_desc->num_msdu);
2771 	}
2772 
2773 	dp_peer_unref_del_find_by_id(peer);
2774 }
2775 
2776 #ifndef WLAN_TX_PKT_CAPTURE_ENH
2777 /*
2778  * dp_deliver_mgmt_frm: Process
2779  * @pdev: DP PDEV handle
2780  * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2781  *
2782  * return: void
2783  */
2784 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
2785 {
2786 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2787 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2788 				     nbuf, HTT_INVALID_PEER,
2789 				     WDI_NO_VAL, pdev->pdev_id);
2790 	}
2791 }
2792 #endif
2793 
2794 /*
2795  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2796  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2797  * @pdev: DP PDEV handle
2798  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2799  * @length: tlv_length
2800  *
2801  * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller
2802  */
2803 static QDF_STATUS
2804 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2805 					      qdf_nbuf_t tag_buf,
2806 					      uint32_t ppdu_id)
2807 {
2808 	uint32_t *nbuf_ptr;
2809 	uint8_t trim_size;
2810 	size_t head_size;
2811 	struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
2812 
2813 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
2814 	    (!pdev->bpr_enable) && (!pdev->tx_capture_enabled))
2815 		return QDF_STATUS_SUCCESS;
2816 
2817 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
2818 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2819 		      qdf_nbuf_data(tag_buf));
2820 
2821 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2822 		return QDF_STATUS_SUCCESS;
2823 
2824 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2825 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
2826 
2827 	if (pdev->tx_capture_enabled) {
2828 		head_size = sizeof(struct cdp_tx_mgmt_comp_info);
2829 		if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
2830 			qdf_err("Fail to get headroom h_sz %d h_avail %d\n",
2831 				head_size, qdf_nbuf_headroom(tag_buf));
2832 			qdf_assert_always(0);
2833 			return QDF_STATUS_E_NOMEM;
2834 		}
2835 		ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
2836 					qdf_nbuf_push_head(tag_buf, head_size);
2837 		qdf_assert_always(ptr_mgmt_comp_info);
2838 		ptr_mgmt_comp_info->ppdu_id = ppdu_id;
2839 		ptr_mgmt_comp_info->is_sgen_pkt = true;
2840 	} else {
2841 		head_size = sizeof(ppdu_id);
2842 		nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
2843 		*nbuf_ptr = ppdu_id;
2844 	}
2845 
2846 	if (pdev->bpr_enable) {
2847 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2848 				     tag_buf, HTT_INVALID_PEER,
2849 				     WDI_NO_VAL, pdev->pdev_id);
2850 	}
2851 
2852 	dp_deliver_mgmt_frm(pdev, tag_buf);
2853 
2854 	return QDF_STATUS_E_ALREADY;
2855 }
2856 
2857 /**
2858  * dp_validate_fix_ppdu_tlv(): Function to validate the length of PPDU
2859  *
 * If the TLV length sent as part of PPDU TLV is less than the expected size,
2861  * size of corresponding data structure, pad the remaining bytes with zeros
2862  * and continue processing the TLVs
2863  *
2864  * @pdev: DP pdev handle
2865  * @tag_buf: TLV buffer
2866  * @tlv_expected_size: Expected size of Tag
2867  * @tlv_len: TLV length received from FW
2868  *
2869  * Return: Pointer to updated TLV
2870  */
2871 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
2872 						 uint32_t *tag_buf,
2873 						 uint16_t tlv_expected_size,
2874 						 uint16_t tlv_len)
2875 {
2876 	uint32_t *tlv_desc = tag_buf;
2877 
2878 	qdf_assert_always(tlv_len != 0);
2879 
2880 	if (tlv_len < tlv_expected_size) {
2881 		qdf_mem_zero(pdev->ppdu_tlv_buf, tlv_expected_size);
2882 		qdf_mem_copy(pdev->ppdu_tlv_buf, tag_buf, tlv_len);
2883 		tlv_desc = pdev->ppdu_tlv_buf;
2884 	}
2885 
2886 	return tlv_desc;
2887 }
2888 
2889 /**
2890  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
2891  * @pdev: DP pdev handle
2892  * @tag_buf: TLV buffer
2893  * @tlv_len: length of tlv
2894  * @ppdu_info: per ppdu tlv structure
2895  *
2896  * return: void
2897  */
2898 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
2899 		uint32_t tlv_len, struct ppdu_info *ppdu_info)
2900 {
2901 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2902 	uint16_t tlv_expected_size;
2903 	uint32_t *tlv_desc;
2904 
2905 	switch (tlv_type) {
2906 	case HTT_PPDU_STATS_COMMON_TLV:
2907 		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
2908 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2909 						    tlv_expected_size, tlv_len);
2910 		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
2911 		break;
2912 	case HTT_PPDU_STATS_USR_COMMON_TLV:
2913 		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
2914 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2915 						    tlv_expected_size, tlv_len);
2916 		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
2917 						      ppdu_info);
2918 		break;
2919 	case HTT_PPDU_STATS_USR_RATE_TLV:
2920 		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
2921 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2922 						    tlv_expected_size, tlv_len);
2923 		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
2924 						    ppdu_info);
2925 		break;
2926 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
2927 		tlv_expected_size =
2928 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
2929 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2930 						    tlv_expected_size, tlv_len);
2931 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2932 				pdev, tlv_desc, ppdu_info);
2933 		break;
2934 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
2935 		tlv_expected_size =
2936 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
2937 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2938 						    tlv_expected_size, tlv_len);
2939 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2940 				pdev, tlv_desc, ppdu_info);
2941 		break;
2942 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
2943 		tlv_expected_size =
2944 			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
2945 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2946 						    tlv_expected_size, tlv_len);
2947 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
2948 				pdev, tlv_desc, ppdu_info);
2949 		break;
2950 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
2951 		tlv_expected_size =
2952 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
2953 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2954 						    tlv_expected_size, tlv_len);
2955 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2956 				pdev, tlv_desc, ppdu_info);
2957 		break;
2958 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
2959 		tlv_expected_size =
2960 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
2961 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2962 						    tlv_expected_size, tlv_len);
2963 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2964 				pdev, tlv_desc, ppdu_info);
2965 		break;
2966 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
2967 		tlv_expected_size =
2968 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
2969 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2970 						    tlv_expected_size, tlv_len);
2971 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2972 				pdev, tlv_desc, ppdu_info);
2973 		break;
2974 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
2975 		tlv_expected_size =
2976 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
2977 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2978 						    tlv_expected_size, tlv_len);
2979 		dp_process_ppdu_stats_user_common_array_tlv(
2980 				pdev, tlv_desc, ppdu_info);
2981 		break;
2982 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
2983 		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
2984 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2985 						    tlv_expected_size, tlv_len);
2986 		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
2987 							     ppdu_info);
2988 		break;
2989 	default:
2990 		break;
2991 	}
2992 }
2993 
2994 /**
2995  * dp_ppdu_desc_user_stats_update(): Function to update TX user stats
2996  * @pdev: DP pdev handle
2997  * @ppdu_info: per PPDU TLV descriptor
2998  *
2999  * return: void
3000  */
void
dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
			       struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	struct dp_peer *peer = NULL;
	uint32_t tlv_bitmap_expected;
	uint32_t tlv_bitmap_default;
	uint16_t i;
	uint32_t num_users;

	/* The PPDU descriptor is stored in the data area of the info nbuf */
	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(ppdu_info->nbuf);

	ppdu_desc->num_users = ppdu_info->last_user;
	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;

	/* In sniffer/m-copy/tx-capture mode an A-MPDU changes which TLVs
	 * are expected; derive the expected bitmap from the received one.
	 */
	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
	    pdev->tx_capture_enabled) {
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
					ppdu_info->tlv_bitmap);
	}

	tlv_bitmap_default = tlv_bitmap_expected;

	/* For BAR frames the user count comes from bar_num_users */
	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
		num_users = ppdu_desc->bar_num_users;
		ppdu_desc->num_users = ppdu_desc->bar_num_users;
	} else {
		num_users = ppdu_desc->num_users;
	}

	for (i = 0; i < num_users; i++) {
		/* Fold per-user counts into the PPDU-level totals */
		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;

		peer = dp_peer_find_by_id(pdev->soc,
					  ppdu_desc->user[i].peer_id);
		/**
		 * This check is to make sure peer is not deleted
		 * after processing the TLVs.
		 */
		if (!peer)
			continue;

		ppdu_desc->user[i].cookie = (void *)peer->wlanstats_ctx;

		/*
		 * different frame like DATA, BAR or CTRL has different
		 * tlv bitmap expected. Apart from ACK_BA_STATUS TLV, we
		 * receive other tlv in-order/sequential from fw.
		 * Since ACK_BA_STATUS TLV come from Hardware it is
		 * asynchronous So we need to depend on some tlv to confirm
		 * all tlv is received for a ppdu.
		 * So we depend on both HTT_PPDU_STATS_COMMON_TLV and
		 * ACK_BA_STATUS_TLV. for failure packet we won't get
		 * ACK_BA_STATUS_TLV.
		 */
		if (!(ppdu_info->tlv_bitmap &
		      (1 << HTT_PPDU_STATS_COMMON_TLV)) ||
		    (!(ppdu_info->tlv_bitmap &
		       (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
		     (ppdu_desc->user[i].completion_status ==
		      HTT_PPDU_STATS_USER_STATUS_OK))) {
			/* Not all TLVs present yet: drop the ref and skip */
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		/**
		 * Update tx stats for data frames having Qos as well as
		 * non-Qos data tid
		 */

		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
		     (ppdu_desc->htt_frame_type ==
		      HTT_STATS_FTYPE_SGEN_QOS_NULL)) &&
		      (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {

			dp_tx_stats_update(pdev, peer,
					   &ppdu_desc->user[i],
					   ppdu_desc->ack_rssi);
			dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
		}

		dp_peer_unref_del_find_by_id(peer);
		tlv_bitmap_expected = tlv_bitmap_default;
	}
}
3093 
3094 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3095 
3096 /**
3097  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
3098  * to upper layer
3099  * @pdev: DP pdev handle
3100  * @ppdu_info: per PPDU TLV descriptor
3101  *
3102  * return: void
3103  */
3104 static
3105 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
3106 			  struct ppdu_info *ppdu_info)
3107 {
3108 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3109 	qdf_nbuf_t nbuf;
3110 
3111 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3112 		qdf_nbuf_data(ppdu_info->nbuf);
3113 
3114 	dp_ppdu_desc_user_stats_update(pdev, ppdu_info);
3115 
3116 	/*
3117 	 * Remove from the list
3118 	 */
3119 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
3120 	nbuf = ppdu_info->nbuf;
3121 	pdev->list_depth--;
3122 	qdf_mem_free(ppdu_info);
3123 
3124 	qdf_assert_always(nbuf);
3125 
3126 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3127 		qdf_nbuf_data(nbuf);
3128 
3129 	/**
3130 	 * Deliver PPDU stats only for valid (acked) data frames if
3131 	 * sniffer mode is not enabled.
3132 	 * If sniffer mode is enabled, PPDU stats for all frames
3133 	 * including mgmt/control frames should be delivered to upper layer
3134 	 */
3135 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3136 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
3137 				nbuf, HTT_INVALID_PEER,
3138 				WDI_NO_VAL, pdev->pdev_id);
3139 	} else {
3140 		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
3141 				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
3142 
3143 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3144 					pdev->soc, nbuf, HTT_INVALID_PEER,
3145 					WDI_NO_VAL, pdev->pdev_id);
3146 		} else
3147 			qdf_nbuf_free(nbuf);
3148 	}
3149 	return;
3150 }
3151 
3152 #endif
3153 
3154 /**
3155  * dp_get_ppdu_desc(): Function to allocate new PPDU status
3156  * desc for new ppdu id
3157  * @pdev: DP pdev handle
3158  * @ppdu_id: PPDU unique identifier
3159  * @tlv_type: TLV type received
3160  *
3161  * return: ppdu_info per ppdu tlv structure
3162  */
static
struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
			uint8_t tlv_type)
{
	struct ppdu_info *ppdu_info = NULL;

	/*
	 * Find ppdu_id node exists or not
	 */
	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {

		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
			break;
		}
	}

	if (ppdu_info) {
		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
			/**
			 * if we get tlv_type that is already been processed
			 * for ppdu, that means we got a new ppdu with same
			 * ppdu id. Hence Flush the older ppdu
			 * for MUMIMO and OFDMA, In a PPDU we have
			 * multiple user with same tlv types. tlv bitmap is
			 * used to check whether SU or MU_MIMO/OFDMA
			 */
			if (!(ppdu_info->tlv_bitmap &
			    (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
				return ppdu_info;

			/**
			 * apart from ACK BA STATUS TLV rest all comes in order
			 * so if tlv type not ACK BA STATUS TLV we can deliver
			 * ppdu_info
			 */
			if (tlv_type ==
			    HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)
				return ppdu_info;

			/* Repeated TLV for same id: flush the stale entry */
			dp_ppdu_desc_deliver(pdev, ppdu_info);
		} else {
			/* TLV not seen yet for this ppdu: keep accumulating */
			return ppdu_info;
		}
	}

	/**
	 * Flush the head ppdu descriptor if ppdu desc list reaches max
	 * threshold
	 */
	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
		dp_ppdu_desc_deliver(pdev, ppdu_info);
	}

	/*
	 * Allocate new ppdu_info node
	 */
	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
	if (!ppdu_info)
		return NULL;

	/* Descriptor itself lives in an nbuf so it can be delivered via WDI */
	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
			TRUE);
	if (!ppdu_info->nbuf) {
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	ppdu_info->ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
			sizeof(struct cdp_tx_completion_ppdu));

	/* Reserve the descriptor's bytes in the nbuf data area */
	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"No tailroom for HTT PPDU");
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		ppdu_info->last_user = 0;
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	/**
	 * No lock is needed because all PPDU TLVs are processed in
	 * same context and this list is updated in same context
	 */
	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
			ppdu_info_list_elem);
	pdev->list_depth++;
	return ppdu_info;
}
3257 
3258 /**
3259  * dp_htt_process_tlv(): Function to process each PPDU TLVs
3260  * @pdev: DP pdev handle
3261  * @htt_t2h_msg: HTT target to host message
3262  *
3263  * return: ppdu_info per ppdu tlv structure
3264  */
3265 
static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
		qdf_nbuf_t htt_t2h_msg)
{
	uint32_t length;
	uint32_t ppdu_id;
	uint8_t tlv_type;
	uint32_t tlv_length, tlv_bitmap_expected;
	uint8_t *tlv_buf;
	struct ppdu_info *ppdu_info = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	struct dp_peer *peer;
	uint32_t i = 0;

	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);

	/* Message word 0: total TLV payload size; word 1: ppdu id */
	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);

	msg_word = msg_word + 1;
	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);


	/* Skip the remaining message header words to reach the first TLV */
	msg_word = msg_word + 3;
	while (length > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
			pdev->stats.ppdu_stats_counter[tlv_type]++;

		/* A zero-length TLV terminates parsing */
		if (tlv_length == 0)
			break;

		tlv_length += HTT_TLV_HDR_LEN;

		/**
		 * Not allocating separate ppdu descriptor for MGMT Payload
		 * TLV as this is sent as separate WDI indication and it
		 * doesn't contain any ppdu information
		 */
		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
			pdev->mgmtctrl_frm_info.mgmt_buf_len =
				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
						(*(msg_word + 1));
			msg_word =
				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
			length -= (tlv_length);
			continue;
		}

		/* Find or allocate the ppdu_info node for this ppdu id */
		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
		if (!ppdu_info)
			return NULL;
		ppdu_info->ppdu_desc->bss_color =
			pdev->rx_mon_recv_status.bsscolor;

		ppdu_info->ppdu_id = ppdu_id;
		ppdu_info->tlv_bitmap |= (1 << tlv_type);

		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);

		/**
		 * Increment pdev level tlv count to monitor
		 * missing TLVs
		 */
		pdev->tlv_count++;
		ppdu_info->last_tlv_cnt = pdev->tlv_count;
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		length -= (tlv_length);
	}

	if (!ppdu_info)
		return NULL;

	pdev->last_ppdu_id = ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;

	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
	    pdev->tx_capture_enabled) {
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
					ppdu_info->tlv_bitmap);
	}

	ppdu_desc = ppdu_info->ppdu_desc;

	if (!ppdu_desc)
		return NULL;

	/* For a failed last user only TLV types 0-7 are expected */
	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
	    HTT_PPDU_STATS_USER_STATUS_OK) {
		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
	}

	/* Stash per-user delayed-BA state so a later BAR can restore it */
	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV)) &&
	    ppdu_desc->delayed_ba) {
		for (i = 0; i < ppdu_desc->num_users; i++) {
			/* NOTE(review): this inner ppdu_id shadows the outer
			 * one; here it is taken from the descriptor, which
			 * may differ from the message's id -- verify intent.
			 */
			uint32_t ppdu_id;

			ppdu_id = ppdu_desc->ppdu_id;
			peer = dp_peer_find_by_id(pdev->soc,
						  ppdu_desc->user[i].peer_id);
			/**
			 * This check is to make sure peer is not deleted
			 * after processing the TLVs.
			 */
			if (!peer)
				continue;

			/**
			 * save delayed ba user info
			 */
			if (ppdu_desc->user[i].delayed_ba) {
				dp_peer_copy_delay_stats(peer,
							 &ppdu_desc->user[i]);
				peer->last_delayed_ba_ppduid = ppdu_id;
			}
			dp_peer_unref_del_find_by_id(peer);
		}
	}

	/*
	 * when frame type is BAR and STATS_COMMON_TLV is set
	 * copy the store peer delayed info to BAR status
	 */
	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV))) {
		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
			peer = dp_peer_find_by_id(pdev->soc,
						  ppdu_desc->user[i].peer_id);
			/**
			 * This check is to make sure peer is not deleted
			 * after processing the TLVs.
			 */
			if (!peer)
				continue;

			if (peer->last_delayed_ba) {
				dp_peer_copy_stats_to_bar(peer,
							  &ppdu_desc->user[i]);
				ppdu_desc->bar_ppdu_id = ppdu_desc->ppdu_id;
				ppdu_desc->ppdu_id =
					peer->last_delayed_ba_ppduid;
			}
			dp_peer_unref_del_find_by_id(peer);
		}
	}

	/*
	 * for frame type DATA and BAR, we update stats based on MSDU,
	 * successful msdu and mpdu are populate from ACK BA STATUS TLV
	 * which comes out of order. successful mpdu also populated from
	 * COMPLTN COMMON TLV which comes in order. for every ppdu_info
	 * we store successful mpdu from both tlv and compare before delivering
	 * to make sure we received ACK BA STATUS TLV. For some self generated
	 * frame we won't get ack ba status tlv so no need to wait for
	 * ack ba status tlv.
	 */
	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
		/*
		 * successful mpdu count should match with both tlv
		 */
		if (ppdu_info->mpdu_compltn_common_tlv !=
		    ppdu_info->mpdu_ack_ba_tlv)
			return NULL;
	}

	/**
	 * Once all the TLVs for a given PPDU has been processed,
	 * return PPDU status to be delivered to higher layer.
	 * tlv_bitmap_expected can't be available for different frame type.
	 * But STATS COMMON TLV is the last TLV from the FW for a ppdu.
	 * apart from ACK BA TLV, FW sends other TLV in sequential order.
	 * flush tlv comes separate.
	 */
	if ((ppdu_info->tlv_bitmap != 0 &&
	     (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV))) ||
	    (ppdu_info->tlv_bitmap &
	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV)))
		return ppdu_info;

	return NULL;
}
3454 #endif /* FEATURE_PERPKT_INFO */
3455 
3456 /**
3457  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
3458  * @soc: DP SOC handle
3459  * @pdev_id: pdev id
3460  * @htt_t2h_msg: HTT message nbuf
3461  *
3462  * return:void
3463  */
3464 #if defined(WDI_EVENT_ENABLE)
3465 #ifdef FEATURE_PERPKT_INFO
3466 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
3467 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
3468 {
3469 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
3470 	struct ppdu_info *ppdu_info = NULL;
3471 	bool free_buf = true;
3472 
3473 	if (!pdev)
3474 		return true;
3475 
3476 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
3477 	    !pdev->mcopy_mode && !pdev->bpr_enable)
3478 		return free_buf;
3479 
3480 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
3481 
3482 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
3483 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
3484 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
3485 		    QDF_STATUS_SUCCESS)
3486 			free_buf = false;
3487 	}
3488 
3489 	if (ppdu_info)
3490 		dp_ppdu_desc_deliver(pdev, ppdu_info);
3491 
3492 	pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
3493 	pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
3494 	pdev->mgmtctrl_frm_info.ppdu_id = 0;
3495 
3496 	return free_buf;
3497 }
3498 #else
/* Stub when FEATURE_PERPKT_INFO is disabled: always let caller free nbuf */
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
3504 #endif
3505 #endif
3506 
3507 /**
3508  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
3509  * @soc: DP SOC handle
3510  * @htt_t2h_msg: HTT message nbuf
3511  *
3512  * return:void
3513  */
3514 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
3515 		qdf_nbuf_t htt_t2h_msg)
3516 {
3517 	uint8_t done;
3518 	qdf_nbuf_t msg_copy;
3519 	uint32_t *msg_word;
3520 
3521 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
3522 	msg_word = msg_word + 3;
3523 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
3524 
3525 	/*
3526 	 * HTT EXT stats response comes as stream of TLVs which span over
3527 	 * multiple T2H messages.
3528 	 * The first message will carry length of the response.
3529 	 * For rest of the messages length will be zero.
3530 	 *
3531 	 * Clone the T2H message buffer and store it in a list to process
3532 	 * it later.
3533 	 *
3534 	 * The original T2H message buffers gets freed in the T2H HTT event
3535 	 * handler
3536 	 */
3537 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
3538 
3539 	if (!msg_copy) {
3540 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3541 				"T2H messge clone failed for HTT EXT STATS");
3542 		goto error;
3543 	}
3544 
3545 	qdf_spin_lock_bh(&soc->htt_stats.lock);
3546 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
3547 	/*
3548 	 * Done bit signifies that this is the last T2H buffer in the stream of
3549 	 * HTT EXT STATS message
3550 	 */
3551 	if (done) {
3552 		soc->htt_stats.num_stats++;
3553 		qdf_sched_work(0, &soc->htt_stats.work);
3554 	}
3555 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
3556 
3557 	return;
3558 
3559 error:
3560 	qdf_spin_lock_bh(&soc->htt_stats.lock);
3561 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
3562 			!= NULL) {
3563 		qdf_nbuf_free(msg_copy);
3564 	}
3565 	soc->htt_stats.num_stats = 0;
3566 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
3567 	return;
3568 
3569 }
3570 
3571 /*
3572  * htt_soc_attach_target() - SOC level HTT setup
3573  * @htt_soc:	HTT SOC handle
3574  *
3575  * Return: 0 on success; error code on failure
3576  */
int htt_soc_attach_target(struct htt_soc *htt_soc)
{
	/*
	 * SOC-level HTT setup: kick off the HTT version request handshake.
	 * (The previous same-type self-cast into a local was redundant.)
	 */
	return htt_h2t_ver_req_msg(htt_soc);
}
3583 
/* Store the HTC handle used for all subsequent H2T message sends */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}
3588 
/* Return the HTC handle previously set on this HTT SOC */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
3593 
3594 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
3595 {
3596 	int i;
3597 	int j;
3598 	int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
3599 	struct htt_soc *htt_soc = NULL;
3600 
3601 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
3602 	if (!htt_soc) {
3603 		dp_err("HTT attach failed");
3604 		return NULL;
3605 	}
3606 
3607 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3608 		htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
3609 		if (!htt_soc->pdevid_tt[i].umac_ttt)
3610 			break;
3611 		qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
3612 		htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
3613 		if (!htt_soc->pdevid_tt[i].lmac_ttt) {
3614 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
3615 			break;
3616 		}
3617 		qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
3618 	}
3619 	if (i != MAX_PDEV_CNT) {
3620 		for (j = 0; j < i; j++) {
3621 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
3622 			qdf_mem_free(htt_soc->pdevid_tt[i].lmac_ttt);
3623 		}
3624 		return NULL;
3625 	}
3626 
3627 	htt_soc->dp_soc = soc;
3628 	htt_soc->htc_soc = htc_handle;
3629 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
3630 
3631 	return htt_soc;
3632 }
3633 
3634 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
3635 /*
3636  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
3637  * @htt_soc:	 HTT SOC handle
3638  * @msg_word:    Pointer to payload
3639  * @htt_t2h_msg: HTT msg nbuf
3640  *
3641  * Return: True if buffer should be freed by caller.
3642  */
3643 static bool
3644 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
3645 				uint32_t *msg_word,
3646 				qdf_nbuf_t htt_t2h_msg)
3647 {
3648 	u_int8_t pdev_id;
3649 	bool free_buf;
3650 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
3651 	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
3652 	pdev_id = DP_HW2SW_MACID(pdev_id);
3653 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
3654 					      htt_t2h_msg);
3655 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
3656 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
3657 		pdev_id);
3658 	return free_buf;
3659 }
3660 #else
/* Stub when WDI events / pktlog are compiled out: caller frees the nbuf */
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
				uint32_t *msg_word,
				qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
3668 #endif
3669 
3670 #if defined(WDI_EVENT_ENABLE) && \
3671 	!defined(REMOVE_PKT_LOG)
3672 /*
3673  * dp_pktlog_msg_handler() - Pktlog msg handler
3674  * @htt_soc:	 HTT SOC handle
3675  * @msg_word:    Pointer to payload
3676  *
3677  * Return: None
3678  */
3679 static void
3680 dp_pktlog_msg_handler(struct htt_soc *soc,
3681 		      uint32_t *msg_word)
3682 {
3683 	uint8_t pdev_id;
3684 	uint32_t *pl_hdr;
3685 
3686 	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
3687 	pdev_id = DP_HW2SW_MACID(pdev_id);
3688 	pl_hdr = (msg_word + 1);
3689 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
3690 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
3691 		pdev_id);
3692 }
3693 #else
/* Stub when pktlog support is compiled out: message is ignored */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
3699 #endif
3700 
3701 /*
3702  * time_allow_print() - time allow print
3703  * @htt_ring_tt:	ringi_id array of timestamps
3704  * @ring_id:		ring_id (index)
3705  *
3706  * Return: 1 for successfully saving timestamp in array
3707  *	and 0 for timestamp falling within 2 seconds after last one
3708  */
3709 static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
3710 {
3711 	unsigned long tstamp;
3712 	unsigned long delta;
3713 
3714 	tstamp = qdf_get_system_timestamp();
3715 
3716 	if (!htt_ring_tt)
3717 		return 0; //unable to print backpressure messages
3718 
3719 	if (htt_ring_tt[ring_id] == -1) {
3720 		htt_ring_tt[ring_id] = tstamp;
3721 		return 1;
3722 	}
3723 	delta = tstamp - htt_ring_tt[ring_id];
3724 	if (delta >= 2000) {
3725 		htt_ring_tt[ring_id] = tstamp;
3726 		return 1;
3727 	}
3728 
3729 	return 0;
3730 }
3731 
/* Emit the two-line backpressure alert for the given ring and indices */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       u_int8_t pdev_id, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time, char *ring_stype)
{
	dp_alert("msg_type: %d pdev_id: %d ring_type: %s ",
		 msg_type, pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
}
3742 
3743 /*
3744  * dp_htt_bkp_event_alert() - htt backpressure event alert
3745  * @msg_word:	htt packet context
3746  * @htt_soc:	HTT SOC handle
3747  *
3748  * Return: after attempting to print stats
3749  */
3750 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
3751 {
3752 	u_int8_t ring_type;
3753 	u_int8_t pdev_id;
3754 	u_int8_t ring_id;
3755 	u_int16_t hp_idx;
3756 	u_int16_t tp_idx;
3757 	u_int32_t bkp_time;
3758 	enum htt_t2h_msg_type msg_type;
3759 	struct dp_soc *dpsoc;
3760 	struct dp_pdev *pdev;
3761 	struct dp_htt_timestamp *radio_tt;
3762 
3763 	if (!soc)
3764 		return;
3765 
3766 	dpsoc = (struct dp_soc *)soc->dp_soc;
3767 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3768 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
3769 	pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
3770 	pdev_id = DP_HW2SW_MACID(pdev_id);
3771 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
3772 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
3773 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
3774 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
3775 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
3776 	radio_tt = &soc->pdevid_tt[pdev_id];
3777 
3778 	switch (ring_type) {
3779 	case HTT_SW_RING_TYPE_UMAC:
3780 		if (!time_allow_print(radio_tt->umac_ttt, ring_id))
3781 			return;
3782 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
3783 				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
3784 	break;
3785 	case HTT_SW_RING_TYPE_LMAC:
3786 		if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
3787 			return;
3788 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
3789 				   bkp_time, "HTT_SW_RING_TYPE_LMAC");
3790 	break;
3791 	default:
3792 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
3793 				   bkp_time, "UNKNOWN");
3794 	break;
3795 	}
3796 
3797 	dp_print_ring_stats(pdev);
3798 	dp_print_napi_stats(pdev->soc);
3799 }
3800 
3801 /*
3802  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
3803  * @context:	Opaque context (HTT SOC handle)
3804  * @pkt:	HTC packet
3805  */
3806 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
3807 {
3808 	struct htt_soc *soc = (struct htt_soc *) context;
3809 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
3810 	u_int32_t *msg_word;
3811 	enum htt_t2h_msg_type msg_type;
3812 	bool free_buf = true;
3813 
3814 	/* check for successful message reception */
3815 	if (pkt->Status != QDF_STATUS_SUCCESS) {
3816 		if (pkt->Status != QDF_STATUS_E_CANCELED)
3817 			soc->stats.htc_err_cnt++;
3818 
3819 		qdf_nbuf_free(htt_t2h_msg);
3820 		return;
3821 	}
3822 
3823 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
3824 
3825 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
3826 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3827 	htt_event_record(soc->htt_logger_handle,
3828 			 msg_type, (uint8_t *)msg_word);
3829 	switch (msg_type) {
3830 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
3831 	{
3832 		dp_htt_bkp_event_alert(msg_word, soc);
3833 		break;
3834 	}
3835 	case HTT_T2H_MSG_TYPE_PEER_MAP:
3836 		{
3837 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3838 			u_int8_t *peer_mac_addr;
3839 			u_int16_t peer_id;
3840 			u_int16_t hw_peer_id;
3841 			u_int8_t vdev_id;
3842 			u_int8_t is_wds;
3843 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
3844 
3845 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
3846 			hw_peer_id =
3847 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
3848 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
3849 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
3850 				(u_int8_t *) (msg_word+1),
3851 				&mac_addr_deswizzle_buf[0]);
3852 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3853 				QDF_TRACE_LEVEL_INFO,
3854 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3855 				peer_id, vdev_id);
3856 
3857 			/*
3858 			 * check if peer already exists for this peer_id, if so
3859 			 * this peer map event is in response for a wds peer add
3860 			 * wmi command sent during wds source port learning.
3861 			 * in this case just add the ast entry to the existing
3862 			 * peer ast_list.
3863 			 */
3864 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
3865 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
3866 					       vdev_id, peer_mac_addr, 0,
3867 					       is_wds);
3868 			break;
3869 		}
3870 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
3871 		{
3872 			u_int16_t peer_id;
3873 			u_int8_t vdev_id;
3874 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3875 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
3876 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
3877 
3878 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3879 						 vdev_id, mac_addr, 0);
3880 			break;
3881 		}
3882 	case HTT_T2H_MSG_TYPE_SEC_IND:
3883 		{
3884 			u_int16_t peer_id;
3885 			enum cdp_sec_type sec_type;
3886 			int is_unicast;
3887 
3888 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
3889 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
3890 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
3891 			/* point to the first part of the Michael key */
3892 			msg_word++;
3893 			dp_rx_sec_ind_handler(
3894 				soc->dp_soc, peer_id, sec_type, is_unicast,
3895 				msg_word, msg_word + 2);
3896 			break;
3897 		}
3898 
3899 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
3900 		{
3901 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
3902 							     htt_t2h_msg);
3903 			break;
3904 		}
3905 
3906 	case HTT_T2H_MSG_TYPE_PKTLOG:
3907 		{
3908 			dp_pktlog_msg_handler(soc, msg_word);
3909 			break;
3910 		}
3911 
3912 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3913 		{
3914 			htc_pm_runtime_put(soc->htc_soc);
3915 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3916 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3917 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3918 				"target uses HTT version %d.%d; host uses %d.%d",
3919 				soc->tgt_ver.major, soc->tgt_ver.minor,
3920 				HTT_CURRENT_VERSION_MAJOR,
3921 				HTT_CURRENT_VERSION_MINOR);
3922 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3923 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3924 					QDF_TRACE_LEVEL_ERROR,
3925 					"*** Incompatible host/target HTT versions!");
3926 			}
3927 			/* abort if the target is incompatible with the host */
3928 			qdf_assert(soc->tgt_ver.major ==
3929 				HTT_CURRENT_VERSION_MAJOR);
3930 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3931 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3932 					QDF_TRACE_LEVEL_WARN,
3933 					"*** Warning: host/target HTT versions"
3934 					" are different, though compatible!");
3935 			}
3936 			break;
3937 		}
3938 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3939 		{
3940 			uint16_t peer_id;
3941 			uint8_t tid;
3942 			uint8_t win_sz;
3943 			uint16_t status;
3944 			struct dp_peer *peer;
3945 
3946 			/*
3947 			 * Update REO Queue Desc with new values
3948 			 */
3949 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3950 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3951 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3952 			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);
3953 
3954 			/*
3955 			 * Window size needs to be incremented by 1
3956 			 * since fw needs to represent a value of 256
3957 			 * using just 8 bits
3958 			 */
3959 			if (peer) {
3960 				status = dp_addba_requestprocess_wifi3(
3961 					(struct cdp_soc_t *)soc->dp_soc,
3962 					peer->mac_addr.raw, peer->vdev->vdev_id,
3963 					0, tid, 0, win_sz + 1, 0xffff);
3964 
3965 				/*
3966 				 * If PEER_LOCK_REF_PROTECT enbled dec ref
3967 				 * which is inc by dp_peer_find_by_id
3968 				 */
3969 				dp_peer_unref_del_find_by_id(peer);
3970 
3971 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3972 					QDF_TRACE_LEVEL_INFO,
3973 					FL("PeerID %d BAW %d TID %d stat %d"),
3974 					peer_id, win_sz, tid, status);
3975 
3976 			} else {
3977 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3978 					QDF_TRACE_LEVEL_ERROR,
3979 					FL("Peer not found peer id %d"),
3980 					peer_id);
3981 			}
3982 			break;
3983 		}
3984 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3985 		{
3986 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3987 			break;
3988 		}
3989 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
3990 		{
3991 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3992 			u_int8_t *peer_mac_addr;
3993 			u_int16_t peer_id;
3994 			u_int16_t hw_peer_id;
3995 			u_int8_t vdev_id;
3996 			bool is_wds;
3997 			u_int16_t ast_hash;
3998 
3999 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
4000 			hw_peer_id =
4001 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
4002 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
4003 			peer_mac_addr =
4004 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
4005 						   &mac_addr_deswizzle_buf[0]);
4006 			is_wds =
4007 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
4008 			ast_hash =
4009 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
4010 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4011 				  QDF_TRACE_LEVEL_INFO,
4012 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
4013 				  peer_id, vdev_id);
4014 
4015 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
4016 					       hw_peer_id, vdev_id,
4017 					       peer_mac_addr, ast_hash,
4018 					       is_wds);
4019 			break;
4020 		}
4021 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
4022 		{
4023 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4024 			u_int8_t *mac_addr;
4025 			u_int16_t peer_id;
4026 			u_int8_t vdev_id;
4027 			u_int8_t is_wds;
4028 
4029 			peer_id =
4030 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
4031 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
4032 			mac_addr =
4033 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
4034 						   &mac_addr_deswizzle_buf[0]);
4035 			is_wds =
4036 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
4037 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4038 				  QDF_TRACE_LEVEL_INFO,
4039 				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
4040 				  peer_id, vdev_id);
4041 
4042 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
4043 						 vdev_id, mac_addr,
4044 						 is_wds);
4045 			break;
4046 		}
4047 	default:
4048 		break;
4049 	};
4050 
4051 	/* Free the indication buffer */
4052 	if (free_buf)
4053 		qdf_nbuf_free(htt_t2h_msg);
4054 }
4055 
4056 /*
4057  * dp_htt_h2t_full() - Send full handler (called from HTC)
4058  * @context:	Opaque context (HTT SOC handle)
4059  * @pkt:	HTC packet
4060  *
4061  * Return: enum htc_send_full_action
4062  */
4063 static enum htc_send_full_action
4064 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
4065 {
4066 	return HTC_SEND_FULL_KEEP;
4067 }
4068 
4069 /*
4070  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
4071  * @context:	Opaque context (HTT SOC handle)
4072  * @nbuf:	nbuf containing T2H message
4073  * @pipe_id:	HIF pipe ID
4074  *
4075  * Return: QDF_STATUS
4076  *
4077  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
4078  * will be used for packet log and other high-priority HTT messages. Proper
4079  * HTC connection to be added later once required FW changes are available
4080  */
4081 static QDF_STATUS
4082 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
4083 {
4084 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
4085 	HTC_PACKET htc_pkt;
4086 
4087 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
4088 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
4089 	htc_pkt.Status = QDF_STATUS_SUCCESS;
4090 	htc_pkt.pPktContext = (void *)nbuf;
4091 	dp_htt_t2h_msg_handler(context, &htc_pkt);
4092 
4093 	return rc;
4094 }
4095 
4096 /*
4097  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
4098  * @htt_soc:	HTT SOC handle
4099  *
4100  * Return: QDF_STATUS
4101  */
4102 static QDF_STATUS
4103 htt_htc_soc_attach(struct htt_soc *soc)
4104 {
4105 	struct htc_service_connect_req connect;
4106 	struct htc_service_connect_resp response;
4107 	QDF_STATUS status;
4108 	struct dp_soc *dpsoc = soc->dp_soc;
4109 
4110 	qdf_mem_zero(&connect, sizeof(connect));
4111 	qdf_mem_zero(&response, sizeof(response));
4112 
4113 	connect.pMetaData = NULL;
4114 	connect.MetaDataLength = 0;
4115 	connect.EpCallbacks.pContext = soc;
4116 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
4117 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
4118 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
4119 
4120 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
4121 	connect.EpCallbacks.EpRecvRefill = NULL;
4122 
4123 	/* N/A, fill is done by HIF */
4124 	connect.EpCallbacks.RecvRefillWaterMark = 1;
4125 
4126 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
4127 	/*
4128 	 * Specify how deep to let a queue get before htc_send_pkt will
4129 	 * call the EpSendFull function due to excessive send queue depth.
4130 	 */
4131 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
4132 
4133 	/* disable flow control for HTT data message service */
4134 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
4135 
4136 	/* connect to control service */
4137 	connect.service_id = HTT_DATA_MSG_SVC;
4138 
4139 	status = htc_connect_service(soc->htc_soc, &connect, &response);
4140 
4141 	if (status != QDF_STATUS_SUCCESS)
4142 		return status;
4143 
4144 	soc->htc_endpoint = response.Endpoint;
4145 
4146 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
4147 
4148 	htt_interface_logging_init(&soc->htt_logger_handle);
4149 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
4150 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
4151 
4152 	return QDF_STATUS_SUCCESS; /* success */
4153 }
4154 
4155 /*
4156  * htt_soc_initialize() - SOC level HTT initialization
4157  * @htt_soc: Opaque htt SOC handle
4158  * @ctrl_psoc: Opaque ctrl SOC handle
4159  * @htc_soc: SOC level HTC handle
4160  * @hal_soc: Opaque HAL SOC handle
4161  * @osdev: QDF device
4162  *
4163  * Return: HTT handle on success; NULL on failure
4164  */
4165 void *
4166 htt_soc_initialize(struct htt_soc *htt_soc,
4167 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
4168 		   HTC_HANDLE htc_soc,
4169 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
4170 {
4171 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
4172 
4173 	soc->osdev = osdev;
4174 	soc->ctrl_psoc = ctrl_psoc;
4175 	soc->htc_soc = htc_soc;
4176 	soc->hal_soc = hal_soc_hdl;
4177 
4178 	if (htt_htc_soc_attach(soc))
4179 		goto fail2;
4180 
4181 	return soc;
4182 
4183 fail2:
4184 	return NULL;
4185 }
4186 
/*
 * htt_soc_htc_dealloc() - release HTC resources owned by the HTT SOC
 * @htt_handle: HTT SOC handle
 *
 * Deinitializes HTT event logging and frees both the misc and the main
 * HTC packet pools.
 *
 * Return: None
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
4193 
4194 /*
4195  * htt_soc_htc_prealloc() - HTC memory prealloc
4196  * @htt_soc: SOC level HTT handle
4197  *
4198  * Return: QDF_STATUS_SUCCESS on Success or
4199  * QDF_STATUS_E_NOMEM on allocation failure
4200  */
4201 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
4202 {
4203 	int i;
4204 
4205 	soc->htt_htc_pkt_freelist = NULL;
4206 	/* pre-allocate some HTC_PACKET objects */
4207 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
4208 		struct dp_htt_htc_pkt_union *pkt;
4209 		pkt = qdf_mem_malloc(sizeof(*pkt));
4210 		if (!pkt)
4211 			return QDF_STATUS_E_NOMEM;
4212 
4213 		htt_htc_pkt_free(soc, &pkt->u.pkt);
4214 	}
4215 	return QDF_STATUS_SUCCESS;
4216 }
4217 
4218 /*
4219  * htt_soc_detach() - Free SOC level HTT handle
4220  * @htt_hdl: HTT SOC handle
4221  */
4222 void htt_soc_detach(struct htt_soc *htt_hdl)
4223 {
4224 	int i;
4225 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
4226 
4227 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4228 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
4229 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
4230 	}
4231 
4232 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
4233 	qdf_mem_free(htt_handle);
4234 
4235 }
4236 
4237 /**
4238  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
4239  * @pdev: DP PDEV handle
4240  * @stats_type_upload_mask: stats type requested by user
4241  * @config_param_0: extra configuration parameters
4242  * @config_param_1: extra configuration parameters
4243  * @config_param_2: extra configuration parameters
4244  * @config_param_3: extra configuration parameters
4245  * @mac_id: mac number
4246  *
4247  * return: QDF STATUS
4248  */
4249 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
4250 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
4251 		uint32_t config_param_1, uint32_t config_param_2,
4252 		uint32_t config_param_3, int cookie_val, int cookie_msb,
4253 		uint8_t mac_id)
4254 {
4255 	struct htt_soc *soc = pdev->soc->htt_handle;
4256 	struct dp_htt_htc_pkt *pkt;
4257 	qdf_nbuf_t msg;
4258 	uint32_t *msg_word;
4259 	uint8_t pdev_mask = 0;
4260 	uint8_t *htt_logger_bufp;
4261 
4262 	msg = qdf_nbuf_alloc(
4263 			soc->osdev,
4264 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
4265 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4266 
4267 	if (!msg)
4268 		return QDF_STATUS_E_NOMEM;
4269 
4270 	/*TODO:Add support for SOC stats
4271 	 * Bit 0: SOC Stats
4272 	 * Bit 1: Pdev stats for pdev id 0
4273 	 * Bit 2: Pdev stats for pdev id 1
4274 	 * Bit 3: Pdev stats for pdev id 2
4275 	 */
4276 	mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4277 
4278 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
4279 	/*
4280 	 * Set the length of the message.
4281 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4282 	 * separately during the below call to qdf_nbuf_push_head.
4283 	 * The contribution from the HTC header is added separately inside HTC.
4284 	 */
4285 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
4286 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4287 				"Failed to expand head for HTT_EXT_STATS");
4288 		qdf_nbuf_free(msg);
4289 		return QDF_STATUS_E_FAILURE;
4290 	}
4291 
4292 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4293 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
4294 		"config_param_1 %u\n config_param_2 %u\n"
4295 		"config_param_4 %u\n -------------",
4296 		__func__, __LINE__, cookie_val, config_param_0,
4297 		config_param_1, config_param_2,	config_param_3);
4298 
4299 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
4300 
4301 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4302 	htt_logger_bufp = (uint8_t *)msg_word;
4303 	*msg_word = 0;
4304 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
4305 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
4306 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
4307 
4308 	/* word 1 */
4309 	msg_word++;
4310 	*msg_word = 0;
4311 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
4312 
4313 	/* word 2 */
4314 	msg_word++;
4315 	*msg_word = 0;
4316 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
4317 
4318 	/* word 3 */
4319 	msg_word++;
4320 	*msg_word = 0;
4321 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
4322 
4323 	/* word 4 */
4324 	msg_word++;
4325 	*msg_word = 0;
4326 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
4327 
4328 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
4329 
4330 	/* word 5 */
4331 	msg_word++;
4332 
4333 	/* word 6 */
4334 	msg_word++;
4335 	*msg_word = 0;
4336 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
4337 
4338 	/* word 7 */
4339 	msg_word++;
4340 	*msg_word = 0;
4341 	/*Using last 2 bits for pdev_id */
4342 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
4343 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
4344 
4345 	pkt = htt_htc_pkt_alloc(soc);
4346 	if (!pkt) {
4347 		qdf_nbuf_free(msg);
4348 		return QDF_STATUS_E_NOMEM;
4349 	}
4350 
4351 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4352 
4353 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4354 			dp_htt_h2t_send_complete_free_netbuf,
4355 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4356 			soc->htc_endpoint,
4357 			/* tag for FW response msg not guaranteed */
4358 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
4359 
4360 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4361 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
4362 			    htt_logger_bufp);
4363 	return 0;
4364 }
4365 
/* This block will be reverted once a proper definition for
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is added to the HTT header (htt.h).
 */
#if defined(WDI_EVENT_ENABLE)
/**
 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @stats_type_upload_mask: stats type requested by user
 * @mac_id: Mac id number
 *
 * Builds and sends a HTT_H2T_MSG_TYPE_PPDU_STATS_CFG message that
 * selects which PPDU stats TLVs the FW should upload for the radio.
 *
 * return: QDF STATUS
 */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	/* NOTE(review): unlike dp_h2t_ext_stats_msg_send(), mac_id is not
	 * passed through dp_get_mac_id_for_pdev() here - confirm callers
	 * already supply a per-pdev mac id
	 */
	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand head for HTT_CFG_STATS");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
			stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
			    (uint8_t *)msg_word);
	return 0;
}
#endif
4452 
4453 void
4454 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
4455 			     uint32_t *tag_buf)
4456 {
4457 	switch (tag_type) {
4458 	case HTT_STATS_PEER_DETAILS_TAG:
4459 	{
4460 		htt_peer_details_tlv *dp_stats_buf =
4461 			(htt_peer_details_tlv *)tag_buf;
4462 
4463 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
4464 	}
4465 	break;
4466 	case HTT_STATS_PEER_STATS_CMN_TAG:
4467 	{
4468 		htt_peer_stats_cmn_tlv *dp_stats_buf =
4469 			(htt_peer_stats_cmn_tlv *)tag_buf;
4470 
4471 		struct dp_peer *peer = dp_peer_find_by_id(pdev->soc,
4472 						pdev->fw_stats_peer_id);
4473 
4474 		if (peer && !peer->bss_peer) {
4475 			peer->stats.tx.inactive_time =
4476 				dp_stats_buf->inactive_time;
4477 			qdf_event_set(&pdev->fw_peer_stats_event);
4478 		}
4479 		if (peer)
4480 			dp_peer_unref_del_find_by_id(peer);
4481 	}
4482 	break;
4483 	default:
4484 		qdf_err("Invalid tag_type");
4485 	}
4486 }
4487 
4488 /**
4489  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
4490  * @pdev: DP pdev handle
4491  * @fse_setup_info: FST setup parameters
4492  *
4493  * Return: Success when HTT message is sent, error on failure
4494  */
4495 QDF_STATUS
4496 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
4497 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
4498 {
4499 	struct htt_soc *soc = pdev->soc->htt_handle;
4500 	struct dp_htt_htc_pkt *pkt;
4501 	qdf_nbuf_t msg;
4502 	u_int32_t *msg_word;
4503 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4504 	uint8_t *htt_logger_bufp;
4505 	u_int32_t *key;
4506 
4507 	msg = qdf_nbuf_alloc(
4508 		soc->osdev,
4509 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4510 		/* reserve room for the HTC header */
4511 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4512 
4513 	if (!msg)
4514 		return QDF_STATUS_E_NOMEM;
4515 
4516 	/*
4517 	 * Set the length of the message.
4518 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4519 	 * separately during the below call to qdf_nbuf_push_head.
4520 	 * The contribution from the HTC header is added separately inside HTC.
4521 	 */
4522 	if (!qdf_nbuf_put_tail(msg,
4523 			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4524 		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
4525 		return QDF_STATUS_E_FAILURE;
4526 	}
4527 
4528 	/* fill in the message contents */
4529 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4530 
4531 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4532 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4533 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4534 	htt_logger_bufp = (uint8_t *)msg_word;
4535 
4536 	*msg_word = 0;
4537 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4538 
4539 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4540 
4541 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4542 
4543 	msg_word++;
4544 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4545 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4546 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4547 					     fse_setup_info->ip_da_sa_prefix);
4548 
4549 	msg_word++;
4550 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4551 					  fse_setup_info->base_addr_lo);
4552 	msg_word++;
4553 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4554 					  fse_setup_info->base_addr_hi);
4555 
4556 	key = (u_int32_t *)fse_setup_info->hash_key;
4557 	fse_setup->toeplitz31_0 = *key++;
4558 	fse_setup->toeplitz63_32 = *key++;
4559 	fse_setup->toeplitz95_64 = *key++;
4560 	fse_setup->toeplitz127_96 = *key++;
4561 	fse_setup->toeplitz159_128 = *key++;
4562 	fse_setup->toeplitz191_160 = *key++;
4563 	fse_setup->toeplitz223_192 = *key++;
4564 	fse_setup->toeplitz255_224 = *key++;
4565 	fse_setup->toeplitz287_256 = *key++;
4566 	fse_setup->toeplitz314_288 = *key;
4567 
4568 	msg_word++;
4569 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4570 	msg_word++;
4571 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4572 	msg_word++;
4573 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4574 	msg_word++;
4575 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4576 	msg_word++;
4577 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4578 	msg_word++;
4579 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4580 	msg_word++;
4581 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4582 	msg_word++;
4583 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4584 	msg_word++;
4585 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4586 	msg_word++;
4587 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4588 					  fse_setup->toeplitz314_288);
4589 
4590 	pkt = htt_htc_pkt_alloc(soc);
4591 	if (!pkt) {
4592 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4593 		qdf_assert(0);
4594 		qdf_nbuf_free(msg);
4595 		return QDF_STATUS_E_RESOURCES; /* failure */
4596 	}
4597 
4598 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4599 
4600 	SET_HTC_PACKET_INFO_TX(
4601 		&pkt->htc_pkt,
4602 		dp_htt_h2t_send_complete_free_netbuf,
4603 		qdf_nbuf_data(msg),
4604 		qdf_nbuf_len(msg),
4605 		soc->htc_endpoint,
4606 		1); /* tag - not relevant here */
4607 
4608 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4609 
4610 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4611 			    htt_logger_bufp);
4612 
4613 	qdf_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4614 		 fse_setup_info->pdev_id);
4615 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4616 			   (void *)fse_setup_info->hash_key,
4617 			   fse_setup_info->hash_key_len);
4618 
4619 	return QDF_STATUS_SUCCESS;
4620 }
4621 
4622 /**
4623  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
4624  * add/del a flow in HW
4625  * @pdev: DP pdev handle
4626  * @fse_op_info: Flow entry parameters
4627  *
4628  * Return: Success when HTT message is sent, error on failure
4629  */
4630 QDF_STATUS
4631 dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
4632 			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
4633 {
4634 	struct htt_soc *soc = pdev->soc->htt_handle;
4635 	struct dp_htt_htc_pkt *pkt;
4636 	qdf_nbuf_t msg;
4637 	u_int32_t *msg_word;
4638 	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
4639 	uint8_t *htt_logger_bufp;
4640 
4641 	msg = qdf_nbuf_alloc(
4642 		soc->osdev,
4643 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
4644 		/* reserve room for the HTC header */
4645 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4646 	if (!msg)
4647 		return QDF_STATUS_E_NOMEM;
4648 
4649 	/*
4650 	 * Set the length of the message.
4651 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4652 	 * separately during the below call to qdf_nbuf_push_head.
4653 	 * The contribution from the HTC header is added separately inside HTC.
4654 	 */
4655 	if (!qdf_nbuf_put_tail(msg,
4656 			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
4657 		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4658 		return QDF_STATUS_E_FAILURE;
4659 	}
4660 
4661 	/* fill in the message contents */
4662 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4663 
4664 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
4665 	/* rewind beyond alignment pad to get to the HTC header reserved area */
4666 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4667 	htt_logger_bufp = (uint8_t *)msg_word;
4668 
4669 	*msg_word = 0;
4670 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
4671 
4672 	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
4673 
4674 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
4675 	msg_word++;
4676 	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
4677 	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
4678 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4679 					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
4680 		msg_word++;
4681 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4682 		*msg_word,
4683 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
4684 		msg_word++;
4685 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4686 		*msg_word,
4687 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
4688 		msg_word++;
4689 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4690 		*msg_word,
4691 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
4692 		msg_word++;
4693 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4694 		*msg_word,
4695 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
4696 		msg_word++;
4697 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4698 		*msg_word,
4699 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
4700 		msg_word++;
4701 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4702 		*msg_word,
4703 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
4704 		msg_word++;
4705 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4706 		*msg_word,
4707 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
4708 		msg_word++;
4709 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4710 		*msg_word,
4711 		qdf_htonl(
4712 		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
4713 		msg_word++;
4714 		HTT_RX_FSE_SOURCEPORT_SET(
4715 			*msg_word,
4716 			fse_op_info->rx_flow->flow_tuple_info.src_port);
4717 		HTT_RX_FSE_DESTPORT_SET(
4718 			*msg_word,
4719 			fse_op_info->rx_flow->flow_tuple_info.dest_port);
4720 		msg_word++;
4721 		HTT_RX_FSE_L4_PROTO_SET(
4722 			*msg_word,
4723 			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
4724 	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
4725 		HTT_RX_FSE_OPERATION_SET(*msg_word,
4726 					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
4727 	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
4728 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
4729 	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
4730 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
4731 	}
4732 
4733 	pkt = htt_htc_pkt_alloc(soc);
4734 	if (!pkt) {
4735 		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4736 		qdf_assert(0);
4737 		qdf_nbuf_free(msg);
4738 		return QDF_STATUS_E_RESOURCES; /* failure */
4739 	}
4740 
4741 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
4742 
4743 	SET_HTC_PACKET_INFO_TX(
4744 		&pkt->htc_pkt,
4745 		dp_htt_h2t_send_complete_free_netbuf,
4746 		qdf_nbuf_data(msg),
4747 		qdf_nbuf_len(msg),
4748 		soc->htc_endpoint,
4749 		1); /* tag - not relevant here */
4750 
4751 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4752 
4753 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
4754 			    htt_logger_bufp);
4755 
4756 	qdf_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
4757 		 fse_op_info->pdev_id);
4758 
4759 	return QDF_STATUS_SUCCESS;
4760 }
4761