xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision a2d910900d3182481ddd6fa24ef7a7cf04e14f69)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_rx.h"
26 #include "dp_rx_mon.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "dp_htt.h"
30 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
31 #include "cdp_txrx_cmn_struct.h"
32 
33 #ifdef FEATURE_PERPKT_INFO
34 #include "dp_ratetable.h"
35 #endif
36 
37 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
38 
39 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
40 
41 #define HTT_MSG_BUF_SIZE(msg_bytes) \
42 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
43 
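/*
 * Worked example (values illustrative): for an 8-byte HTT message,
 * HTT_MSG_BUF_SIZE(8) reserves 8 + HTC_HEADER_LEN +
 * HTC_HDR_ALIGNMENT_PADDING bytes, so the HTC header can later be pushed
 * in front of the HTT payload without reallocating the nbuf.
 */
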
44 #define HTT_PID_BIT_MASK 0x3
45 
46 #define DP_EXT_MSG_LENGTH 2048
47 
48 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
49 
50 #define HTT_SHIFT_UPPER_TIMESTAMP 32
51 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
52 
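/*
 * A minimal sketch (not part of this file) of how the two 32-bit halves
 * of a PPDU timestamp can be recombined using the definitions above; the
 * function and variable names are hypothetical.
 */
#if 0
static inline uint64_t htt_combine_ppdu_timestamp(uint32_t tsf_l32,
						  uint32_t tsf_u32)
{
	uint64_t tsf64 = (uint64_t)tsf_u32 << HTT_SHIFT_UPPER_TIMESTAMP;

	/* the upper word fills HTT_MASK_UPPER_TIMESTAMP; the lower word
	 * occupies the remaining 32 bits
	 */
	return (tsf64 & HTT_MASK_UPPER_TIMESTAMP) | tsf_l32;
}
#endif
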
53 /*
54  * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
55  * bitmap for sniffer mode
56  * @bitmap: received bitmap
57  *
58  * Return: expected bitmap value; returns zero if it matches neither the
59  * 64-bit nor the 256-bit Tx window tlv bitmap
60  */
61 int
62 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
63 {
64 	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
65 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
66 	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
67 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
68 
69 	return 0;
70 }
71 
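/*
 * Hypothetical caller sketch: a PPDU TLV handler can validate the bitmap
 * received in sniffer mode before trusting the A-MPDU TLVs; ppdu_info is
 * an assumed local.
 */
#if 0
	if (!dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(ppdu_info->tlv_bitmap))
		return; /* matches neither the 64- nor 256-window bitmap */
#endif
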
72 #ifdef FEATURE_PERPKT_INFO
73 /*
74  * dp_peer_find_by_id_valid - check if peer exists for given id
75  * @soc: core DP soc context
76  * @peer_id: peer id by which the peer object is retrieved
77  *
78  * Return: true if the peer exists, false otherwise
79  */
80 
81 static
82 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
83 {
84 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
85 						     DP_MOD_ID_HTT);
86 
87 	if (peer) {
88 		/*
89 		 * Decrement the peer ref which is taken as part of
90 		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
91 		 */
92 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
93 
94 		return true;
95 	}
96 
97 	return false;
98 }
99 
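/*
 * Hypothetical usage of the helper above: stats indications that carry a
 * possibly stale peer_id can be dropped before any further processing.
 */
#if 0
	if (!dp_peer_find_by_id_valid(soc, peer_id))
		return; /* peer already deleted; ignore this indication */
#endif
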
100 /*
101  * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
102  * @peer: Datapath peer handle
103  * @ppdu: User PPDU Descriptor
104  * @cur_ppdu_id: PPDU_ID
105  *
106  * Return: None
107  *
108  * On a Tx data frame we may get delayed_ba set in
109  * htt_ppdu_stats_user_common_tlv, which means the Block Ack (BA) arrives
110  * only after we send a Block Ack Request (BAR). The successful MSDU count
111  * is known only after the Block Ack, and peer stats need it, so the Tx
112  * data stats are held in delayed_ba_ppdu_stats until then.
113  */
114 static void
115 dp_peer_copy_delay_stats(struct dp_peer *peer,
116 			 struct cdp_tx_completion_ppdu_user *ppdu,
117 			 uint32_t cur_ppdu_id)
118 {
119 	struct dp_pdev *pdev;
120 	struct dp_vdev *vdev;
121 
122 	if (peer->last_delayed_ba) {
123 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
124 			  "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]",
125 			  peer->last_delayed_ba_ppduid, cur_ppdu_id);
126 		vdev = peer->vdev;
127 		if (vdev) {
128 			pdev = vdev->pdev;
129 			pdev->stats.cdp_delayed_ba_not_recev++;
130 		}
131 	}
132 
133 	peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
134 	peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
135 	peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
136 	peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
137 	peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
138 	peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
139 	peer->delayed_ba_ppdu_stats.preamble = ppdu->preamble;
140 	peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
141 	peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
142 	peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
143 	peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = ppdu->mpdu_tried_ucast;
144 	peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = ppdu->mpdu_tried_mcast;
145 	peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
146 	peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
149 
150 	peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
151 	peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
152 	peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
153 
154 	peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
155 	peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
156 
157 	peer->last_delayed_ba = true;
158 
159 	ppdu->debug_copied = true;
160 }
161 
162 /*
163  * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
164  * @peer: Datapath peer handle
165  * @ppdu: PPDU Descriptor
166  *
167  * Return: None
168  *
169  * For a Tx BAR, the PPDU stats TLV carries the Block Ack info. The PPDU
170  * info from the BAR frame itself is not needed to populate peer stats,
171  * but the successful MPDU/MSDU counts are needed to account for the
172  * previously transmitted Tx data frame, so the BAR's ppdu stats are
173  * overwritten with the previously stored ones.
174  */
175 static void
176 dp_peer_copy_stats_to_bar(struct dp_peer *peer,
177 			  struct cdp_tx_completion_ppdu_user *ppdu)
178 {
179 	ppdu->ltf_size = peer->delayed_ba_ppdu_stats.ltf_size;
180 	ppdu->stbc = peer->delayed_ba_ppdu_stats.stbc;
181 	ppdu->he_re = peer->delayed_ba_ppdu_stats.he_re;
182 	ppdu->txbf = peer->delayed_ba_ppdu_stats.txbf;
183 	ppdu->bw = peer->delayed_ba_ppdu_stats.bw;
184 	ppdu->nss = peer->delayed_ba_ppdu_stats.nss;
185 	ppdu->preamble = peer->delayed_ba_ppdu_stats.preamble;
186 	ppdu->gi = peer->delayed_ba_ppdu_stats.gi;
187 	ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
188 	ppdu->ldpc = peer->delayed_ba_ppdu_stats.ldpc;
189 	ppdu->mpdu_tried_ucast = peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
190 	ppdu->mpdu_tried_mcast = peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
191 	ppdu->frame_ctrl = peer->delayed_ba_ppdu_stats.frame_ctrl;
192 	ppdu->qos_ctrl = peer->delayed_ba_ppdu_stats.qos_ctrl;
195 
196 	ppdu->ru_start = peer->delayed_ba_ppdu_stats.ru_start;
197 	ppdu->ru_tones = peer->delayed_ba_ppdu_stats.ru_tones;
198 	ppdu->is_mcast = peer->delayed_ba_ppdu_stats.is_mcast;
199 
200 	ppdu->user_pos = peer->delayed_ba_ppdu_stats.user_pos;
201 	ppdu->mu_group_id = peer->delayed_ba_ppdu_stats.mu_group_id;
202 
203 	peer->last_delayed_ba = false;
204 
205 	ppdu->debug_copied = true;
206 }
207 
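/*
 * A minimal sketch (simplified, hypothetical control flow) of how the two
 * helpers above pair up across PPDUs: rate info is parked on the data
 * PPDU and restored once the BAR's Block Ack reports the final MPDU/MSDU
 * counts.
 */
#if 0
	/* data PPDU completes with delayed_ba set: park the rate info */
	if (ppdu_user->delayed_ba)
		dp_peer_copy_delay_stats(peer, ppdu_user, cur_ppdu_id);

	/* later, the BAR PPDU arrives with the Block Ack counts: restore */
	if (is_bar_frame && peer->last_delayed_ba)
		dp_peer_copy_stats_to_bar(peer, ppdu_user);
#endif
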
208 /*
209  * dp_tx_rate_stats_update() - Update rate per-peer statistics
210  * @peer: Datapath peer handle
211  * @ppdu: PPDU Descriptor
212  *
213  * Return: None
214  */
215 static void
216 dp_tx_rate_stats_update(struct dp_peer *peer,
217 			struct cdp_tx_completion_ppdu_user *ppdu)
218 {
219 	uint32_t ratekbps = 0;
220 	uint64_t ppdu_tx_rate = 0;
221 	uint32_t rix;
222 	uint16_t ratecode = 0;
223 
224 	if (!peer || !ppdu)
225 		return;
226 
227 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
228 		return;
229 
230 	ratekbps = dp_getrateindex(ppdu->gi,
231 				   ppdu->mcs,
232 				   ppdu->nss,
233 				   ppdu->preamble,
234 				   ppdu->bw,
235 				   &rix,
236 				   &ratecode);
237 
238 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
239 
240 	if (!ratekbps)
241 		return;
242 
243 	/* Calculate goodput in the non-training period.
244 	 * In the training period, don't do anything, as
245 	 * pending packets are sent as goodput.
246 	 */
247 	if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
248 		ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
249 				(CDP_PERCENT_MACRO - ppdu->current_rate_per));
250 	}
251 	ppdu->rix = rix;
252 	ppdu->tx_ratekbps = ratekbps;
253 	ppdu->tx_ratecode = ratecode;
254 	peer->stats.tx.avg_tx_rate =
255 		dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
256 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
257 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
258 
259 	if (peer->vdev) {
260 		/*
261 		 * In STA mode:
262 		 *	We get ucast stats as BSS peer stats.
263 		 *
264 		 * In AP mode:
265 		 *	We get mcast stats as BSS peer stats.
266 		 *	We get ucast stats as assoc peer stats.
267 		 */
268 		if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
269 			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
270 			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
271 		} else {
272 			peer->vdev->stats.tx.last_tx_rate = ratekbps;
273 			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
274 		}
275 	}
276 }
277 
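/*
 * Worked example for the goodput computation above (illustrative numbers,
 * assuming CDP_NUM_KB_IN_MB == 1000 and CDP_PERCENT_MACRO == 100): with
 * ratekbps = 144000 (144 Mbps) and current_rate_per = 10 (%),
 * sa_goodput = (144000 / 1000) * (100 - 10) = 144 * 90 = 12960, i.e. the
 * rate in Mbps scaled by the success percentage.
 */
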
278 /*
279  * dp_tx_stats_update() - Update per-peer statistics
280  * @pdev: Datapath pdev handle
281  * @peer: Datapath peer handle
282  * @ppdu: PPDU Descriptor
283  * @ack_rssi: RSSI of last ack received
284  *
285  * Return: None
286  */
287 static void
288 dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
289 		   struct cdp_tx_completion_ppdu_user *ppdu,
290 		   uint32_t ack_rssi)
291 {
292 	uint8_t preamble, mcs;
293 	uint16_t num_msdu;
294 	uint16_t num_mpdu;
295 	uint16_t mpdu_tried;
296 	uint16_t mpdu_failed;
297 
298 	preamble = ppdu->preamble;
299 	mcs = ppdu->mcs;
300 	num_msdu = ppdu->num_msdu;
301 	num_mpdu = ppdu->mpdu_success;
302 	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
303 	mpdu_failed = mpdu_tried - num_mpdu;
304 
305 	/* If the peer statistics are already processed as part of the
306 	 * per-MSDU completion handler, do not process them again in the
307 	 * per-PPDU indications */
308 	if (pdev->soc->process_tx_status)
309 		return;
310 
311 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
312 		/*
313 		 * All failed MPDUs will be retried, so increment the
314 		 * retry count by the number of failed MPDUs. Even on
315 		 * ack failure, i.e. for long retries, mpdu_failed
316 		 * equals mpdu_tried.
317 		 */
318 		DP_STATS_INC(peer, tx.retries, mpdu_failed);
319 		DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
320 		return;
321 	}
322 
323 	if (ppdu->is_ppdu_cookie_valid)
324 		DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1);
325 
326 	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
327 	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
328 		if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
329 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
330 				  "mu_group_id out of bound!!\n");
331 		else
332 			DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
333 				     (ppdu->user_pos + 1));
334 	}
335 
336 	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
337 	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
338 		DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
339 		DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
340 		switch (ppdu->ru_tones) {
341 		case RU_26:
342 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu,
343 				     num_msdu);
344 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu,
345 				     num_mpdu);
346 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried,
347 				     mpdu_tried);
348 		break;
349 		case RU_52:
350 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu,
351 				     num_msdu);
352 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu,
353 				     num_mpdu);
354 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried,
355 				     mpdu_tried);
356 		break;
357 		case RU_106:
358 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu,
359 				     num_msdu);
360 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu,
361 				     num_mpdu);
362 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried,
363 				     mpdu_tried);
364 		break;
365 		case RU_242:
366 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu,
367 				     num_msdu);
368 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu,
369 				     num_mpdu);
370 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried,
371 				     mpdu_tried);
372 		break;
373 		case RU_484:
374 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu,
375 				     num_msdu);
376 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu,
377 				     num_mpdu);
378 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried,
379 				     mpdu_tried);
380 		break;
381 		case RU_996:
382 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu,
383 				     num_msdu);
384 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu,
385 				     num_mpdu);
386 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried,
387 				     mpdu_tried);
388 		break;
389 		}
390 	}
391 
392 	/*
393 	 * All failed MPDUs will be retried, so increment the
394 	 * retry count by the number of failed MPDUs. Even on
395 	 * ack failure, i.e. for long retries, mpdu_failed
396 	 * equals mpdu_tried.
397 	 */
398 	DP_STATS_INC(peer, tx.retries, mpdu_failed);
399 	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
400 
401 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
402 		     num_msdu);
403 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
404 		     num_mpdu);
405 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
406 		     mpdu_tried);
407 
408 	DP_STATS_INC_PKT(peer, tx.comp_pkt,
409 			num_msdu, (ppdu->success_bytes +
410 				ppdu->retry_bytes + ppdu->failed_bytes));
411 	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
412 	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
413 	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
414 	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
415 	if (ppdu->tid < CDP_DATA_TID_MAX)
416 		DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
417 			     num_msdu);
418 	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
419 	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
420 	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
421 		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);
422 
423 	DP_STATS_INCC(peer,
424 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
425 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
426 	DP_STATS_INCC(peer,
427 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
428 			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
429 	DP_STATS_INCC(peer,
430 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
431 			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
432 	DP_STATS_INCC(peer,
433 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
434 			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
435 	DP_STATS_INCC(peer,
436 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
437 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
438 	DP_STATS_INCC(peer,
439 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
440 			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
441 	DP_STATS_INCC(peer,
442 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
443 			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
444 	DP_STATS_INCC(peer,
445 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
446 			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
447 	DP_STATS_INCC(peer,
448 			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
449 			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
450 	DP_STATS_INCC(peer,
451 			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
452 			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
453 	DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
454 	DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
455 	DP_STATS_INCC(peer, tx.pream_punct_cnt, 1, ppdu->pream_punct);
456 
457 	dp_peer_stats_notify(pdev, peer);
458 
459 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
460 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
461 			     &peer->stats, ppdu->peer_id,
462 			     UPDATE_PEER_STATS, pdev->pdev_id);
463 #endif
464 }
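
/*
 * Illustration (hypothetical scalar form): each DP_STATS_INCC pair above
 * builds a clamped per-preamble MCS histogram, with mcs_count[MAX_MCS - 1]
 * serving as the overflow bucket for out-of-range MCS values.
 */
#if 0
	if (preamble == DOT11_A) {
		uint8_t idx = (mcs < MAX_MCS_11A) ? mcs : MAX_MCS - 1;

		peer->stats.tx.pkt_type[preamble].mcs_count[idx] += num_msdu;
	}
#endif
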
465 #endif
466 
467 #ifdef WLAN_TX_PKT_CAPTURE_ENH
468 #include "dp_tx_capture.h"
469 #else
470 static inline void
471 dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
472 					   void *data,
473 					   uint32_t ppdu_id,
474 					   uint32_t size)
475 {
476 }
477 #endif
478 
479 /*
480  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
481  * @htt_soc:	HTT SOC handle
482  *
483  * Return: Pointer to htc packet buffer
484  */
485 static struct dp_htt_htc_pkt *
486 htt_htc_pkt_alloc(struct htt_soc *soc)
487 {
488 	struct dp_htt_htc_pkt_union *pkt = NULL;
489 
490 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
491 	if (soc->htt_htc_pkt_freelist) {
492 		pkt = soc->htt_htc_pkt_freelist;
493 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
494 	}
495 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
496 
497 	if (!pkt)
498 		pkt = qdf_mem_malloc(sizeof(*pkt));
499 	return pkt ? &pkt->u.pkt : NULL; /* not actually a dereference */
500 }
501 
502 /*
503  * htt_htc_pkt_free() - Free HTC packet buffer
504  * @htt_soc:	HTT SOC handle
505  */
506 static void
507 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
508 {
509 	struct dp_htt_htc_pkt_union *u_pkt =
510 		(struct dp_htt_htc_pkt_union *)pkt;
511 
512 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
513 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
514 	soc->htt_htc_pkt_freelist = u_pkt;
515 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
516 }
517 
518 /*
519  * htt_htc_pkt_pool_free() - Free HTC packet pool
520  * @htt_soc:	HTT SOC handle
521  */
522 void
523 htt_htc_pkt_pool_free(struct htt_soc *soc)
524 {
525 	struct dp_htt_htc_pkt_union *pkt, *next;
526 	pkt = soc->htt_htc_pkt_freelist;
527 	while (pkt) {
528 		next = pkt->u.next;
529 		qdf_mem_free(pkt);
530 		pkt = next;
531 	}
532 	soc->htt_htc_pkt_freelist = NULL;
533 }
534 
535 /*
536  * htt_htc_misc_pkt_list_trim() - trim misc list
537  * @htt_soc: HTT SOC handle
538  * @level: max no. of pkts in list
539  */
540 static void
541 htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
542 {
543 	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
544 	int i = 0;
545 	qdf_nbuf_t netbuf;
546 
547 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
548 	pkt = soc->htt_htc_pkt_misclist;
549 	while (pkt) {
550 		next = pkt->u.next;
551 		/* trim the overgrown list */
552 		if (++i > level) {
553 			netbuf =
554 				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
555 			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
556 			qdf_nbuf_free(netbuf);
557 			qdf_mem_free(pkt);
558 			pkt = NULL;
559 			if (prev)
560 				prev->u.next = NULL;
561 		}
562 		prev = pkt;
563 		pkt = next;
564 	}
565 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
566 }
567 
568 /*
569  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
570  * @htt_soc:	HTT SOC handle
571  * @dp_htt_htc_pkt: pkt to be added to list
572  */
573 static void
574 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
575 {
576 	struct dp_htt_htc_pkt_union *u_pkt =
577 				(struct dp_htt_htc_pkt_union *)pkt;
578 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
579 							pkt->htc_pkt.Endpoint)
580 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
581 
582 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
583 	if (soc->htt_htc_pkt_misclist) {
584 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
585 		soc->htt_htc_pkt_misclist = u_pkt;
586 	} else {
587 		soc->htt_htc_pkt_misclist = u_pkt;
588 	}
589 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
590 
591 	/* only ce pipe size + tx_queue_depth packets could possibly be in
592 	 * use; free older packets in the misclist
593 	 */
594 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
595 }
596 
597 /**
598  * DP_HTT_SEND_HTC_PKT() - Send HTT packet from host
599  * @soc : HTT SOC handle
600  * @pkt : packet to be sent
601  * @cmd : command to be recorded in the dp htt logger
602  * @buf : Pointer to the buffer to be recorded for the above cmd
603  *
604  * Return: None
605  */
606 static inline QDF_STATUS DP_HTT_SEND_HTC_PKT(struct htt_soc *soc,
607 					     struct dp_htt_htc_pkt *pkt,
608 					     uint8_t cmd, uint8_t *buf)
609 {
610 	QDF_STATUS status;
611 
612 	htt_command_record(soc->htt_logger_handle, cmd, buf);
613 
614 	status = htc_send_pkt(soc->htc_soc, &pkt->htc_pkt);
615 	if (status == QDF_STATUS_SUCCESS)
616 		htt_htc_misc_pkt_list_add(soc, pkt);
617 
618 	return status;
619 }
620 
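/*
 * Typical caller pattern for the helper above (cf. htt_h2t_ver_req_msg
 * below): on failure the message nbuf and the HTC packet descriptor are
 * still owned by the caller and must be released. Names are illustrative.
 */
#if 0
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, cmd_id, htt_logger_bufp);
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);         /* message was not consumed */
		htt_htc_pkt_free(soc, pkt); /* return descriptor to freelist */
	}
#endif
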
621 /*
622  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
623  * @htt_soc:	HTT SOC handle
624  */
625 static void
626 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
627 {
628 	struct dp_htt_htc_pkt_union *pkt, *next;
629 	qdf_nbuf_t netbuf;
630 
631 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
632 	pkt = soc->htt_htc_pkt_misclist;
633 
634 	while (pkt) {
635 		next = pkt->u.next;
636 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
637 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
638 
639 		soc->stats.htc_pkt_free++;
640 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
641 			 "%s: Pkt free count %d",
642 			 __func__, soc->stats.htc_pkt_free);
643 
644 		qdf_nbuf_free(netbuf);
645 		qdf_mem_free(pkt);
646 		pkt = next;
647 	}
648 	soc->htt_htc_pkt_misclist = NULL;
649 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
650 }
651 
652 /*
653  * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differs
654  * @tgt_mac_addr:	Target MAC
655  * @buffer:		Output buffer
656  */
657 static u_int8_t *
658 htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
659 {
660 #ifdef BIG_ENDIAN_HOST
661 	/*
662 	 * The host endianness is opposite of the target endianness.
663 	 * To make u_int32_t elements come out correctly, the target->host
664 	 * upload has swizzled the bytes in each u_int32_t element of the
665 	 * message.
666 	 * For byte-array message fields like the MAC address, this
667 	 * upload swizzling puts the bytes in the wrong order, and needs
668 	 * to be undone.
669 	 */
670 	buffer[0] = tgt_mac_addr[3];
671 	buffer[1] = tgt_mac_addr[2];
672 	buffer[2] = tgt_mac_addr[1];
673 	buffer[3] = tgt_mac_addr[0];
674 	buffer[4] = tgt_mac_addr[7];
675 	buffer[5] = tgt_mac_addr[6];
676 	return buffer;
677 #else
678 	/*
679 	 * The host endianness matches the target endianness -
680 	 * we can use the mac addr directly from the message buffer.
681 	 */
682 	return tgt_mac_addr;
683 #endif
684 }
685 
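/*
 * Worked example for the big-endian branch above: MAC 00:11:22:33:44:55
 * occupies bytes 0-5 of two uploaded u_int32_t words. The per-word byte
 * swap done by the target->host upload leaves
 *   tgt_mac_addr[0..7] = {0x33, 0x22, 0x11, 0x00, pad, pad, 0x55, 0x44}
 * so reading indices 3, 2, 1, 0, 7, 6 restores 00:11:22:33:44:55.
 */
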
686 /*
687  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
688  * @soc:	SOC handle
689  * @status:	Completion status
690  * @netbuf:	HTT buffer
691  */
692 static void
693 dp_htt_h2t_send_complete_free_netbuf(
694 	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
695 {
696 	qdf_nbuf_free(netbuf);
697 }
698 
699 /*
700  * dp_htt_h2t_send_complete() - H2T completion handler
701  * @context:	Opaque context (HTT SOC handle)
702  * @htc_pkt:	HTC packet
703  */
704 static void
705 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
706 {
707 	void (*send_complete_part2)(
708 		void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
709 	struct htt_soc *soc =  (struct htt_soc *) context;
710 	struct dp_htt_htc_pkt *htt_pkt;
711 	qdf_nbuf_t netbuf;
712 
713 	send_complete_part2 = htc_pkt->pPktContext;
714 
715 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
716 
717 	/* process (free or keep) the netbuf that held the message */
718 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
719 	/*
720 	 * adf sendcomplete is required for windows only
721 	 */
722 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
723 	if (send_complete_part2) {
724 		send_complete_part2(
725 			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
726 	}
727 	/* free the htt_htc_pkt / HTC_PACKET object */
728 	htt_htc_pkt_free(soc, htt_pkt);
729 }
730 
731 /*
732  * htt_h2t_ver_req_msg() - Send HTT version request message to target
733  * @htt_soc:	HTT SOC handle
734  *
735  * Return: QDF_STATUS_SUCCESS on success; error code on failure
736  */
737 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
738 {
739 	struct dp_htt_htc_pkt *pkt;
740 	qdf_nbuf_t msg;
741 	uint32_t *msg_word;
742 	QDF_STATUS status;
743 
744 	msg = qdf_nbuf_alloc(
745 		soc->osdev,
746 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
747 		/* reserve room for the HTC header */
748 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
749 	if (!msg)
750 		return QDF_STATUS_E_NOMEM;
751 
752 	/*
753 	 * Set the length of the message.
754 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
755 	 * separately during the below call to qdf_nbuf_push_head.
756 	 * The contribution from the HTC header is added separately inside HTC.
757 	 */
758 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
759 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
760 			"%s: Failed to expand tail for HTT_H2T_MSG_TYPE_VERSION_REQ msg", __func__);
761 		qdf_nbuf_free(msg); /* don't leak the allocated message buffer */
762 		return QDF_STATUS_E_FAILURE;
763 	}
764 
765 	/* fill in the message contents */
766 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
767 
768 	/* rewind beyond alignment pad to get to the HTC header reserved area */
769 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
770 
771 	*msg_word = 0;
772 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
773 
774 	pkt = htt_htc_pkt_alloc(soc);
775 	if (!pkt) {
776 		qdf_nbuf_free(msg);
777 		return QDF_STATUS_E_FAILURE;
778 	}
779 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
780 
781 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
782 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
783 		qdf_nbuf_len(msg), soc->htc_endpoint,
784 		1); /* tag - not relevant here */
785 
786 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
787 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
788 				     NULL);
789 
790 	if (status != QDF_STATUS_SUCCESS) {
791 		qdf_nbuf_free(msg);
792 		htt_htc_pkt_free(soc, pkt);
793 	}
794 
795 	return status;
796 }
797 
798 /*
799  * htt_srng_setup() - Send SRNG setup message to target
800  * @htt_soc:	HTT SOC handle
801  * @mac_id:	MAC Id
802  * @hal_ring_hdl:	Opaque HAL SRNG pointer
803  * @hal_ring_type:	SRNG ring type
804  *
805  * Return: 0 on success; error code on failure
806  */
807 int htt_srng_setup(struct htt_soc *soc, int mac_id,
808 		   hal_ring_handle_t hal_ring_hdl,
809 		   int hal_ring_type)
810 {
811 	struct dp_htt_htc_pkt *pkt;
812 	qdf_nbuf_t htt_msg;
813 	uint32_t *msg_word;
814 	struct hal_srng_params srng_params;
815 	qdf_dma_addr_t hp_addr, tp_addr;
816 	uint32_t ring_entry_size =
817 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
818 	int htt_ring_type, htt_ring_id;
819 	uint8_t *htt_logger_bufp;
820 	int target_pdev_id;
821 	int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
822 	QDF_STATUS status;
823 
824 	/* Sizes should be set in 4-byte words */
825 	ring_entry_size = ring_entry_size >> 2;
826 
827 	htt_msg = qdf_nbuf_alloc(soc->osdev,
828 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
829 		/* reserve room for the HTC header */
830 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
831 	if (!htt_msg)
832 		goto fail0;
833 
834 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
835 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
836 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
837 
838 	switch (hal_ring_type) {
839 	case RXDMA_BUF:
840 #ifdef QCA_HOST2FW_RXBUF_RING
841 		if (srng_params.ring_id ==
842 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
843 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
844 			htt_ring_type = HTT_SW_TO_SW_RING;
845 #ifdef IPA_OFFLOAD
846 		} else if (srng_params.ring_id ==
847 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
848 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
849 			htt_ring_type = HTT_SW_TO_SW_RING;
850 #endif
851 #else
852 		if (srng_params.ring_id ==
853 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
854 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
855 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
856 			htt_ring_type = HTT_SW_TO_HW_RING;
857 #endif
858 		} else if (srng_params.ring_id ==
859 #ifdef IPA_OFFLOAD
860 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
861 #else
862 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
863 #endif
864 			(lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
865 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
866 			htt_ring_type = HTT_SW_TO_HW_RING;
867 		} else {
868 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
869 				   "%s: Ring %d currently not supported",
870 				   __func__, srng_params.ring_id);
871 			goto fail1;
872 		}
873 
874 		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
875 			hal_ring_type, srng_params.ring_id, htt_ring_id,
876 			(uint64_t)hp_addr,
877 			(uint64_t)tp_addr);
878 		break;
879 	case RXDMA_MONITOR_BUF:
880 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
881 		htt_ring_type = HTT_SW_TO_HW_RING;
882 		break;
883 	case RXDMA_MONITOR_STATUS:
884 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
885 		htt_ring_type = HTT_SW_TO_HW_RING;
886 		break;
887 	case RXDMA_MONITOR_DST:
888 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
889 		htt_ring_type = HTT_HW_TO_SW_RING;
890 		break;
891 	case RXDMA_MONITOR_DESC:
892 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
893 		htt_ring_type = HTT_SW_TO_HW_RING;
894 		break;
895 	case RXDMA_DST:
896 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
897 		htt_ring_type = HTT_HW_TO_SW_RING;
898 		break;
899 
900 	default:
901 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
902 			"%s: Ring currently not supported", __func__);
903 			goto fail1;
904 	}
905 
906 	/*
907 	 * Set the length of the message.
908 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
909 	 * separately during the below call to qdf_nbuf_push_head.
910 	 * The contribution from the HTC header is added separately inside HTC.
911 	 */
912 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
913 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
914 			"%s: Failed to expand head for SRING_SETUP msg",
915 			__func__);
916 		return QDF_STATUS_E_FAILURE;
917 	}
918 
919 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
920 
921 	/* rewind beyond alignment pad to get to the HTC header reserved area */
922 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
923 
924 	/* word 0 */
925 	*msg_word = 0;
926 	htt_logger_bufp = (uint8_t *)msg_word;
927 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
928 	target_pdev_id =
929 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
930 
931 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
932 			(htt_ring_type == HTT_HW_TO_SW_RING))
933 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
934 	else
935 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
936 
937 	dp_info("%s: mac_id %d", __func__, mac_id);
938 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
939 	/* TODO: Discuss with FW on changing this to unique ID and using
940 	 * htt_ring_type to send the type of ring
941 	 */
942 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
943 
944 	/* word 1 */
945 	msg_word++;
946 	*msg_word = 0;
947 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
948 		srng_params.ring_base_paddr & 0xffffffff);
949 
950 	/* word 2 */
951 	msg_word++;
952 	*msg_word = 0;
953 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
954 		(uint64_t)srng_params.ring_base_paddr >> 32);
955 
956 	/* word 3 */
957 	msg_word++;
958 	*msg_word = 0;
959 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
960 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
961 		(ring_entry_size * srng_params.num_entries));
962 	dp_info("%s: entry_size %d", __func__, ring_entry_size);
963 	dp_info("%s: num_entries %d", __func__, srng_params.num_entries);
964 	dp_info("%s: ring_size %d", __func__,
965 		(ring_entry_size * srng_params.num_entries));
966 	if (htt_ring_type == HTT_SW_TO_HW_RING)
967 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
968 						*msg_word, 1);
969 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
970 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
971 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
972 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
973 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
974 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
975 
976 	/* word 4 */
977 	msg_word++;
978 	*msg_word = 0;
979 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
980 		hp_addr & 0xffffffff);
981 
982 	/* word 5 */
983 	msg_word++;
984 	*msg_word = 0;
985 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
986 		(uint64_t)hp_addr >> 32);
987 
988 	/* word 6 */
989 	msg_word++;
990 	*msg_word = 0;
991 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
992 		tp_addr & 0xffffffff);
993 
994 	/* word 7 */
995 	msg_word++;
996 	*msg_word = 0;
997 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
998 		(uint64_t)tp_addr >> 32);
999 
1000 	/* word 8 */
1001 	msg_word++;
1002 	*msg_word = 0;
1003 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
1004 		srng_params.msi_addr & 0xffffffff);
1005 
1006 	/* word 9 */
1007 	msg_word++;
1008 	*msg_word = 0;
1009 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
1010 		(uint64_t)(srng_params.msi_addr) >> 32);
1011 
1012 	/* word 10 */
1013 	msg_word++;
1014 	*msg_word = 0;
1015 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
1016 		srng_params.msi_data);
1017 
1018 	/* word 11 */
1019 	msg_word++;
1020 	*msg_word = 0;
1021 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
1022 		srng_params.intr_batch_cntr_thres_entries *
1023 		ring_entry_size);
1024 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
1025 		srng_params.intr_timer_thres_us >> 3);
1026 
1027 	/* word 12 */
1028 	msg_word++;
1029 	*msg_word = 0;
1030 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
1031 		/* TODO: Setting low threshold to 1/8th of ring size - see
1032 		 * if this needs to be configurable
1033 		 */
1034 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
1035 			srng_params.low_threshold);
1036 	}
1037 	/* "response_required" field should be set if a HTT response message is
1038 	 * required after setting up the ring.
1039 	 */
1040 	pkt = htt_htc_pkt_alloc(soc);
1041 	if (!pkt)
1042 		goto fail1;
1043 
1044 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1045 
1046 	SET_HTC_PACKET_INFO_TX(
1047 		&pkt->htc_pkt,
1048 		dp_htt_h2t_send_complete_free_netbuf,
1049 		qdf_nbuf_data(htt_msg),
1050 		qdf_nbuf_len(htt_msg),
1051 		soc->htc_endpoint,
1052 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1053 
1054 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1055 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
1056 				     htt_logger_bufp);
1057 
1058 	if (status != QDF_STATUS_SUCCESS) {
1059 		qdf_nbuf_free(htt_msg);
1060 		htt_htc_pkt_free(soc, pkt);
1061 	}
1062 
1063 	return status;
1064 
1065 fail1:
1066 	qdf_nbuf_free(htt_msg);
1067 fail0:
1068 	return QDF_STATUS_E_FAILURE;
1069 }
1070 
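/*
 * Worked example for the word packing above (illustrative values): a ring
 * base address of 0x123456000 is split into ADDR_LO = 0x23456000 and
 * ADDR_HI = 0x1, and a 32-byte HW descriptor gives ring_entry_size =
 * 32 >> 2 = 8, so both entry size and ring size are programmed in 4-byte
 * words rather than bytes.
 */
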
1071 #ifdef QCA_SUPPORT_FULL_MON
1072 /**
1073  * htt_h2t_full_mon_cfg() - Send full monitor configuration msg to FW
1074  *
1075  * @htt_soc: HTT SOC handle
1076  * @pdev_id: Radio id
1077  * @config: enable/disable configuration
1078  *
1079  * Return: Success when HTT message is sent, error on failure
1080  */
1081 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
1082 			 uint8_t pdev_id,
1083 			 enum dp_full_mon_config config)
1084 {
1085 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
1086 	struct dp_htt_htc_pkt *pkt;
1087 	qdf_nbuf_t htt_msg;
1088 	uint32_t *msg_word;
1089 	uint8_t *htt_logger_bufp;
1090 
1091 	htt_msg = qdf_nbuf_alloc(soc->osdev,
1092 				 HTT_MSG_BUF_SIZE(
1093 				 HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
1094 				 /* reserve room for the HTC header */
1095 				 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
1096 				 4,
1097 				 TRUE);
1098 	if (!htt_msg)
1099 		return QDF_STATUS_E_FAILURE;
1100 
1101 	/*
1102 	 * Set the length of the message.
1103 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1104 	 * separately during the below call to qdf_nbuf_push_head.
1105 	 * The contribution from the HTC header is added separately inside HTC.
1106 	 */
1107 	if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
1108 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1109 			  "%s: Failed to expand tail for full monitor mode msg",
1110 			  __func__);
1111 		goto fail1;
1112 	}
1113 
1114 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1115 
1116 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1117 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1118 
1119 	/* word 0 */
1120 	*msg_word = 0;
1121 	htt_logger_bufp = (uint8_t *)msg_word;
1122 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
1123 	HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
1124 			*msg_word, DP_SW2HW_MACID(pdev_id));
1125 
1126 	msg_word++;
1127 	*msg_word = 0;
1128 	/* word 1 */
1129 	if (config == DP_FULL_MON_ENABLE) {
1130 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
1131 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
1132 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
1133 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
1134 	} else if (config == DP_FULL_MON_DISABLE) {
1135 		/* As per the MAC team's suggestion, while disabling full monitor
1136 		 * mode, set the 'en' bit to true in the full monitor mode register.
1137 		 */
1138 		HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
1139 		HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
1140 		HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
1141 		HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
1142 	}
1143 
1144 	pkt = htt_htc_pkt_alloc(soc);
1145 	if (!pkt) {
1146 		qdf_err("HTC packet allocation failed");
1147 		goto fail1;
1148 	}
1149 
1150 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1151 
1152 	SET_HTC_PACKET_INFO_TX(
1153 		&pkt->htc_pkt,
1154 		dp_htt_h2t_send_complete_free_netbuf,
1155 		qdf_nbuf_data(htt_msg),
1156 		qdf_nbuf_len(htt_msg),
1157 		soc->htc_endpoint,
1158 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1159 
1160 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1161 	qdf_info("config: %d", config);
1162 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
1163 			    htt_logger_bufp);
1164 	return QDF_STATUS_SUCCESS;
1165 fail1:
1166 	qdf_nbuf_free(htt_msg);
1167 	return QDF_STATUS_E_FAILURE;
1168 }
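
/*
 * Hypothetical usage from a monitor-mode configuration path; the handles
 * and error handling shown are illustrative.
 */
#if 0
	if (htt_h2t_full_mon_cfg(pdev->soc->htt_handle, pdev->pdev_id,
				 DP_FULL_MON_ENABLE) != QDF_STATUS_SUCCESS)
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "full monitor mode enable failed");
#endif
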
1169 #else
1170 int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
1171 			 uint8_t pdev_id,
1172 			 enum dp_full_mon_config config)
1173 {
1174 	return 0;
1175 }
1176 
1177 #endif
1178 
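/*
 * Illustration for the ring config call below (hypothetical values): the
 * caller zeroes an htt_rx_ring_tlv_filter, enables the rule sets it needs
 * (FP = filter pass, MD = monitor direct, MO = monitor other) and fills
 * the per-type subtype bitmasks; the 0xFFFF masks are assumptions for the
 * sketch.
 */
#if 0
	struct htt_rx_ring_tlv_filter tlv_filter = {0};

	tlv_filter.enable_fp = 1;           /* program the filter-pass rules */
	tlv_filter.fp_mgmt_filter = 0xFFFF; /* all mgmt subtypes */
	tlv_filter.fp_ctrl_filter = 0xFFFF; /* all ctrl subtypes */
	tlv_filter.fp_data_filter = 0xFFFF; /* all data subtypes */
	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id, hal_ring_hdl,
			    RXDMA_MONITOR_BUF, buf_size, &tlv_filter);
#endif
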
1179 /*
1180  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
1181  * config message to target
1182  * @htt_soc:	HTT SOC handle
1183  * @pdev_id:	WIN- PDEV Id, MCL- mac id
1184  * @hal_ring_hdl:	Opaque HAL SRNG pointer
1185  * @hal_ring_type:	SRNG ring type
1186  * @ring_buf_size:	SRNG buffer size
1187  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
1188  * Return: 0 on success; error code on failure
1189  */
1190 int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
1191 			hal_ring_handle_t hal_ring_hdl,
1192 			int hal_ring_type, int ring_buf_size,
1193 			struct htt_rx_ring_tlv_filter *htt_tlv_filter)
1194 {
1195 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
1196 	struct dp_htt_htc_pkt *pkt;
1197 	qdf_nbuf_t htt_msg;
1198 	uint32_t *msg_word;
1199 	struct hal_srng_params srng_params;
1200 	uint32_t htt_ring_type, htt_ring_id;
1201 	uint32_t tlv_filter;
1202 	uint8_t *htt_logger_bufp;
1203 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
1204 	uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
1205 	int target_pdev_id;
1206 	QDF_STATUS status;
1207 
1208 	htt_msg = qdf_nbuf_alloc(soc->osdev,
1209 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
1210 	/* reserve room for the HTC header */
1211 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
1212 	if (!htt_msg)
1213 		goto fail0;
1214 
1215 	hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
1216 
1217 	switch (hal_ring_type) {
1218 	case RXDMA_BUF:
1219 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
1220 		htt_ring_type = HTT_SW_TO_HW_RING;
1221 		break;
1222 	case RXDMA_MONITOR_BUF:
1223 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
1224 		htt_ring_type = HTT_SW_TO_HW_RING;
1225 		break;
1226 	case RXDMA_MONITOR_STATUS:
1227 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
1228 		htt_ring_type = HTT_SW_TO_HW_RING;
1229 		break;
1230 	case RXDMA_MONITOR_DST:
1231 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
1232 		htt_ring_type = HTT_HW_TO_SW_RING;
1233 		break;
1234 	case RXDMA_MONITOR_DESC:
1235 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
1236 		htt_ring_type = HTT_SW_TO_HW_RING;
1237 		break;
1238 	case RXDMA_DST:
1239 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
1240 		htt_ring_type = HTT_HW_TO_SW_RING;
1241 		break;
1242 
1243 	default:
1244 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1245 			"%s: Ring currently not supported", __func__);
1246 		goto fail1;
1247 	}
1248 
1249 	/*
1250 	 * Set the length of the message.
1251 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1252 	 * separately during the below call to qdf_nbuf_push_head.
1253 	 * The contribution from the HTC header is added separately inside HTC.
1254 	 */
1255 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
1256 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1257 			"%s: Failed to expand head for RX Ring Cfg msg",
1258 			__func__);
1259 		goto fail1; /* failure */
1260 	}
1261 
1262 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1263 
1264 	/* rewind beyond alignment pad to get to the HTC header reserved area */
1265 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1266 
1267 	/* word 0 */
1268 	htt_logger_bufp = (uint8_t *)msg_word;
1269 	*msg_word = 0;
1270 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
1271 
1272 	/*
1273 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
1274 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
1275 	 */
1276 	target_pdev_id =
1277 	dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);
1278 
1279 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
1280 			htt_ring_type == HTT_SW_TO_HW_RING)
1281 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
1282 						      target_pdev_id);
1283 
1284 	/* TODO: Discuss with FW on changing this to unique ID and using
1285 	 * htt_ring_type to send the type of ring
1286 	 */
1287 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
1288 
1289 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
1290 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
1291 
1292 	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
1293 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
1294 
1295 	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
1296 						htt_tlv_filter->offset_valid);
1297 
1298 	if (mon_drop_th > 0)
1299 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1300 								   1);
1301 	else
1302 		HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1303 								   0);
1304 
1305 	/* word 1 */
1306 	msg_word++;
1307 	*msg_word = 0;
1308 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
1309 		ring_buf_size);
1310 
1311 	/* word 2 */
1312 	msg_word++;
1313 	*msg_word = 0;
1314 
1315 	if (htt_tlv_filter->enable_fp) {
1316 		/* TYPE: MGMT */
1317 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1318 			FP, MGMT, 0000,
1319 			(htt_tlv_filter->fp_mgmt_filter &
1320 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1321 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1322 			FP, MGMT, 0001,
1323 			(htt_tlv_filter->fp_mgmt_filter &
1324 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1325 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1326 			FP, MGMT, 0010,
1327 			(htt_tlv_filter->fp_mgmt_filter &
1328 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1329 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1330 			FP, MGMT, 0011,
1331 			(htt_tlv_filter->fp_mgmt_filter &
1332 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1333 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1334 			FP, MGMT, 0100,
1335 			(htt_tlv_filter->fp_mgmt_filter &
1336 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1337 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1338 			FP, MGMT, 0101,
1339 			(htt_tlv_filter->fp_mgmt_filter &
1340 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1341 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1342 			FP, MGMT, 0110,
1343 			(htt_tlv_filter->fp_mgmt_filter &
1344 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1345 		/* reserved */
1346 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
1347 			MGMT, 0111,
1348 			(htt_tlv_filter->fp_mgmt_filter &
1349 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1350 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1351 			FP, MGMT, 1000,
1352 			(htt_tlv_filter->fp_mgmt_filter &
1353 			FILTER_MGMT_BEACON) ? 1 : 0);
1354 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1355 			FP, MGMT, 1001,
1356 			(htt_tlv_filter->fp_mgmt_filter &
1357 			FILTER_MGMT_ATIM) ? 1 : 0);
1358 	}
1359 
1360 	if (htt_tlv_filter->enable_md) {
1361 		/* TYPE: MGMT */
1362 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1363 			MD, MGMT, 0000,
1364 			(htt_tlv_filter->md_mgmt_filter &
1365 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1366 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1367 			MD, MGMT, 0001,
1368 			(htt_tlv_filter->md_mgmt_filter &
1369 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1370 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1371 			MD, MGMT, 0010,
1372 			(htt_tlv_filter->md_mgmt_filter &
1373 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1374 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1375 			MD, MGMT, 0011,
1376 			(htt_tlv_filter->md_mgmt_filter &
1377 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1378 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1379 			MD, MGMT, 0100,
1380 			(htt_tlv_filter->md_mgmt_filter &
1381 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1382 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1383 			MD, MGMT, 0101,
1384 			(htt_tlv_filter->md_mgmt_filter &
1385 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1386 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1387 			MD, MGMT, 0110,
1388 			(htt_tlv_filter->md_mgmt_filter &
1389 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1390 		/* reserved */
1391 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1392 			MGMT, 0111,
1393 			(htt_tlv_filter->md_mgmt_filter &
1394 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1395 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1396 			MD, MGMT, 1000,
1397 			(htt_tlv_filter->md_mgmt_filter &
1398 			FILTER_MGMT_BEACON) ? 1 : 0);
1399 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1400 			MD, MGMT, 1001,
1401 			(htt_tlv_filter->md_mgmt_filter &
1402 			FILTER_MGMT_ATIM) ? 1 : 0);
1403 	}
1404 
1405 	if (htt_tlv_filter->enable_mo) {
1406 		/* TYPE: MGMT */
1407 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1408 			MO, MGMT, 0000,
1409 			(htt_tlv_filter->mo_mgmt_filter &
1410 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1411 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1412 			MO, MGMT, 0001,
1413 			(htt_tlv_filter->mo_mgmt_filter &
1414 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1415 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1416 			MO, MGMT, 0010,
1417 			(htt_tlv_filter->mo_mgmt_filter &
1418 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1419 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1420 			MO, MGMT, 0011,
1421 			(htt_tlv_filter->mo_mgmt_filter &
1422 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1423 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1424 			MO, MGMT, 0100,
1425 			(htt_tlv_filter->mo_mgmt_filter &
1426 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1427 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1428 			MO, MGMT, 0101,
1429 			(htt_tlv_filter->mo_mgmt_filter &
1430 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1431 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1432 			MO, MGMT, 0110,
1433 			(htt_tlv_filter->mo_mgmt_filter &
1434 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1435 		/* reserved */
1436 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1437 			MGMT, 0111,
1438 			(htt_tlv_filter->mo_mgmt_filter &
1439 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1440 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1441 			MO, MGMT, 1000,
1442 			(htt_tlv_filter->mo_mgmt_filter &
1443 			FILTER_MGMT_BEACON) ? 1 : 0);
1444 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1445 			MO, MGMT, 1001,
1446 			(htt_tlv_filter->mo_mgmt_filter &
1447 			FILTER_MGMT_ATIM) ? 1 : 0);
1448 	}
1449 
1450 	/* word 3 */
1451 	msg_word++;
1452 	*msg_word = 0;
1453 
1454 	if (htt_tlv_filter->enable_fp) {
1455 		/* TYPE: MGMT */
1456 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1457 			FP, MGMT, 1010,
1458 			(htt_tlv_filter->fp_mgmt_filter &
1459 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1460 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1461 			FP, MGMT, 1011,
1462 			(htt_tlv_filter->fp_mgmt_filter &
1463 			FILTER_MGMT_AUTH) ? 1 : 0);
1464 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1465 			FP, MGMT, 1100,
1466 			(htt_tlv_filter->fp_mgmt_filter &
1467 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1468 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1469 			FP, MGMT, 1101,
1470 			(htt_tlv_filter->fp_mgmt_filter &
1471 			FILTER_MGMT_ACTION) ? 1 : 0);
1472 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1473 			FP, MGMT, 1110,
1474 			(htt_tlv_filter->fp_mgmt_filter &
1475 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1476 		/* reserved*/
1477 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1478 			MGMT, 1111,
1479 			(htt_tlv_filter->fp_mgmt_filter &
1480 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1481 	}
1482 
1483 	if (htt_tlv_filter->enable_md) {
1484 		/* TYPE: MGMT */
1485 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1486 			MD, MGMT, 1010,
1487 			(htt_tlv_filter->md_mgmt_filter &
1488 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1489 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1490 			MD, MGMT, 1011,
1491 			(htt_tlv_filter->md_mgmt_filter &
1492 			FILTER_MGMT_AUTH) ? 1 : 0);
1493 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1494 			MD, MGMT, 1100,
1495 			(htt_tlv_filter->md_mgmt_filter &
1496 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1497 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1498 			MD, MGMT, 1101,
1499 			(htt_tlv_filter->md_mgmt_filter &
1500 			FILTER_MGMT_ACTION) ? 1 : 0);
1501 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1502 			MD, MGMT, 1110,
1503 			(htt_tlv_filter->md_mgmt_filter &
1504 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1505 	}
1506 
1507 	if (htt_tlv_filter->enable_mo) {
1508 		/* TYPE: MGMT */
1509 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1510 			MO, MGMT, 1010,
1511 			(htt_tlv_filter->mo_mgmt_filter &
1512 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1513 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1514 			MO, MGMT, 1011,
1515 			(htt_tlv_filter->mo_mgmt_filter &
1516 			FILTER_MGMT_AUTH) ? 1 : 0);
1517 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1518 			MO, MGMT, 1100,
1519 			(htt_tlv_filter->mo_mgmt_filter &
1520 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1521 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1522 			MO, MGMT, 1101,
1523 			(htt_tlv_filter->mo_mgmt_filter &
1524 			FILTER_MGMT_ACTION) ? 1 : 0);
1525 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1526 			MO, MGMT, 1110,
1527 			(htt_tlv_filter->mo_mgmt_filter &
1528 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1529 		/* reserved*/
1530 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1531 			MGMT, 1111,
1532 			(htt_tlv_filter->mo_mgmt_filter &
1533 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1534 	}
1535 
1536 	/* word 4 */
1537 	msg_word++;
1538 	*msg_word = 0;
1539 
1540 	if (htt_tlv_filter->enable_fp) {
1541 		/* TYPE: CTRL */
1542 		/* reserved */
1543 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1544 			CTRL, 0000,
1545 			(htt_tlv_filter->fp_ctrl_filter &
1546 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1547 		/* reserved */
1548 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1549 			CTRL, 0001,
1550 			(htt_tlv_filter->fp_ctrl_filter &
1551 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1552 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1553 			CTRL, 0010,
1554 			(htt_tlv_filter->fp_ctrl_filter &
1555 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1556 		/* reserved */
1557 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1558 			CTRL, 0011,
1559 			(htt_tlv_filter->fp_ctrl_filter &
1560 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1561 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1562 			CTRL, 0100,
1563 			(htt_tlv_filter->fp_ctrl_filter &
1564 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1565 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1566 			CTRL, 0101,
1567 			(htt_tlv_filter->fp_ctrl_filter &
1568 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1569 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1570 			CTRL, 0110,
1571 			(htt_tlv_filter->fp_ctrl_filter &
1572 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1573 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1574 			CTRL, 0111,
1575 			(htt_tlv_filter->fp_ctrl_filter &
1576 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1577 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1578 			CTRL, 1000,
1579 			(htt_tlv_filter->fp_ctrl_filter &
1580 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1581 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1582 			CTRL, 1001,
1583 			(htt_tlv_filter->fp_ctrl_filter &
1584 			FILTER_CTRL_BA) ? 1 : 0);
1585 	}
1586 
1587 	if (htt_tlv_filter->enable_md) {
1588 		/* TYPE: CTRL */
1589 		/* reserved */
1590 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1591 			CTRL, 0000,
1592 			(htt_tlv_filter->md_ctrl_filter &
1593 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1594 		/* reserved */
1595 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1596 			CTRL, 0001,
1597 			(htt_tlv_filter->md_ctrl_filter &
1598 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1599 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1600 			CTRL, 0010,
1601 			(htt_tlv_filter->md_ctrl_filter &
1602 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1603 		/* reserved */
1604 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1605 			CTRL, 0011,
1606 			(htt_tlv_filter->md_ctrl_filter &
1607 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1608 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1609 			CTRL, 0100,
1610 			(htt_tlv_filter->md_ctrl_filter &
1611 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1612 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1613 			CTRL, 0101,
1614 			(htt_tlv_filter->md_ctrl_filter &
1615 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1616 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1617 			CTRL, 0110,
1618 			(htt_tlv_filter->md_ctrl_filter &
1619 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1620 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1621 			CTRL, 0111,
1622 			(htt_tlv_filter->md_ctrl_filter &
1623 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1624 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1625 			CTRL, 1000,
1626 			(htt_tlv_filter->md_ctrl_filter &
1627 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1628 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1629 			CTRL, 1001,
1630 			(htt_tlv_filter->md_ctrl_filter &
1631 			FILTER_CTRL_BA) ? 1 : 0);
1632 	}
1633 
1634 	if (htt_tlv_filter->enable_mo) {
1635 		/* TYPE: CTRL */
1636 		/* reserved */
1637 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1638 			CTRL, 0000,
1639 			(htt_tlv_filter->mo_ctrl_filter &
1640 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1641 		/* reserved */
1642 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1643 			CTRL, 0001,
1644 			(htt_tlv_filter->mo_ctrl_filter &
1645 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1646 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1647 			CTRL, 0010,
1648 			(htt_tlv_filter->mo_ctrl_filter &
1649 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1650 		/* reserved */
1651 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1652 			CTRL, 0011,
1653 			(htt_tlv_filter->mo_ctrl_filter &
1654 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1655 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1656 			CTRL, 0100,
1657 			(htt_tlv_filter->mo_ctrl_filter &
1658 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1659 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1660 			CTRL, 0101,
1661 			(htt_tlv_filter->mo_ctrl_filter &
1662 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1663 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1664 			CTRL, 0110,
1665 			(htt_tlv_filter->mo_ctrl_filter &
1666 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1667 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1668 			CTRL, 0111,
1669 			(htt_tlv_filter->mo_ctrl_filter &
1670 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1671 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1672 			CTRL, 1000,
1673 			(htt_tlv_filter->mo_ctrl_filter &
1674 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1675 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1676 			CTRL, 1001,
1677 			(htt_tlv_filter->mo_ctrl_filter &
1678 			FILTER_CTRL_BA) ? 1 : 0);
1679 	}
1680 
1681 	/* word 5 */
1682 	msg_word++;
1683 	*msg_word = 0;
1684 	if (htt_tlv_filter->enable_fp) {
1685 		/* TYPE: CTRL */
1686 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1687 			CTRL, 1010,
1688 			(htt_tlv_filter->fp_ctrl_filter &
1689 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1690 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1691 			CTRL, 1011,
1692 			(htt_tlv_filter->fp_ctrl_filter &
1693 			FILTER_CTRL_RTS) ? 1 : 0);
1694 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1695 			CTRL, 1100,
1696 			(htt_tlv_filter->fp_ctrl_filter &
1697 			FILTER_CTRL_CTS) ? 1 : 0);
1698 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1699 			CTRL, 1101,
1700 			(htt_tlv_filter->fp_ctrl_filter &
1701 			FILTER_CTRL_ACK) ? 1 : 0);
1702 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1703 			CTRL, 1110,
1704 			(htt_tlv_filter->fp_ctrl_filter &
1705 			FILTER_CTRL_CFEND) ? 1 : 0);
1706 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1707 			CTRL, 1111,
1708 			(htt_tlv_filter->fp_ctrl_filter &
1709 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1710 		/* TYPE: DATA */
1711 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1712 			DATA, MCAST,
1713 			(htt_tlv_filter->fp_data_filter &
1714 			FILTER_DATA_MCAST) ? 1 : 0);
1715 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1716 			DATA, UCAST,
1717 			(htt_tlv_filter->fp_data_filter &
1718 			FILTER_DATA_UCAST) ? 1 : 0);
1719 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1720 			DATA, NULL,
1721 			(htt_tlv_filter->fp_data_filter &
1722 			FILTER_DATA_NULL) ? 1 : 0);
1723 	}
1724 
1725 	if (htt_tlv_filter->enable_md) {
1726 		/* TYPE: CTRL */
1727 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1728 			CTRL, 1010,
1729 			(htt_tlv_filter->md_ctrl_filter &
1730 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1731 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1732 			CTRL, 1011,
1733 			(htt_tlv_filter->md_ctrl_filter &
1734 			FILTER_CTRL_RTS) ? 1 : 0);
1735 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1736 			CTRL, 1100,
1737 			(htt_tlv_filter->md_ctrl_filter &
1738 			FILTER_CTRL_CTS) ? 1 : 0);
1739 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1740 			CTRL, 1101,
1741 			(htt_tlv_filter->md_ctrl_filter &
1742 			FILTER_CTRL_ACK) ? 1 : 0);
1743 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1744 			CTRL, 1110,
1745 			(htt_tlv_filter->md_ctrl_filter &
1746 			FILTER_CTRL_CFEND) ? 1 : 0);
1747 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1748 			CTRL, 1111,
1749 			(htt_tlv_filter->md_ctrl_filter &
1750 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1751 		/* TYPE: DATA */
1752 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1753 			DATA, MCAST,
1754 			(htt_tlv_filter->md_data_filter &
1755 			FILTER_DATA_MCAST) ? 1 : 0);
1756 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1757 			DATA, UCAST,
1758 			(htt_tlv_filter->md_data_filter &
1759 			FILTER_DATA_UCAST) ? 1 : 0);
1760 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1761 			DATA, NULL,
1762 			(htt_tlv_filter->md_data_filter &
1763 			FILTER_DATA_NULL) ? 1 : 0);
1764 	}
1765 
1766 	if (htt_tlv_filter->enable_mo) {
1767 		/* TYPE: CTRL */
1768 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1769 			CTRL, 1010,
1770 			(htt_tlv_filter->mo_ctrl_filter &
1771 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1772 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1773 			CTRL, 1011,
1774 			(htt_tlv_filter->mo_ctrl_filter &
1775 			FILTER_CTRL_RTS) ? 1 : 0);
1776 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1777 			CTRL, 1100,
1778 			(htt_tlv_filter->mo_ctrl_filter &
1779 			FILTER_CTRL_CTS) ? 1 : 0);
1780 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1781 			CTRL, 1101,
1782 			(htt_tlv_filter->mo_ctrl_filter &
1783 			FILTER_CTRL_ACK) ? 1 : 0);
1784 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1785 			CTRL, 1110,
1786 			(htt_tlv_filter->mo_ctrl_filter &
1787 			FILTER_CTRL_CFEND) ? 1 : 0);
1788 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1789 			CTRL, 1111,
1790 			(htt_tlv_filter->mo_ctrl_filter &
1791 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1792 		/* TYPE: DATA */
1793 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1794 			DATA, MCAST,
1795 			(htt_tlv_filter->mo_data_filter &
1796 			FILTER_DATA_MCAST) ? 1 : 0);
1797 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1798 			DATA, UCAST,
1799 			(htt_tlv_filter->mo_data_filter &
1800 			FILTER_DATA_UCAST) ? 1 : 0);
1801 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1802 			DATA, NULL,
1803 			(htt_tlv_filter->mo_data_filter &
1804 			FILTER_DATA_NULL) ? 1 : 0);
1805 	}
1806 
1807 	/* word 6 */
1808 	msg_word++;
1809 	*msg_word = 0;
1810 	tlv_filter = 0;
1811 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1812 		htt_tlv_filter->mpdu_start);
1813 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1814 		htt_tlv_filter->msdu_start);
1815 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1816 		htt_tlv_filter->packet);
1817 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1818 		htt_tlv_filter->msdu_end);
1819 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1820 		htt_tlv_filter->mpdu_end);
1821 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1822 		htt_tlv_filter->packet_header);
1823 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1824 		htt_tlv_filter->attention);
1825 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1826 		htt_tlv_filter->ppdu_start);
1827 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1828 		htt_tlv_filter->ppdu_end);
1829 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1830 		htt_tlv_filter->ppdu_end_user_stats);
1831 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1832 		PPDU_END_USER_STATS_EXT,
1833 		htt_tlv_filter->ppdu_end_user_stats_ext);
1834 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1835 		htt_tlv_filter->ppdu_end_status_done);
	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter */
	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
		htt_tlv_filter->header_per_msdu);
1839 
1840 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1841 
1842 	msg_word++;
1843 	*msg_word = 0;
1844 	if (htt_tlv_filter->offset_valid) {
1845 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1846 					htt_tlv_filter->rx_packet_offset);
1847 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1848 					htt_tlv_filter->rx_header_offset);
1849 
1850 		msg_word++;
1851 		*msg_word = 0;
1852 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1853 					htt_tlv_filter->rx_mpdu_end_offset);
1854 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1855 					htt_tlv_filter->rx_mpdu_start_offset);
1856 
1857 		msg_word++;
1858 		*msg_word = 0;
1859 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1860 					htt_tlv_filter->rx_msdu_end_offset);
1861 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1862 					htt_tlv_filter->rx_msdu_start_offset);
1863 
1864 		msg_word++;
1865 		*msg_word = 0;
1866 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1867 					htt_tlv_filter->rx_attn_offset);
1868 		msg_word++;
1869 		*msg_word = 0;
1870 	} else {
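		/* offsets not supplied: skip over the four offset words */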
1871 		msg_word += 4;
1872 		*msg_word = 0;
1873 	}
1874 
1875 	if (mon_drop_th > 0)
1876 		HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1877 								mon_drop_th);
1878 
	/* "response_required" field should be set if an HTT response message is
	 * required after setting up the ring.
	 */
1882 	pkt = htt_htc_pkt_alloc(soc);
1883 	if (!pkt)
1884 		goto fail1;
1885 
1886 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1887 
1888 	SET_HTC_PACKET_INFO_TX(
1889 		&pkt->htc_pkt,
1890 		dp_htt_h2t_send_complete_free_netbuf,
1891 		qdf_nbuf_data(htt_msg),
1892 		qdf_nbuf_len(htt_msg),
1893 		soc->htc_endpoint,
1894 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
1895 
1896 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1897 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
1898 				     HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1899 				     htt_logger_bufp);
1900 
1901 	if (status != QDF_STATUS_SUCCESS) {
1902 		qdf_nbuf_free(htt_msg);
1903 		htt_htc_pkt_free(soc, pkt);
1904 	}
1905 
1906 	return status;
1907 
1908 fail1:
1909 	qdf_nbuf_free(htt_msg);
1910 fail0:
1911 	return QDF_STATUS_E_FAILURE;
1912 }
1913 
1914 #if defined(HTT_STATS_ENABLE)
1915 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1916 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1917 
1918 {
1919 	uint32_t pdev_id;
1920 	uint32_t *msg_word = NULL;
1921 	uint32_t msg_remain_len = 0;
1922 
1923 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1924 
	/* COOKIE MSB */
1926 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1927 
	/* stats message length + 16 bytes of HTT header */
1929 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1930 				(uint32_t)DP_EXT_MSG_LENGTH);
1931 
1932 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1933 			msg_word,  msg_remain_len,
1934 			WDI_NO_VAL, pdev_id);
1935 
1936 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1937 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1938 	}
	/* Needs to be freed here as the WDI handler will
	 * make a copy of the pkt to send data to the application
1941 	 */
1942 	qdf_nbuf_free(htt_msg);
1943 	return QDF_STATUS_SUCCESS;
1944 }
1945 #else
1946 static inline QDF_STATUS
1947 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1948 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
1949 {
1950 	return QDF_STATUS_E_NOSUPPORT;
1951 }
1952 #endif
1953 /**
1954  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
 * @htt_stats: htt stats info
 * @soc: DP SOC handle
 *
 * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
 * contains sub-messages which are identified by a TLV header.
 * In this function we process the stream of T2H messages and read all the
 * TLVs contained in the message.
 *
 * The following cases have been taken care of
 * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
 *		In this case the buffer will contain multiple tlvs.
 * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
 *		Only one tlv will be contained in the HTT message and this tag
 *		will extend onto the next buffer.
 * Case 3: When the buffer is the continuation of the previous message
 * Case 4: When the TLV length is 0, which indicates the end of the message
1970  *
1971  * return: void
1972  */
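/*
 * Illustrative walk-through (hypothetical sizes, assuming DP_EXT_MSG_LENGTH =
 * 2048): a TLV whose header announces 2500 bytes cannot fit in the 2048-byte
 * message body (Case 2), so its first part is copied into a heap buffer
 * (tlv_buf_head) and the remainder is appended from the continuation buffer
 * (Case 3) before the reassembled TLV is handed to dp_htt_stats_copy_tag() or
 * dp_htt_stats_print_tag(). A TLV length of 0 (Case 4) ends the message.
 */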
1973 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
1974 					struct dp_soc *soc)
1975 {
1976 	htt_tlv_tag_t tlv_type = 0xff;
1977 	qdf_nbuf_t htt_msg = NULL;
1978 	uint32_t *msg_word;
1979 	uint8_t *tlv_buf_head = NULL;
1980 	uint8_t *tlv_buf_tail = NULL;
1981 	uint32_t msg_remain_len = 0;
1982 	uint32_t tlv_remain_len = 0;
1983 	uint32_t *tlv_start;
1984 	int cookie_val;
1985 	int cookie_msb;
1986 	int pdev_id;
1987 	bool copy_stats = false;
1988 	struct dp_pdev *pdev;
1989 
1990 	/* Process node in the HTT message queue */
1991 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1992 		!= NULL) {
1993 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1994 		cookie_val = *(msg_word + 1);
1995 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
1996 					*(msg_word +
1997 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
1998 
1999 		if (cookie_val) {
2000 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
2001 					== QDF_STATUS_SUCCESS) {
2002 				continue;
2003 			}
2004 		}
2005 
2006 		cookie_msb = *(msg_word + 2);
2007 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
2008 		pdev = soc->pdev_list[pdev_id];
2009 
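		/* bits of the cookie MSB above the pdev id field request that
		 * the stats be copied rather than printed
		 */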
2010 		if (cookie_msb >> 2) {
2011 			copy_stats = true;
2012 		}
2013 
2014 		/* read 5th word */
2015 		msg_word = msg_word + 4;
2016 		msg_remain_len = qdf_min(htt_stats->msg_len,
2017 				(uint32_t) DP_EXT_MSG_LENGTH);
2018 		/* Keep processing the node till node length is 0 */
2019 		while (msg_remain_len) {
2020 			/*
2021 			 * if message is not a continuation of previous message
2022 			 * read the tlv type and tlv length
2023 			 */
2024 			if (!tlv_buf_head) {
2025 				tlv_type = HTT_STATS_TLV_TAG_GET(
2026 						*msg_word);
2027 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
2028 						*msg_word);
2029 			}
2030 
2031 			if (tlv_remain_len == 0) {
2032 				msg_remain_len = 0;
2033 
2034 				if (tlv_buf_head) {
2035 					qdf_mem_free(tlv_buf_head);
2036 					tlv_buf_head = NULL;
2037 					tlv_buf_tail = NULL;
2038 				}
2039 
2040 				goto error;
2041 			}
2042 
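			/* starting a fresh TLV: the length field excludes the
			 * TLV header, so account for it here
			 */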
2043 			if (!tlv_buf_head)
2044 				tlv_remain_len += HTT_TLV_HDR_LEN;
2045 
			if (tlv_remain_len <= msg_remain_len) {
2047 				/* Case 3 */
2048 				if (tlv_buf_head) {
2049 					qdf_mem_copy(tlv_buf_tail,
2050 							(uint8_t *)msg_word,
2051 							tlv_remain_len);
2052 					tlv_start = (uint32_t *)tlv_buf_head;
2053 				} else {
2054 					/* Case 1 */
2055 					tlv_start = msg_word;
2056 				}
2057 
2058 				if (copy_stats)
2059 					dp_htt_stats_copy_tag(pdev,
2060 							      tlv_type,
2061 							      tlv_start);
2062 				else
2063 					dp_htt_stats_print_tag(pdev,
2064 							       tlv_type,
2065 							       tlv_start);
2066 
2067 				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
2068 				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
2069 					dp_peer_update_inactive_time(pdev,
2070 								     tlv_type,
2071 								     tlv_start);
2072 
2073 				msg_remain_len -= tlv_remain_len;
2074 
2075 				msg_word = (uint32_t *)
2076 					(((uint8_t *)msg_word) +
2077 					tlv_remain_len);
2078 
2079 				tlv_remain_len = 0;
2080 
2081 				if (tlv_buf_head) {
2082 					qdf_mem_free(tlv_buf_head);
2083 					tlv_buf_head = NULL;
2084 					tlv_buf_tail = NULL;
2085 				}
2086 
2087 			} else { /* tlv_remain_len > msg_remain_len */
2088 				/* Case 2 & 3 */
2089 				if (!tlv_buf_head) {
2090 					tlv_buf_head = qdf_mem_malloc(
2091 							tlv_remain_len);
2092 
2093 					if (!tlv_buf_head) {
2094 						QDF_TRACE(QDF_MODULE_ID_TXRX,
2095 								QDF_TRACE_LEVEL_ERROR,
2096 								"Alloc failed");
2097 						goto error;
2098 					}
2099 
2100 					tlv_buf_tail = tlv_buf_head;
2101 				}
2102 
2103 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
2104 						msg_remain_len);
2105 				tlv_remain_len -= msg_remain_len;
2106 				tlv_buf_tail += msg_remain_len;
2107 			}
2108 		}
2109 
2110 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
2111 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
2112 		}
2113 
2114 		qdf_nbuf_free(htt_msg);
2115 	}
2116 	return;
2117 
2118 error:
2119 	qdf_nbuf_free(htt_msg);
2120 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
2121 			!= NULL)
2122 		qdf_nbuf_free(htt_msg);
2123 }
2124 
2125 void htt_t2h_stats_handler(void *context)
2126 {
2127 	struct dp_soc *soc = (struct dp_soc *)context;
2128 	struct htt_stats_context htt_stats;
2129 	uint32_t *msg_word;
2130 	qdf_nbuf_t htt_msg = NULL;
2131 	uint8_t done;
2132 	uint32_t rem_stats;
2133 
2134 	if (!soc) {
2135 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2136 			  "soc is NULL");
2137 		return;
2138 	}
2139 
2140 	if (!qdf_atomic_read(&soc->cmn_init_done)) {
2141 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2142 			  "soc: 0x%pK, init_done: %d", soc,
2143 			  qdf_atomic_read(&soc->cmn_init_done));
2144 		return;
2145 	}
2146 
2147 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
2148 	qdf_nbuf_queue_init(&htt_stats.msg);
2149 
	/* pull one completed stats stream from soc->htt_stats.msg and process it */
2151 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2152 	if (!soc->htt_stats.num_stats) {
2153 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
2154 		return;
2155 	}
2156 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
2157 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
2158 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
2159 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2160 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
2161 		/*
2162 		 * Done bit signifies that this is the last T2H buffer in the
2163 		 * stream of HTT EXT STATS message
2164 		 */
2165 		if (done)
2166 			break;
2167 	}
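	/* one completed stats stream has been dequeued; note how many remain */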
2168 	rem_stats = --soc->htt_stats.num_stats;
2169 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2170 
	/* If there are more stats to process, schedule the stats work again.
	 * Schedule before processing htt_stats so the work is queued with an
	 * earlier index.
	 */
2175 	if (rem_stats)
2176 		qdf_sched_work(0, &soc->htt_stats.work);
2177 
2178 	dp_process_htt_stat_msg(&htt_stats, soc);
2179 }
2180 
2181 /*
 * dp_get_ppdu_info_user_index: Find the per-user descriptor for a PPDU, or
 * allocate a new one if a new peer id arrives in the PPDU
 * @pdev: DP pdev handle
 * @peer_id: peer unique identifier
 * @ppdu_info: per ppdu tlv structure
 *
 * return: user index to be populated
2189  */
2190 #ifdef FEATURE_PERPKT_INFO
2191 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
2192 						uint16_t peer_id,
2193 						struct ppdu_info *ppdu_info)
2194 {
2195 	uint8_t user_index = 0;
2196 	struct cdp_tx_completion_ppdu *ppdu_desc;
2197 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2198 
2199 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2200 
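	/* scan the users already recorded for this PPDU for a matching peer_id */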
2201 	while ((user_index + 1) <= ppdu_info->last_user) {
2202 		ppdu_user_desc = &ppdu_desc->user[user_index];
2203 		if (ppdu_user_desc->peer_id != peer_id) {
2204 			user_index++;
2205 			continue;
2206 		} else {
2207 			/* Max users possible is 8 so user array index should
2208 			 * not exceed 7
2209 			 */
2210 			qdf_assert_always(user_index <= (ppdu_desc->max_users - 1));
2211 			return user_index;
2212 		}
2213 	}
2214 
2215 	ppdu_info->last_user++;
2216 	/* Max users possible is 8 so last user should not exceed 8 */
2217 	qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users);
2218 	return ppdu_info->last_user - 1;
2219 }
2220 
2221 /*
2222  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
 * @pdev: DP pdev handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
2228  */
2229 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
2230 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
2231 {
2232 	uint16_t frame_type;
2233 	uint16_t frame_ctrl;
2234 	uint16_t freq;
2235 	struct dp_soc *soc = NULL;
2236 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2237 	uint64_t ppdu_start_timestamp;
2238 	uint32_t *start_tag_buf;
2239 
2240 	start_tag_buf = tag_buf;
2241 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2242 
2243 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2244 
2245 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
2246 	ppdu_info->sched_cmdid =
2247 		HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
2248 	ppdu_desc->num_users =
2249 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
2250 
2251 	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
2252 
2253 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
2254 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
2255 	ppdu_desc->htt_frame_type = frame_type;
2256 
2257 	frame_ctrl = ppdu_desc->frame_ctrl;
2258 
2259 	ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
2260 
2261 	switch (frame_type) {
2262 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
2263 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
2264 	case HTT_STATS_FTYPE_SGEN_QOS_NULL:
2265 		/*
		 * for management packets the frame type comes as DATA_SU, so
		 * frame_ctrl must be checked before setting frame_type
2268 		 */
2269 		if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
2270 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2271 		else
2272 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
2273 	break;
2274 	case HTT_STATS_FTYPE_SGEN_MU_BAR:
2275 	case HTT_STATS_FTYPE_SGEN_BAR:
2276 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
2277 	break;
2278 	default:
2279 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2280 	break;
2281 	}
2282 
2283 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
2284 	ppdu_desc->tx_duration = *tag_buf;
2285 
2286 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
2287 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
2288 
2289 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
2290 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
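	/* on a channel change, refresh the cached operating channel number and band */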
2291 	if (freq != ppdu_desc->channel) {
2292 		soc = pdev->soc;
2293 		ppdu_desc->channel = freq;
2294 		pdev->operating_channel.freq = freq;
2295 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
2296 			pdev->operating_channel.num =
2297 			    soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
2298 								 pdev->pdev_id,
2299 								 freq);
2300 
2301 		if (soc && soc->cdp_soc.ol_ops->freq_to_band)
2302 			pdev->operating_channel.band =
2303 			       soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
2304 								 pdev->pdev_id,
2305 								 freq);
2306 	}
2307 
2308 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
2309 
2310 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
2311 	ppdu_desc->phy_ppdu_tx_time_us =
2312 		HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf);
2313 	ppdu_desc->beam_change =
2314 		HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
2315 	ppdu_desc->doppler =
2316 		HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf);
2317 	ppdu_desc->spatial_reuse =
2318 		HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf);
2319 
2320 	dp_tx_capture_htt_frame_counter(pdev, frame_type);
2321 
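	/* fold the upper 32 bits into the 64-bit start timestamp whose lower
	 * 32 bits were captured from START_TSTMP_L32_US above
	 */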
2322 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
2323 	ppdu_start_timestamp = *tag_buf;
2324 	ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
2325 					     HTT_SHIFT_UPPER_TIMESTAMP) &
2326 					    HTT_MASK_UPPER_TIMESTAMP);
2327 
	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
					ppdu_desc->tx_duration;
	/* Ack timestamp is the same as the end timestamp */
	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;

	ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
	ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
	ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
2342 }
2343 
2344 /*
 * dp_process_ppdu_stats_user_common_tlv: Process htt_ppdu_stats_user_common_tlv
 * @pdev: DP pdev handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
2350  */
2351 static void dp_process_ppdu_stats_user_common_tlv(
2352 		struct dp_pdev *pdev, uint32_t *tag_buf,
2353 		struct ppdu_info *ppdu_info)
2354 {
2355 	uint16_t peer_id;
2356 	struct cdp_tx_completion_ppdu *ppdu_desc;
2357 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2358 	uint8_t curr_user_index = 0;
2359 	struct dp_peer *peer;
2360 	struct dp_vdev *vdev;
2361 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2362 
2363 	ppdu_desc =
2364 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2365 
2366 	tag_buf++;
2367 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2368 
2369 	curr_user_index =
2370 		dp_get_ppdu_info_user_index(pdev,
2371 					    peer_id, ppdu_info);
2372 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2373 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2374 
2375 	ppdu_desc->vdev_id =
2376 		HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
2377 
2378 	ppdu_user_desc->peer_id = peer_id;
2379 
2380 	tag_buf++;
2381 
2382 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
2383 		ppdu_user_desc->delayed_ba = 1;
2384 		ppdu_desc->delayed_ba = 1;
2385 	}
2386 
2387 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
2388 		ppdu_user_desc->is_mcast = true;
2389 		ppdu_user_desc->mpdu_tried_mcast =
2390 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
2391 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
2392 	} else {
2393 		ppdu_user_desc->mpdu_tried_ucast =
2394 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
2395 	}
2396 
2397 	tag_buf++;
2398 
2399 	ppdu_user_desc->qos_ctrl =
2400 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
2401 	ppdu_user_desc->frame_ctrl =
2402 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
2403 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
2404 
2405 	if (ppdu_user_desc->delayed_ba)
2406 		ppdu_user_desc->mpdu_success = 0;
2407 
2408 	tag_buf += 3;
2409 
2410 	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
2411 		ppdu_user_desc->ppdu_cookie =
2412 			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
2413 		ppdu_user_desc->is_ppdu_cookie_valid = 1;
2414 	}
2415 
	/* returning early would leave the remaining fields unpopulated */
2417 	if (peer_id == DP_SCAN_PEER_ID) {
2418 		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
2419 					     DP_MOD_ID_TX_PPDU_STATS);
2420 		if (!vdev)
2421 			return;
2422 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
2423 			     QDF_MAC_ADDR_SIZE);
2424 		dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS);
2425 	} else {
2426 		peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
2427 					     DP_MOD_ID_TX_PPDU_STATS);
2428 		if (!peer) {
2429 			/*
			 * FW may send a peer_id that is about to be removed
			 * but has already been removed on the host.
			 * E.g. on disassoc, FW sends ppdu stats with a peer_id
			 * equal to the previously associated peer's peer_id,
			 * even though that peer has been removed.
2435 			 */
2436 			vdev = dp_vdev_get_ref_by_id(pdev->soc,
2437 						     ppdu_desc->vdev_id,
2438 						     DP_MOD_ID_TX_PPDU_STATS);
2439 			if (!vdev)
2440 				return;
2441 			qdf_mem_copy(ppdu_user_desc->mac_addr,
2442 				     vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2443 			dp_vdev_unref_delete(pdev->soc, vdev,
2444 					     DP_MOD_ID_TX_PPDU_STATS);
2445 			return;
2446 		}
2447 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2448 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2449 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
2450 	}
2451 }
2452 
2453 
2454 /**
2455  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
2456  * @pdev: DP pdev handle
2457  * @tag_buf: T2H message buffer carrying the user rate TLV
2458  * @ppdu_info: per ppdu tlv structure
2459  *
2460  * return:void
2461  */
2462 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
2463 		uint32_t *tag_buf,
2464 		struct ppdu_info *ppdu_info)
2465 {
2466 	uint16_t peer_id;
2467 	struct cdp_tx_completion_ppdu *ppdu_desc;
2468 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2469 	uint8_t curr_user_index = 0;
2470 	struct dp_vdev *vdev;
2471 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2472 
2473 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2474 
2475 	tag_buf++;
2476 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2477 
2478 	curr_user_index =
2479 		dp_get_ppdu_info_user_index(pdev,
2480 					    peer_id, ppdu_info);
2481 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2482 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2483 	if (peer_id == DP_SCAN_PEER_ID) {
2484 		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
2485 					     DP_MOD_ID_TX_PPDU_STATS);
2486 		if (!vdev)
2487 			return;
2488 		dp_vdev_unref_delete(pdev->soc, vdev,
2489 				     DP_MOD_ID_TX_PPDU_STATS);
2490 	}
2491 	ppdu_user_desc->peer_id = peer_id;
2492 
2493 	ppdu_user_desc->tid =
2494 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
2495 
2496 	tag_buf += 1;
2497 
2498 	ppdu_user_desc->user_pos =
2499 		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
2500 	ppdu_user_desc->mu_group_id =
2501 		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
2502 
2503 	tag_buf += 1;
2504 
2505 	ppdu_user_desc->ru_start =
2506 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
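	/* RU end/start indices are inclusive, hence the +1 in the tone count */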
2507 	ppdu_user_desc->ru_tones =
2508 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
2509 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
2510 	ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones;
2511 
2512 	tag_buf += 2;
2513 
2514 	ppdu_user_desc->ppdu_type =
2515 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
2516 
2517 	tag_buf++;
2518 	ppdu_user_desc->tx_rate = *tag_buf;
2519 
2520 	ppdu_user_desc->ltf_size =
2521 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
2522 	ppdu_user_desc->stbc =
2523 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
2524 	ppdu_user_desc->he_re =
2525 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
2526 	ppdu_user_desc->txbf =
2527 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
2528 	ppdu_user_desc->bw =
2529 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
2530 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
2531 	ppdu_desc->usr_nss_sum += ppdu_user_desc->nss;
2532 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
2533 	ppdu_user_desc->preamble =
2534 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
2535 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
2536 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
2537 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
2538 }
2539 
2540 /*
2541  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
2542  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
2548  */
2549 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2550 		struct dp_pdev *pdev, uint32_t *tag_buf,
2551 		struct ppdu_info *ppdu_info)
2552 {
2553 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
2554 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
2555 
2556 	struct cdp_tx_completion_ppdu *ppdu_desc;
2557 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2558 	uint8_t curr_user_index = 0;
2559 	uint16_t peer_id;
2560 	uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
2561 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2562 
2563 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2564 
2565 	tag_buf++;
2566 
2567 	peer_id =
2568 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2569 
2570 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2571 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2572 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2573 	ppdu_user_desc->peer_id = peer_id;
2574 
2575 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2576 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2577 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2578 
2579 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2580 						   (void *)ppdu_user_desc,
2581 						   ppdu_info->ppdu_id,
2582 						   size);
2583 }
2584 
2585 /*
2586  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
2587  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
2593  */
2594 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2595 		struct dp_pdev *pdev, uint32_t *tag_buf,
2596 		struct ppdu_info *ppdu_info)
2597 {
2598 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
2599 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
2600 
2601 	struct cdp_tx_completion_ppdu *ppdu_desc;
2602 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2603 	uint8_t curr_user_index = 0;
2604 	uint16_t peer_id;
2605 	uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
2606 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2607 
2608 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2609 
2610 	tag_buf++;
2611 
2612 	peer_id =
2613 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2614 
2615 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2616 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2617 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2618 	ppdu_user_desc->peer_id = peer_id;
2619 
2620 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2621 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2622 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2623 
2624 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2625 						   (void *)ppdu_user_desc,
2626 						   ppdu_info->ppdu_id,
2627 						   size);
2628 }
2629 
2630 /*
2631  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
2632  * htt_ppdu_stats_user_cmpltn_common_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
2638  */
2639 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
2640 		struct dp_pdev *pdev, uint32_t *tag_buf,
2641 		struct ppdu_info *ppdu_info)
2642 {
2643 	uint16_t peer_id;
2644 	struct cdp_tx_completion_ppdu *ppdu_desc;
2645 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2646 	uint8_t curr_user_index = 0;
2647 	uint8_t bw_iter;
2648 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
2649 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
2650 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2651 
2652 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2653 
2654 	tag_buf++;
2655 	peer_id =
2656 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
2657 
2658 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2659 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2660 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2661 	ppdu_user_desc->peer_id = peer_id;
2662 
2663 	ppdu_user_desc->completion_status =
2664 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
2665 				*tag_buf);
2666 
2667 	ppdu_user_desc->tid =
2668 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
2669 
2670 
2671 	tag_buf++;
2672 	if (qdf_likely(ppdu_user_desc->completion_status ==
2673 			HTT_PPDU_STATS_USER_STATUS_OK)) {
2674 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
2675 		ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi;
2676 		ppdu_user_desc->ack_rssi_valid = 1;
2677 	} else {
2678 		ppdu_user_desc->ack_rssi_valid = 0;
2679 	}
2680 
2681 	tag_buf++;
2682 
2683 	ppdu_user_desc->mpdu_success =
2684 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
2685 
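	/* failed MPDUs = MPDUs tried minus MPDUs that succeeded */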
2686 	ppdu_user_desc->mpdu_failed =
2687 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
2688 						ppdu_user_desc->mpdu_success;
2689 
2690 	tag_buf++;
2691 
2692 	ppdu_user_desc->long_retries =
2693 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2694 
2695 	ppdu_user_desc->short_retries =
2696 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
2697 	ppdu_user_desc->retry_msdus =
2698 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
2699 
2700 	ppdu_user_desc->is_ampdu =
2701 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
2702 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
2703 
2704 	ppdu_desc->resp_type =
2705 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
2706 	ppdu_desc->mprot_type =
2707 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
2708 	ppdu_desc->rts_success =
2709 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
2710 	ppdu_desc->rts_failure =
2711 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);
2712 	ppdu_user_desc->pream_punct =
2713 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf);
2714 
2715 	ppdu_info->compltn_common_tlv++;
2716 
2717 	/*
	 * A MU BAR may send requests to n users, but we may receive acks from
	 * only m of them. To count the number of users that responded, we keep
	 * a separate per-PPDU counter, bar_num_users, which is incremented for
	 * every htt_ppdu_stats_user_cmpltn_common_tlv received.
2722 	 */
2723 	ppdu_desc->bar_num_users++;
2724 
2725 	tag_buf++;
2726 	for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
2727 		ppdu_user_desc->rssi_chain[bw_iter] =
2728 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
2729 		tag_buf++;
2730 	}
2731 
2732 	ppdu_user_desc->sa_tx_antenna =
2733 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);
2734 
2735 	tag_buf++;
2736 	ppdu_user_desc->sa_is_training =
2737 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
2738 	if (ppdu_user_desc->sa_is_training) {
2739 		ppdu_user_desc->sa_goodput =
2740 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
2741 	}
2742 
2743 	tag_buf++;
2744 	for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
2745 		ppdu_user_desc->sa_max_rates[bw_iter] =
2746 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
2747 	}
2748 
2749 	tag_buf += CDP_NUM_SA_BW;
2750 	ppdu_user_desc->current_rate_per =
2751 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
2752 }
2753 
2754 /*
2755  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2756  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
2762  */
2763 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2764 		struct dp_pdev *pdev, uint32_t *tag_buf,
2765 		struct ppdu_info *ppdu_info)
2766 {
2767 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2768 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2769 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2770 	struct cdp_tx_completion_ppdu *ppdu_desc;
2771 	uint8_t curr_user_index = 0;
2772 	uint16_t peer_id;
2773 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2774 
2775 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2776 
2777 	tag_buf++;
2778 
2779 	peer_id =
2780 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2781 
2782 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2783 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2784 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2785 	ppdu_user_desc->peer_id = peer_id;
2786 
2787 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2788 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2789 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2790 	ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
2791 }
2792 
2793 /*
2794  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2795  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
2801  */
2802 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2803 		struct dp_pdev *pdev, uint32_t *tag_buf,
2804 		struct ppdu_info *ppdu_info)
2805 {
2806 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2807 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2808 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2809 	struct cdp_tx_completion_ppdu *ppdu_desc;
2810 	uint8_t curr_user_index = 0;
2811 	uint16_t peer_id;
2812 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2813 
2814 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2815 
2816 	tag_buf++;
2817 
2818 	peer_id =
2819 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2820 
2821 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2822 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2823 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2824 	ppdu_user_desc->peer_id = peer_id;
2825 
2826 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2827 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2828 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2829 	ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
2830 }
2831 
2832 /*
2833  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2834  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
2840  */
2841 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2842 		struct dp_pdev *pdev, uint32_t *tag_buf,
2843 		struct ppdu_info *ppdu_info)
2844 {
2845 	uint16_t peer_id;
2846 	struct cdp_tx_completion_ppdu *ppdu_desc;
2847 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2848 	uint8_t curr_user_index = 0;
2849 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2850 
2851 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2852 
2853 	tag_buf += 2;
2854 	peer_id =
2855 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2856 
2857 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2858 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2859 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2860 	if (!ppdu_user_desc->ack_ba_tlv) {
2861 		ppdu_user_desc->ack_ba_tlv = 1;
2862 	} else {
2863 		pdev->stats.ack_ba_comes_twice++;
2864 		return;
2865 	}
2866 
2867 	ppdu_user_desc->peer_id = peer_id;
2868 
2869 	tag_buf++;
	/* do not update ppdu_desc->tid from this TLV */
2871 	ppdu_user_desc->num_mpdu =
2872 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2873 
2874 	ppdu_user_desc->num_msdu =
2875 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2876 
2877 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2878 
2879 	tag_buf++;
2880 	ppdu_user_desc->start_seq =
2881 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
2882 			*tag_buf);
2883 
2884 	tag_buf++;
2885 	ppdu_user_desc->success_bytes = *tag_buf;
2886 
2887 	/* increase ack ba tlv counter on successful mpdu */
2888 	if (ppdu_user_desc->num_mpdu)
2889 		ppdu_info->ack_ba_tlv++;
2890 
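	/* no BA bitmap TLV was received for this user (e.g. a single-MPDU
	 * ack): synthesize a one-entry bitmap from the start sequence
	 */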
2891 	if (ppdu_user_desc->ba_size == 0) {
2892 		ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
2893 		ppdu_user_desc->ba_bitmap[0] = 1;
2894 		ppdu_user_desc->ba_size = 1;
2895 	}
2896 }
2897 
2898 /*
2899  * dp_process_ppdu_stats_user_common_array_tlv: Process
2900  * htt_ppdu_stats_user_common_array_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_common_array_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
2906  */
2907 static void dp_process_ppdu_stats_user_common_array_tlv(
2908 		struct dp_pdev *pdev, uint32_t *tag_buf,
2909 		struct ppdu_info *ppdu_info)
2910 {
2911 	uint32_t peer_id;
2912 	struct cdp_tx_completion_ppdu *ppdu_desc;
2913 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2914 	uint8_t curr_user_index = 0;
2915 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2916 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2917 
2918 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2919 
2920 	tag_buf++;
2921 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2922 	tag_buf += 3;
2923 	peer_id =
2924 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2925 
2926 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
2927 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2928 			"Invalid peer");
2929 		return;
2930 	}
2931 
2932 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2933 
2934 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2935 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2936 
2937 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2938 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2939 
2940 	tag_buf++;
2941 
2942 	ppdu_user_desc->success_msdus =
2943 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2944 	ppdu_user_desc->retry_bytes =
2945 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2946 	tag_buf++;
2947 	ppdu_user_desc->failed_msdus =
2948 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2949 }
2950 
2951 /*
 * dp_process_ppdu_stats_user_compltn_flush_tlv: Process
 * htt_ppdu_stats_flush_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
2959  */
2960 static void
2961 dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2962 					     uint32_t *tag_buf,
2963 					     struct ppdu_info *ppdu_info)
2964 {
2965 	struct cdp_tx_completion_ppdu *ppdu_desc;
2966 	uint32_t peer_id;
2967 	uint8_t tid;
2968 	struct dp_peer *peer;
2969 
2970 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2971 				qdf_nbuf_data(ppdu_info->nbuf);
2972 	ppdu_desc->is_flush = 1;
2973 
2974 	tag_buf++;
2975 	ppdu_desc->drop_reason = *tag_buf;
2976 
2977 	tag_buf++;
2978 	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2979 	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
2980 	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);
2981 
2982 	tag_buf++;
2983 	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2984 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2985 
2986 	ppdu_desc->num_users = 1;
2987 	ppdu_desc->user[0].peer_id = peer_id;
2988 	ppdu_desc->user[0].tid = tid;
2989 
2990 	ppdu_desc->queue_type =
2991 			HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);
2992 
2993 	peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
2994 				     DP_MOD_ID_TX_PPDU_STATS);
2995 	if (!peer)
2996 		goto add_ppdu_to_sched_list;
2997 
2998 	if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2999 		DP_STATS_INC(peer,
3000 			     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
3001 			     ppdu_desc->num_msdu);
3002 	}
3003 
3004 	dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3005 
3006 add_ppdu_to_sched_list:
3007 	ppdu_info->done = 1;
3008 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
3009 	pdev->list_depth--;
3010 	TAILQ_INSERT_TAIL(&pdev->sched_comp_ppdu_list, ppdu_info,
3011 			  ppdu_info_list_elem);
3012 	pdev->sched_comp_list_depth++;
3013 }
3014 
3015 /**
3016  * dp_process_ppdu_stats_sch_cmd_status_tlv: Process schedule command status tlv
 * The TLV buffer itself is not processed here.
 * @pdev: DP PDEV handle
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
3022  */
3023 static void
3024 dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev,
3025 					 struct ppdu_info *ppdu_info)
3026 {
3027 	struct cdp_tx_completion_ppdu *ppdu_desc;
3028 	struct dp_peer *peer;
3029 	uint8_t num_users;
3030 	uint8_t i;
3031 
3032 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3033 				qdf_nbuf_data(ppdu_info->nbuf);
3034 
3035 	num_users = ppdu_desc->bar_num_users;
3036 
3037 	for (i = 0; i < num_users; i++) {
3038 		if (ppdu_desc->user[i].user_pos == 0) {
3039 			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
3040 				/* update phy mode for bar frame */
3041 				ppdu_desc->phy_mode =
3042 					ppdu_desc->user[i].preamble;
3043 				ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs;
3044 				break;
3045 			}
3046 			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) {
3047 				ppdu_desc->frame_ctrl =
3048 					ppdu_desc->user[i].frame_ctrl;
3049 				break;
3050 			}
3051 		}
3052 	}
3053 
3054 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
3055 	    ppdu_desc->delayed_ba) {
3056 		qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
3057 
3058 		for (i = 0; i < ppdu_desc->num_users; i++) {
3059 			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
3060 			uint64_t start_tsf;
3061 			uint64_t end_tsf;
3062 			uint32_t ppdu_id;
3063 
3064 			ppdu_id = ppdu_desc->ppdu_id;
3065 			peer = dp_peer_get_ref_by_id
3066 				(pdev->soc, ppdu_desc->user[i].peer_id,
3067 				 DP_MOD_ID_TX_PPDU_STATS);
			/*
3069 			 * This check is to make sure peer is not deleted
3070 			 * after processing the TLVs.
3071 			 */
3072 			if (!peer)
3073 				continue;
3074 
3075 			delay_ppdu = &peer->delayed_ba_ppdu_stats;
3076 			start_tsf = ppdu_desc->ppdu_start_timestamp;
3077 			end_tsf = ppdu_desc->ppdu_end_timestamp;
			/* save delayed ba user info */
3081 			if (ppdu_desc->user[i].delayed_ba) {
3082 				dp_peer_copy_delay_stats(peer,
3083 							 &ppdu_desc->user[i],
3084 							 ppdu_id);
3085 				peer->last_delayed_ba_ppduid = ppdu_id;
3086 				delay_ppdu->ppdu_start_timestamp = start_tsf;
3087 				delay_ppdu->ppdu_end_timestamp = end_tsf;
3088 			}
3089 			ppdu_desc->user[i].peer_last_delayed_ba =
3090 				peer->last_delayed_ba;
3091 
3092 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3093 
3094 			if (ppdu_desc->user[i].delayed_ba &&
3095 			    !ppdu_desc->user[i].debug_copied) {
3096 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3097 					  QDF_TRACE_LEVEL_INFO_MED,
3098 					  "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n",
3099 					  __func__, __LINE__,
3100 					  ppdu_desc->ppdu_id,
3101 					  ppdu_desc->bar_ppdu_id,
3102 					  ppdu_desc->num_users,
3103 					  i,
3104 					  ppdu_desc->htt_frame_type);
3105 			}
3106 		}
3107 	}
3108 
3109 	/*
	 * when the frame type is BAR and STATS_COMMON_TLV is set,
	 * copy the stored peer delayed-BA info to the BAR status
3112 	 */
3113 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
3114 		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
3115 			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
3116 			uint64_t start_tsf;
3117 			uint64_t end_tsf;
3118 
3119 			peer = dp_peer_get_ref_by_id
3120 				(pdev->soc,
3121 				 ppdu_desc->user[i].peer_id,
3122 				 DP_MOD_ID_TX_PPDU_STATS);
			/*
3124 			 * This check is to make sure peer is not deleted
3125 			 * after processing the TLVs.
3126 			 */
3127 			if (!peer)
3128 				continue;
3129 
3130 			if (ppdu_desc->user[i].completion_status !=
3131 			    HTT_PPDU_STATS_USER_STATUS_OK) {
3132 				dp_peer_unref_delete(peer,
3133 						     DP_MOD_ID_TX_PPDU_STATS);
3134 				continue;
3135 			}
3136 
3137 			delay_ppdu = &peer->delayed_ba_ppdu_stats;
3138 			start_tsf = delay_ppdu->ppdu_start_timestamp;
3139 			end_tsf = delay_ppdu->ppdu_end_timestamp;
3140 
3141 			if (peer->last_delayed_ba) {
3142 				dp_peer_copy_stats_to_bar(peer,
3143 							  &ppdu_desc->user[i]);
3144 				ppdu_desc->ppdu_id =
3145 					peer->last_delayed_ba_ppduid;
3146 				ppdu_desc->ppdu_start_timestamp = start_tsf;
3147 				ppdu_desc->ppdu_end_timestamp = end_tsf;
3148 			}
3149 			ppdu_desc->user[i].peer_last_delayed_ba =
3150 				peer->last_delayed_ba;
3151 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3152 		}
3153 	}
3154 
3155 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
3156 	pdev->list_depth--;
3157 	TAILQ_INSERT_TAIL(&pdev->sched_comp_ppdu_list, ppdu_info,
3158 			  ppdu_info_list_elem);
3159 	pdev->sched_comp_list_depth++;
3160 }
3161 
3162 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3163 /*
 * dp_deliver_mgmt_frm: Deliver a Tx management frame to registered handlers
3165  * @pdev: DP PDEV handle
3166  * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3167  *
3168  * return: void
3169  */
3170 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
3171 {
3172 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3173 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
3174 				     nbuf, HTT_INVALID_PEER,
3175 				     WDI_NO_VAL, pdev->pdev_id);
3176 	} else {
3177 		if (!pdev->bpr_enable)
3178 			qdf_nbuf_free(nbuf);
3179 	}
3180 }
3181 #endif
3182 
3183 /*
3184  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
3185  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
3186  * @pdev: DP PDEV handle
3187  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
 * @ppdu_id: PPDU id of the PPDU stats message
 *
 * return: QDF_STATUS_SUCCESS if the nbuf has to be freed by the caller
3191  */
3192 static QDF_STATUS
3193 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
3194 					      qdf_nbuf_t tag_buf,
3195 					      uint32_t ppdu_id)
3196 {
3197 	uint32_t *nbuf_ptr;
3198 	uint8_t trim_size;
3199 	size_t head_size;
3200 	struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
3201 	uint32_t *msg_word;
3202 	uint32_t tsf_hdr;
3203 
3204 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
3205 	    (!pdev->bpr_enable) && (!pdev->tx_capture_enabled))
3206 		return QDF_STATUS_SUCCESS;
3207 
3208 	/*
3209 	 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
3210 	 */
3211 	msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
3212 	msg_word = msg_word + 2;
3213 	tsf_hdr = *msg_word;
3214 
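	/*
	 * pull the nbuf data up to the mgmt payload (mgmt_buf plus the
	 * reserved TLV header area) and trim the tail to the payload length
	 */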
3215 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
3216 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
3217 		      qdf_nbuf_data(tag_buf));
3218 
3219 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
3220 		return QDF_STATUS_SUCCESS;
3221 
3222 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
3223 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
3224 
3225 	if (pdev->tx_capture_enabled) {
3226 		head_size = sizeof(struct cdp_tx_mgmt_comp_info);
3227 		if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
3228 			qdf_err("Fail to get headroom h_sz %d h_avail %d\n",
3229 				head_size, qdf_nbuf_headroom(tag_buf));
3230 			qdf_assert_always(0);
3231 			return QDF_STATUS_E_NOMEM;
3232 		}
3233 		ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
3234 					qdf_nbuf_push_head(tag_buf, head_size);
3235 		qdf_assert_always(ptr_mgmt_comp_info);
3236 		ptr_mgmt_comp_info->ppdu_id = ppdu_id;
3237 		ptr_mgmt_comp_info->is_sgen_pkt = true;
3238 		ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
3239 	} else {
3240 		head_size = sizeof(ppdu_id);
3241 		nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
3242 		*nbuf_ptr = ppdu_id;
3243 	}
3244 
3245 	if (pdev->bpr_enable) {
3246 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
3247 				     tag_buf, HTT_INVALID_PEER,
3248 				     WDI_NO_VAL, pdev->pdev_id);
3249 	}
3250 
3251 	dp_deliver_mgmt_frm(pdev, tag_buf);
3252 
3253 	return QDF_STATUS_E_ALREADY;
3254 }
3255 
3256 /**
3257  * dp_validate_fix_ppdu_tlv(): Function to validate the length of PPDU
3258  *
 * If the TLV length sent as part of a PPDU TLV is less than the expected size,
 * i.e. the size of the corresponding data structure, pad the remaining bytes
 * with zeros and continue processing the TLVs
3262  *
3263  * @pdev: DP pdev handle
3264  * @tag_buf: TLV buffer
3265  * @tlv_expected_size: Expected size of Tag
3266  * @tlv_len: TLV length received from FW
3267  *
3268  * Return: Pointer to updated TLV
3269  */
3270 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
3271 						 uint32_t *tag_buf,
3272 						 uint16_t tlv_expected_size,
3273 						 uint16_t tlv_len)
3274 {
3275 	uint32_t *tlv_desc = tag_buf;
3276 
3277 	qdf_assert_always(tlv_len != 0);
3278 
3279 	if (tlv_len < tlv_expected_size) {
3280 		qdf_mem_zero(pdev->ppdu_tlv_buf, tlv_expected_size);
3281 		qdf_mem_copy(pdev->ppdu_tlv_buf, tag_buf, tlv_len);
3282 		tlv_desc = pdev->ppdu_tlv_buf;
3283 	}
3284 
3285 	return tlv_desc;
3286 }
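
/*
 * Illustrative example (sizes hypothetical): if the FW sends a 16-byte
 * htt_ppdu_stats_common_tlv while the host structure is 24 bytes, the
 * first 16 bytes of pdev->ppdu_tlv_buf are copied from the TLV and the
 * remaining 8 bytes stay zeroed, so fields unknown to the FW read as 0.
 */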
3287 
3288 /**
3289  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
3290  * @pdev: DP pdev handle
3291  * @tag_buf: TLV buffer
3292  * @tlv_len: length of tlv
3293  * @ppdu_info: per ppdu tlv structure
3294  *
3295  * return: void
3296  */
3297 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
3298 		uint32_t tlv_len, struct ppdu_info *ppdu_info)
3299 {
3300 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3301 	uint16_t tlv_expected_size;
3302 	uint32_t *tlv_desc;
3303 
3304 	switch (tlv_type) {
3305 	case HTT_PPDU_STATS_COMMON_TLV:
3306 		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
3307 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3308 						    tlv_expected_size, tlv_len);
3309 		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
3310 		break;
3311 	case HTT_PPDU_STATS_USR_COMMON_TLV:
3312 		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
3313 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3314 						    tlv_expected_size, tlv_len);
3315 		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
3316 						      ppdu_info);
3317 		break;
3318 	case HTT_PPDU_STATS_USR_RATE_TLV:
3319 		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
3320 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3321 						    tlv_expected_size, tlv_len);
3322 		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
3323 						    ppdu_info);
3324 		break;
3325 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
3326 		tlv_expected_size =
3327 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
3328 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3329 						    tlv_expected_size, tlv_len);
3330 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
3331 				pdev, tlv_desc, ppdu_info);
3332 		break;
3333 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
3334 		tlv_expected_size =
3335 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
3336 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3337 						    tlv_expected_size, tlv_len);
3338 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
3339 				pdev, tlv_desc, ppdu_info);
3340 		break;
3341 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
3342 		tlv_expected_size =
3343 			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
3344 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3345 						    tlv_expected_size, tlv_len);
3346 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
3347 				pdev, tlv_desc, ppdu_info);
3348 		break;
3349 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
3350 		tlv_expected_size =
3351 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
3352 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3353 						    tlv_expected_size, tlv_len);
3354 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
3355 				pdev, tlv_desc, ppdu_info);
3356 		break;
3357 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
3358 		tlv_expected_size =
3359 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
3360 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3361 						    tlv_expected_size, tlv_len);
3362 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
3363 				pdev, tlv_desc, ppdu_info);
3364 		break;
3365 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
3366 		tlv_expected_size =
3367 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
3368 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3369 						    tlv_expected_size, tlv_len);
3370 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
3371 				pdev, tlv_desc, ppdu_info);
3372 		break;
3373 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
3374 		tlv_expected_size =
3375 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
3376 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3377 						    tlv_expected_size, tlv_len);
3378 		dp_process_ppdu_stats_user_common_array_tlv(
3379 				pdev, tlv_desc, ppdu_info);
3380 		break;
3381 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
3382 		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
3383 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
3384 						    tlv_expected_size, tlv_len);
3385 		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
3386 							     ppdu_info);
3387 		break;
3388 	case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV:
3389 		dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info);
3390 		break;
3391 	default:
3392 		break;
3393 	}
3394 }
3395 
3396 #ifdef WLAN_ATF_ENABLE
3397 static void
3398 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
3399 				     struct cdp_tx_completion_ppdu *ppdu_desc,
3400 				     struct cdp_tx_completion_ppdu_user *user)
3401 {
3402 	uint32_t nss_ru_width_sum = 0;
3403 
3404 	if (!pdev || !ppdu_desc || !user)
3405 		return;
3406 
3407 	if (!pdev->dp_atf_stats_enable)
3408 		return;
3409 
3410 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA)
3411 		return;
3412 
3413 	nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
3414 	if (!nss_ru_width_sum)
3415 		nss_ru_width_sum = 1;
3416 
	/**
	 * For an SU PPDU the PHY Tx time is the same for the single user.
	 * For MU-MIMO/OFDMA the PHY Tx time is apportioned per user:
	 *     user phy tx time =
	 *           Entire PPDU duration * MU Ratio * OFDMA Ratio
	 *     MU Ratio = usr_nss / Sum_of_nss_of_all_users
	 *     OFDMA Ratio = usr_ru_width / Sum_of_ru_width_of_all_users
	 *     usr_ru_width = ru_end - ru_start + 1
	 */
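	/*
	 * Worked example (illustrative numbers only): a 1000 us MU PPDU
	 * with two users, each with nss = 1 and ru_tones = 106, gives
	 * nss_ru_width_sum = (1 + 1) * (106 + 106) = 424, so each user is
	 * attributed 1000 * 1 * 106 / 424 = 250 us of PHY Tx time.
	 */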
3426 	if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
3427 		user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us;
3428 	} else {
3429 		user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us *
3430 				user->nss * user->ru_tones) / nss_ru_width_sum;
3431 	}
3432 }
3433 #else
3434 static void
3435 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
3436 				     struct cdp_tx_completion_ppdu *ppdu_desc,
3437 				     struct cdp_tx_completion_ppdu_user *user)
3438 {
3439 }
3440 #endif
3441 
3442 /**
3443  * dp_ppdu_desc_user_stats_update(): Function to update TX user stats
3444  * @pdev: DP pdev handle
3445  * @ppdu_info: per PPDU TLV descriptor
3446  *
3447  * return: void
3448  */
3449 void
3450 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
3451 			       struct ppdu_info *ppdu_info)
3452 {
3453 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3454 	struct dp_peer *peer = NULL;
3455 	uint32_t tlv_bitmap_expected;
3456 	uint32_t tlv_bitmap_default;
3457 	uint16_t i;
3458 	uint32_t num_users;
3459 
3460 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
3461 		qdf_nbuf_data(ppdu_info->nbuf);
3462 
3463 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR)
3464 		ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
3465 
3466 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
3467 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
3468 	    pdev->tx_capture_enabled) {
3469 		if (ppdu_info->is_ampdu)
3470 			tlv_bitmap_expected =
3471 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
3472 					ppdu_info->tlv_bitmap);
3473 	}
3474 
3475 	tlv_bitmap_default = tlv_bitmap_expected;
3476 
3477 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
3478 		num_users = ppdu_desc->bar_num_users;
3479 		ppdu_desc->num_users = ppdu_desc->bar_num_users;
3480 	} else {
3481 		num_users = ppdu_desc->num_users;
3482 	}
3483 	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
3484 
3485 	for (i = 0; i < num_users; i++) {
3486 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
3487 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
3488 
3489 		peer = dp_peer_get_ref_by_id(pdev->soc,
3490 					     ppdu_desc->user[i].peer_id,
3491 					     DP_MOD_ID_TX_PPDU_STATS);
		/**
		 * Skip if the peer has already been deleted; the reference
		 * taken above keeps it alive while its TLVs are processed.
		 */
3496 		if (!peer)
3497 			continue;
3498 
		/*
		 * Different frame types (DATA, BAR, CTRL) have different
		 * expected TLV bitmaps. Apart from the ACK_BA_STATUS TLV,
		 * the FW sends TLVs in order. The ACK_BA_STATUS TLV comes
		 * from hardware and is asynchronous, so some TLV is needed
		 * to confirm that all TLVs for a PPDU have been received.
		 * Hence both SCHED_CMD_STATUS_TLV and ACK_BA_STATUS_TLV are
		 * relied upon; for failed packets no ACK_BA_STATUS_TLV
		 * arrives.
		 */
3510 		if (!(ppdu_info->tlv_bitmap &
3511 		      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) ||
3512 		    (!(ppdu_info->tlv_bitmap &
3513 		       (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
3514 		     (ppdu_desc->user[i].completion_status ==
3515 		      HTT_PPDU_STATS_USER_STATUS_OK))) {
3516 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3517 			continue;
3518 		}
3519 
		/**
		 * Update Tx stats for data frames having QoS as well as
		 * non-QoS data TIDs
		 */
3524 
3525 		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
3526 		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
3527 		     (ppdu_desc->htt_frame_type ==
3528 		      HTT_STATS_FTYPE_SGEN_QOS_NULL) ||
3529 		     ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) &&
3530 		      (ppdu_desc->num_mpdu > 1))) &&
3531 		      (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {
3532 
3533 			dp_tx_stats_update(pdev, peer,
3534 					   &ppdu_desc->user[i],
3535 					   ppdu_desc->ack_rssi);
3536 			dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
3537 		}
3538 
3539 		dp_ppdu_desc_user_phy_tx_time_update(pdev, ppdu_desc,
3540 						     &ppdu_desc->user[i]);
3541 
3542 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3543 		tlv_bitmap_expected = tlv_bitmap_default;
3544 	}
3545 }
3546 
3547 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3548 
3549 /**
3550  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
3551  * to upper layer
3552  * @pdev: DP pdev handle
3553  * @ppdu_info: per PPDU TLV descriptor
3554  *
3555  * return: void
3556  */
3557 static
3558 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
3559 			  struct ppdu_info *ppdu_info)
3560 {
3561 	struct ppdu_info *s_ppdu_info = NULL;
3562 	struct ppdu_info *ppdu_info_next = NULL;
3563 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3564 	qdf_nbuf_t nbuf;
3565 	uint32_t time_delta = 0;
3566 	bool starved = 0;
3567 	bool matched = 0;
3568 	bool recv_ack_ba_done = 0;
3569 
3570 	if (ppdu_info->tlv_bitmap &
3571 	    (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
3572 	    ppdu_info->done)
3573 		recv_ack_ba_done = 1;
3574 
3575 	pdev->last_sched_cmdid = ppdu_info->sched_cmdid;
3576 
3577 	s_ppdu_info = TAILQ_FIRST(&pdev->sched_comp_ppdu_list);
3578 
3579 	TAILQ_FOREACH_SAFE(s_ppdu_info, &pdev->sched_comp_ppdu_list,
3580 			   ppdu_info_list_elem, ppdu_info_next) {
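		/*
		 * tsf_l32 is the lower 32 bits of the TSF; if the queued
		 * entry's value is larger than the new one the counter has
		 * wrapped, so compute the delta across the wrap boundary.
		 */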
3581 		if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32)
3582 			time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) +
3583 					ppdu_info->tsf_l32;
3584 		else
3585 			time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32;
3586 
3587 		if (!s_ppdu_info->done && !recv_ack_ba_done) {
3588 			if (time_delta < MAX_SCHED_STARVE) {
3589 				qdf_err("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]",
3590 					pdev->pdev_id,
3591 					s_ppdu_info->ppdu_id,
3592 					s_ppdu_info->sched_cmdid,
3593 					s_ppdu_info->tlv_bitmap,
3594 					s_ppdu_info->tsf_l32,
3595 					s_ppdu_info->done);
3596 				break;
3597 			}
3598 			starved = 1;
3599 		}
3600 
3601 		pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid;
3602 		TAILQ_REMOVE(&pdev->sched_comp_ppdu_list, s_ppdu_info,
3603 			     ppdu_info_list_elem);
3604 		pdev->sched_comp_list_depth--;
3605 
3606 		nbuf = s_ppdu_info->nbuf;
3607 		qdf_assert_always(nbuf);
3608 		ppdu_desc = (struct cdp_tx_completion_ppdu *)
3609 				qdf_nbuf_data(nbuf);
3610 		ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap;
3611 
3612 		if (starved) {
3613 			qdf_err("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n",
3614 				ppdu_desc->frame_ctrl,
3615 				ppdu_desc->htt_frame_type,
3616 				ppdu_desc->tlv_bitmap,
3617 				ppdu_desc->user[0].completion_status);
3618 			starved = 0;
3619 		}
3620 
3621 		if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id &&
3622 		    ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid)
3623 			matched = 1;
3624 
3625 		dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info);
3626 
3627 		qdf_mem_free(s_ppdu_info);
3628 
3629 		/**
3630 		 * Deliver PPDU stats only for valid (acked) data
3631 		 * frames if sniffer mode is not enabled.
3632 		 * If sniffer mode is enabled, PPDU stats
3633 		 * for all frames including mgmt/control
3634 		 * frames should be delivered to upper layer
3635 		 */
3636 		if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3637 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3638 					     pdev->soc,
3639 					     nbuf, HTT_INVALID_PEER,
3640 					     WDI_NO_VAL,
3641 					     pdev->pdev_id);
3642 		} else {
3643 			if (ppdu_desc->num_mpdu != 0 &&
3644 			    ppdu_desc->num_users != 0 &&
3645 			    ppdu_desc->frame_ctrl &
3646 			    HTT_FRAMECTRL_DATATYPE) {
3647 				dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3648 						     pdev->soc,
3649 						     nbuf, HTT_INVALID_PEER,
3650 						     WDI_NO_VAL,
3651 						     pdev->pdev_id);
3652 			} else {
3653 				qdf_nbuf_free(nbuf);
3654 			}
3655 		}
3656 
3657 		if (matched)
3658 			break;
3659 	}
3660 	return;
3661 }
3662 
3663 #endif
3664 
3665 /**
3666  * dp_get_ppdu_desc(): Function to allocate new PPDU status
3667  * desc for new ppdu id
3668  * @pdev: DP pdev handle
3669  * @ppdu_id: PPDU unique identifier
3670  * @tlv_type: TLV type received
3671  * @tsf_l32: timestamp received along with ppdu stats indication header
 * @max_users: maximum number of users for that particular PPDU
3673  *
3674  * return: ppdu_info per ppdu tlv structure
3675  */
3676 static
3677 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
3678 				   uint8_t tlv_type, uint32_t tsf_l32,
3679 				   uint8_t max_users)
3680 {
3681 	struct ppdu_info *ppdu_info = NULL;
3682 	struct ppdu_info *s_ppdu_info = NULL;
3683 	struct ppdu_info *ppdu_info_next = NULL;
3684 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3685 	uint32_t size = 0;
3686 	struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL;
3687 	struct cdp_tx_completion_ppdu_user *tmp_user;
3688 	uint32_t time_delta;
3689 
	/*
	 * Check whether a node for this ppdu_id already exists
	 */
3693 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3694 			   ppdu_info_list_elem, ppdu_info_next) {
3695 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
3696 			if (ppdu_info->tsf_l32 > tsf_l32)
3697 				time_delta  = (MAX_TSF_32 -
3698 					       ppdu_info->tsf_l32) + tsf_l32;
3699 			else
3700 				time_delta  = tsf_l32 - ppdu_info->tsf_l32;
3701 
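			/*
			 * A matching ppdu_id whose TSF is further away than
			 * WRAP_DROP_TSF_DELTA is a stale entry from before a
			 * ppdu_id wraparound; drop it and count the event in
			 * ppdu_wrap_drop.
			 */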
3702 			if (time_delta > WRAP_DROP_TSF_DELTA) {
3703 				TAILQ_REMOVE(&pdev->ppdu_info_list,
3704 					     ppdu_info, ppdu_info_list_elem);
3705 				pdev->list_depth--;
3706 				pdev->stats.ppdu_wrap_drop++;
3707 				tmp_ppdu_desc =
3708 					(struct cdp_tx_completion_ppdu *)
3709 					qdf_nbuf_data(ppdu_info->nbuf);
3710 				tmp_user = &tmp_ppdu_desc->user[0];
3711 				QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
3712 					  QDF_TRACE_LEVEL_INFO_MED,
3713 					  "S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n",
3714 					  ppdu_info->ppdu_id,
3715 					  ppdu_info->tsf_l32,
3716 					  ppdu_info->tlv_bitmap,
3717 					  tmp_user->completion_status,
3718 					  ppdu_info->compltn_common_tlv,
3719 					  ppdu_info->ack_ba_tlv,
3720 					  ppdu_id, tsf_l32, tlv_type);
3721 				qdf_nbuf_free(ppdu_info->nbuf);
3722 				ppdu_info->nbuf = NULL;
3723 				qdf_mem_free(ppdu_info);
3724 			} else {
3725 				break;
3726 			}
3727 		}
3728 	}
3729 
	/*
	 * If this is an ACK BA TLV and it was not found in the ppdu_info
	 * list, also search the sched completion ppdu list
	 */
3734 	if (!ppdu_info &&
3735 	    tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) {
3736 		TAILQ_FOREACH(s_ppdu_info,
3737 			      &pdev->sched_comp_ppdu_list,
3738 			      ppdu_info_list_elem) {
3739 			if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) {
3740 				if (s_ppdu_info->tsf_l32 > tsf_l32)
3741 					time_delta  = (MAX_TSF_32 -
3742 						       s_ppdu_info->tsf_l32) +
3743 							tsf_l32;
3744 				else
3745 					time_delta  = tsf_l32 -
3746 						s_ppdu_info->tsf_l32;
3747 				if (time_delta < WRAP_DROP_TSF_DELTA) {
3748 					ppdu_info = s_ppdu_info;
3749 					break;
3750 				}
3751 			} else {
				/*
				 * ACK BA STATUS TLVs arrive in sequential
				 * order. If one is received for a later PPDU
				 * while an earlier PPDU is still waiting for
				 * its own, then per FW behavior that TLV will
				 * never arrive, so the earlier ppdu_info can
				 * be marked done.
				 */
3760 				if (s_ppdu_info)
3761 					s_ppdu_info->done = 1;
3762 			}
3763 		}
3764 	}
3765 
3766 	if (ppdu_info) {
3767 		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
			/**
			 * If a tlv_type that has already been processed for
			 * this PPDU arrives again, a new PPDU with the same
			 * ppdu_id has started, so flush the older one. Note
			 * that for MU-MIMO and OFDMA a single PPDU carries
			 * the same TLV types for multiple users; the TLV
			 * bitmap is used to tell SU from MU-MIMO/OFDMA.
			 */
3776 			if (!(ppdu_info->tlv_bitmap &
3777 			    (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
3778 				return ppdu_info;
3779 
3780 			ppdu_desc = (struct cdp_tx_completion_ppdu *)
3781 				qdf_nbuf_data(ppdu_info->nbuf);
3782 
			/**
			 * Apart from the ACK BA STATUS TLV, all TLVs arrive
			 * in order, so the ppdu_info can be delivered now
			 * unless this is an ACK BA STATUS TLV for an MU BAR
			 * frame, which must keep accumulating.
			 */
3788 			if ((tlv_type ==
3789 			     HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
3790 			    (ppdu_desc->htt_frame_type ==
3791 			     HTT_STATS_FTYPE_SGEN_MU_BAR))
3792 				return ppdu_info;
3793 
3794 			dp_ppdu_desc_deliver(pdev, ppdu_info);
3795 		} else {
3796 			return ppdu_info;
3797 		}
3798 	}
3799 
3800 	/**
3801 	 * Flush the head ppdu descriptor if ppdu desc list reaches max
3802 	 * threshold
3803 	 */
3804 	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
3805 		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
3806 		TAILQ_REMOVE(&pdev->ppdu_info_list,
3807 			     ppdu_info, ppdu_info_list_elem);
3808 		pdev->list_depth--;
3809 		pdev->stats.ppdu_drop++;
3810 		qdf_nbuf_free(ppdu_info->nbuf);
3811 		ppdu_info->nbuf = NULL;
3812 		qdf_mem_free(ppdu_info);
3813 	}
3814 
3815 	size = sizeof(struct cdp_tx_completion_ppdu) +
3816 			(max_users * sizeof(struct cdp_tx_completion_ppdu_user));
3817 
3818 	/*
3819 	 * Allocate new ppdu_info node
3820 	 */
3821 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
3822 	if (!ppdu_info)
3823 		return NULL;
3824 
	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size,
					 0, 4, TRUE);
3827 	if (!ppdu_info->nbuf) {
3828 		qdf_mem_free(ppdu_info);
3829 		return NULL;
3830 	}
3831 
3832 	ppdu_info->ppdu_desc =
3833 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3834 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size);
3835 
3836 	if (qdf_nbuf_put_tail(ppdu_info->nbuf, size) == NULL) {
3837 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3838 				"No tailroom for HTT PPDU");
3839 		qdf_nbuf_free(ppdu_info->nbuf);
3840 		ppdu_info->nbuf = NULL;
3841 		ppdu_info->last_user = 0;
3842 		qdf_mem_free(ppdu_info);
3843 		return NULL;
3844 	}
3845 
3846 	ppdu_info->ppdu_desc->max_users = max_users;
3847 	ppdu_info->tsf_l32 = tsf_l32;
	/**
	 * No lock is needed because all PPDU TLVs are processed in the
	 * same context, and this list is updated in that same context
	 */
3852 	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
3853 			ppdu_info_list_elem);
3854 	pdev->list_depth++;
3855 	return ppdu_info;
3856 }
3857 
3858 /**
3859  * dp_htt_process_tlv(): Function to process each PPDU TLVs
3860  * @pdev: DP pdev handle
3861  * @htt_t2h_msg: HTT target to host message
3862  *
3863  * return: ppdu_info per ppdu tlv structure
3864  */
3865 
3866 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
3867 		qdf_nbuf_t htt_t2h_msg)
3868 {
3869 	uint32_t length;
3870 	uint32_t ppdu_id;
3871 	uint8_t tlv_type;
3872 	uint32_t tlv_length, tlv_bitmap_expected;
3873 	uint8_t *tlv_buf;
3874 	struct ppdu_info *ppdu_info = NULL;
3875 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3876 	uint8_t max_users = CDP_MU_MAX_USERS;
3877 	uint32_t tsf_l32;
3878 
3879 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
3880 
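	/*
	 * Parse the htt_t2h_ppdu_stats_ind_hdr_t header: the first word
	 * carries the payload size, the second the ppdu_id, the third the
	 * lower 32 bits of the TSF; the TLV stream starts two words later.
	 */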
3881 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
3882 
3883 	msg_word = msg_word + 1;
3884 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
3885 
3886 	msg_word = msg_word + 1;
3887 	tsf_l32 = (uint32_t)(*msg_word);
3888 
3889 	msg_word = msg_word + 2;
3890 	while (length > 0) {
3891 		tlv_buf = (uint8_t *)msg_word;
3892 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
3893 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
3894 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
3895 			pdev->stats.ppdu_stats_counter[tlv_type]++;
3896 
3897 		if (tlv_length == 0)
3898 			break;
3899 
3900 		tlv_length += HTT_TLV_HDR_LEN;
3901 
3902 		/**
3903 		 * Not allocating separate ppdu descriptor for MGMT Payload
3904 		 * TLV as this is sent as separate WDI indication and it
3905 		 * doesn't contain any ppdu information
3906 		 */
3907 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
3908 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
3909 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
3910 			pdev->mgmtctrl_frm_info.mgmt_buf_len =
3911 				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
3912 						(*(msg_word + 1));
3913 			msg_word =
3914 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
3915 			length -= (tlv_length);
3916 			continue;
3917 		}
3918 
		/*
		 * Retrieve max_users from the USERS_INFO TLV; a
		 * COMPLTN_FLUSH TLV has exactly one user; otherwise
		 * default to CDP_MU_MAX_USERS.
		 */
3924 		if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) {
3925 			max_users =
3926 				HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1));
3927 		} else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) {
3928 			max_users = 1;
3929 		}
3930 
3931 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type,
3932 					     tsf_l32, max_users);
3933 		if (!ppdu_info)
3934 			return NULL;
3935 
3936 		ppdu_info->ppdu_desc->bss_color =
3937 			pdev->rx_mon_recv_status.bsscolor;
3938 
3939 		ppdu_info->ppdu_id = ppdu_id;
3940 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
3941 
3942 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
3943 
3944 		/**
3945 		 * Increment pdev level tlv count to monitor
3946 		 * missing TLVs
3947 		 */
3948 		pdev->tlv_count++;
3949 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
3950 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
3951 		length -= (tlv_length);
3952 	}
3953 
3954 	if (!ppdu_info)
3955 		return NULL;
3956 
3957 	pdev->last_ppdu_id = ppdu_id;
3958 
3959 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
3960 
3961 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
3962 	    pdev->tx_capture_enabled) {
3963 		if (ppdu_info->is_ampdu)
3964 			tlv_bitmap_expected =
3965 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
3966 					ppdu_info->tlv_bitmap);
3967 	}
3968 
3969 	ppdu_desc = ppdu_info->ppdu_desc;
3970 
3971 	if (!ppdu_desc)
3972 		return NULL;
3973 
3974 	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
3975 	    HTT_PPDU_STATS_USER_STATUS_OK) {
3976 		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
3977 	}
3978 
	/*
	 * For frame types DATA and BAR, stats are updated per MSDU.
	 * Successful MSDU and MPDU counts are populated from the ACK BA
	 * STATUS TLV, which arrives out of order; the successful MPDU count
	 * is also populated from the COMPLTN COMMON TLV, which arrives in
	 * order. For every ppdu_info the successful MPDU counts from both
	 * TLVs are stored and compared before delivery, to confirm the ACK
	 * BA STATUS TLV was received. Some self-generated frames never get
	 * an ACK BA STATUS TLV, so there is no need to wait for one.
	 */
3989 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
3990 	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
		/*
		 * Most of the time a BAR frame carries a duplicate
		 * ACK BA STATUS TLV
		 */
3995 		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
3996 		    (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))
3997 			return NULL;
		/*
		 * For data frames the COMPLTN COMMON TLV must match the
		 * ACK BA STATUS TLV and the completion status. Only the
		 * first user is checked because in OFDMA the completion is
		 * seen at the next MU BAR frame, while in MIMO only the
		 * first user's completion is immediate.
		 */
4004 		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
4005 		    (ppdu_desc->user[0].completion_status == 0 &&
4006 		     (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)))
4007 			return NULL;
4008 	}
4009 
	/**
	 * Once all the TLVs for a given PPDU have been processed,
	 * return the PPDU status to be delivered to the higher layer.
	 * tlv_bitmap_expected cannot cover every frame type, but the
	 * SCHED CMD STATUS TLV is the last TLV the FW sends for a PPDU
	 * and, apart from the ACK BA TLV, the FW sends TLVs in sequential
	 * order; the flush TLV arrives separately.
	 */
4018 	if ((ppdu_info->tlv_bitmap != 0 &&
4019 	     (ppdu_info->tlv_bitmap &
4020 	      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) ||
4021 	    (ppdu_info->tlv_bitmap &
4022 	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) {
4023 		ppdu_info->done = 1;
4024 		return ppdu_info;
4025 	}
4026 
4027 	return NULL;
4028 }
4029 #endif /* FEATURE_PERPKT_INFO */
4030 
4031 /**
4032  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
4033  * @soc: DP SOC handle
4034  * @pdev_id: pdev id
4035  * @htt_t2h_msg: HTT message nbuf
4036  *
 * return: true if the caller should free the buffer
4038  */
4039 #if defined(WDI_EVENT_ENABLE)
4040 #ifdef FEATURE_PERPKT_INFO
4041 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
4042 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
4043 {
	struct dp_pdev *pdev;
4045 	struct ppdu_info *ppdu_info = NULL;
4046 	bool free_buf = true;
4047 
4048 	if (pdev_id >= MAX_PDEV_CNT)
4049 		return true;
4050 
4051 	pdev = soc->pdev_list[pdev_id];
4052 	if (!pdev)
4053 		return true;
4054 
4055 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
4056 	    !pdev->mcopy_mode && !pdev->bpr_enable)
4057 		return free_buf;
4058 
4059 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
4060 
4061 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
4062 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
4063 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
4064 		    QDF_STATUS_SUCCESS)
4065 			free_buf = false;
4066 	}
4067 
4068 	if (ppdu_info)
4069 		dp_ppdu_desc_deliver(pdev, ppdu_info);
4070 
4071 	pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
4072 	pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
4073 	pdev->mgmtctrl_frm_info.ppdu_id = 0;
4074 
4075 	return free_buf;
4076 }
4077 #else
4078 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
4079 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
4080 {
4081 	return true;
4082 }
4083 #endif
4084 #endif
4085 
4086 /**
4087  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
4088  * @soc: DP SOC handle
4089  * @htt_t2h_msg: HTT message nbuf
4090  *
4091  * return:void
4092  */
4093 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
4094 		qdf_nbuf_t htt_t2h_msg)
4095 {
4096 	uint8_t done;
4097 	qdf_nbuf_t msg_copy;
4098 	uint32_t *msg_word;
4099 
4100 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
4101 	msg_word = msg_word + 3;
4102 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
4103 
4104 	/*
4105 	 * HTT EXT stats response comes as stream of TLVs which span over
4106 	 * multiple T2H messages.
4107 	 * The first message will carry length of the response.
4108 	 * For rest of the messages length will be zero.
4109 	 *
4110 	 * Clone the T2H message buffer and store it in a list to process
4111 	 * it later.
4112 	 *
	 * The original T2H message buffer gets freed in the T2H HTT event
	 * handler
4115 	 */
4116 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
4117 
4118 	if (!msg_copy) {
4119 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				"T2H message clone failed for HTT EXT STATS");
4121 		goto error;
4122 	}
4123 
4124 	qdf_spin_lock_bh(&soc->htt_stats.lock);
4125 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
4126 	/*
4127 	 * Done bit signifies that this is the last T2H buffer in the stream of
4128 	 * HTT EXT STATS message
4129 	 */
4130 	if (done) {
4131 		soc->htt_stats.num_stats++;
4132 		qdf_sched_work(0, &soc->htt_stats.work);
4133 	}
4134 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
4135 
4136 	return;
4137 
4138 error:
4139 	qdf_spin_lock_bh(&soc->htt_stats.lock);
4140 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
4141 			!= NULL) {
4142 		qdf_nbuf_free(msg_copy);
4143 	}
4144 	soc->htt_stats.num_stats = 0;
4145 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
}
4149 
4150 /*
4151  * htt_soc_attach_target() - SOC level HTT setup
4152  * @htt_soc:	HTT SOC handle
4153  *
4154  * Return: 0 on success; error code on failure
4155  */
4156 int htt_soc_attach_target(struct htt_soc *htt_soc)
4157 {
	return htt_h2t_ver_req_msg(htt_soc);
4161 }
4162 
4163 void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
4164 {
4165 	htt_soc->htc_soc = htc_soc;
4166 }
4167 
4168 HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
4169 {
4170 	return htt_soc->htc_soc;
4171 }
4172 
4173 struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
4174 {
4175 	int i;
4176 	int j;
4177 	int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
4178 	struct htt_soc *htt_soc = NULL;
4179 
4180 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
4181 	if (!htt_soc) {
4182 		dp_err("HTT attach failed");
4183 		return NULL;
4184 	}
4185 
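	/*
	 * Allocate per-pdev timestamp arrays for the UMAC and LMAC SW
	 * rings; time_allow_print() uses them to rate-limit backpressure
	 * logs, with -1 marking an entry that has not been stamped yet.
	 */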
4186 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4187 		htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
4188 		if (!htt_soc->pdevid_tt[i].umac_ttt)
4189 			break;
4190 		qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
4191 		htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
4192 		if (!htt_soc->pdevid_tt[i].lmac_ttt) {
4193 			qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
4194 			break;
4195 		}
4196 		qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
4197 	}
4198 	if (i != MAX_PDEV_CNT) {
4199 		for (j = 0; j < i; j++) {
4200 			qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt);
4201 			qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt);
4202 		}
4203 		qdf_mem_free(htt_soc);
4204 		return NULL;
4205 	}
4206 
4207 	htt_soc->dp_soc = soc;
4208 	htt_soc->htc_soc = htc_handle;
4209 	HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
4210 
4211 	return htt_soc;
4212 }
4213 
4214 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
4215 /*
4216  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
4217  * @htt_soc:	 HTT SOC handle
 * @soc:	 HTT SOC handle
4219  * @htt_t2h_msg: HTT msg nbuf
4220  *
4221  * Return: True if buffer should be freed by caller.
4222  */
4223 static bool
4224 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
4225 				uint32_t *msg_word,
4226 				qdf_nbuf_t htt_t2h_msg)
4227 {
4228 	u_int8_t pdev_id;
4229 	u_int8_t target_pdev_id;
4230 	bool free_buf;
4231 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
4232 	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
4233 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4234 							 target_pdev_id);
4235 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
4236 			     htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
4237 			     pdev_id);
4238 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
4239 					      htt_t2h_msg);
4240 	return free_buf;
4241 }
4242 #else
4243 static bool
4244 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
4245 				uint32_t *msg_word,
4246 				qdf_nbuf_t htt_t2h_msg)
4247 {
4248 	return true;
4249 }
4250 #endif
4251 
4252 #if defined(WDI_EVENT_ENABLE) && \
4253 	!defined(REMOVE_PKT_LOG)
4254 /*
4255  * dp_pktlog_msg_handler() - Pktlog msg handler
 * @soc:	 HTT SOC handle
4257  * @msg_word:    Pointer to payload
4258  *
4259  * Return: None
4260  */
4261 static void
4262 dp_pktlog_msg_handler(struct htt_soc *soc,
4263 		      uint32_t *msg_word)
4264 {
4265 	uint8_t pdev_id;
4266 	uint8_t target_pdev_id;
4267 	uint32_t *pl_hdr;
4268 
4269 	target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
4270 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4271 							 target_pdev_id);
4272 	pl_hdr = (msg_word + 1);
4273 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
4274 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
4275 		pdev_id);
4276 }
4277 #else
4278 static void
4279 dp_pktlog_msg_handler(struct htt_soc *soc,
4280 		      uint32_t *msg_word)
4281 {
4282 }
4283 #endif
4284 
4285 /*
4286  * time_allow_print() - time allow print
 * @htt_ring_tt:	ring_id-indexed array of timestamps
4288  * @ring_id:		ring_id (index)
4289  *
4290  * Return: 1 for successfully saving timestamp in array
4291  *	and 0 for timestamp falling within 2 seconds after last one
4292  */
4293 static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
4294 {
4295 	unsigned long tstamp;
4296 	unsigned long delta;
4297 
4298 	tstamp = qdf_get_system_timestamp();
4299 
4300 	if (!htt_ring_tt)
		return 0; /* unable to print backpressure messages */
4302 
4303 	if (htt_ring_tt[ring_id] == -1) {
4304 		htt_ring_tt[ring_id] = tstamp;
4305 		return 1;
4306 	}
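	/*
	 * qdf_get_system_timestamp() is in milliseconds, so this
	 * rate-limits the backpressure prints to one per ring every
	 * 2 seconds.
	 */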
4307 	delta = tstamp - htt_ring_tt[ring_id];
4308 	if (delta >= 2000) {
4309 		htt_ring_tt[ring_id] = tstamp;
4310 		return 1;
4311 	}
4312 
4313 	return 0;
4314 }
4315 
4316 static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
4317 			       u_int8_t pdev_id, u_int8_t ring_id,
4318 			       u_int16_t hp_idx, u_int16_t tp_idx,
4319 			       u_int32_t bkp_time, char *ring_stype)
4320 {
4321 	dp_alert("msg_type: %d pdev_id: %d ring_type: %s ",
4322 		 msg_type, pdev_id, ring_stype);
4323 	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
4324 		 ring_id, hp_idx, tp_idx, bkp_time);
4325 }
4326 
4327 /*
4328  * dp_htt_bkp_event_alert() - htt backpressure event alert
 * @msg_word:	pointer to the htt message payload
 * @soc:	HTT SOC handle
 *
 * Return: none
4333  */
4334 static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
4335 {
4336 	u_int8_t ring_type;
4337 	u_int8_t pdev_id;
4338 	uint8_t target_pdev_id;
4339 	u_int8_t ring_id;
4340 	u_int16_t hp_idx;
4341 	u_int16_t tp_idx;
4342 	u_int32_t bkp_time;
4343 	enum htt_t2h_msg_type msg_type;
4344 	struct dp_soc *dpsoc;
4345 	struct dp_pdev *pdev;
4346 	struct dp_htt_timestamp *radio_tt;
4347 
4348 	if (!soc)
4349 		return;
4350 
4351 	dpsoc = (struct dp_soc *)soc->dp_soc;
4352 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
4353 	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
4354 	target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
4355 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
4356 							 target_pdev_id);
4357 	if (pdev_id >= MAX_PDEV_CNT) {
4358 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
4359 			  "pdev id %d is invalid", pdev_id);
4360 		return;
4361 	}
4362 
4363 	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
4364 	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
4365 	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
4366 	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
4367 	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
4368 	radio_tt = &soc->pdevid_tt[pdev_id];
4369 
4370 	switch (ring_type) {
4371 	case HTT_SW_RING_TYPE_UMAC:
4372 		if (!time_allow_print(radio_tt->umac_ttt, ring_id))
4373 			return;
4374 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4375 				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
4376 	break;
4377 	case HTT_SW_RING_TYPE_LMAC:
4378 		if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
4379 			return;
4380 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4381 				   bkp_time, "HTT_SW_RING_TYPE_LMAC");
4382 	break;
4383 	default:
4384 		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
4385 				   bkp_time, "UNKNOWN");
4386 	break;
4387 	}
4388 
4389 	dp_print_ring_stats(pdev);
4390 	dp_print_napi_stats(pdev->soc);
4391 }
4392 
4393 /*
4394  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
4395  * @context:	Opaque context (HTT SOC handle)
4396  * @pkt:	HTC packet
4397  */
4398 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
4399 {
4400 	struct htt_soc *soc = (struct htt_soc *) context;
4401 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
4402 	u_int32_t *msg_word;
4403 	enum htt_t2h_msg_type msg_type;
4404 	bool free_buf = true;
4405 
4406 	/* check for successful message reception */
4407 	if (pkt->Status != QDF_STATUS_SUCCESS) {
4408 		if (pkt->Status != QDF_STATUS_E_CANCELED)
4409 			soc->stats.htc_err_cnt++;
4410 
4411 		qdf_nbuf_free(htt_t2h_msg);
4412 		return;
4413 	}
4414 
4415 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
4416 
4417 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
4418 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
4419 	htt_event_record(soc->htt_logger_handle,
4420 			 msg_type, (uint8_t *)msg_word);
4421 	switch (msg_type) {
4422 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
4423 	{
4424 		dp_htt_bkp_event_alert(msg_word, soc);
4425 		break;
4426 	}
4427 	case HTT_T2H_MSG_TYPE_PEER_MAP:
4428 		{
4429 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4430 			u_int8_t *peer_mac_addr;
4431 			u_int16_t peer_id;
4432 			u_int16_t hw_peer_id;
4433 			u_int8_t vdev_id;
4434 			u_int8_t is_wds;
4435 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
4436 
4437 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
4438 			hw_peer_id =
4439 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
4440 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
4441 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
4442 				(u_int8_t *) (msg_word+1),
4443 				&mac_addr_deswizzle_buf[0]);
4444 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4445 				QDF_TRACE_LEVEL_INFO,
				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d",
4447 				peer_id, vdev_id);
4448 
4449 			/*
4450 			 * check if peer already exists for this peer_id, if so
4451 			 * this peer map event is in response for a wds peer add
4452 			 * wmi command sent during wds source port learning.
4453 			 * in this case just add the ast entry to the existing
4454 			 * peer ast_list.
4455 			 */
4456 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
4457 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
4458 					       vdev_id, peer_mac_addr, 0,
4459 					       is_wds);
4460 			break;
4461 		}
4462 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
4463 		{
4464 			u_int16_t peer_id;
4465 			u_int8_t vdev_id;
4466 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
4467 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
4468 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
4469 
4470 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
4471 						 vdev_id, mac_addr, 0,
4472 						 DP_PEER_WDS_COUNT_INVALID);
4473 			break;
4474 		}
4475 	case HTT_T2H_MSG_TYPE_SEC_IND:
4476 		{
4477 			u_int16_t peer_id;
4478 			enum cdp_sec_type sec_type;
4479 			int is_unicast;
4480 
4481 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
4482 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
4483 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
4484 			/* point to the first part of the Michael key */
4485 			msg_word++;
4486 			dp_rx_sec_ind_handler(
4487 				soc->dp_soc, peer_id, sec_type, is_unicast,
4488 				msg_word, msg_word + 2);
4489 			break;
4490 		}
4491 
4492 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
4493 		{
4494 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
4495 							     htt_t2h_msg);
4496 			break;
4497 		}
4498 
4499 	case HTT_T2H_MSG_TYPE_PKTLOG:
4500 		{
4501 			dp_pktlog_msg_handler(soc, msg_word);
4502 			break;
4503 		}
4504 
4505 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
4506 		{
4507 			htc_pm_runtime_put(soc->htc_soc);
4508 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
4509 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
4510 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4511 				"target uses HTT version %d.%d; host uses %d.%d",
4512 				soc->tgt_ver.major, soc->tgt_ver.minor,
4513 				HTT_CURRENT_VERSION_MAJOR,
4514 				HTT_CURRENT_VERSION_MINOR);
4515 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
4516 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4517 					QDF_TRACE_LEVEL_WARN,
4518 					"*** Incompatible host/target HTT versions!");
4519 			}
4520 			/* abort if the target is incompatible with the host */
4521 			qdf_assert(soc->tgt_ver.major ==
4522 				HTT_CURRENT_VERSION_MAJOR);
4523 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
4524 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4525 					QDF_TRACE_LEVEL_INFO_LOW,
4526 					"*** Warning: host/target HTT versions"
4527 					" are different, though compatible!");
4528 			}
4529 			break;
4530 		}
4531 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
4532 		{
4533 			uint16_t peer_id;
4534 			uint8_t tid;
4535 			uint8_t win_sz;
4536 			uint16_t status;
4537 			struct dp_peer *peer;
4538 
4539 			/*
4540 			 * Update REO Queue Desc with new values
4541 			 */
4542 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
4543 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
4544 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
4545 			peer = dp_peer_get_ref_by_id(soc->dp_soc, peer_id,
4546 						     DP_MOD_ID_HTT);
4547 
4548 			/*
4549 			 * Window size needs to be incremented by 1
4550 			 * since fw needs to represent a value of 256
4551 			 * using just 8 bits
4552 			 */
4553 			if (peer) {
4554 				status = dp_addba_requestprocess_wifi3(
4555 					(struct cdp_soc_t *)soc->dp_soc,
4556 					peer->mac_addr.raw, peer->vdev->vdev_id,
4557 					0, tid, 0, win_sz + 1, 0xffff);
4558 
				/*
				 * If PEER_LOCK_REF_PROTECT is enabled, drop
				 * the reference taken by dp_peer_get_ref_by_id
				 */
4563 				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4564 
4565 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4566 					QDF_TRACE_LEVEL_INFO,
4567 					FL("PeerID %d BAW %d TID %d stat %d"),
4568 					peer_id, win_sz, tid, status);
4569 
4570 			} else {
4571 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4572 					QDF_TRACE_LEVEL_ERROR,
4573 					FL("Peer not found peer id %d"),
4574 					peer_id);
4575 			}
4576 			break;
4577 		}
4578 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
4579 		{
4580 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
4581 			break;
4582 		}
4583 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
4584 		{
4585 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4586 			u_int8_t *peer_mac_addr;
4587 			u_int16_t peer_id;
4588 			u_int16_t hw_peer_id;
4589 			u_int8_t vdev_id;
4590 			bool is_wds;
4591 			u_int16_t ast_hash;
4592 			struct dp_ast_flow_override_info ast_flow_info;
4593 
4594 			qdf_mem_set(&ast_flow_info, 0,
4595 					    sizeof(struct dp_ast_flow_override_info));
4596 
4597 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
4598 			hw_peer_id =
4599 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
4600 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
4601 			peer_mac_addr =
4602 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
4603 						   &mac_addr_deswizzle_buf[0]);
4604 			is_wds =
4605 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
4606 			ast_hash =
4607 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
			/*
			 * Update the 4 ast_index values per peer, the AST
			 * valid mask and the TID flow valid mask. The AST
			 * valid mask is a 3-bit field corresponding to
			 * ast_index[3:1]; ast_index 0 is always valid.
			 */
4614 			ast_flow_info.ast_valid_mask =
4615 			HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
4616 			ast_flow_info.ast_idx[0] = hw_peer_id;
4617 			ast_flow_info.ast_flow_mask[0] =
4618 			HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
4619 			ast_flow_info.ast_idx[1] =
4620 			HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
4621 			ast_flow_info.ast_flow_mask[1] =
4622 			HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
4623 			ast_flow_info.ast_idx[2] =
4624 			HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
4625 			ast_flow_info.ast_flow_mask[2] =
4626 			HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
4627 			ast_flow_info.ast_idx[3] =
4628 			HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
4629 			ast_flow_info.ast_flow_mask[3] =
4630 			HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
			/*
			 * The TID valid mask is applicable only to HI and
			 * LOW priority flows; each mask is an 8-bit field
			 * corresponding to TID[7:0].
			 */
4637 			ast_flow_info.tid_valid_low_pri_mask =
4638 			HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
4639 			ast_flow_info.tid_valid_hi_pri_mask =
4640 			HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));
4641 
4642 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4643 				  QDF_TRACE_LEVEL_INFO,
				  "HTT_T2H_MSG_TYPE_PEER_MAP_V2 msg for peer id %d vdev id %d",
4645 				  peer_id, vdev_id);
4646 
4647 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
4648 					       hw_peer_id, vdev_id,
4649 					       peer_mac_addr, ast_hash,
4650 					       is_wds);
4651 
4652 			/*
4653 			 * Update ast indexes for flow override support
4654 			 * Applicable only for non wds peers
4655 			 */
4656 			dp_peer_ast_index_flow_queue_map_create(
4657 					    soc->dp_soc, is_wds,
4658 					    peer_id, peer_mac_addr,
4659 					    &ast_flow_info);
4660 
4661 			break;
4662 		}
4663 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
4664 		{
4665 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
4666 			u_int8_t *mac_addr;
4667 			u_int16_t peer_id;
4668 			u_int8_t vdev_id;
4669 			u_int8_t is_wds;
4670 			u_int32_t free_wds_count;
4671 
4672 			peer_id =
4673 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
4674 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
4675 			mac_addr =
4676 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
4677 						   &mac_addr_deswizzle_buf[0]);
4678 			is_wds =
4679 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
4680 			free_wds_count =
4681 			HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));
4682 
4683 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4684 				  QDF_TRACE_LEVEL_INFO,
				  "HTT_T2H_MSG_TYPE_PEER_UNMAP_V2 msg for peer id %d vdev id %d",
4686 				  peer_id, vdev_id);
4687 
4688 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
4689 						 vdev_id, mac_addr,
4690 						 is_wds, free_wds_count);
4691 			break;
4692 		}
4693 	case HTT_T2H_MSG_TYPE_RX_DELBA:
4694 		{
4695 			uint16_t peer_id;
4696 			uint8_t tid;
4697 			uint8_t win_sz;
4698 			QDF_STATUS status;
4699 
4700 			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
4701 			tid = HTT_RX_DELBA_TID_GET(*msg_word);
4702 			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);
4703 
4704 			status = dp_rx_delba_ind_handler(
4705 				soc->dp_soc,
4706 				peer_id, tid, win_sz);
4707 
4708 			QDF_TRACE(QDF_MODULE_ID_TXRX,
4709 				  QDF_TRACE_LEVEL_INFO,
4710 				  FL("DELBA PeerID %d BAW %d TID %d stat %d"),
4711 				  peer_id, win_sz, tid, status);
4712 			break;
4713 		}
4714 	default:
4715 		break;
	}
4717 
4718 	/* Free the indication buffer */
4719 	if (free_buf)
4720 		qdf_nbuf_free(htt_t2h_msg);
4721 }
4722 
4723 /*
4724  * dp_htt_h2t_full() - Send full handler (called from HTC)
4725  * @context:	Opaque context (HTT SOC handle)
4726  * @pkt:	HTC packet
4727  *
4728  * Return: enum htc_send_full_action
4729  */
4730 static enum htc_send_full_action
4731 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
4732 {
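	/*
	 * Returning HTC_SEND_FULL_KEEP keeps queuing HTT messages even when
	 * the send queue exceeds MaxSendQueueDepth, rather than dropping
	 * them.
	 */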
4733 	return HTC_SEND_FULL_KEEP;
4734 }
4735 
4736 /*
4737  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
4738  * @context:	Opaque context (HTT SOC handle)
4739  * @nbuf:	nbuf containing T2H message
4740  * @pipe_id:	HIF pipe ID
4741  *
4742  * Return: QDF_STATUS
4743  *
4744  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
4745  * will be used for packet log and other high-priority HTT messages. Proper
4746  * HTC connection to be added later once required FW changes are available
4747  */
4748 static QDF_STATUS
4749 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
4750 {
4751 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
4752 	HTC_PACKET htc_pkt;
4753 
4754 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
4755 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
4756 	htc_pkt.Status = QDF_STATUS_SUCCESS;
4757 	htc_pkt.pPktContext = (void *)nbuf;
4758 	dp_htt_t2h_msg_handler(context, &htc_pkt);
4759 
4760 	return rc;
4761 }
4762 
4763 /*
4764  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
4765  * @htt_soc:	HTT SOC handle
4766  *
4767  * Return: QDF_STATUS
4768  */
4769 static QDF_STATUS
4770 htt_htc_soc_attach(struct htt_soc *soc)
4771 {
4772 	struct htc_service_connect_req connect;
4773 	struct htc_service_connect_resp response;
4774 	QDF_STATUS status;
4775 	struct dp_soc *dpsoc = soc->dp_soc;
4776 
4777 	qdf_mem_zero(&connect, sizeof(connect));
4778 	qdf_mem_zero(&response, sizeof(response));
4779 
4780 	connect.pMetaData = NULL;
4781 	connect.MetaDataLength = 0;
4782 	connect.EpCallbacks.pContext = soc;
4783 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
4784 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
4785 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
4786 
4787 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
4788 	connect.EpCallbacks.EpRecvRefill = NULL;
4789 
4790 	/* N/A, fill is done by HIF */
4791 	connect.EpCallbacks.RecvRefillWaterMark = 1;
4792 
4793 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
4794 	/*
4795 	 * Specify how deep to let a queue get before htc_send_pkt will
4796 	 * call the EpSendFull function due to excessive send queue depth.
4797 	 */
4798 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
4799 
4800 	/* disable flow control for HTT data message service */
4801 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
4802 
4803 	/* connect to control service */
4804 	connect.service_id = HTT_DATA_MSG_SVC;
4805 
4806 	status = htc_connect_service(soc->htc_soc, &connect, &response);
4807 
4808 	if (status != QDF_STATUS_SUCCESS)
4809 		return status;
4810 
4811 	soc->htc_endpoint = response.Endpoint;
4812 
4813 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
4814 
4815 	htt_interface_logging_init(&soc->htt_logger_handle);
4816 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
4817 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
4818 
4819 	return QDF_STATUS_SUCCESS; /* success */
4820 }
4821 
4822 /*
4823  * htt_soc_initialize() - SOC level HTT initialization
4824  * @htt_soc: Opaque htt SOC handle
4825  * @ctrl_psoc: Opaque ctrl SOC handle
4826  * @htc_soc: SOC level HTC handle
4827  * @hal_soc: Opaque HAL SOC handle
4828  * @osdev: QDF device
4829  *
4830  * Return: HTT handle on success; NULL on failure
4831  */
4832 void *
4833 htt_soc_initialize(struct htt_soc *htt_soc,
4834 		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
4835 		   HTC_HANDLE htc_soc,
4836 		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
4837 {
4838 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
4839 
4840 	soc->osdev = osdev;
4841 	soc->ctrl_psoc = ctrl_psoc;
4842 	soc->htc_soc = htc_soc;
4843 	soc->hal_soc = hal_soc_hdl;
4844 
4845 	if (htt_htc_soc_attach(soc))
		return NULL;

	return soc;
4852 }
4853 
4854 void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
4855 {
4856 	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
4857 	htt_htc_misc_pkt_pool_free(htt_handle);
4858 	htt_htc_pkt_pool_free(htt_handle);
4859 }
4860 
4861 /*
4862  * htt_soc_htc_prealloc() - HTC memory prealloc
4863  * @htt_soc: SOC level HTT handle
4864  *
4865  * Return: QDF_STATUS_SUCCESS on Success or
4866  * QDF_STATUS_E_NOMEM on allocation failure
4867  */
4868 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
4869 {
4870 	int i;
4871 
4872 	soc->htt_htc_pkt_freelist = NULL;
4873 	/* pre-allocate some HTC_PACKET objects */
4874 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
4875 		struct dp_htt_htc_pkt_union *pkt;
4876 		pkt = qdf_mem_malloc(sizeof(*pkt));
4877 		if (!pkt)
4878 			return QDF_STATUS_E_NOMEM;
4879 
4880 		htt_htc_pkt_free(soc, &pkt->u.pkt);
4881 	}
4882 	return QDF_STATUS_SUCCESS;
4883 }
4884 
4885 /*
4886  * htt_soc_detach() - Free SOC level HTT handle
4887  * @htt_hdl: HTT SOC handle
4888  */
4889 void htt_soc_detach(struct htt_soc *htt_hdl)
4890 {
4891 	int i;
4892 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
4893 
4894 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4895 		qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
4896 		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
4897 	}
4898 
4899 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
4900 	qdf_mem_free(htt_handle);
}
4903 
4904 /**
 * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
4906  * @pdev: DP PDEV handle
4907  * @stats_type_upload_mask: stats type requested by user
4908  * @config_param_0: extra configuration parameters
4909  * @config_param_1: extra configuration parameters
4910  * @config_param_2: extra configuration parameters
 * @config_param_3: extra configuration parameters
 * @cookie_val: cookie LSBs sent along with the request
 * @cookie_msb: cookie MSBs; the low 2 bits carry the pdev_id
 * @mac_id: mac number
4913  *
4914  * return: QDF STATUS
4915  */
4916 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
4917 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
4918 		uint32_t config_param_1, uint32_t config_param_2,
4919 		uint32_t config_param_3, int cookie_val, int cookie_msb,
4920 		uint8_t mac_id)
4921 {
4922 	struct htt_soc *soc = pdev->soc->htt_handle;
4923 	struct dp_htt_htc_pkt *pkt;
4924 	qdf_nbuf_t msg;
4925 	uint32_t *msg_word;
4926 	uint8_t pdev_mask = 0;
4927 	uint8_t *htt_logger_bufp;
4928 	int mac_for_pdev;
4929 	int target_pdev_id;
4930 	QDF_STATUS status;
4931 
4932 	msg = qdf_nbuf_alloc(
4933 			soc->osdev,
4934 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
4935 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4936 
4937 	if (!msg)
4938 		return QDF_STATUS_E_NOMEM;
4939 
4940 	/*TODO:Add support for SOC stats
4941 	 * Bit 0: SOC Stats
4942 	 * Bit 1: Pdev stats for pdev id 0
4943 	 * Bit 2: Pdev stats for pdev id 1
4944 	 * Bit 3: Pdev stats for pdev id 2
4945 	 */
4946 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4947 	target_pdev_id =
4948 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
4949 
4950 	pdev_mask = 1 << target_pdev_id;
4951 
4952 	/*
4953 	 * Set the length of the message.
4954 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4955 	 * separately during the below call to qdf_nbuf_push_head.
4956 	 * The contribution from the HTC header is added separately inside HTC.
4957 	 */
4958 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
4959 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand tail for HTT_EXT_STATS");
4961 		qdf_nbuf_free(msg);
4962 		return QDF_STATUS_E_FAILURE;
4963 	}
4964 
4965 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4966 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
4967 		"config_param_1 %u\n config_param_2 %u\n"
		"config_param_3 %u\n -------------",
		__func__, __LINE__, cookie_val, config_param_0,
		config_param_1, config_param_2, config_param_3);
4971 
4972 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
4973 
4974 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4975 	htt_logger_bufp = (uint8_t *)msg_word;
4976 	*msg_word = 0;
4977 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
4978 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
4979 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
4980 
4981 	/* word 1 */
4982 	msg_word++;
4983 	*msg_word = 0;
4984 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
4985 
4986 	/* word 2 */
4987 	msg_word++;
4988 	*msg_word = 0;
4989 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
4990 
4991 	/* word 3 */
4992 	msg_word++;
4993 	*msg_word = 0;
4994 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
4995 
4996 	/* word 4 */
4997 	msg_word++;
4998 	*msg_word = 0;
4999 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
5000 
	/*
	 * word 5 - carries no host data; explicitly zero it so
	 * uninitialized buffer contents are not sent to FW (this message
	 * buffer is not memset after allocation)
	 */
	msg_word++;
	*msg_word = 0;
5005 
5006 	/* word 6 */
5007 	msg_word++;
5008 	*msg_word = 0;
5009 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
5010 
5011 	/* word 7 */
5012 	msg_word++;
5013 	*msg_word = 0;
	/*
	 * Use the lower 2 bits of cookie_msb to carry the pdev_id so the
	 * T2H stats response can be mapped back to this pdev (see
	 * HTT_PID_BIT_MASK)
	 */
	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
5016 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
5017 
5018 	pkt = htt_htc_pkt_alloc(soc);
5019 	if (!pkt) {
5020 		qdf_nbuf_free(msg);
5021 		return QDF_STATUS_E_NOMEM;
5022 	}
5023 
5024 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5025 
5026 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5027 			dp_htt_h2t_send_complete_free_netbuf,
5028 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
5029 			soc->htc_endpoint,
5030 			/* tag for FW response msg not guaranteed */
5031 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
5032 
5033 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5034 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
5035 				     htt_logger_bufp);
5036 
5037 	if (status != QDF_STATUS_SUCCESS) {
5038 		qdf_nbuf_free(msg);
5039 		htt_htc_pkt_free(soc, pkt);
5040 	}
5041 
5042 	return status;
5043 }
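
/*
 * Example invocation (illustrative sketch; the stats id, cookie values and
 * the surrounding caller are assumptions, not taken from this file):
 *
 *	status = dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PDEV_TX,
 *					   0, 0, 0, 0,
 *					   cookie_val, cookie_msb, mac_id);
 *
 * The FW echoes the cookie back in its T2H EXT_STATS_CONF response, which
 * is what lets the host match that response to this request.
 */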
5044 
/**
 * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration
 * HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @tuple_mask: tuple configuration to report 3 tuple hash value in either
 * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
 * @mac_id: MAC id number
 *
 * tuple_mask[1:0]:
 *   00 - Do not report 3 tuple hash value
 *   10 - Report 3 tuple hash value in toeplitz_2_or_4
 *   01 - Report 3 tuple hash value in flow_id_toeplitz
 *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
 *
 * return: QDF STATUS
 */
5060 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
5061 				     uint32_t tuple_mask, uint8_t mac_id)
5062 {
5063 	struct htt_soc *soc = pdev->soc->htt_handle;
5064 	struct dp_htt_htc_pkt *pkt;
5065 	qdf_nbuf_t msg;
5066 	uint32_t *msg_word;
5067 	uint8_t *htt_logger_bufp;
5068 	int mac_for_pdev;
5069 	int target_pdev_id;
5070 
5071 	msg = qdf_nbuf_alloc(
5072 			soc->osdev,
5073 			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
5074 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5075 
5076 	if (!msg)
5077 		return QDF_STATUS_E_NOMEM;
5078 
5079 	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
5080 	target_pdev_id =
5081 	dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev);
5082 
5083 	/*
5084 	 * Set the length of the message.
5085 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5086 	 * separately during the below call to qdf_nbuf_push_head.
5087 	 * The contribution from the HTC header is added separately inside HTC.
5088 	 */
5089 	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
5090 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Failed to expand tail for HTT_3TUPLE_CONFIG");
5092 		qdf_nbuf_free(msg);
5093 		return QDF_STATUS_E_FAILURE;
5094 	}
5095 
5096 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5097 		  "config_param_sent %s:%d 0x%x for target_pdev %d\n -------------",
5098 		  __func__, __LINE__, tuple_mask, target_pdev_id);
5099 
5100 	msg_word = (uint32_t *)qdf_nbuf_data(msg);
5101 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5102 	htt_logger_bufp = (uint8_t *)msg_word;
5103 
5104 	*msg_word = 0;
5105 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
5106 	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);
5107 
5108 	msg_word++;
5109 	*msg_word = 0;
5110 	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
5111 	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);
5112 
5113 	pkt = htt_htc_pkt_alloc(soc);
5114 	if (!pkt) {
5115 		qdf_nbuf_free(msg);
5116 		return QDF_STATUS_E_NOMEM;
5117 	}
5118 
5119 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5120 
5121 	SET_HTC_PACKET_INFO_TX(
5122 			&pkt->htc_pkt,
5123 			dp_htt_h2t_send_complete_free_netbuf,
5124 			qdf_nbuf_data(msg),
5125 			qdf_nbuf_len(msg),
5126 			soc->htc_endpoint,
5127 			/* tag for no FW response msg */
5128 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
5129 
5130 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5131 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
5132 			    htt_logger_bufp);
5133 
5134 	return QDF_STATUS_SUCCESS;
5135 }
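
/*
 * Example (illustrative sketch): per the tuple_mask encoding documented
 * above, 0x3 asks the target to report the 3-tuple hash in both the
 * toeplitz_2_or_4 and flow_id_toeplitz fields of the MSDU START TLV:
 *
 *	status = dp_h2t_3tuple_config_send(pdev, 0x3, mac_id);
 */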
5136 
/* This macro will be reverted once a proper HTT header defines
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the htt.h file.
 */
5140 #if defined(WDI_EVENT_ENABLE)
5141 /**
5142  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
5143  * @pdev: DP PDEV handle
5144  * @stats_type_upload_mask: stats type requested by user
5145  * @mac_id: Mac id number
5146  *
5147  * return: QDF STATUS
5148  */
5149 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
5150 		uint32_t stats_type_upload_mask, uint8_t mac_id)
5151 {
5152 	struct htt_soc *soc = pdev->soc->htt_handle;
5153 	struct dp_htt_htc_pkt *pkt;
5154 	qdf_nbuf_t msg;
5155 	uint32_t *msg_word;
5156 	uint8_t pdev_mask;
5157 	QDF_STATUS status;
5158 
5159 	msg = qdf_nbuf_alloc(
5160 			soc->osdev,
5161 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
5162 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
5163 
5164 	if (!msg) {
5165 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Failed to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
5167 		qdf_assert(0);
5168 		return QDF_STATUS_E_NOMEM;
5169 	}
5170 
5171 	/*TODO:Add support for SOC stats
5172 	 * Bit 0: SOC Stats
5173 	 * Bit 1: Pdev stats for pdev id 0
5174 	 * Bit 2: Pdev stats for pdev id 1
5175 	 * Bit 3: Pdev stats for pdev id 2
5176 	 */
5177 	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
5178 								mac_id);
5179 
5180 	/*
5181 	 * Set the length of the message.
5182 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5183 	 * separately during the below call to qdf_nbuf_push_head.
5184 	 * The contribution from the HTC header is added separately inside HTC.
5185 	 */
5186 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
5187 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand tail for HTT_CFG_STATS");
5189 		qdf_nbuf_free(msg);
5190 		return QDF_STATUS_E_FAILURE;
5191 	}
5192 
5193 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
5194 
5195 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5196 	*msg_word = 0;
5197 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
5198 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
5199 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
5200 			stats_type_upload_mask);
5201 
5202 	pkt = htt_htc_pkt_alloc(soc);
5203 	if (!pkt) {
5204 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Failed to allocate dp_htt_htc_pkt buffer");
5206 		qdf_assert(0);
5207 		qdf_nbuf_free(msg);
5208 		return QDF_STATUS_E_NOMEM;
5209 	}
5210 
5211 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5212 
5213 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5214 			dp_htt_h2t_send_complete_free_netbuf,
5215 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
5216 			soc->htc_endpoint,
5217 			/* tag for no FW response msg */
5218 			HTC_TX_PACKET_TAG_RUNTIME_PUT);
5219 
5220 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5221 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
5222 				     (uint8_t *)msg_word);
5223 
5224 	if (status != QDF_STATUS_SUCCESS) {
5225 		qdf_nbuf_free(msg);
5226 		htt_htc_pkt_free(soc, pkt);
5227 	}
5228 
5229 	return status;
5230 }
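
/*
 * Example (illustrative sketch; the bitmap value is hypothetical): ask the
 * FW to stream the selected PPDU stats TLV types for this mac:
 *
 *	status = dp_h2t_cfg_stats_msg_send(pdev, 0xff, mac_id);
 */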
5231 #endif
5232 
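/**
 * dp_peer_update_inactive_time() - update peer inactive time from FW
 * peer stats TLVs
 * @pdev: DP pdev handle
 * @tag_type: HTT TLV tag type
 * @tag_buf: TLV buffer
 *
 * Caches the sw_peer_id from the peer-details TLV, then uses it on the
 * peer-common-stats TLV to look the peer up and record the FW-reported
 * inactive time.
 *
 * Return: None
 */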
5233 void
5234 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
5235 			     uint32_t *tag_buf)
5236 {
	struct dp_peer *peer = NULL;

	switch (tag_type) {
5239 	case HTT_STATS_PEER_DETAILS_TAG:
5240 	{
5241 		htt_peer_details_tlv *dp_stats_buf =
5242 			(htt_peer_details_tlv *)tag_buf;
5243 
5244 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
5245 	}
5246 	break;
5247 	case HTT_STATS_PEER_STATS_CMN_TAG:
5248 	{
5249 		htt_peer_stats_cmn_tlv *dp_stats_buf =
5250 			(htt_peer_stats_cmn_tlv *)tag_buf;
5251 
5252 		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
5253 					     DP_MOD_ID_HTT);
5254 
5255 		if (peer && !peer->bss_peer) {
5256 			peer->stats.tx.inactive_time =
5257 				dp_stats_buf->inactive_time;
5258 			qdf_event_set(&pdev->fw_peer_stats_event);
5259 		}
5260 		if (peer)
5261 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5262 	}
5263 	break;
5264 	default:
5265 		qdf_err("Invalid tag_type");
5266 	}
5267 }
5268 
5269 /**
5270  * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
5271  * @pdev: DP pdev handle
5272  * @fse_setup_info: FST setup parameters
5273  *
5274  * Return: Success when HTT message is sent, error on failure
5275  */
5276 QDF_STATUS
5277 dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
5278 			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
5279 {
5280 	struct htt_soc *soc = pdev->soc->htt_handle;
5281 	struct dp_htt_htc_pkt *pkt;
5282 	qdf_nbuf_t msg;
5283 	u_int32_t *msg_word;
5284 	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
5285 	uint8_t *htt_logger_bufp;
5286 	u_int32_t *key;
5287 	QDF_STATUS status;
5288 
5289 	msg = qdf_nbuf_alloc(
5290 		soc->osdev,
5291 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
5292 		/* reserve room for the HTC header */
5293 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5294 
5295 	if (!msg)
5296 		return QDF_STATUS_E_NOMEM;
5297 
5298 	/*
5299 	 * Set the length of the message.
5300 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5301 	 * separately during the below call to qdf_nbuf_push_head.
5302 	 * The contribution from the HTC header is added separately inside HTC.
5303 	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
		qdf_err("Failed to expand tail for HTT RX_FSE_SETUP msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}
5309 
5310 	/* fill in the message contents */
5311 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5312 
5313 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
5314 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5315 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5316 	htt_logger_bufp = (uint8_t *)msg_word;
5317 
5318 	*msg_word = 0;
5319 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
5320 
5321 	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
5322 
5323 	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
5324 
5325 	msg_word++;
5326 	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
5327 	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
5328 	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
5329 					     fse_setup_info->ip_da_sa_prefix);
5330 
5331 	msg_word++;
5332 	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
5333 					  fse_setup_info->base_addr_lo);
5334 	msg_word++;
5335 	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
5336 					  fse_setup_info->base_addr_hi);
5337 
5338 	key = (u_int32_t *)fse_setup_info->hash_key;
5339 	fse_setup->toeplitz31_0 = *key++;
5340 	fse_setup->toeplitz63_32 = *key++;
5341 	fse_setup->toeplitz95_64 = *key++;
5342 	fse_setup->toeplitz127_96 = *key++;
5343 	fse_setup->toeplitz159_128 = *key++;
5344 	fse_setup->toeplitz191_160 = *key++;
5345 	fse_setup->toeplitz223_192 = *key++;
5346 	fse_setup->toeplitz255_224 = *key++;
5347 	fse_setup->toeplitz287_256 = *key++;
5348 	fse_setup->toeplitz314_288 = *key;
5349 
5350 	msg_word++;
5351 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
5352 	msg_word++;
5353 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
5354 	msg_word++;
5355 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
5356 	msg_word++;
5357 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
5358 	msg_word++;
5359 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
5360 	msg_word++;
5361 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
5362 	msg_word++;
5363 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
5364 	msg_word++;
5365 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
5366 	msg_word++;
5367 	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
5368 	msg_word++;
5369 	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
5370 					  fse_setup->toeplitz314_288);
5371 
5372 	pkt = htt_htc_pkt_alloc(soc);
5373 	if (!pkt) {
		qdf_err("Failed to allocate dp_htt_htc_pkt buffer");
5375 		qdf_assert(0);
5376 		qdf_nbuf_free(msg);
5377 		return QDF_STATUS_E_RESOURCES; /* failure */
5378 	}
5379 
5380 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5381 
5382 	SET_HTC_PACKET_INFO_TX(
5383 		&pkt->htc_pkt,
5384 		dp_htt_h2t_send_complete_free_netbuf,
5385 		qdf_nbuf_data(msg),
5386 		qdf_nbuf_len(msg),
5387 		soc->htc_endpoint,
5388 		/* tag for no FW response msg */
5389 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
5390 
5391 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5392 
5393 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
5394 				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
5395 				     htt_logger_bufp);
5396 
5397 	if (status == QDF_STATUS_SUCCESS) {
5398 		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
5399 			fse_setup_info->pdev_id);
5400 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
5401 				   (void *)fse_setup_info->hash_key,
5402 				   fse_setup_info->hash_key_len);
5403 	} else {
5404 		qdf_nbuf_free(msg);
5405 		htt_htc_pkt_free(soc, pkt);
5406 	}
5407 
5408 	return status;
5409 }
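
/*
 * Setup sketch (illustrative only; the fst handle, its field names and the
 * key length value are assumptions): the caller fills a
 * dp_htt_rx_flow_fst_setup descriptor with the DMA address of the HW FST
 * table and the 320-bit Toeplitz key before invoking the routine above.
 *
 *	struct dp_htt_rx_flow_fst_setup info = {0};
 *
 *	info.pdev_id = pdev->pdev_id;
 *	info.max_entries = fst->max_entries;
 *	info.max_search = fst->max_skid_length;
 *	info.base_addr_lo = fst->base_paddr & 0xffffffff;
 *	info.base_addr_hi = fst->base_paddr >> 32;
 *	info.hash_key = fst->hash_key;
 *	info.hash_key_len = 40;
 *	status = dp_htt_rx_flow_fst_setup(pdev, &info);
 */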
5410 
5411 /**
5412  * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
5413  * add/del a flow in HW
5414  * @pdev: DP pdev handle
5415  * @fse_op_info: Flow entry parameters
5416  *
5417  * Return: Success when HTT message is sent, error on failure
5418  */
5419 QDF_STATUS
5420 dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
5421 			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
5422 {
5423 	struct htt_soc *soc = pdev->soc->htt_handle;
5424 	struct dp_htt_htc_pkt *pkt;
5425 	qdf_nbuf_t msg;
5426 	u_int32_t *msg_word;
5427 	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
5428 	uint8_t *htt_logger_bufp;
5429 	QDF_STATUS status;
5430 
5431 	msg = qdf_nbuf_alloc(
5432 		soc->osdev,
5433 		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
5434 		/* reserve room for the HTC header */
5435 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
5436 	if (!msg)
5437 		return QDF_STATUS_E_NOMEM;
5438 
5439 	/*
5440 	 * Set the length of the message.
5441 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5442 	 * separately during the below call to qdf_nbuf_push_head.
5443 	 * The contribution from the HTC header is added separately inside HTC.
5444 	 */
5445 	if (!qdf_nbuf_put_tail(msg,
5446 			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
		qdf_err("Failed to expand tail for HTT_RX_FSE_OPERATION msg");
5448 		qdf_nbuf_free(msg);
5449 		return QDF_STATUS_E_FAILURE;
5450 	}
5451 
5452 	/* fill in the message contents */
5453 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5454 
5455 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
5456 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5457 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5458 	htt_logger_bufp = (uint8_t *)msg_word;
5459 
5460 	*msg_word = 0;
5461 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
5462 
5463 	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
5464 
5465 	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
5466 	msg_word++;
5467 	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
5468 	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
5469 		HTT_RX_FSE_OPERATION_SET(*msg_word,
5470 					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
5471 		msg_word++;
5472 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5473 		*msg_word,
5474 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
5475 		msg_word++;
5476 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5477 		*msg_word,
5478 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
5479 		msg_word++;
5480 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5481 		*msg_word,
5482 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
5483 		msg_word++;
5484 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5485 		*msg_word,
5486 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
5487 		msg_word++;
5488 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5489 		*msg_word,
5490 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
5491 		msg_word++;
5492 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5493 		*msg_word,
5494 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
5495 		msg_word++;
5496 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5497 		*msg_word,
5498 		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
5499 		msg_word++;
5500 		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
5501 		*msg_word,
5502 		qdf_htonl(
5503 		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
5504 		msg_word++;
5505 		HTT_RX_FSE_SOURCEPORT_SET(
5506 			*msg_word,
5507 			fse_op_info->rx_flow->flow_tuple_info.src_port);
5508 		HTT_RX_FSE_DESTPORT_SET(
5509 			*msg_word,
5510 			fse_op_info->rx_flow->flow_tuple_info.dest_port);
5511 		msg_word++;
5512 		HTT_RX_FSE_L4_PROTO_SET(
5513 			*msg_word,
5514 			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
5515 	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
5516 		HTT_RX_FSE_OPERATION_SET(*msg_word,
5517 					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
5518 	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
5519 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
5520 	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
5521 		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
5522 	}
5523 
5524 	pkt = htt_htc_pkt_alloc(soc);
5525 	if (!pkt) {
		qdf_err("Failed to allocate dp_htt_htc_pkt buffer");
5527 		qdf_assert(0);
5528 		qdf_nbuf_free(msg);
5529 		return QDF_STATUS_E_RESOURCES; /* failure */
5530 	}
5531 
5532 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5533 
5534 	SET_HTC_PACKET_INFO_TX(
5535 		&pkt->htc_pkt,
5536 		dp_htt_h2t_send_complete_free_netbuf,
5537 		qdf_nbuf_data(msg),
5538 		qdf_nbuf_len(msg),
5539 		soc->htc_endpoint,
5540 		/* tag for no FW response msg */
5541 		HTC_TX_PACKET_TAG_RUNTIME_PUT);
5542 
5543 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5544 
5545 	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
5546 				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
5547 				     htt_logger_bufp);
5548 
5549 	if (status == QDF_STATUS_SUCCESS) {
5550 		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
5551 			fse_op_info->pdev_id);
5552 	} else {
5553 		qdf_nbuf_free(msg);
5554 		htt_htc_pkt_free(soc, pkt);
5555 	}
5556 
5557 	return status;
5558 }
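
/*
 * Example (illustrative sketch; the rx_flow descriptor is assumed to come
 * from the caller): invalidate the cached FSE entry for one 5-tuple.
 *
 *	struct dp_htt_rx_flow_fst_operation op = {0};
 *
 *	op.pdev_id = pdev->pdev_id;
 *	op.op_code = DP_HTT_FST_CACHE_INVALIDATE_ENTRY;
 *	op.rx_flow = rx_flow;
 *	status = dp_htt_rx_flow_fse_operation(pdev, &op);
 *
 * DP_HTT_FST_CACHE_INVALIDATE_FULL, DP_HTT_FST_ENABLE and
 * DP_HTT_FST_DISABLE follow the same pattern without a flow tuple.
 */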
5559 
/**
 * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
 * @pdev: DP pdev handle
 * @fisa_config: FISA configuration parameters
 *
 * Return: Success when HTT message is sent, error on failure
 */
5567 QDF_STATUS
5568 dp_htt_rx_fisa_config(struct dp_pdev *pdev,
5569 		      struct dp_htt_rx_fisa_cfg *fisa_config)
5570 {
5571 	struct htt_soc *soc = pdev->soc->htt_handle;
5572 	struct dp_htt_htc_pkt *pkt;
5573 	qdf_nbuf_t msg;
5574 	u_int32_t *msg_word;
5575 	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
5576 	uint8_t *htt_logger_bufp;
5577 	uint32_t len;
5578 	QDF_STATUS status;
5579 
5580 	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
5581 
5582 	msg = qdf_nbuf_alloc(soc->osdev,
5583 			     len,
5584 			     /* reserve room for the HTC header */
5585 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
5586 			     4,
5587 			     TRUE);
5588 	if (!msg)
5589 		return QDF_STATUS_E_NOMEM;
5590 
5591 	/*
5592 	 * Set the length of the message.
5593 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
5594 	 * separately during the below call to qdf_nbuf_push_head.
5595 	 * The contribution from the HTC header is added separately inside HTC.
5596 	 */
5597 	if (!qdf_nbuf_put_tail(msg,
5598 			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
		qdf_err("Failed to expand tail for HTT_RX_FISA_CFG msg");
5600 		qdf_nbuf_free(msg);
5601 		return QDF_STATUS_E_FAILURE;
5602 	}
5603 
5604 	/* fill in the message contents */
5605 	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
5606 
5607 	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
5608 	/* rewind beyond alignment pad to get to the HTC header reserved area */
5609 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
5610 	htt_logger_bufp = (uint8_t *)msg_word;
5611 
5612 	*msg_word = 0;
5613 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
5614 
5615 	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
5616 
	/* pdev_id must come from the caller's config, not the zeroed msg */
	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fisa_config->pdev_id);
5618 
5619 	msg_word++;
5620 	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
5621 	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);
5622 
5623 	msg_word++;
5624 	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
5625 
5626 	pkt = htt_htc_pkt_alloc(soc);
5627 	if (!pkt) {
		qdf_err("Failed to allocate dp_htt_htc_pkt buffer");
5629 		qdf_assert(0);
5630 		qdf_nbuf_free(msg);
5631 		return QDF_STATUS_E_RESOURCES; /* failure */
5632 	}
5633 
5634 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
5635 
5636 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
5637 			       dp_htt_h2t_send_complete_free_netbuf,
5638 			       qdf_nbuf_data(msg),
5639 			       qdf_nbuf_len(msg),
5640 			       soc->htc_endpoint,
5641 			       /* tag for no FW response msg */
5642 			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
5643 
5644 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
5645 
5646 	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
5647 				     htt_logger_bufp);
5648 
5649 	if (status == QDF_STATUS_SUCCESS) {
5650 		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
5651 			fisa_config->pdev_id);
5652 	} else {
5653 		qdf_nbuf_free(msg);
5654 		htt_htc_pkt_free(soc, pkt);
5655 	}
5656 
5657 	return status;
5658 }
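
/*
 * Example (illustrative sketch; the timeout value is hypothetical): enable
 * FISA on a pdev with a FW aggregation-flush timeout.
 *
 *	struct dp_htt_rx_fisa_cfg fisa_config = {0};
 *
 *	fisa_config.pdev_id = pdev->pdev_id;
 *	fisa_config.fisa_timeout = 0x68;
 *	status = dp_htt_rx_fisa_config(pdev, &fisa_config);
 */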
5659