xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 8ddef7dd9a290d4a9b1efd5d3efacf51d78a1a0d)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_htt.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx_mon.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
30 #include "cdp_txrx_cmn_struct.h"
31 
32 #ifdef FEATURE_PERPKT_INFO
33 #include "dp_ratetable.h"
34 #endif
35 
/* Length of a T2H extended-stats TLV header, in bytes */
#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE

/* Initial number of pre-allocated HTC packet wrappers in the pool */
#define HTT_HTC_PKT_POOL_INIT_SIZE 64
/* Upper bound on a single target-to-host HTT message, in bytes */
#define HTT_T2H_MAX_MSG_SIZE 2048

/* Total nbuf size needed for an H2T message of msg_bytes payload:
 * payload + HTC header + alignment padding reserved at the head.
 */
#define HTT_MSG_BUF_SIZE(msg_bytes) \
	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)

/* Mask extracting the pdev-id bits from an HTT peer-id field */
#define HTT_PID_BIT_MASK 0x3

/* Chunk size used when FW splits an indication across messages */
#define DP_EXT_MSG_LENGTH 2048
/* Send an HTT packet over HTC; on successful submission track it on the
 * misclist so it can be reclaimed later (see htt_htc_misc_pkt_list_add).
 */
#define DP_HTT_SEND_HTC_PKT(soc, pkt)                            \
do {                                                             \
	if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==         \
					QDF_STATUS_SUCCESS)      \
		htt_htc_misc_pkt_list_add(soc, pkt);             \
} while (0)

/* Reserved bytes preceding the mgmt-ctrl payload TLV (note: historical
 * misspelling "RESERVERD" is kept - the name is referenced elsewhere).
 */
#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
55 
56 /**
57  * Bitmap of HTT PPDU TLV types for Default mode
58  */
59 #define HTT_PPDU_DEFAULT_TLV_BITMAP \
60 	(1 << HTT_PPDU_STATS_COMMON_TLV) | \
61 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
62 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
63 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
64 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
65 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)
66 
67 /**
68  * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64
69  */
70 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \
71 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
72 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
73 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
74 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
75 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
76 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
77 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
78 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV))
79 
80 /**
81  * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256
82  */
83 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \
84 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
85 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
86 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
87 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
88 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
89 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
90 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
91 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV))
92 
93 #define HTT_FRAMECTRL_DATATYPE 0x08
94 #define HTT_PPDU_DESC_MAX_DEPTH 16
95 #define DP_SCAN_PEER_ID 0xFFFF
96 
97 /*
98  * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
99  * bitmap for sniffer mode
100  * @bitmap: received bitmap
101  *
102  * Return: expected bitmap value, returns zero if doesn't match with
103  * either 64-bit Tx window or 256-bit window tlv bitmap
104  */
105 
106 static inline int
107 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
108 {
109 	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
110 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
111 	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
112 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
113 
114 	return 0;
115 }
116 
117 /*
118  * dp_tx_stats_update() - Update per-peer statistics
119  * @soc: Datapath soc handle
120  * @peer: Datapath peer handle
121  * @ppdu: PPDU Descriptor
122  * @ack_rssi: RSSI of last ack received
123  *
124  * Return: None
125  */
126 #ifdef FEATURE_PERPKT_INFO
127 static inline void
128 dp_tx_rate_stats_update(struct dp_peer *peer,
129 			struct cdp_tx_completion_ppdu_user *ppdu)
130 {
131 	uint32_t ratekbps = 0;
132 	uint32_t ppdu_tx_rate = 0;
133 	uint32_t rix;
134 
135 	if (!peer || !ppdu)
136 		return;
137 
138 
139 	ratekbps = dp_getrateindex(ppdu->gi,
140 				   ppdu->mcs,
141 				   ppdu->nss,
142 				   ppdu->preamble,
143 				   ppdu->bw,
144 				   &rix);
145 
146 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
147 
148 	if (!ratekbps)
149 		return;
150 
151 	peer->stats.tx.avg_tx_rate =
152 		dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
153 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
154 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
155 
156 	if (peer->vdev) {
157 		if (peer->bss_peer) {
158 			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
159 			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
160 		} else {
161 			peer->vdev->stats.tx.last_tx_rate = ratekbps;
162 			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
163 		}
164 	}
165 }
166 
static void dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer,
		struct cdp_tx_completion_ppdu_user *ppdu, uint32_t ack_rssi)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	uint8_t preamble, mcs;
	uint16_t num_msdu;

	/* cache the fields used repeatedly below */
	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;

	/* If the peer statistics are already processed as part of
	 * per-MSDU completion handler, do not process these again in per-PPDU
	 * indications */
	if (soc->process_tx_status)
		return;

	/*
	 * Record MU group id / user position for non-SU PPDUs.
	 * NOTE(review): the mask test below flags ids whose low bits are all
	 * zero (0, and MAX_MU_GROUP_ID itself when it is a power of two)
	 * rather than doing a plain range check - confirm against the FW's
	 * group-id encoding.
	 */
	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
		if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "mu_group_id out of bound!!\n");
		else
			DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
				     (ppdu->user_pos + 1));
	}

	/* For OFDMA PPDUs, bucket the RU allocation size into ru_loc[0..5] */
	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
		DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
		DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
		switch (ppdu->ru_tones) {
		case RU_26:
			DP_STATS_INC(peer, tx.ru_loc[0], num_msdu);
		break;
		case RU_52:
			DP_STATS_INC(peer, tx.ru_loc[1], num_msdu);
		break;
		case RU_106:
			DP_STATS_INC(peer, tx.ru_loc[2], num_msdu);
		break;
		case RU_242:
			DP_STATS_INC(peer, tx.ru_loc[3], num_msdu);
		break;
		case RU_484:
			DP_STATS_INC(peer, tx.ru_loc[4], num_msdu);
		break;
		case RU_996:
			DP_STATS_INC(peer, tx.ru_loc[5], num_msdu);
		break;
		}
	}

	/* Fold this PPDU's per-user counters into the peer cumulative stats */
	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type], num_msdu);
	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			num_msdu, (ppdu->success_bytes +
				ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	/* ack RSSI is only meaningful for unicast frames with a valid ack */
	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	DP_STATS_INC(peer, tx.retries,
			(ppdu->long_retries + ppdu->short_retries));
	/*
	 * Per-preamble MCS histogram: an MCS at or above the max for the
	 * preamble type is folded into the overflow bucket (MAX_MCS - 1);
	 * in-range values increment their own bucket. Exactly one of each
	 * INCC pair fires per preamble.
	 */
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));

	/* let registered consumers know the peer stats changed */
	dp_peer_stats_notify(peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ppdu->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
276 #endif
277 
278 /*
279  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
280  * @htt_soc:	HTT SOC handle
281  *
282  * Return: Pointer to htc packet buffer
283  */
284 static struct dp_htt_htc_pkt *
285 htt_htc_pkt_alloc(struct htt_soc *soc)
286 {
287 	struct dp_htt_htc_pkt_union *pkt = NULL;
288 
289 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
290 	if (soc->htt_htc_pkt_freelist) {
291 		pkt = soc->htt_htc_pkt_freelist;
292 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
293 	}
294 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
295 
296 	if (pkt == NULL)
297 		pkt = qdf_mem_malloc(sizeof(*pkt));
298 	return &pkt->u.pkt; /* not actually a dereference */
299 }
300 
301 /*
302  * htt_htc_pkt_free() - Free HTC packet buffer
303  * @htt_soc:	HTT SOC handle
304  */
305 static void
306 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
307 {
308 	struct dp_htt_htc_pkt_union *u_pkt =
309 		(struct dp_htt_htc_pkt_union *)pkt;
310 
311 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
312 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
313 	soc->htt_htc_pkt_freelist = u_pkt;
314 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
315 }
316 
317 /*
318  * htt_htc_pkt_pool_free() - Free HTC packet pool
319  * @htt_soc:	HTT SOC handle
320  */
321 static void
322 htt_htc_pkt_pool_free(struct htt_soc *soc)
323 {
324 	struct dp_htt_htc_pkt_union *pkt, *next;
325 	pkt = soc->htt_htc_pkt_freelist;
326 	while (pkt) {
327 		next = pkt->u.next;
328 		qdf_mem_free(pkt);
329 		pkt = next;
330 	}
331 	soc->htt_htc_pkt_freelist = NULL;
332 }
333 
334 /*
335  * htt_htc_misc_pkt_list_trim() - trim misc list
336  * @htt_soc: HTT SOC handle
337  * @level: max no. of pkts in list
338  */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	/*
	 * Keep at most 'level' entries at the head of the misclist.
	 * Once the count exceeds 'level', every remaining node (and the
	 * netbuf it references) is unmapped and freed; the list is cut by
	 * NULL-ing the next pointer of the last kept node via 'prev'.
	 */
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			/* NULL out pkt so 'prev' stays NULL on later
			 * iterations - the list was already terminated at
			 * the first trimmed node.
			 */
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
366 
367 /*
368  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
369  * @htt_soc:	HTT SOC handle
370  * @dp_htt_htc_pkt: pkt to be added to list
371  */
372 static void
373 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
374 {
375 	struct dp_htt_htc_pkt_union *u_pkt =
376 				(struct dp_htt_htc_pkt_union *)pkt;
377 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
378 							pkt->htc_pkt.Endpoint)
379 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
380 
381 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
382 	if (soc->htt_htc_pkt_misclist) {
383 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
384 		soc->htt_htc_pkt_misclist = u_pkt;
385 	} else {
386 		soc->htt_htc_pkt_misclist = u_pkt;
387 	}
388 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
389 
390 	/* only ce pipe size + tx_queue_depth could possibly be in use
391 	 * free older packets in the misclist
392 	 */
393 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
394 }
395 
396 /*
397  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
398  * @htt_soc:	HTT SOC handle
399  */
400 static void
401 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
402 {
403 	struct dp_htt_htc_pkt_union *pkt, *next;
404 	qdf_nbuf_t netbuf;
405 
406 	pkt = soc->htt_htc_pkt_misclist;
407 
408 	while (pkt) {
409 		next = pkt->u.next;
410 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
411 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
412 
413 		soc->stats.htc_pkt_free++;
414 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
415 			 "%s: Pkt free count %d",
416 			 __func__, soc->stats.htc_pkt_free);
417 
418 		qdf_nbuf_free(netbuf);
419 		qdf_mem_free(pkt);
420 		pkt = next;
421 	}
422 	soc->htt_htc_pkt_misclist = NULL;
423 }
424 
425 /*
426  * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
427  * @tgt_mac_addr:	Target MAC
428  * @buffer:		Output buffer
429  */
430 static u_int8_t *
431 htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
432 {
433 #ifdef BIG_ENDIAN_HOST
434 	/*
435 	 * The host endianness is opposite of the target endianness.
436 	 * To make u_int32_t elements come out correctly, the target->host
437 	 * upload has swizzled the bytes in each u_int32_t element of the
438 	 * message.
439 	 * For byte-array message fields like the MAC address, this
440 	 * upload swizzling puts the bytes in the wrong order, and needs
441 	 * to be undone.
442 	 */
443 	buffer[0] = tgt_mac_addr[3];
444 	buffer[1] = tgt_mac_addr[2];
445 	buffer[2] = tgt_mac_addr[1];
446 	buffer[3] = tgt_mac_addr[0];
447 	buffer[4] = tgt_mac_addr[7];
448 	buffer[5] = tgt_mac_addr[6];
449 	return buffer;
450 #else
451 	/*
452 	 * The host endianness matches the target endianness -
453 	 * we can use the mac addr directly from the message buffer.
454 	 */
455 	return tgt_mac_addr;
456 #endif
457 }
458 
459 /*
460  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
461  * @soc:	SOC handle
462  * @status:	Completion status
463  * @netbuf:	HTT buffer
464  */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	/* Default send-done callback: unconditionally release the message
	 * buffer; soc and status are unused.
	 */
	qdf_nbuf_free(netbuf);
}
471 
472 /*
473  * dp_htt_h2t_send_complete() - H2T completion handler
474  * @context:	Opaque context (HTT SOC handle)
475  * @htc_pkt:	HTC packet
476  */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	/* optional second-stage completion callback stashed in pPktContext */
	void (*send_complete_part2)(
		void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	send_complete_part2 = htc_pkt->pPktContext;

	/* recover our wrapper from the embedded HTC_PACKET */
	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2 != NULL) {
		/* part2 owns the netbuf from here (typically frees it) */
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}
503 
504 /*
505  * htt_h2t_ver_req_msg() - Send HTT version request message to target
506  * @htt_soc:	HTT SOC handle
507  *
508  * Return: 0 on success; error code on failure
509  */
510 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
511 {
512 	struct dp_htt_htc_pkt *pkt;
513 	qdf_nbuf_t msg;
514 	uint32_t *msg_word;
515 
516 	msg = qdf_nbuf_alloc(
517 		soc->osdev,
518 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
519 		/* reserve room for the HTC header */
520 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
521 	if (!msg)
522 		return QDF_STATUS_E_NOMEM;
523 
524 	/*
525 	 * Set the length of the message.
526 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
527 	 * separately during the below call to qdf_nbuf_push_head.
528 	 * The contribution from the HTC header is added separately inside HTC.
529 	 */
530 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
531 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
532 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
533 			__func__);
534 		return QDF_STATUS_E_FAILURE;
535 	}
536 
537 	/* fill in the message contents */
538 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
539 
540 	/* rewind beyond alignment pad to get to the HTC header reserved area */
541 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
542 
543 	*msg_word = 0;
544 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
545 
546 	pkt = htt_htc_pkt_alloc(soc);
547 	if (!pkt) {
548 		qdf_nbuf_free(msg);
549 		return QDF_STATUS_E_FAILURE;
550 	}
551 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
552 
553 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
554 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
555 		qdf_nbuf_len(msg), soc->htc_endpoint,
556 		1); /* tag - not relevant here */
557 
558 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
559 	DP_HTT_SEND_HTC_PKT(soc, pkt);
560 	return 0;
561 }
562 
563 /*
564  * htt_srng_setup() - Send SRNG setup message to target
565  * @htt_soc:	HTT SOC handle
566  * @mac_id:	MAC Id
567  * @hal_srng:	Opaque HAL SRNG pointer
568  * @hal_ring_type:	SRNG ring type
569  *
570  * Return: 0 on success; error code on failure
571  */
572 int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
573 	int hal_ring_type)
574 {
575 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
576 	struct dp_htt_htc_pkt *pkt;
577 	qdf_nbuf_t htt_msg;
578 	uint32_t *msg_word;
579 	struct hal_srng_params srng_params;
580 	qdf_dma_addr_t hp_addr, tp_addr;
581 	uint32_t ring_entry_size =
582 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
583 	int htt_ring_type, htt_ring_id;
584 
585 	/* Sizes should be set in 4-byte words */
586 	ring_entry_size = ring_entry_size >> 2;
587 
588 	htt_msg = qdf_nbuf_alloc(soc->osdev,
589 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
590 		/* reserve room for the HTC header */
591 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
592 	if (!htt_msg)
593 		goto fail0;
594 
595 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
596 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
597 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
598 
599 	switch (hal_ring_type) {
600 	case RXDMA_BUF:
601 #ifdef QCA_HOST2FW_RXBUF_RING
602 		if (srng_params.ring_id ==
603 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
604 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
605 			htt_ring_type = HTT_SW_TO_SW_RING;
606 #ifdef IPA_OFFLOAD
607 		} else if (srng_params.ring_id ==
608 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
609 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
610 			htt_ring_type = HTT_SW_TO_SW_RING;
611 #endif
612 #else
613 		if (srng_params.ring_id ==
614 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
615 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
616 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
617 			htt_ring_type = HTT_SW_TO_HW_RING;
618 #endif
619 		} else if (srng_params.ring_id ==
620 #ifdef IPA_OFFLOAD
621 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
622 #else
623 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
624 #endif
625 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
626 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
627 			htt_ring_type = HTT_SW_TO_HW_RING;
628 		} else {
629 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
630 				   "%s: Ring %d currently not supported",
631 				   __func__, srng_params.ring_id);
632 			goto fail1;
633 		}
634 
635 		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
636 			hal_ring_type, srng_params.ring_id, htt_ring_id,
637 			(uint64_t)hp_addr,
638 			(uint64_t)tp_addr);
639 		break;
640 	case RXDMA_MONITOR_BUF:
641 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
642 		htt_ring_type = HTT_SW_TO_HW_RING;
643 		break;
644 	case RXDMA_MONITOR_STATUS:
645 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
646 		htt_ring_type = HTT_SW_TO_HW_RING;
647 		break;
648 	case RXDMA_MONITOR_DST:
649 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
650 		htt_ring_type = HTT_HW_TO_SW_RING;
651 		break;
652 	case RXDMA_MONITOR_DESC:
653 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
654 		htt_ring_type = HTT_SW_TO_HW_RING;
655 		break;
656 	case RXDMA_DST:
657 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
658 		htt_ring_type = HTT_HW_TO_SW_RING;
659 		break;
660 
661 	default:
662 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
663 			"%s: Ring currently not supported", __func__);
664 			goto fail1;
665 	}
666 
667 	/*
668 	 * Set the length of the message.
669 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
670 	 * separately during the below call to qdf_nbuf_push_head.
671 	 * The contribution from the HTC header is added separately inside HTC.
672 	 */
673 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
674 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
675 			"%s: Failed to expand head for SRING_SETUP msg",
676 			__func__);
677 		return QDF_STATUS_E_FAILURE;
678 	}
679 
680 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
681 
682 	/* rewind beyond alignment pad to get to the HTC header reserved area */
683 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
684 
685 	/* word 0 */
686 	*msg_word = 0;
687 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
688 
689 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
690 			(htt_ring_type == HTT_HW_TO_SW_RING))
691 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
692 			 DP_SW2HW_MACID(mac_id));
693 	else
694 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
695 
696 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
697 		  "%s: mac_id %d", __func__, mac_id);
698 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
699 	/* TODO: Discuss with FW on changing this to unique ID and using
700 	 * htt_ring_type to send the type of ring
701 	 */
702 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
703 
704 	/* word 1 */
705 	msg_word++;
706 	*msg_word = 0;
707 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
708 		srng_params.ring_base_paddr & 0xffffffff);
709 
710 	/* word 2 */
711 	msg_word++;
712 	*msg_word = 0;
713 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
714 		(uint64_t)srng_params.ring_base_paddr >> 32);
715 
716 	/* word 3 */
717 	msg_word++;
718 	*msg_word = 0;
719 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
720 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
721 		(ring_entry_size * srng_params.num_entries));
722 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
723 		  "%s: entry_size %d", __func__,
724 			 ring_entry_size);
725 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
726 		  "%s: num_entries %d", __func__,
727 			 srng_params.num_entries);
728 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
729 		  "%s: ring_size %d", __func__,
730 			 (ring_entry_size * srng_params.num_entries));
731 	if (htt_ring_type == HTT_SW_TO_HW_RING)
732 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
733 						*msg_word, 1);
734 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
735 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
736 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
737 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
738 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
739 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
740 
741 	/* word 4 */
742 	msg_word++;
743 	*msg_word = 0;
744 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
745 		hp_addr & 0xffffffff);
746 
747 	/* word 5 */
748 	msg_word++;
749 	*msg_word = 0;
750 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
751 		(uint64_t)hp_addr >> 32);
752 
753 	/* word 6 */
754 	msg_word++;
755 	*msg_word = 0;
756 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
757 		tp_addr & 0xffffffff);
758 
759 	/* word 7 */
760 	msg_word++;
761 	*msg_word = 0;
762 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
763 		(uint64_t)tp_addr >> 32);
764 
765 	/* word 8 */
766 	msg_word++;
767 	*msg_word = 0;
768 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
769 		srng_params.msi_addr & 0xffffffff);
770 
771 	/* word 9 */
772 	msg_word++;
773 	*msg_word = 0;
774 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
775 		(uint64_t)(srng_params.msi_addr) >> 32);
776 
777 	/* word 10 */
778 	msg_word++;
779 	*msg_word = 0;
780 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
781 		srng_params.msi_data);
782 
783 	/* word 11 */
784 	msg_word++;
785 	*msg_word = 0;
786 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
787 		srng_params.intr_batch_cntr_thres_entries *
788 		ring_entry_size);
789 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
790 		srng_params.intr_timer_thres_us >> 3);
791 
792 	/* word 12 */
793 	msg_word++;
794 	*msg_word = 0;
795 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
796 		/* TODO: Setting low threshold to 1/8th of ring size - see
797 		 * if this needs to be configurable
798 		 */
799 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
800 			srng_params.low_threshold);
801 	}
802 	/* "response_required" field should be set if a HTT response message is
803 	 * required after setting up the ring.
804 	 */
805 	pkt = htt_htc_pkt_alloc(soc);
806 	if (!pkt)
807 		goto fail1;
808 
809 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
810 
811 	SET_HTC_PACKET_INFO_TX(
812 		&pkt->htc_pkt,
813 		dp_htt_h2t_send_complete_free_netbuf,
814 		qdf_nbuf_data(htt_msg),
815 		qdf_nbuf_len(htt_msg),
816 		soc->htc_endpoint,
817 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
818 
819 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
820 	DP_HTT_SEND_HTC_PKT(soc, pkt);
821 
822 	return QDF_STATUS_SUCCESS;
823 
824 fail1:
825 	qdf_nbuf_free(htt_msg);
826 fail0:
827 	return QDF_STATUS_E_FAILURE;
828 }
829 
830 /*
831  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
832  * config message to target
833  * @htt_soc:	HTT SOC handle
834  * @pdev_id:	PDEV Id
835  * @hal_srng:	Opaque HAL SRNG pointer
836  * @hal_ring_type:	SRNG ring type
837  * @ring_buf_size:	SRNG buffer size
838  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
839  * Return: 0 on success; error code on failure
840  */
841 int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
842 	int hal_ring_type, int ring_buf_size,
843 	struct htt_rx_ring_tlv_filter *htt_tlv_filter)
844 {
845 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
846 	struct dp_htt_htc_pkt *pkt;
847 	qdf_nbuf_t htt_msg;
848 	uint32_t *msg_word;
849 	struct hal_srng_params srng_params;
850 	uint32_t htt_ring_type, htt_ring_id;
851 	uint32_t tlv_filter;
852 
853 	htt_msg = qdf_nbuf_alloc(soc->osdev,
854 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
855 	/* reserve room for the HTC header */
856 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
857 	if (!htt_msg)
858 		goto fail0;
859 
860 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
861 
862 	switch (hal_ring_type) {
863 	case RXDMA_BUF:
864 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
865 		htt_ring_type = HTT_SW_TO_HW_RING;
866 		break;
867 	case RXDMA_MONITOR_BUF:
868 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
869 		htt_ring_type = HTT_SW_TO_HW_RING;
870 		break;
871 	case RXDMA_MONITOR_STATUS:
872 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
873 		htt_ring_type = HTT_SW_TO_HW_RING;
874 		break;
875 	case RXDMA_MONITOR_DST:
876 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
877 		htt_ring_type = HTT_HW_TO_SW_RING;
878 		break;
879 	case RXDMA_MONITOR_DESC:
880 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
881 		htt_ring_type = HTT_SW_TO_HW_RING;
882 		break;
883 	case RXDMA_DST:
884 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
885 		htt_ring_type = HTT_HW_TO_SW_RING;
886 		break;
887 
888 	default:
889 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
890 			"%s: Ring currently not supported", __func__);
891 		goto fail1;
892 	}
893 
894 	/*
895 	 * Set the length of the message.
896 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
897 	 * separately during the below call to qdf_nbuf_push_head.
898 	 * The contribution from the HTC header is added separately inside HTC.
899 	 */
900 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
901 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
902 			"%s: Failed to expand head for RX Ring Cfg msg",
903 			__func__);
904 		goto fail1; /* failure */
905 	}
906 
907 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
908 
909 	/* rewind beyond alignment pad to get to the HTC header reserved area */
910 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
911 
912 	/* word 0 */
913 	*msg_word = 0;
914 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
915 
916 	/*
917 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
918 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
919 	 */
920 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
921 			htt_ring_type == HTT_SW_TO_HW_RING)
922 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
923 						DP_SW2HW_MACID(pdev_id));
924 
925 	/* TODO: Discuss with FW on changing this to unique ID and using
926 	 * htt_ring_type to send the type of ring
927 	 */
928 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
929 
930 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
931 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
932 
933 	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
934 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
935 
936 	/* word 1 */
937 	msg_word++;
938 	*msg_word = 0;
939 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
940 		ring_buf_size);
941 
942 	/* word 2 */
943 	msg_word++;
944 	*msg_word = 0;
945 
946 	if (htt_tlv_filter->enable_fp) {
947 		/* TYPE: MGMT */
948 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
949 			FP, MGMT, 0000,
950 			(htt_tlv_filter->fp_mgmt_filter &
951 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
952 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
953 			FP, MGMT, 0001,
954 			(htt_tlv_filter->fp_mgmt_filter &
955 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
956 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
957 			FP, MGMT, 0010,
958 			(htt_tlv_filter->fp_mgmt_filter &
959 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
960 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
961 			FP, MGMT, 0011,
962 			(htt_tlv_filter->fp_mgmt_filter &
963 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
964 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
965 			FP, MGMT, 0100,
966 			(htt_tlv_filter->fp_mgmt_filter &
967 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
968 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
969 			FP, MGMT, 0101,
970 			(htt_tlv_filter->fp_mgmt_filter &
971 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
972 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
973 			FP, MGMT, 0110,
974 			(htt_tlv_filter->fp_mgmt_filter &
975 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
976 		/* reserved */
977 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
978 			MGMT, 0111,
979 			(htt_tlv_filter->fp_mgmt_filter &
980 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
981 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
982 			FP, MGMT, 1000,
983 			(htt_tlv_filter->fp_mgmt_filter &
984 			FILTER_MGMT_BEACON) ? 1 : 0);
985 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
986 			FP, MGMT, 1001,
987 			(htt_tlv_filter->fp_mgmt_filter &
988 			FILTER_MGMT_ATIM) ? 1 : 0);
989 	}
990 
991 	if (htt_tlv_filter->enable_md) {
992 			/* TYPE: MGMT */
993 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
994 			MD, MGMT, 0000,
995 			(htt_tlv_filter->md_mgmt_filter &
996 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
997 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
998 			MD, MGMT, 0001,
999 			(htt_tlv_filter->md_mgmt_filter &
1000 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1001 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1002 			MD, MGMT, 0010,
1003 			(htt_tlv_filter->md_mgmt_filter &
1004 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1005 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1006 			MD, MGMT, 0011,
1007 			(htt_tlv_filter->md_mgmt_filter &
1008 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1009 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1010 			MD, MGMT, 0100,
1011 			(htt_tlv_filter->md_mgmt_filter &
1012 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1013 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1014 			MD, MGMT, 0101,
1015 			(htt_tlv_filter->md_mgmt_filter &
1016 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1017 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1018 			MD, MGMT, 0110,
1019 			(htt_tlv_filter->md_mgmt_filter &
1020 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1021 		/* reserved */
1022 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1023 			MGMT, 0111,
1024 			(htt_tlv_filter->md_mgmt_filter &
1025 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1026 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1027 			MD, MGMT, 1000,
1028 			(htt_tlv_filter->md_mgmt_filter &
1029 			FILTER_MGMT_BEACON) ? 1 : 0);
1030 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1031 			MD, MGMT, 1001,
1032 			(htt_tlv_filter->md_mgmt_filter &
1033 			FILTER_MGMT_ATIM) ? 1 : 0);
1034 	}
1035 
1036 	if (htt_tlv_filter->enable_mo) {
1037 		/* TYPE: MGMT */
1038 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1039 			MO, MGMT, 0000,
1040 			(htt_tlv_filter->mo_mgmt_filter &
1041 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1042 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1043 			MO, MGMT, 0001,
1044 			(htt_tlv_filter->mo_mgmt_filter &
1045 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1046 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1047 			MO, MGMT, 0010,
1048 			(htt_tlv_filter->mo_mgmt_filter &
1049 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1050 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1051 			MO, MGMT, 0011,
1052 			(htt_tlv_filter->mo_mgmt_filter &
1053 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1054 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1055 			MO, MGMT, 0100,
1056 			(htt_tlv_filter->mo_mgmt_filter &
1057 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1058 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1059 			MO, MGMT, 0101,
1060 			(htt_tlv_filter->mo_mgmt_filter &
1061 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1062 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1063 			MO, MGMT, 0110,
1064 			(htt_tlv_filter->mo_mgmt_filter &
1065 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1066 		/* reserved */
1067 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1068 			MGMT, 0111,
1069 			(htt_tlv_filter->mo_mgmt_filter &
1070 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1071 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1072 			MO, MGMT, 1000,
1073 			(htt_tlv_filter->mo_mgmt_filter &
1074 			FILTER_MGMT_BEACON) ? 1 : 0);
1075 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1076 			MO, MGMT, 1001,
1077 			(htt_tlv_filter->mo_mgmt_filter &
1078 			FILTER_MGMT_ATIM) ? 1 : 0);
1079 	}
1080 
1081 	/* word 3 */
1082 	msg_word++;
1083 	*msg_word = 0;
1084 
1085 	if (htt_tlv_filter->enable_fp) {
1086 		/* TYPE: MGMT */
1087 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1088 			FP, MGMT, 1010,
1089 			(htt_tlv_filter->fp_mgmt_filter &
1090 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1091 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1092 			FP, MGMT, 1011,
1093 			(htt_tlv_filter->fp_mgmt_filter &
1094 			FILTER_MGMT_AUTH) ? 1 : 0);
1095 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1096 			FP, MGMT, 1100,
1097 			(htt_tlv_filter->fp_mgmt_filter &
1098 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1099 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1100 			FP, MGMT, 1101,
1101 			(htt_tlv_filter->fp_mgmt_filter &
1102 			FILTER_MGMT_ACTION) ? 1 : 0);
1103 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1104 			FP, MGMT, 1110,
1105 			(htt_tlv_filter->fp_mgmt_filter &
1106 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1107 		/* reserved*/
1108 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1109 			MGMT, 1111,
1110 			(htt_tlv_filter->fp_mgmt_filter &
1111 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1112 	}
1113 
1114 	if (htt_tlv_filter->enable_md) {
1115 			/* TYPE: MGMT */
1116 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1117 			MD, MGMT, 1010,
1118 			(htt_tlv_filter->md_mgmt_filter &
1119 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1120 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1121 			MD, MGMT, 1011,
1122 			(htt_tlv_filter->md_mgmt_filter &
1123 			FILTER_MGMT_AUTH) ? 1 : 0);
1124 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1125 			MD, MGMT, 1100,
1126 			(htt_tlv_filter->md_mgmt_filter &
1127 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1128 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1129 			MD, MGMT, 1101,
1130 			(htt_tlv_filter->md_mgmt_filter &
1131 			FILTER_MGMT_ACTION) ? 1 : 0);
1132 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1133 			MD, MGMT, 1110,
1134 			(htt_tlv_filter->md_mgmt_filter &
1135 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1136 	}
1137 
1138 	if (htt_tlv_filter->enable_mo) {
1139 		/* TYPE: MGMT */
1140 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1141 			MO, MGMT, 1010,
1142 			(htt_tlv_filter->mo_mgmt_filter &
1143 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1144 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1145 			MO, MGMT, 1011,
1146 			(htt_tlv_filter->mo_mgmt_filter &
1147 			FILTER_MGMT_AUTH) ? 1 : 0);
1148 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1149 			MO, MGMT, 1100,
1150 			(htt_tlv_filter->mo_mgmt_filter &
1151 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1152 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1153 			MO, MGMT, 1101,
1154 			(htt_tlv_filter->mo_mgmt_filter &
1155 			FILTER_MGMT_ACTION) ? 1 : 0);
1156 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1157 			MO, MGMT, 1110,
1158 			(htt_tlv_filter->mo_mgmt_filter &
1159 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1160 		/* reserved*/
1161 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1162 			MGMT, 1111,
1163 			(htt_tlv_filter->mo_mgmt_filter &
1164 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1165 	}
1166 
1167 	/* word 4 */
1168 	msg_word++;
1169 	*msg_word = 0;
1170 
1171 	if (htt_tlv_filter->enable_fp) {
1172 		/* TYPE: CTRL */
1173 		/* reserved */
1174 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1175 			CTRL, 0000,
1176 			(htt_tlv_filter->fp_ctrl_filter &
1177 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1178 		/* reserved */
1179 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1180 			CTRL, 0001,
1181 			(htt_tlv_filter->fp_ctrl_filter &
1182 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1183 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1184 			CTRL, 0010,
1185 			(htt_tlv_filter->fp_ctrl_filter &
1186 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1187 		/* reserved */
1188 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1189 			CTRL, 0011,
1190 			(htt_tlv_filter->fp_ctrl_filter &
1191 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1192 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1193 			CTRL, 0100,
1194 			(htt_tlv_filter->fp_ctrl_filter &
1195 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1196 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1197 			CTRL, 0101,
1198 			(htt_tlv_filter->fp_ctrl_filter &
1199 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1200 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1201 			CTRL, 0110,
1202 			(htt_tlv_filter->fp_ctrl_filter &
1203 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1204 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1205 			CTRL, 0111,
1206 			(htt_tlv_filter->fp_ctrl_filter &
1207 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1208 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1209 			CTRL, 1000,
1210 			(htt_tlv_filter->fp_ctrl_filter &
1211 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1212 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1213 			CTRL, 1001,
1214 			(htt_tlv_filter->fp_ctrl_filter &
1215 			FILTER_CTRL_BA) ? 1 : 0);
1216 	}
1217 
1218 	if (htt_tlv_filter->enable_md) {
1219 		/* TYPE: CTRL */
1220 		/* reserved */
1221 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1222 			CTRL, 0000,
1223 			(htt_tlv_filter->md_ctrl_filter &
1224 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1225 		/* reserved */
1226 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1227 			CTRL, 0001,
1228 			(htt_tlv_filter->md_ctrl_filter &
1229 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1230 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1231 			CTRL, 0010,
1232 			(htt_tlv_filter->md_ctrl_filter &
1233 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1234 		/* reserved */
1235 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1236 			CTRL, 0011,
1237 			(htt_tlv_filter->md_ctrl_filter &
1238 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1239 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1240 			CTRL, 0100,
1241 			(htt_tlv_filter->md_ctrl_filter &
1242 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1243 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1244 			CTRL, 0101,
1245 			(htt_tlv_filter->md_ctrl_filter &
1246 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1247 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1248 			CTRL, 0110,
1249 			(htt_tlv_filter->md_ctrl_filter &
1250 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1251 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1252 			CTRL, 0111,
1253 			(htt_tlv_filter->md_ctrl_filter &
1254 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1255 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1256 			CTRL, 1000,
1257 			(htt_tlv_filter->md_ctrl_filter &
1258 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1259 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1260 			CTRL, 1001,
1261 			(htt_tlv_filter->md_ctrl_filter &
1262 			FILTER_CTRL_BA) ? 1 : 0);
1263 	}
1264 
1265 	if (htt_tlv_filter->enable_mo) {
1266 		/* TYPE: CTRL */
1267 		/* reserved */
1268 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1269 			CTRL, 0000,
1270 			(htt_tlv_filter->mo_ctrl_filter &
1271 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1272 		/* reserved */
1273 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1274 			CTRL, 0001,
1275 			(htt_tlv_filter->mo_ctrl_filter &
1276 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1277 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1278 			CTRL, 0010,
1279 			(htt_tlv_filter->mo_ctrl_filter &
1280 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1281 		/* reserved */
1282 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1283 			CTRL, 0011,
1284 			(htt_tlv_filter->mo_ctrl_filter &
1285 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1286 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1287 			CTRL, 0100,
1288 			(htt_tlv_filter->mo_ctrl_filter &
1289 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1290 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1291 			CTRL, 0101,
1292 			(htt_tlv_filter->mo_ctrl_filter &
1293 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1294 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1295 			CTRL, 0110,
1296 			(htt_tlv_filter->mo_ctrl_filter &
1297 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1298 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1299 			CTRL, 0111,
1300 			(htt_tlv_filter->mo_ctrl_filter &
1301 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1302 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1303 			CTRL, 1000,
1304 			(htt_tlv_filter->mo_ctrl_filter &
1305 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1306 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1307 			CTRL, 1001,
1308 			(htt_tlv_filter->mo_ctrl_filter &
1309 			FILTER_CTRL_BA) ? 1 : 0);
1310 	}
1311 
1312 	/* word 5 */
1313 	msg_word++;
1314 	*msg_word = 0;
1315 	if (htt_tlv_filter->enable_fp) {
1316 		/* TYPE: CTRL */
1317 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1318 			CTRL, 1010,
1319 			(htt_tlv_filter->fp_ctrl_filter &
1320 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1321 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1322 			CTRL, 1011,
1323 			(htt_tlv_filter->fp_ctrl_filter &
1324 			FILTER_CTRL_RTS) ? 1 : 0);
1325 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1326 			CTRL, 1100,
1327 			(htt_tlv_filter->fp_ctrl_filter &
1328 			FILTER_CTRL_CTS) ? 1 : 0);
1329 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1330 			CTRL, 1101,
1331 			(htt_tlv_filter->fp_ctrl_filter &
1332 			FILTER_CTRL_ACK) ? 1 : 0);
1333 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1334 			CTRL, 1110,
1335 			(htt_tlv_filter->fp_ctrl_filter &
1336 			FILTER_CTRL_CFEND) ? 1 : 0);
1337 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1338 			CTRL, 1111,
1339 			(htt_tlv_filter->fp_ctrl_filter &
1340 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1341 		/* TYPE: DATA */
1342 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1343 			DATA, MCAST,
1344 			(htt_tlv_filter->fp_data_filter &
1345 			FILTER_DATA_MCAST) ? 1 : 0);
1346 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1347 			DATA, UCAST,
1348 			(htt_tlv_filter->fp_data_filter &
1349 			FILTER_DATA_UCAST) ? 1 : 0);
1350 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1351 			DATA, NULL,
1352 			(htt_tlv_filter->fp_data_filter &
1353 			FILTER_DATA_NULL) ? 1 : 0);
1354 	}
1355 
1356 	if (htt_tlv_filter->enable_md) {
1357 		/* TYPE: CTRL */
1358 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1359 			CTRL, 1010,
1360 			(htt_tlv_filter->md_ctrl_filter &
1361 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1362 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1363 			CTRL, 1011,
1364 			(htt_tlv_filter->md_ctrl_filter &
1365 			FILTER_CTRL_RTS) ? 1 : 0);
1366 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1367 			CTRL, 1100,
1368 			(htt_tlv_filter->md_ctrl_filter &
1369 			FILTER_CTRL_CTS) ? 1 : 0);
1370 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1371 			CTRL, 1101,
1372 			(htt_tlv_filter->md_ctrl_filter &
1373 			FILTER_CTRL_ACK) ? 1 : 0);
1374 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1375 			CTRL, 1110,
1376 			(htt_tlv_filter->md_ctrl_filter &
1377 			FILTER_CTRL_CFEND) ? 1 : 0);
1378 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1379 			CTRL, 1111,
1380 			(htt_tlv_filter->md_ctrl_filter &
1381 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1382 		/* TYPE: DATA */
1383 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1384 			DATA, MCAST,
1385 			(htt_tlv_filter->md_data_filter &
1386 			FILTER_DATA_MCAST) ? 1 : 0);
1387 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1388 			DATA, UCAST,
1389 			(htt_tlv_filter->md_data_filter &
1390 			FILTER_DATA_UCAST) ? 1 : 0);
1391 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1392 			DATA, NULL,
1393 			(htt_tlv_filter->md_data_filter &
1394 			FILTER_DATA_NULL) ? 1 : 0);
1395 	}
1396 
1397 	if (htt_tlv_filter->enable_mo) {
1398 		/* TYPE: CTRL */
1399 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1400 			CTRL, 1010,
1401 			(htt_tlv_filter->mo_ctrl_filter &
1402 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1403 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1404 			CTRL, 1011,
1405 			(htt_tlv_filter->mo_ctrl_filter &
1406 			FILTER_CTRL_RTS) ? 1 : 0);
1407 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1408 			CTRL, 1100,
1409 			(htt_tlv_filter->mo_ctrl_filter &
1410 			FILTER_CTRL_CTS) ? 1 : 0);
1411 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1412 			CTRL, 1101,
1413 			(htt_tlv_filter->mo_ctrl_filter &
1414 			FILTER_CTRL_ACK) ? 1 : 0);
1415 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1416 			CTRL, 1110,
1417 			(htt_tlv_filter->mo_ctrl_filter &
1418 			FILTER_CTRL_CFEND) ? 1 : 0);
1419 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1420 			CTRL, 1111,
1421 			(htt_tlv_filter->mo_ctrl_filter &
1422 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1423 		/* TYPE: DATA */
1424 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1425 			DATA, MCAST,
1426 			(htt_tlv_filter->mo_data_filter &
1427 			FILTER_DATA_MCAST) ? 1 : 0);
1428 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1429 			DATA, UCAST,
1430 			(htt_tlv_filter->mo_data_filter &
1431 			FILTER_DATA_UCAST) ? 1 : 0);
1432 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1433 			DATA, NULL,
1434 			(htt_tlv_filter->mo_data_filter &
1435 			FILTER_DATA_NULL) ? 1 : 0);
1436 	}
1437 
1438 	/* word 6 */
1439 	msg_word++;
1440 	*msg_word = 0;
1441 	tlv_filter = 0;
1442 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1443 		htt_tlv_filter->mpdu_start);
1444 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1445 		htt_tlv_filter->msdu_start);
1446 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1447 		htt_tlv_filter->packet);
1448 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1449 		htt_tlv_filter->msdu_end);
1450 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1451 		htt_tlv_filter->mpdu_end);
1452 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1453 		htt_tlv_filter->packet_header);
1454 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1455 		htt_tlv_filter->attention);
1456 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1457 		htt_tlv_filter->ppdu_start);
1458 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1459 		htt_tlv_filter->ppdu_end);
1460 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1461 		htt_tlv_filter->ppdu_end_user_stats);
1462 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1463 		PPDU_END_USER_STATS_EXT,
1464 		htt_tlv_filter->ppdu_end_user_stats_ext);
1465 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1466 		htt_tlv_filter->ppdu_end_status_done);
1467 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1468 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1469 		 htt_tlv_filter->header_per_msdu);
1470 
1471 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1472 
1473 	/* "response_required" field should be set if a HTT response message is
1474 	 * required after setting up the ring.
1475 	 */
1476 	pkt = htt_htc_pkt_alloc(soc);
1477 	if (!pkt)
1478 		goto fail1;
1479 
1480 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1481 
1482 	SET_HTC_PACKET_INFO_TX(
1483 		&pkt->htc_pkt,
1484 		dp_htt_h2t_send_complete_free_netbuf,
1485 		qdf_nbuf_data(htt_msg),
1486 		qdf_nbuf_len(htt_msg),
1487 		soc->htc_endpoint,
1488 		1); /* tag - not relevant here */
1489 
1490 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1491 	DP_HTT_SEND_HTC_PKT(soc, pkt);
1492 	return QDF_STATUS_SUCCESS;
1493 
1494 fail1:
1495 	qdf_nbuf_free(htt_msg);
1496 fail0:
1497 	return QDF_STATUS_E_FAILURE;
1498 }
1499 
1500 #if defined(CONFIG_WIN) && WDI_EVENT_ENABLE
1501 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1502 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1503 
1504 {
1505 	uint32_t pdev_id;
1506 	uint32_t *msg_word = NULL;
1507 	uint32_t msg_remain_len = 0;
1508 
1509 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1510 
1511 	/*COOKIE MSB*/
1512 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1513 
1514 	/* stats message length + 16 size of HTT header*/
1515 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1516 				(uint32_t)DP_EXT_MSG_LENGTH);
1517 
1518 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1519 			msg_word,  msg_remain_len,
1520 			WDI_NO_VAL, pdev_id);
1521 
1522 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1523 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1524 	}
1525 	/* Need to be freed here as WDI handler will
1526 	 * make a copy of pkt to send data to application
1527 	 */
1528 	qdf_nbuf_free(htt_msg);
1529 	return QDF_STATUS_SUCCESS;
1530 }
1531 #else
1532 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1533 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1534 {
1535 	return QDF_STATUS_E_NOSUPPORT;
1536 }
1537 #endif
1538 
1539 /**
1540  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1541  * @htt_stats: htt stats info
1542  *
1543  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1544  * contains sub messages which are identified by a TLV header.
1545  * In this function we will process the stream of T2H messages and read all the
1546  * TLV contained in the message.
1547  *
1548  * THe following cases have been taken care of
1549  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1550  *		In this case the buffer will contain multiple tlvs.
1551  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1552  *		Only one tlv will be contained in the HTT message and this tag
1553  *		will extend onto the next buffer.
1554  * Case 3: When the buffer is the continuation of the previous message
1555  * Case 4: tlv length is 0. which will indicate the end of message
1556  *
1557  * return: void
1558  */
1559 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
1560 					struct dp_soc *soc)
1561 {
1562 	htt_tlv_tag_t tlv_type = 0xff;
1563 	qdf_nbuf_t htt_msg = NULL;
1564 	uint32_t *msg_word;
1565 	uint8_t *tlv_buf_head = NULL;
1566 	uint8_t *tlv_buf_tail = NULL;
1567 	uint32_t msg_remain_len = 0;
1568 	uint32_t tlv_remain_len = 0;
1569 	uint32_t *tlv_start;
1570 	int cookie_val;
1571 	int cookie_msb;
1572 	int pdev_id;
1573 	bool copy_stats = false;
1574 	struct dp_pdev *pdev;
1575 
1576 	/* Process node in the HTT message queue */
1577 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1578 		!= NULL) {
1579 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1580 		cookie_val = *(msg_word + 1);
1581 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
1582 					*(msg_word +
1583 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
1584 
1585 		if (cookie_val) {
1586 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
1587 					== QDF_STATUS_SUCCESS) {
1588 				continue;
1589 			}
1590 		}
1591 
1592 		cookie_msb = *(msg_word + 2);
1593 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1594 		pdev = soc->pdev_list[pdev_id];
1595 
1596 		if (cookie_msb >> 2) {
1597 			copy_stats = true;
1598 		}
1599 
1600 		/* read 5th word */
1601 		msg_word = msg_word + 4;
1602 		msg_remain_len = qdf_min(htt_stats->msg_len,
1603 				(uint32_t) DP_EXT_MSG_LENGTH);
1604 		/* Keep processing the node till node length is 0 */
1605 		while (msg_remain_len) {
1606 			/*
1607 			 * if message is not a continuation of previous message
1608 			 * read the tlv type and tlv length
1609 			 */
1610 			if (!tlv_buf_head) {
1611 				tlv_type = HTT_STATS_TLV_TAG_GET(
1612 						*msg_word);
1613 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
1614 						*msg_word);
1615 			}
1616 
1617 			if (tlv_remain_len == 0) {
1618 				msg_remain_len = 0;
1619 
1620 				if (tlv_buf_head) {
1621 					qdf_mem_free(tlv_buf_head);
1622 					tlv_buf_head = NULL;
1623 					tlv_buf_tail = NULL;
1624 				}
1625 
1626 				goto error;
1627 			}
1628 
1629 			if (!tlv_buf_head)
1630 				tlv_remain_len += HTT_TLV_HDR_LEN;
1631 
1632 			if ((tlv_remain_len <= msg_remain_len)) {
1633 				/* Case 3 */
1634 				if (tlv_buf_head) {
1635 					qdf_mem_copy(tlv_buf_tail,
1636 							(uint8_t *)msg_word,
1637 							tlv_remain_len);
1638 					tlv_start = (uint32_t *)tlv_buf_head;
1639 				} else {
1640 					/* Case 1 */
1641 					tlv_start = msg_word;
1642 				}
1643 
1644 				if (copy_stats)
1645 					dp_htt_stats_copy_tag(pdev, tlv_type, tlv_start);
1646 				else
1647 					dp_htt_stats_print_tag(tlv_type, tlv_start);
1648 
1649 				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
1650 				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
1651 					dp_peer_update_inactive_time(pdev,
1652 								     tlv_type,
1653 								     tlv_start);
1654 
1655 				msg_remain_len -= tlv_remain_len;
1656 
1657 				msg_word = (uint32_t *)
1658 					(((uint8_t *)msg_word) +
1659 					tlv_remain_len);
1660 
1661 				tlv_remain_len = 0;
1662 
1663 				if (tlv_buf_head) {
1664 					qdf_mem_free(tlv_buf_head);
1665 					tlv_buf_head = NULL;
1666 					tlv_buf_tail = NULL;
1667 				}
1668 
1669 			} else { /* tlv_remain_len > msg_remain_len */
1670 				/* Case 2 & 3 */
1671 				if (!tlv_buf_head) {
1672 					tlv_buf_head = qdf_mem_malloc(
1673 							tlv_remain_len);
1674 
1675 					if (!tlv_buf_head) {
1676 						QDF_TRACE(QDF_MODULE_ID_TXRX,
1677 								QDF_TRACE_LEVEL_ERROR,
1678 								"Alloc failed");
1679 						goto error;
1680 					}
1681 
1682 					tlv_buf_tail = tlv_buf_head;
1683 				}
1684 
1685 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
1686 						msg_remain_len);
1687 				tlv_remain_len -= msg_remain_len;
1688 				tlv_buf_tail += msg_remain_len;
1689 			}
1690 		}
1691 
1692 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1693 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1694 		}
1695 
1696 		qdf_nbuf_free(htt_msg);
1697 	}
1698 	return;
1699 
1700 error:
1701 	qdf_nbuf_free(htt_msg);
1702 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1703 			!= NULL)
1704 		qdf_nbuf_free(htt_msg);
1705 }
1706 
1707 void htt_t2h_stats_handler(void *context)
1708 {
1709 	struct dp_soc *soc = (struct dp_soc *)context;
1710 	struct htt_stats_context htt_stats;
1711 	uint32_t *msg_word;
1712 	qdf_nbuf_t htt_msg = NULL;
1713 	uint8_t done;
1714 	uint8_t rem_stats;
1715 
1716 	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
1717 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1718 			"soc: 0x%pK, init_done: %d", soc,
1719 			qdf_atomic_read(&soc->cmn_init_done));
1720 		return;
1721 	}
1722 
1723 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1724 	qdf_nbuf_queue_init(&htt_stats.msg);
1725 
1726 	/* pull one completed stats from soc->htt_stats_msg and process */
1727 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1728 	if (!soc->htt_stats.num_stats) {
1729 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1730 		return;
1731 	}
1732 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1733 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1734 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1735 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1736 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1737 		/*
1738 		 * Done bit signifies that this is the last T2H buffer in the
1739 		 * stream of HTT EXT STATS message
1740 		 */
1741 		if (done)
1742 			break;
1743 	}
1744 	rem_stats = --soc->htt_stats.num_stats;
1745 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1746 
1747 	dp_process_htt_stat_msg(&htt_stats, soc);
1748 	/* If there are more stats to process, schedule stats work again */
1749 	if (rem_stats)
1750 		qdf_sched_work(0, &soc->htt_stats.work);
1751 }
1752 
1753 /*
1754  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1755  * if a new peer id arrives in a PPDU
1756  * pdev: DP pdev handle
1757  * @peer_id : peer unique identifier
1758  * @ppdu_info: per ppdu tlv structure
1759  *
1760  * return:user index to be populated
1761  */
1762 #ifdef FEATURE_PERPKT_INFO
1763 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
1764 						uint16_t peer_id,
1765 						struct ppdu_info *ppdu_info)
1766 {
1767 	uint8_t user_index = 0;
1768 	struct cdp_tx_completion_ppdu *ppdu_desc;
1769 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1770 
1771 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1772 
1773 	while ((user_index + 1) <= ppdu_info->last_user) {
1774 		ppdu_user_desc = &ppdu_desc->user[user_index];
1775 		if (ppdu_user_desc->peer_id != peer_id) {
1776 			user_index++;
1777 			continue;
1778 		} else {
1779 			/* Max users possible is 8 so user array index should
1780 			 * not exceed 7
1781 			 */
1782 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
1783 			return user_index;
1784 		}
1785 	}
1786 
1787 	ppdu_info->last_user++;
1788 	/* Max users possible is 8 so last user should not exceed 8 */
1789 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
1790 	return ppdu_info->last_user - 1;
1791 }
1792 
1793 /*
1794  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
1795  * pdev: DP pdev handle
1796  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
1797  * @ppdu_info: per ppdu tlv structure
1798  *
1799  * return:void
1800  */
1801 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
1802 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
1803 {
1804 	uint16_t frame_type;
1805 	uint16_t freq;
1806 	struct dp_soc *soc = NULL;
1807 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
1808 
1809 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1810 
1811 	tag_buf += 2;
1812 	ppdu_desc->num_users =
1813 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
1814 	tag_buf++;
1815 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
1816 
1817 	if ((frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) ||
1818 			(frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU))
1819 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
1820 	else if ((frame_type == HTT_STATS_FTYPE_SGEN_MU_BAR) ||
1821 		 (frame_type == HTT_STATS_FTYPE_SGEN_BAR))
1822 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
1823 	else
1824 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
1825 
1826 	tag_buf += 2;
1827 	ppdu_desc->tx_duration = *tag_buf;
1828 	tag_buf += 3;
1829 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
1830 
1831 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
1832 					ppdu_desc->tx_duration;
1833 	/* Ack time stamp is same as end time stamp*/
1834 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
1835 
1836 	tag_buf++;
1837 
1838 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
1839 	if (freq != ppdu_desc->channel) {
1840 		soc = pdev->soc;
1841 		ppdu_desc->channel = freq;
1842 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
1843 			pdev->operating_channel =
1844 		soc->cdp_soc.ol_ops->freq_to_channel(pdev->ctrl_pdev, freq);
1845 	}
1846 
1847 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
1848 }
1849 
1850 /*
1851  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
1852  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
1853  * @ppdu_info: per ppdu tlv structure
1854  *
1855  * return:void
1856  */
1857 static void dp_process_ppdu_stats_user_common_tlv(
1858 		struct dp_pdev *pdev, uint32_t *tag_buf,
1859 		struct ppdu_info *ppdu_info)
1860 {
1861 	uint16_t peer_id;
1862 	struct cdp_tx_completion_ppdu *ppdu_desc;
1863 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1864 	uint8_t curr_user_index = 0;
1865 
1866 	ppdu_desc =
1867 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1868 
1869 	tag_buf++;
1870 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
1871 
1872 	curr_user_index =
1873 		dp_get_ppdu_info_user_index(pdev,
1874 					    peer_id, ppdu_info);
1875 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1876 
1877 	if (peer_id == DP_SCAN_PEER_ID) {
1878 		ppdu_desc->vdev_id =
1879 			HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
1880 	} else {
1881 		if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
1882 			return;
1883 	}
1884 
1885 	ppdu_user_desc->peer_id = peer_id;
1886 
1887 	tag_buf++;
1888 
1889 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
1890 		ppdu_user_desc->delayed_ba = 1;
1891 	}
1892 
1893 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
1894 		ppdu_user_desc->is_mcast = true;
1895 		ppdu_user_desc->mpdu_tried_mcast =
1896 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1897 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
1898 	} else {
1899 		ppdu_user_desc->mpdu_tried_ucast =
1900 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1901 	}
1902 
1903 	tag_buf++;
1904 
1905 	ppdu_user_desc->qos_ctrl =
1906 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
1907 	ppdu_user_desc->frame_ctrl =
1908 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
1909 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
1910 
1911 	if (ppdu_user_desc->delayed_ba) {
1912 		ppdu_user_desc->mpdu_success = 0;
1913 		ppdu_user_desc->mpdu_tried_mcast = 0;
1914 		ppdu_user_desc->mpdu_tried_ucast = 0;
1915 	}
1916 }
1917 
1918 
1919 /**
1920  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
1921  * @pdev: DP pdev handle
1922  * @tag_buf: T2H message buffer carrying the user rate TLV
1923  * @ppdu_info: per ppdu tlv structure
1924  *
1925  * return:void
1926  */
1927 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
1928 		uint32_t *tag_buf,
1929 		struct ppdu_info *ppdu_info)
1930 {
1931 	uint16_t peer_id;
1932 	struct dp_peer *peer;
1933 	struct cdp_tx_completion_ppdu *ppdu_desc;
1934 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1935 	uint8_t curr_user_index = 0;
1936 	struct dp_vdev *vdev;
1937 
1938 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1939 
1940 	tag_buf++;
1941 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
1942 
1943 	curr_user_index =
1944 		dp_get_ppdu_info_user_index(pdev,
1945 					    peer_id, ppdu_info);
1946 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1947 	if (peer_id == DP_SCAN_PEER_ID) {
1948 		vdev =
1949 		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
1950 							  ppdu_desc->vdev_id);
1951 		if (!vdev)
1952 			return;
1953 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
1954 			     DP_MAC_ADDR_LEN);
1955 	} else {
1956 		peer = dp_peer_find_by_id(pdev->soc, peer_id);
1957 		if (!peer)
1958 			return;
1959 		qdf_mem_copy(ppdu_user_desc->mac_addr,
1960 			     peer->mac_addr.raw, DP_MAC_ADDR_LEN);
1961 		dp_peer_unref_del_find_by_id(peer);
1962 	}
1963 
1964 	ppdu_user_desc->peer_id = peer_id;
1965 
1966 	ppdu_user_desc->tid =
1967 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
1968 
1969 	tag_buf += 1;
1970 
1971 	ppdu_user_desc->user_pos =
1972 		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
1973 	ppdu_user_desc->mu_group_id =
1974 		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
1975 
1976 	tag_buf += 1;
1977 
1978 	ppdu_user_desc->ru_start =
1979 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
1980 	ppdu_user_desc->ru_tones =
1981 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
1982 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
1983 
1984 	tag_buf += 2;
1985 
1986 	ppdu_user_desc->ppdu_type =
1987 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
1988 
1989 	tag_buf++;
1990 	ppdu_user_desc->tx_rate = *tag_buf;
1991 
1992 	ppdu_user_desc->ltf_size =
1993 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
1994 	ppdu_user_desc->stbc =
1995 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
1996 	ppdu_user_desc->he_re =
1997 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
1998 	ppdu_user_desc->txbf =
1999 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
2000 	ppdu_user_desc->bw =
2001 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
2002 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
2003 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
2004 	ppdu_user_desc->preamble =
2005 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
2006 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
2007 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
2008 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
2009 }
2010 
2011 /*
2012  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
2013  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2014  * pdev: DP PDEV handle
2015  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2016  * @ppdu_info: per ppdu tlv structure
2017  *
2018  * return:void
2019  */
2020 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2021 		struct dp_pdev *pdev, uint32_t *tag_buf,
2022 		struct ppdu_info *ppdu_info)
2023 {
2024 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
2025 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
2026 
2027 	struct cdp_tx_completion_ppdu *ppdu_desc;
2028 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2029 	uint8_t curr_user_index = 0;
2030 	uint16_t peer_id;
2031 
2032 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2033 
2034 	tag_buf++;
2035 
2036 	peer_id =
2037 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2038 
2039 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2040 		return;
2041 
2042 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2043 
2044 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2045 	ppdu_user_desc->peer_id = peer_id;
2046 
2047 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2048 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2049 					CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2050 }
2051 
2052 /*
2053  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
2054  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2055  * soc: DP SOC handle
2056  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2057  * @ppdu_info: per ppdu tlv structure
2058  *
2059  * return:void
2060  */
2061 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2062 		struct dp_pdev *pdev, uint32_t *tag_buf,
2063 		struct ppdu_info *ppdu_info)
2064 {
2065 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
2066 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
2067 
2068 	struct cdp_tx_completion_ppdu *ppdu_desc;
2069 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2070 	uint8_t curr_user_index = 0;
2071 	uint16_t peer_id;
2072 
2073 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2074 
2075 	tag_buf++;
2076 
2077 	peer_id =
2078 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2079 
2080 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2081 		return;
2082 
2083 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2084 
2085 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2086 	ppdu_user_desc->peer_id = peer_id;
2087 
2088 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2089 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2090 					CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2091 }
2092 
2093 /*
2094  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
2095  * htt_ppdu_stats_user_cmpltn_common_tlv
2096  * soc: DP SOC handle
2097  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
2098  * @ppdu_info: per ppdu tlv structure
2099  *
2100  * return:void
2101  */
2102 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
2103 		struct dp_pdev *pdev, uint32_t *tag_buf,
2104 		struct ppdu_info *ppdu_info)
2105 {
2106 	uint16_t peer_id;
2107 	struct cdp_tx_completion_ppdu *ppdu_desc;
2108 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2109 	uint8_t curr_user_index = 0;
2110 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
2111 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
2112 
2113 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2114 
2115 	tag_buf++;
2116 	peer_id =
2117 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
2118 
2119 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2120 		return;
2121 
2122 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2123 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2124 	ppdu_user_desc->peer_id = peer_id;
2125 
2126 	ppdu_user_desc->completion_status =
2127 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
2128 				*tag_buf);
2129 
2130 	ppdu_user_desc->tid =
2131 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
2132 
2133 
2134 	tag_buf++;
2135 	if (qdf_likely(ppdu_user_desc->completion_status ==
2136 			HTT_PPDU_STATS_USER_STATUS_OK)) {
2137 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
2138 		ppdu_user_desc->ack_rssi_valid = 1;
2139 	} else {
2140 		ppdu_user_desc->ack_rssi_valid = 0;
2141 	}
2142 
2143 	tag_buf++;
2144 
2145 	ppdu_user_desc->mpdu_success =
2146 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
2147 
2148 	tag_buf++;
2149 
2150 	ppdu_user_desc->long_retries =
2151 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2152 
2153 	ppdu_user_desc->short_retries =
2154 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
2155 	ppdu_user_desc->retry_msdus =
2156 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
2157 
2158 	ppdu_user_desc->is_ampdu =
2159 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
2160 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
2161 
2162 }
2163 
2164 /*
2165  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2166  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2167  * pdev: DP PDEV handle
2168  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2169  * @ppdu_info: per ppdu tlv structure
2170  *
2171  * return:void
2172  */
2173 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2174 		struct dp_pdev *pdev, uint32_t *tag_buf,
2175 		struct ppdu_info *ppdu_info)
2176 {
2177 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2178 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2179 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2180 	struct cdp_tx_completion_ppdu *ppdu_desc;
2181 	uint8_t curr_user_index = 0;
2182 	uint16_t peer_id;
2183 
2184 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2185 
2186 	tag_buf++;
2187 
2188 	peer_id =
2189 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2190 
2191 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2192 		return;
2193 
2194 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2195 
2196 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2197 	ppdu_user_desc->peer_id = peer_id;
2198 
2199 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2200 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2201 			CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2202 }
2203 
2204 /*
2205  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2206  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2207  * pdev: DP PDEV handle
2208  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2209  * @ppdu_info: per ppdu tlv structure
2210  *
2211  * return:void
2212  */
2213 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2214 		struct dp_pdev *pdev, uint32_t *tag_buf,
2215 		struct ppdu_info *ppdu_info)
2216 {
2217 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2218 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2219 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2220 	struct cdp_tx_completion_ppdu *ppdu_desc;
2221 	uint8_t curr_user_index = 0;
2222 	uint16_t peer_id;
2223 
2224 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2225 
2226 	tag_buf++;
2227 
2228 	peer_id =
2229 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2230 
2231 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2232 		return;
2233 
2234 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2235 
2236 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2237 	ppdu_user_desc->peer_id = peer_id;
2238 
2239 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2240 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2241 			CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2242 }
2243 
2244 /*
2245  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2246  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2247  * pdev: DP PDE handle
2248  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2249  * @ppdu_info: per ppdu tlv structure
2250  *
2251  * return:void
2252  */
2253 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2254 		struct dp_pdev *pdev, uint32_t *tag_buf,
2255 		struct ppdu_info *ppdu_info)
2256 {
2257 	uint16_t peer_id;
2258 	struct cdp_tx_completion_ppdu *ppdu_desc;
2259 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2260 	uint8_t curr_user_index = 0;
2261 
2262 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2263 
2264 	tag_buf += 2;
2265 	peer_id =
2266 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2267 
2268 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2269 		return;
2270 
2271 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2272 
2273 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2274 	ppdu_user_desc->peer_id = peer_id;
2275 
2276 	tag_buf++;
2277 	ppdu_user_desc->tid =
2278 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(*tag_buf);
2279 	ppdu_user_desc->num_mpdu =
2280 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2281 
2282 	ppdu_user_desc->num_msdu =
2283 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2284 
2285 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2286 
2287 	tag_buf += 2;
2288 	ppdu_user_desc->success_bytes = *tag_buf;
2289 
2290 }
2291 
2292 /*
2293  * dp_process_ppdu_stats_user_common_array_tlv: Process
2294  * htt_ppdu_stats_user_common_array_tlv
2295  * pdev: DP PDEV handle
2296  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2297  * @ppdu_info: per ppdu tlv structure
2298  *
2299  * return:void
2300  */
2301 static void dp_process_ppdu_stats_user_common_array_tlv(
2302 		struct dp_pdev *pdev, uint32_t *tag_buf,
2303 		struct ppdu_info *ppdu_info)
2304 {
2305 	uint32_t peer_id;
2306 	struct cdp_tx_completion_ppdu *ppdu_desc;
2307 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2308 	uint8_t curr_user_index = 0;
2309 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2310 
2311 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2312 
2313 	tag_buf++;
2314 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2315 	tag_buf += 3;
2316 	peer_id =
2317 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2318 
2319 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
2320 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2321 			"Invalid peer");
2322 		return;
2323 	}
2324 
2325 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2326 
2327 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2328 
2329 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2330 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2331 
2332 	tag_buf++;
2333 
2334 	ppdu_user_desc->success_msdus =
2335 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2336 	ppdu_user_desc->retry_bytes =
2337 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2338 	tag_buf++;
2339 	ppdu_user_desc->failed_msdus =
2340 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2341 }
2342 
2343 /*
2344  * dp_process_ppdu_stats_flush_tlv: Process
2345  * htt_ppdu_stats_flush_tlv
2346  * @pdev: DP PDEV handle
2347  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2348  *
2349  * return:void
2350  */
2351 static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2352 						uint32_t *tag_buf)
2353 {
2354 	uint32_t peer_id;
2355 	uint32_t drop_reason;
2356 	uint8_t tid;
2357 	uint32_t num_msdu;
2358 	struct dp_peer *peer;
2359 
2360 	tag_buf++;
2361 	drop_reason = *tag_buf;
2362 
2363 	tag_buf++;
2364 	num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2365 
2366 	tag_buf++;
2367 	peer_id =
2368 		HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2369 
2370 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2371 	if (!peer)
2372 		return;
2373 
2374 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2375 
2376 	if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2377 		DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2378 					num_msdu);
2379 	}
2380 
2381 	dp_peer_unref_del_find_by_id(peer);
2382 }
2383 
2384 /*
2385  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2386  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2387  * @pdev: DP PDEV handle
2388  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2389  * @length: tlv_length
2390  *
2391  * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller
2392  */
2393 static QDF_STATUS
2394 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2395 					      qdf_nbuf_t tag_buf,
2396 					      uint32_t ppdu_id)
2397 {
2398 	uint32_t *nbuf_ptr;
2399 	uint8_t trim_size;
2400 
2401 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
2402 	    (!pdev->bpr_enable))
2403 		return QDF_STATUS_SUCCESS;
2404 
2405 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
2406 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2407 		      qdf_nbuf_data(tag_buf));
2408 
2409 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2410 		return QDF_STATUS_SUCCESS;
2411 
2412 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2413 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
2414 
2415 	nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(
2416 				tag_buf, sizeof(ppdu_id));
2417 	*nbuf_ptr = ppdu_id;
2418 
2419 	if (pdev->bpr_enable) {
2420 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2421 				     tag_buf, HTT_INVALID_PEER,
2422 				     WDI_NO_VAL, pdev->pdev_id);
2423 	}
2424 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2425 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2426 				     tag_buf, HTT_INVALID_PEER,
2427 				     WDI_NO_VAL, pdev->pdev_id);
2428 	}
2429 
2430 	return QDF_STATUS_E_ALREADY;
2431 }
2432 
2433 /**
2434  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
2435  * @pdev: DP pdev handle
2436  * @tag_buf: TLV buffer
2437  * @tlv_len: length of tlv
2438  * @ppdu_info: per ppdu tlv structure
2439  *
2440  * return: void
2441  */
2442 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
2443 		uint32_t tlv_len, struct ppdu_info *ppdu_info)
2444 {
2445 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2446 
2447 	switch (tlv_type) {
2448 	case HTT_PPDU_STATS_COMMON_TLV:
2449 		qdf_assert_always(tlv_len >=
2450 				sizeof(htt_ppdu_stats_common_tlv));
2451 		dp_process_ppdu_stats_common_tlv(pdev, tag_buf, ppdu_info);
2452 		break;
2453 	case HTT_PPDU_STATS_USR_COMMON_TLV:
2454 		qdf_assert_always(tlv_len >=
2455 				sizeof(htt_ppdu_stats_user_common_tlv));
2456 		dp_process_ppdu_stats_user_common_tlv(
2457 				pdev, tag_buf, ppdu_info);
2458 		break;
2459 	case HTT_PPDU_STATS_USR_RATE_TLV:
2460 		qdf_assert_always(tlv_len >=
2461 				sizeof(htt_ppdu_stats_user_rate_tlv));
2462 		dp_process_ppdu_stats_user_rate_tlv(pdev, tag_buf, ppdu_info);
2463 		break;
2464 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
2465 		qdf_assert_always(tlv_len >=
2466 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv));
2467 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2468 				pdev, tag_buf, ppdu_info);
2469 		break;
2470 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
2471 		qdf_assert_always(tlv_len >=
2472 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv));
2473 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2474 				pdev, tag_buf, ppdu_info);
2475 		break;
2476 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
2477 		qdf_assert_always(tlv_len >=
2478 				sizeof(htt_ppdu_stats_user_cmpltn_common_tlv));
2479 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
2480 				pdev, tag_buf, ppdu_info);
2481 		break;
2482 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
2483 		qdf_assert_always(tlv_len >=
2484 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv));
2485 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2486 				pdev, tag_buf, ppdu_info);
2487 		break;
2488 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
2489 		qdf_assert_always(tlv_len >=
2490 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv));
2491 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2492 				pdev, tag_buf, ppdu_info);
2493 		break;
2494 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
2495 		qdf_assert_always(tlv_len >=
2496 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv));
2497 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2498 				pdev, tag_buf, ppdu_info);
2499 		break;
2500 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
2501 		qdf_assert_always(tlv_len >=
2502 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v));
2503 		dp_process_ppdu_stats_user_common_array_tlv(
2504 				pdev, tag_buf, ppdu_info);
2505 		break;
2506 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
2507 		qdf_assert_always(tlv_len >=
2508 			sizeof(htt_ppdu_stats_flush_tlv));
2509 		dp_process_ppdu_stats_user_compltn_flush_tlv(
2510 				pdev, tag_buf);
2511 		break;
2512 	default:
2513 		break;
2514 	}
2515 }
2516 
2517 /**
2518  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
2519  * to upper layer
2520  * @pdev: DP pdev handle
2521  * @ppdu_info: per PPDU TLV descriptor
2522  *
2523  * return: void
2524  */
2525 static
2526 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
2527 			  struct ppdu_info *ppdu_info)
2528 {
2529 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2530 	struct dp_peer *peer = NULL;
2531 	qdf_nbuf_t nbuf;
2532 	uint16_t i;
2533 	uint32_t tlv_bitmap_expected;
2534 
2535 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2536 		qdf_nbuf_data(ppdu_info->nbuf);
2537 
2538 	ppdu_desc->num_users = ppdu_info->last_user;
2539 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2540 
2541 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
2542 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2543 		if (ppdu_info->is_ampdu)
2544 			tlv_bitmap_expected =
2545 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
2546 					ppdu_info->tlv_bitmap);
2547 	}
2548 	for (i = 0; i < ppdu_desc->num_users; i++) {
2549 
2550 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
2551 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
2552 
2553 		peer = dp_peer_find_by_id(pdev->soc,
2554 					  ppdu_desc->user[i].peer_id);
2555 		/**
2556 		 * This check is to make sure peer is not deleted
2557 		 * after processing the TLVs.
2558 		 */
2559 		if (!peer)
2560 			continue;
2561 
2562 		if (ppdu_info->tlv_bitmap != tlv_bitmap_expected) {
2563 			dp_peer_unref_del_find_by_id(peer);
2564 			continue;
2565 		}
2566 		if (ppdu_desc->user[i].tid < CDP_DATA_TID_MAX) {
2567 
2568 			dp_tx_stats_update(pdev->soc, peer,
2569 					&ppdu_desc->user[i],
2570 					ppdu_desc->ack_rssi);
2571 		}
2572 
2573 		dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
2574 		dp_peer_unref_del_find_by_id(peer);
2575 	}
2576 
2577 	/*
2578 	 * Remove from the list
2579 	 */
2580 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
2581 	nbuf = ppdu_info->nbuf;
2582 	pdev->list_depth--;
2583 	qdf_mem_free(ppdu_info);
2584 
2585 	qdf_assert_always(nbuf);
2586 
2587 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2588 		qdf_nbuf_data(nbuf);
2589 
2590 	/**
2591 	 * Deliver PPDU stats only for valid (acked) data frames if
2592 	 * sniffer mode is not enabled.
2593 	 * If sniffer mode is enabled, PPDU stats for all frames
2594 	 * including mgmt/control frames should be delivered to upper layer
2595 	 */
2596 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2597 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
2598 				nbuf, HTT_INVALID_PEER,
2599 				WDI_NO_VAL, pdev->pdev_id);
2600 	} else {
2601 		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
2602 				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
2603 
2604 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
2605 					pdev->soc, nbuf, HTT_INVALID_PEER,
2606 					WDI_NO_VAL, pdev->pdev_id);
2607 		} else
2608 			qdf_nbuf_free(nbuf);
2609 	}
2610 	return;
2611 }
2612 
2613 /**
2614  * dp_get_ppdu_desc(): Function to allocate new PPDU status
2615  * desc for new ppdu id
2616  * @pdev: DP pdev handle
2617  * @ppdu_id: PPDU unique identifier
2618  * @tlv_type: TLV type received
2619  *
2620  * return: ppdu_info per ppdu tlv structure
2621  */
2622 static
2623 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
2624 			uint8_t tlv_type)
2625 {
2626 	struct ppdu_info *ppdu_info = NULL;
2627 
2628 	/*
2629 	 * Find ppdu_id node exists or not
2630 	 */
2631 	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {
2632 
2633 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
2634 			break;
2635 		}
2636 	}
2637 
2638 	if (ppdu_info) {
2639 		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
2640 			/**
2641 			 * if we get tlv_type that is already been processed
2642 			 * for ppdu, that means we got a new ppdu with same
2643 			 * ppdu id. Hence Flush the older ppdu
2644 			 * for MUMIMO and OFDMA, In a PPDU we have
2645 			 * multiple user with same tlv types. tlv bitmap is
2646 			 * used to check whether SU or MU_MIMO/OFDMA
2647 			 */
2648 			if (!(ppdu_info->tlv_bitmap &
2649 			    (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
2650 				return ppdu_info;
2651 
2652 			dp_ppdu_desc_deliver(pdev, ppdu_info);
2653 		} else {
2654 			return ppdu_info;
2655 		}
2656 	}
2657 
2658 	/**
2659 	 * Flush the head ppdu descriptor if ppdu desc list reaches max
2660 	 * threshold
2661 	 */
2662 	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
2663 		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
2664 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2665 	}
2666 
2667 	/*
2668 	 * Allocate new ppdu_info node
2669 	 */
2670 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
2671 	if (!ppdu_info)
2672 		return NULL;
2673 
2674 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
2675 			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
2676 			TRUE);
2677 	if (!ppdu_info->nbuf) {
2678 		qdf_mem_free(ppdu_info);
2679 		return NULL;
2680 	}
2681 
2682 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
2683 			sizeof(struct cdp_tx_completion_ppdu));
2684 
2685 	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
2686 			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
2687 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2688 				"No tailroom for HTT PPDU");
2689 		qdf_nbuf_free(ppdu_info->nbuf);
2690 		ppdu_info->nbuf = NULL;
2691 		ppdu_info->last_user = 0;
2692 		qdf_mem_free(ppdu_info);
2693 		return NULL;
2694 	}
2695 
2696 	/**
2697 	 * No lock is needed because all PPDU TLVs are processed in
2698 	 * same context and this list is updated in same context
2699 	 */
2700 	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
2701 			ppdu_info_list_elem);
2702 	pdev->list_depth++;
2703 	return ppdu_info;
2704 }
2705 
2706 /**
2707  * dp_htt_process_tlv(): Function to process each PPDU TLVs
2708  * @pdev: DP pdev handle
2709  * @htt_t2h_msg: HTT target to host message
2710  *
2711  * return: ppdu_info per ppdu tlv structure
2712  */
2713 
2714 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
2715 		qdf_nbuf_t htt_t2h_msg)
2716 {
2717 	uint32_t length;
2718 	uint32_t ppdu_id;
2719 	uint8_t tlv_type;
2720 	uint32_t tlv_length, tlv_bitmap_expected;
2721 	uint8_t *tlv_buf;
2722 	struct ppdu_info *ppdu_info = NULL;
2723 
2724 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2725 
2726 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
2727 
2728 	msg_word = msg_word + 1;
2729 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
2730 
2731 
2732 	msg_word = msg_word + 3;
2733 	while (length > 0) {
2734 		tlv_buf = (uint8_t *)msg_word;
2735 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
2736 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
2737 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
2738 			pdev->stats.ppdu_stats_counter[tlv_type]++;
2739 
2740 		if (tlv_length == 0)
2741 			break;
2742 
2743 		tlv_length += HTT_TLV_HDR_LEN;
2744 
2745 		/**
2746 		 * Not allocating separate ppdu descriptor for MGMT Payload
2747 		 * TLV as this is sent as separate WDI indication and it
2748 		 * doesn't contain any ppdu information
2749 		 */
2750 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
2751 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
2752 			pdev->mgmtctrl_frm_info.mgmt_buf_len = tlv_length;
2753 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
2754 			msg_word =
2755 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2756 			length -= (tlv_length);
2757 			continue;
2758 		}
2759 
2760 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
2761 		if (!ppdu_info)
2762 			return NULL;
2763 		ppdu_info->ppdu_id = ppdu_id;
2764 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
2765 
2766 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
2767 
2768 		/**
2769 		 * Increment pdev level tlv count to monitor
2770 		 * missing TLVs
2771 		 */
2772 		pdev->tlv_count++;
2773 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
2774 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2775 		length -= (tlv_length);
2776 	}
2777 
2778 	if (!ppdu_info)
2779 		return NULL;
2780 
2781 	pdev->last_ppdu_id = ppdu_id;
2782 
2783 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
2784 
2785 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2786 		if (ppdu_info->is_ampdu)
2787 			tlv_bitmap_expected =
2788 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
2789 					ppdu_info->tlv_bitmap);
2790 	}
2791 
2792 	/**
2793 	 * Once all the TLVs for a given PPDU has been processed,
2794 	 * return PPDU status to be delivered to higher layer
2795 	 */
2796 	if (ppdu_info->tlv_bitmap != 0 &&
2797 	    ppdu_info->tlv_bitmap == tlv_bitmap_expected)
2798 		return ppdu_info;
2799 
2800 	return NULL;
2801 }
2802 #endif /* FEATURE_PERPKT_INFO */
2803 
2804 /**
2805  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
2806  * @soc: DP SOC handle
2807  * @pdev_id: pdev id
2808  * @htt_t2h_msg: HTT message nbuf
2809  *
2810  * return:void
2811  */
2812 #if defined(WDI_EVENT_ENABLE)
2813 #ifdef FEATURE_PERPKT_INFO
2814 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
2815 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
2816 {
2817 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
2818 	struct ppdu_info *ppdu_info = NULL;
2819 	bool free_buf = true;
2820 
2821 	if (!pdev)
2822 		return true;
2823 
2824 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
2825 	    !pdev->mcopy_mode && !pdev->bpr_enable)
2826 		return free_buf;
2827 
2828 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
2829 
2830 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
2831 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
2832 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
2833 		    QDF_STATUS_SUCCESS)
2834 			free_buf = false;
2835 
2836 		pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
2837 		pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
2838 		pdev->mgmtctrl_frm_info.ppdu_id = 0;
2839 	}
2840 
2841 	if (ppdu_info)
2842 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2843 
2844 	return free_buf;
2845 }
2846 #else
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	/* FEATURE_PERPKT_INFO compiled out: no processing; tell the
	 * caller to free the HTT message buffer.
	 */
	return true;
}
2852 #endif
2853 #endif
2854 
2855 /**
2856  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2857  * @soc: DP SOC handle
2858  * @htt_t2h_msg: HTT message nbuf
2859  *
2860  * return:void
2861  */
2862 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2863 		qdf_nbuf_t htt_t2h_msg)
2864 {
2865 	uint8_t done;
2866 	qdf_nbuf_t msg_copy;
2867 	uint32_t *msg_word;
2868 
2869 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2870 	msg_word = msg_word + 3;
2871 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2872 
2873 	/*
2874 	 * HTT EXT stats response comes as stream of TLVs which span over
2875 	 * multiple T2H messages.
2876 	 * The first message will carry length of the response.
2877 	 * For rest of the messages length will be zero.
2878 	 *
2879 	 * Clone the T2H message buffer and store it in a list to process
2880 	 * it later.
2881 	 *
2882 	 * The original T2H message buffers gets freed in the T2H HTT event
2883 	 * handler
2884 	 */
2885 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2886 
2887 	if (!msg_copy) {
2888 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2889 				"T2H messge clone failed for HTT EXT STATS");
2890 		goto error;
2891 	}
2892 
2893 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2894 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2895 	/*
2896 	 * Done bit signifies that this is the last T2H buffer in the stream of
2897 	 * HTT EXT STATS message
2898 	 */
2899 	if (done) {
2900 		soc->htt_stats.num_stats++;
2901 		qdf_sched_work(0, &soc->htt_stats.work);
2902 	}
2903 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2904 
2905 	return;
2906 
2907 error:
2908 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2909 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2910 			!= NULL) {
2911 		qdf_nbuf_free(msg_copy);
2912 	}
2913 	soc->htt_stats.num_stats = 0;
2914 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2915 	return;
2916 
2917 }
2918 
2919 /*
2920  * htt_soc_attach_target() - SOC level HTT setup
2921  * @htt_soc:	HTT SOC handle
2922  *
2923  * Return: 0 on success; error code on failure
2924  */
int htt_soc_attach_target(void *htt_soc)
{
	/* SOC-level HTT setup: send the HTT version request to the target */
	return htt_h2t_ver_req_msg((struct htt_soc *)htt_soc);
}
2931 
2932 
2933 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
2934 /*
2935  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
2936  * @htt_soc:	 HTT SOC handle
2937  * @msg_word:    Pointer to payload
2938  * @htt_t2h_msg: HTT msg nbuf
2939  *
2940  * Return: True if buffer should be freed by caller.
2941  */
2942 static bool
2943 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
2944 				uint32_t *msg_word,
2945 				qdf_nbuf_t htt_t2h_msg)
2946 {
2947 	u_int8_t pdev_id;
2948 	bool free_buf;
2949 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
2950 	dp_debug("received HTT_T2H_MSG_TYPE_PPDU_STATS_IND");
2951 	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
2952 	pdev_id = DP_HW2SW_MACID(pdev_id);
2953 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
2954 					      htt_t2h_msg);
2955 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
2956 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
2957 		pdev_id);
2958 	return free_buf;
2959 }
2960 #else
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
				uint32_t *msg_word,
				qdf_nbuf_t htt_t2h_msg)
{
	/* WDI events compiled out (or pktlog removed): no processing;
	 * tell the caller to free the HTT message buffer.
	 */
	return true;
}
2968 #endif
2969 
2970 #if defined(WDI_EVENT_ENABLE) && \
2971 	!defined(REMOVE_PKT_LOG)
2972 /*
2973  * dp_pktlog_msg_handler() - Pktlog msg handler
2974  * @htt_soc:	 HTT SOC handle
2975  * @msg_word:    Pointer to payload
2976  *
2977  * Return: None
2978  */
2979 static void
2980 dp_pktlog_msg_handler(struct htt_soc *soc,
2981 		      uint32_t *msg_word)
2982 {
2983 	uint8_t pdev_id;
2984 	uint32_t *pl_hdr;
2985 
2986 	dp_debug("received HTT_T2H_MSG_TYPE_PKTLOG");
2987 	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2988 	pdev_id = DP_HW2SW_MACID(pdev_id);
2989 	pl_hdr = (msg_word + 1);
2990 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2991 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2992 		pdev_id);
2993 }
2994 #else
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
	/* Pktlog compiled out (WDI disabled or REMOVE_PKT_LOG): no-op */
}
3000 #endif
3001 /*
3002  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
3003  * @context:	Opaque context (HTT SOC handle)
3004  * @pkt:	HTC packet
3005  */
3006 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
3007 {
3008 	struct htt_soc *soc = (struct htt_soc *) context;
3009 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
3010 	u_int32_t *msg_word;
3011 	enum htt_t2h_msg_type msg_type;
3012 	bool free_buf = true;
3013 
3014 	/* check for successful message reception */
3015 	if (pkt->Status != QDF_STATUS_SUCCESS) {
3016 		if (pkt->Status != QDF_STATUS_E_CANCELED)
3017 			soc->stats.htc_err_cnt++;
3018 
3019 		qdf_nbuf_free(htt_t2h_msg);
3020 		return;
3021 	}
3022 
3023 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
3024 
3025 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
3026 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3027 	switch (msg_type) {
3028 	case HTT_T2H_MSG_TYPE_PEER_MAP:
3029 		{
3030 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
3031 			u_int8_t *peer_mac_addr;
3032 			u_int16_t peer_id;
3033 			u_int16_t hw_peer_id;
3034 			u_int8_t vdev_id;
3035 			u_int8_t is_wds;
3036 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
3037 
3038 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
3039 			hw_peer_id =
3040 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
3041 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
3042 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
3043 				(u_int8_t *) (msg_word+1),
3044 				&mac_addr_deswizzle_buf[0]);
3045 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3046 				QDF_TRACE_LEVEL_INFO,
3047 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3048 				peer_id, vdev_id);
3049 
3050 			/*
3051 			 * check if peer already exists for this peer_id, if so
3052 			 * this peer map event is in response for a wds peer add
3053 			 * wmi command sent during wds source port learning.
3054 			 * in this case just add the ast entry to the existing
3055 			 * peer ast_list.
3056 			 */
3057 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
3058 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
3059 					       vdev_id, peer_mac_addr, 0,
3060 					       is_wds);
3061 			break;
3062 		}
3063 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
3064 		{
3065 			u_int16_t peer_id;
3066 			u_int8_t vdev_id;
3067 			u_int8_t mac_addr[HTT_MAC_ADDR_LEN] = {0};
3068 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
3069 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
3070 
3071 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3072 						 vdev_id, mac_addr, 0);
3073 			break;
3074 		}
3075 	case HTT_T2H_MSG_TYPE_SEC_IND:
3076 		{
3077 			u_int16_t peer_id;
3078 			enum cdp_sec_type sec_type;
3079 			int is_unicast;
3080 
3081 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
3082 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
3083 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
3084 			/* point to the first part of the Michael key */
3085 			msg_word++;
3086 			dp_rx_sec_ind_handler(
3087 				soc->dp_soc, peer_id, sec_type, is_unicast,
3088 				msg_word, msg_word + 2);
3089 			break;
3090 		}
3091 
3092 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
3093 		{
3094 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
3095 							     htt_t2h_msg);
3096 			break;
3097 		}
3098 
3099 	case HTT_T2H_MSG_TYPE_PKTLOG:
3100 		{
3101 			dp_pktlog_msg_handler(soc, msg_word);
3102 			break;
3103 		}
3104 
3105 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3106 		{
3107 			htc_pm_runtime_put(soc->htc_soc);
3108 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3109 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3110 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3111 				"target uses HTT version %d.%d; host uses %d.%d",
3112 				soc->tgt_ver.major, soc->tgt_ver.minor,
3113 				HTT_CURRENT_VERSION_MAJOR,
3114 				HTT_CURRENT_VERSION_MINOR);
3115 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3116 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3117 					QDF_TRACE_LEVEL_ERROR,
3118 					"*** Incompatible host/target HTT versions!");
3119 			}
3120 			/* abort if the target is incompatible with the host */
3121 			qdf_assert(soc->tgt_ver.major ==
3122 				HTT_CURRENT_VERSION_MAJOR);
3123 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3124 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3125 					QDF_TRACE_LEVEL_WARN,
3126 					"*** Warning: host/target HTT versions"
3127 					" are different, though compatible!");
3128 			}
3129 			break;
3130 		}
3131 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3132 		{
3133 			uint16_t peer_id;
3134 			uint8_t tid;
3135 			uint8_t win_sz;
3136 			uint16_t status;
3137 			struct dp_peer *peer;
3138 
3139 			/*
3140 			 * Update REO Queue Desc with new values
3141 			 */
3142 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3143 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3144 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3145 			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);
3146 
3147 			/*
3148 			 * Window size needs to be incremented by 1
3149 			 * since fw needs to represent a value of 256
3150 			 * using just 8 bits
3151 			 */
3152 			if (peer) {
3153 				status = dp_addba_requestprocess_wifi3(peer,
3154 						0, tid, 0, win_sz + 1, 0xffff);
3155 
3156 				/*
3157 				 * If PEER_LOCK_REF_PROTECT enbled dec ref
3158 				 * which is inc by dp_peer_find_by_id
3159 				 */
3160 				dp_peer_unref_del_find_by_id(peer);
3161 
3162 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3163 					QDF_TRACE_LEVEL_INFO,
3164 					FL("PeerID %d BAW %d TID %d stat %d"),
3165 					peer_id, win_sz, tid, status);
3166 
3167 			} else {
3168 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3169 					QDF_TRACE_LEVEL_ERROR,
3170 					FL("Peer not found peer id %d"),
3171 					peer_id);
3172 			}
3173 			break;
3174 		}
3175 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3176 		{
3177 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3178 			break;
3179 		}
3180 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
3181 		{
3182 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
3183 			u_int8_t *peer_mac_addr;
3184 			u_int16_t peer_id;
3185 			u_int16_t hw_peer_id;
3186 			u_int8_t vdev_id;
3187 			bool is_wds;
3188 			u_int16_t ast_hash;
3189 
3190 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
3191 			hw_peer_id =
3192 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
3193 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
3194 			peer_mac_addr =
3195 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3196 						   &mac_addr_deswizzle_buf[0]);
3197 			is_wds =
3198 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
3199 			ast_hash =
3200 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
3201 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3202 				  QDF_TRACE_LEVEL_INFO,
3203 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3204 				  peer_id, vdev_id);
3205 
3206 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3207 					       hw_peer_id, vdev_id,
3208 					       peer_mac_addr, ast_hash,
3209 					       is_wds);
3210 			break;
3211 		}
3212 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
3213 		{
3214 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
3215 			u_int8_t *mac_addr;
3216 			u_int16_t peer_id;
3217 			u_int8_t vdev_id;
3218 			u_int8_t is_wds;
3219 
3220 			peer_id =
3221 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
3222 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
3223 			mac_addr =
3224 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3225 						   &mac_addr_deswizzle_buf[0]);
3226 			is_wds =
3227 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
3228 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3229 				  QDF_TRACE_LEVEL_INFO,
3230 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3231 				  peer_id, vdev_id);
3232 
3233 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3234 						 vdev_id, mac_addr,
3235 						 is_wds);
3236 			break;
3237 		}
3238 	default:
3239 		break;
3240 	};
3241 
3242 	/* Free the indication buffer */
3243 	if (free_buf)
3244 		qdf_nbuf_free(htt_t2h_msg);
3245 }
3246 
3247 /*
3248  * dp_htt_h2t_full() - Send full handler (called from HTC)
3249  * @context:	Opaque context (HTT SOC handle)
3250  * @pkt:	HTC packet
3251  *
3252  * Return: enum htc_send_full_action
3253  */
static enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	/* Never drop H2T messages when the send queue is full; keep
	 * them queued (HTT relies on every H2T message reaching FW).
	 */
	return HTC_SEND_FULL_KEEP;
}
3259 
3260 /*
3261  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3262  * @context:	Opaque context (HTT SOC handle)
3263  * @nbuf:	nbuf containing T2H message
3264  * @pipe_id:	HIF pipe ID
3265  *
3266  * Return: QDF_STATUS
3267  *
3268  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3269  * will be used for packet log and other high-priority HTT messages. Proper
3270  * HTC connection to be added later once required FW changes are available
3271  */
3272 static QDF_STATUS
3273 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3274 {
3275 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3276 	HTC_PACKET htc_pkt;
3277 
3278 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3279 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3280 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3281 	htc_pkt.pPktContext = (void *)nbuf;
3282 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3283 
3284 	return rc;
3285 }
3286 
3287 /*
3288  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3289  * @htt_soc:	HTT SOC handle
3290  *
3291  * Return: QDF_STATUS
3292  */
static QDF_STATUS
htt_htc_soc_attach(struct htt_soc *soc)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	QDF_STATUS status;
	struct dp_soc *dpsoc = soc->dp_soc;

	qdf_mem_zero(&connect, sizeof(connect));
	qdf_mem_zero(&response, sizeof(response));

	/* No service-specific metadata for the HTT connection */
	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = soc;
	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;

	/* N/A, fill is done by HIF */
	connect.EpCallbacks.RecvRefillWaterMark = 1;

	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to control service */
	connect.service_id = HTT_DATA_MSG_SVC;

	status = htc_connect_service(soc->htc_soc, &connect, &response);

	if (status != QDF_STATUS_SUCCESS)
		return status;

	/* Remember the endpoint HTC assigned to the HTT service */
	soc->htc_endpoint = response.Endpoint;

	/* Tell HIF which endpoint carries HTT and hook the dedicated
	 * high-priority T2H pipe (HTC-bypass path for pktlog etc.).
	 */
	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);

	return QDF_STATUS_SUCCESS; /* success */
}
3343 
3344 /*
3345  * htt_soc_initialize() - SOC level HTT initialization
3346  * @htt_soc: Opaque htt SOC handle
3347  * @ctrl_psoc: Opaque ctrl SOC handle
3348  * @htc_soc: SOC level HTC handle
3349  * @hal_soc: Opaque HAL SOC handle
3350  * @osdev: QDF device
3351  *
3352  * Return: HTT handle on success; NULL on failure
3353  */
3354 void *
3355 htt_soc_initialize(void *htt_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
3356 		   void *hal_soc, qdf_device_t osdev)
3357 {
3358 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3359 
3360 	soc->osdev = osdev;
3361 	soc->ctrl_psoc = ctrl_psoc;
3362 	soc->htc_soc = htc_soc;
3363 	soc->hal_soc = hal_soc;
3364 
3365 	if (htt_htc_soc_attach(soc))
3366 		goto fail2;
3367 
3368 	return soc;
3369 
3370 fail2:
3371 	return NULL;
3372 }
3373 
/*
 * htt_soc_htc_dealloc() - Release the HTC packet pools owned by the HTT SOC
 * @htt_handle: HTT SOC handle
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
3379 
3380 /*
3381  * htt_soc_htc_prealloc() - HTC memory prealloc
3382  * @htt_soc: SOC level HTT handle
3383  *
3384  * Return: QDF_STATUS_SUCCESS on Success or
3385  * QDF_STATUS_E_NOMEM on allocation failure
3386  */
3387 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
3388 {
3389 	int i;
3390 
3391 	HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);
3392 
3393 	soc->htt_htc_pkt_freelist = NULL;
3394 	/* pre-allocate some HTC_PACKET objects */
3395 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3396 		struct dp_htt_htc_pkt_union *pkt;
3397 		pkt = qdf_mem_malloc(sizeof(*pkt));
3398 		if (!pkt)
3399 			return QDF_STATUS_E_NOMEM;
3400 
3401 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3402 	}
3403 	return QDF_STATUS_SUCCESS;
3404 }
3405 
3406 /*
3407  * htt_soc_detach() - Free SOC level HTT handle
3408  * @htt_hdl: HTT SOC handle
3409  */
void htt_soc_detach(void *htt_hdl)
{
	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;

	/* NOTE(review): assumes htt_soc_htc_dealloc() already released the
	 * HTC packet pools — confirm call order at the caller. Here we only
	 * destroy the tx mutex and free the handle itself.
	 */
	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
	qdf_mem_free(htt_handle);
}
3417 
3418 /**
 * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
3420  * @pdev: DP PDEV handle
3421  * @stats_type_upload_mask: stats type requested by user
3422  * @config_param_0: extra configuration parameters
3423  * @config_param_1: extra configuration parameters
3424  * @config_param_2: extra configuration parameters
3425  * @config_param_3: extra configuration parameters
3426  * @mac_id: mac number
3427  *
3428  * return: QDF STATUS
3429  */
3430 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3431 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3432 		uint32_t config_param_1, uint32_t config_param_2,
3433 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3434 		uint8_t mac_id)
3435 {
3436 	struct htt_soc *soc = pdev->soc->htt_handle;
3437 	struct dp_htt_htc_pkt *pkt;
3438 	qdf_nbuf_t msg;
3439 	uint32_t *msg_word;
3440 	uint8_t pdev_mask = 0;
3441 
3442 	msg = qdf_nbuf_alloc(
3443 			soc->osdev,
3444 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3445 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3446 
3447 	if (!msg)
3448 		return QDF_STATUS_E_NOMEM;
3449 
3450 	/*TODO:Add support for SOC stats
3451 	 * Bit 0: SOC Stats
3452 	 * Bit 1: Pdev stats for pdev id 0
3453 	 * Bit 2: Pdev stats for pdev id 1
3454 	 * Bit 3: Pdev stats for pdev id 2
3455 	 */
3456 	mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3457 
3458 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
3459 	/*
3460 	 * Set the length of the message.
3461 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3462 	 * separately during the below call to qdf_nbuf_push_head.
3463 	 * The contribution from the HTC header is added separately inside HTC.
3464 	 */
3465 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3466 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3467 				"Failed to expand head for HTT_EXT_STATS");
3468 		qdf_nbuf_free(msg);
3469 		return QDF_STATUS_E_FAILURE;
3470 	}
3471 
3472 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3473 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
3474 		"config_param_1 %u\n config_param_2 %u\n"
3475 		"config_param_4 %u\n -------------",
3476 		__func__, __LINE__, cookie_val, config_param_0,
3477 		config_param_1, config_param_2,	config_param_3);
3478 
3479 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3480 
3481 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3482 	*msg_word = 0;
3483 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
3484 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
3485 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
3486 
3487 	/* word 1 */
3488 	msg_word++;
3489 	*msg_word = 0;
3490 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
3491 
3492 	/* word 2 */
3493 	msg_word++;
3494 	*msg_word = 0;
3495 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
3496 
3497 	/* word 3 */
3498 	msg_word++;
3499 	*msg_word = 0;
3500 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
3501 
3502 	/* word 4 */
3503 	msg_word++;
3504 	*msg_word = 0;
3505 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
3506 
3507 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
3508 
3509 	/* word 5 */
3510 	msg_word++;
3511 
3512 	/* word 6 */
3513 	msg_word++;
3514 	*msg_word = 0;
3515 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
3516 
3517 	/* word 7 */
3518 	msg_word++;
3519 	*msg_word = 0;
3520 	/*Using last 2 bits for pdev_id */
3521 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
3522 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
3523 
3524 	pkt = htt_htc_pkt_alloc(soc);
3525 	if (!pkt) {
3526 		qdf_nbuf_free(msg);
3527 		return QDF_STATUS_E_NOMEM;
3528 	}
3529 
3530 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3531 
3532 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3533 			dp_htt_h2t_send_complete_free_netbuf,
3534 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3535 			soc->htc_endpoint,
3536 			1); /* tag - not relevant here */
3537 
3538 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3539 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3540 	return 0;
3541 }
3542 
/* This will be reverted once a proper HTT header definition for
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is available in htt.h.
 */
3546 #if defined(WDI_EVENT_ENABLE)
3547 /**
3548  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
3549  * @pdev: DP PDEV handle
3550  * @stats_type_upload_mask: stats type requested by user
3551  * @mac_id: Mac id number
3552  *
3553  * return: QDF STATUS
3554  */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	/* FW expects the HW mac id in the pdev mask */
	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand head for HTT_CFG_STATS");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* Single-dword message: type, pdev mask and TLV bitmap */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
			stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	DP_HTT_SEND_HTC_PKT(soc, pkt);
	return 0;
}
3627 #endif
3628 
3629 void
3630 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
3631 			     uint32_t *tag_buf)
3632 {
3633 	switch (tag_type) {
3634 	case HTT_STATS_PEER_DETAILS_TAG:
3635 	{
3636 		htt_peer_details_tlv *dp_stats_buf =
3637 			(htt_peer_details_tlv *)tag_buf;
3638 
3639 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
3640 	}
3641 	break;
3642 	case HTT_STATS_PEER_STATS_CMN_TAG:
3643 	{
3644 		htt_peer_stats_cmn_tlv *dp_stats_buf =
3645 			(htt_peer_stats_cmn_tlv *)tag_buf;
3646 
3647 		struct dp_peer *peer = dp_peer_find_by_id(pdev->soc,
3648 						pdev->fw_stats_peer_id);
3649 
3650 		if (peer && !peer->bss_peer) {
3651 			peer->stats.tx.inactive_time =
3652 				dp_stats_buf->inactive_time;
3653 			qdf_event_set(&pdev->fw_peer_stats_event);
3654 		}
3655 		if (peer)
3656 			dp_peer_unref_del_find_by_id(peer);
3657 	}
3658 	break;
3659 	default:
3660 		qdf_err("Invalid tag_type");
3661 	}
3662 }
3663