xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision ad85c389289a03e320cd08dea21861f9857892fc)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_htt.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx_mon.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
30 #include "cdp_txrx_cmn_struct.h"
31 
32 #ifdef FEATURE_PERPKT_INFO
33 #include "dp_ratetable.h"
34 #endif
35 
36 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
37 
38 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
39 #define HTT_T2H_MAX_MSG_SIZE 2048
40 
41 #define HTT_MSG_BUF_SIZE(msg_bytes) \
42 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
43 
44 #define HTT_PID_BIT_MASK 0x3
45 
46 #define DP_EXT_MSG_LENGTH 2048
/*
 * DP_HTT_SEND_HTC_PKT() - Send an HTT message over HTC and, on success,
 * add the packet to the misc list so its buffer can be reclaimed later.
 * Arguments are parenthesized so the macro is safe for any caller
 * expression (CERT PRE01-C).
 */
#define DP_HTT_SEND_HTC_PKT(soc, pkt)                            \
do {                                                             \
	if (htc_send_pkt((soc)->htc_soc, &(pkt)->htc_pkt) ==     \
					QDF_STATUS_SUCCESS)      \
		htt_htc_misc_pkt_list_add((soc), (pkt));         \
} while (0)
53 
54 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
55 
56 /**
57  * Bitmap of HTT PPDU TLV types for Default mode
58  */
59 #define HTT_PPDU_DEFAULT_TLV_BITMAP \
60 	(1 << HTT_PPDU_STATS_COMMON_TLV) | \
61 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
62 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
63 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
64 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
65 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)
66 
67 /**
68  * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64
69  */
70 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \
71 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
72 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
73 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
74 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
75 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
76 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
77 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
78 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV))
79 
80 /**
81  * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256
82  */
83 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \
84 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
85 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
86 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
87 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
88 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
89 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
90 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
91 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV))
92 
93 #define HTT_FRAMECTRL_DATATYPE 0x08
94 #define HTT_PPDU_DESC_MAX_DEPTH 16
95 #define DP_SCAN_PEER_ID 0xFFFF
96 
97 /*
98  * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
99  * bitmap for sniffer mode
100  * @bitmap: received bitmap
101  *
102  * Return: expected bitmap value, returns zero if doesn't match with
103  * either 64-bit Tx window or 256-bit window tlv bitmap
104  */
105 
106 static inline int
107 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
108 {
109 	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
110 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
111 	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
112 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
113 
114 	return 0;
115 }
116 
117 /*
118  * dp_tx_stats_update() - Update per-peer statistics
119  * @soc: Datapath soc handle
120  * @peer: Datapath peer handle
121  * @ppdu: PPDU Descriptor
122  * @ack_rssi: RSSI of last ack received
123  *
124  * Return: None
125  */
126 #ifdef FEATURE_PERPKT_INFO
127 static inline void
128 dp_tx_rate_stats_update(struct dp_peer *peer,
129 			struct cdp_tx_completion_ppdu_user *ppdu)
130 {
131 	uint32_t ratekbps = 0;
132 	uint32_t ppdu_tx_rate = 0;
133 
134 	if (!peer || !ppdu)
135 		return;
136 
137 
138 	ratekbps = dp_getrateindex(ppdu->gi,
139 				   ppdu->mcs,
140 				   ppdu->nss,
141 				   ppdu->preamble,
142 				   ppdu->bw);
143 
144 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
145 
146 	if (!ratekbps)
147 		return;
148 
149 	peer->stats.tx.avg_tx_rate =
150 		dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
151 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
152 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
153 
154 	if (peer->vdev) {
155 		if (peer->bss_peer) {
156 			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
157 			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
158 		} else {
159 			peer->vdev->stats.tx.last_tx_rate = ratekbps;
160 			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
161 		}
162 	}
163 }
164 
/*
 * dp_tx_stats_update() - Fold one per-user PPDU completion into the
 * peer's cumulative Tx statistics (MU group, RU allocation, per-preamble
 * MCS histograms, NSS/BW/AC counters, retries, ack RSSI), then notify
 * registered stats listeners.
 */
static void dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer,
		struct cdp_tx_completion_ppdu_user *ppdu, uint32_t ack_rssi)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	uint8_t preamble, mcs;
	uint16_t num_msdu;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;

	/* If the peer statistics are already processed as part of
	 * per-MSDU completion handler, do not process these again in per-PPDU
	 * indications */
	if (soc->process_tx_status)
		return;

	/* MU group position is only meaningful for non-SU PPDUs.
	 * NOTE(review): the "out of bound" branch fires when the low bits
	 * of mu_group_id are all zero, not when the id exceeds
	 * MAX_MU_GROUP_ID -- confirm this masking is the intended check.
	 */
	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID && ppdu->ppdu_type != SU_TX) {
		if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "mu_group_id out of bound!!\n");
		else
			DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
				     (ppdu->user_pos + 1));
	}

	/* Resource-unit (RU) stats apply only to OFDMA transmissions;
	 * bucket the completion by RU width.
	 */
	if (ppdu->ppdu_type == MUOFDMA_TX ||
	    ppdu->ppdu_type == MUMIMO_OFDMA_TX) {
		DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
		DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
		switch (ppdu->ru_tones) {
		case RU_26:
			DP_STATS_INC(peer, tx.ru_loc[0], 1);
		break;
		case RU_52:
			DP_STATS_INC(peer, tx.ru_loc[1], 1);
		break;
		case RU_106:
			DP_STATS_INC(peer, tx.ru_loc[2], 1);
		break;
		case RU_242:
			DP_STATS_INC(peer, tx.ru_loc[3], 1);
		break;
		case RU_484:
			DP_STATS_INC(peer, tx.ru_loc[4], 1);
		break;
		case RU_996:
			DP_STATS_INC(peer, tx.ru_loc[5], 1);
		break;
		}
	}

	/* aggregate packet/byte counters and per-field histograms */
	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type], num_msdu);
	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			num_msdu, (ppdu->success_bytes +
				ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	/* ack RSSI only tracked for unicast frames with a valid sample */
	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	DP_STATS_INC(peer, tx.retries,
			(ppdu->long_retries + ppdu->short_retries));
	/* per-preamble MCS histograms: out-of-range MCS values for each
	 * preamble are folded into the last bucket (MAX_MCS - 1)
	 */
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));

	/* let any registered observers see the refreshed peer stats */
	dp_peer_stats_notify(peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ppdu->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
273 #endif
274 
275 /*
276  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
277  * @htt_soc:	HTT SOC handle
278  *
279  * Return: Pointer to htc packet buffer
280  */
281 static struct dp_htt_htc_pkt *
282 htt_htc_pkt_alloc(struct htt_soc *soc)
283 {
284 	struct dp_htt_htc_pkt_union *pkt = NULL;
285 
286 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
287 	if (soc->htt_htc_pkt_freelist) {
288 		pkt = soc->htt_htc_pkt_freelist;
289 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
290 	}
291 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
292 
293 	if (pkt == NULL)
294 		pkt = qdf_mem_malloc(sizeof(*pkt));
295 	return &pkt->u.pkt; /* not actually a dereference */
296 }
297 
298 /*
299  * htt_htc_pkt_free() - Free HTC packet buffer
300  * @htt_soc:	HTT SOC handle
301  */
302 static void
303 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
304 {
305 	struct dp_htt_htc_pkt_union *u_pkt =
306 		(struct dp_htt_htc_pkt_union *)pkt;
307 
308 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
309 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
310 	soc->htt_htc_pkt_freelist = u_pkt;
311 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
312 }
313 
314 /*
315  * htt_htc_pkt_pool_free() - Free HTC packet pool
316  * @htt_soc:	HTT SOC handle
317  */
318 static void
319 htt_htc_pkt_pool_free(struct htt_soc *soc)
320 {
321 	struct dp_htt_htc_pkt_union *pkt, *next;
322 	pkt = soc->htt_htc_pkt_freelist;
323 	while (pkt) {
324 		next = pkt->u.next;
325 		qdf_mem_free(pkt);
326 		pkt = next;
327 	}
328 	soc->htt_htc_pkt_freelist = NULL;
329 }
330 
331 /*
332  * htt_htc_misc_pkt_list_trim() - trim misc list
333  * @htt_soc: HTT SOC handle
334  * @level: max no. of pkts in list
335  */
336 static void
337 htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
338 {
339 	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
340 	int i = 0;
341 	qdf_nbuf_t netbuf;
342 
343 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
344 	pkt = soc->htt_htc_pkt_misclist;
345 	while (pkt) {
346 		next = pkt->u.next;
347 		/* trim the out grown list*/
348 		if (++i > level) {
349 			netbuf =
350 				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
351 			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
352 			qdf_nbuf_free(netbuf);
353 			qdf_mem_free(pkt);
354 			pkt = NULL;
355 			if (prev)
356 				prev->u.next = NULL;
357 		}
358 		prev = pkt;
359 		pkt = next;
360 	}
361 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
362 }
363 
364 /*
365  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
366  * @htt_soc:	HTT SOC handle
367  * @dp_htt_htc_pkt: pkt to be added to list
368  */
369 static void
370 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
371 {
372 	struct dp_htt_htc_pkt_union *u_pkt =
373 				(struct dp_htt_htc_pkt_union *)pkt;
374 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
375 							pkt->htc_pkt.Endpoint)
376 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
377 
378 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
379 	if (soc->htt_htc_pkt_misclist) {
380 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
381 		soc->htt_htc_pkt_misclist = u_pkt;
382 	} else {
383 		soc->htt_htc_pkt_misclist = u_pkt;
384 	}
385 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
386 
387 	/* only ce pipe size + tx_queue_depth could possibly be in use
388 	 * free older packets in the misclist
389 	 */
390 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
391 }
392 
393 /*
394  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
395  * @htt_soc:	HTT SOC handle
396  */
397 static void
398 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
399 {
400 	struct dp_htt_htc_pkt_union *pkt, *next;
401 	qdf_nbuf_t netbuf;
402 
403 	pkt = soc->htt_htc_pkt_misclist;
404 
405 	while (pkt) {
406 		next = pkt->u.next;
407 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
408 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
409 
410 		soc->stats.htc_pkt_free++;
411 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
412 			 "%s: Pkt free count %d",
413 			 __func__, soc->stats.htc_pkt_free);
414 
415 		qdf_nbuf_free(netbuf);
416 		qdf_mem_free(pkt);
417 		pkt = next;
418 	}
419 	soc->htt_htc_pkt_misclist = NULL;
420 }
421 
422 /*
423  * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
424  * @tgt_mac_addr:	Target MAC
425  * @buffer:		Output buffer
426  */
427 static u_int8_t *
428 htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
429 {
430 #ifdef BIG_ENDIAN_HOST
431 	/*
432 	 * The host endianness is opposite of the target endianness.
433 	 * To make u_int32_t elements come out correctly, the target->host
434 	 * upload has swizzled the bytes in each u_int32_t element of the
435 	 * message.
436 	 * For byte-array message fields like the MAC address, this
437 	 * upload swizzling puts the bytes in the wrong order, and needs
438 	 * to be undone.
439 	 */
440 	buffer[0] = tgt_mac_addr[3];
441 	buffer[1] = tgt_mac_addr[2];
442 	buffer[2] = tgt_mac_addr[1];
443 	buffer[3] = tgt_mac_addr[0];
444 	buffer[4] = tgt_mac_addr[7];
445 	buffer[5] = tgt_mac_addr[6];
446 	return buffer;
447 #else
448 	/*
449 	 * The host endianness matches the target endianness -
450 	 * we can use the mac addr directly from the message buffer.
451 	 */
452 	return tgt_mac_addr;
453 #endif
454 }
455 
456 /*
457  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
458  * @soc:	SOC handle
459  * @status:	Completion status
460  * @netbuf:	HTT buffer
461  */
462 static void
463 dp_htt_h2t_send_complete_free_netbuf(
464 	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
465 {
466 	qdf_nbuf_free(netbuf);
467 }
468 
469 /*
470  * dp_htt_h2t_send_complete() - H2T completion handler
471  * @context:	Opaque context (HTT SOC handle)
472  * @htc_pkt:	HTC packet
473  */
474 static void
475 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
476 {
477 	void (*send_complete_part2)(
478 		void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
479 	struct htt_soc *soc =  (struct htt_soc *) context;
480 	struct dp_htt_htc_pkt *htt_pkt;
481 	qdf_nbuf_t netbuf;
482 
483 	send_complete_part2 = htc_pkt->pPktContext;
484 
485 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
486 
487 	/* process (free or keep) the netbuf that held the message */
488 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
489 	/*
490 	 * adf sendcomplete is required for windows only
491 	 */
492 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
493 	if (send_complete_part2 != NULL) {
494 		send_complete_part2(
495 			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
496 	}
497 	/* free the htt_htc_pkt / HTC_PACKET object */
498 	htt_htc_pkt_free(soc, htt_pkt);
499 }
500 
501 /*
502  * htt_h2t_ver_req_msg() - Send HTT version request message to target
503  * @htt_soc:	HTT SOC handle
504  *
505  * Return: 0 on success; error code on failure
506  */
507 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
508 {
509 	struct dp_htt_htc_pkt *pkt;
510 	qdf_nbuf_t msg;
511 	uint32_t *msg_word;
512 
513 	msg = qdf_nbuf_alloc(
514 		soc->osdev,
515 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
516 		/* reserve room for the HTC header */
517 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
518 	if (!msg)
519 		return QDF_STATUS_E_NOMEM;
520 
521 	/*
522 	 * Set the length of the message.
523 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
524 	 * separately during the below call to qdf_nbuf_push_head.
525 	 * The contribution from the HTC header is added separately inside HTC.
526 	 */
527 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
528 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
529 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
530 			__func__);
531 		return QDF_STATUS_E_FAILURE;
532 	}
533 
534 	/* fill in the message contents */
535 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
536 
537 	/* rewind beyond alignment pad to get to the HTC header reserved area */
538 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
539 
540 	*msg_word = 0;
541 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
542 
543 	pkt = htt_htc_pkt_alloc(soc);
544 	if (!pkt) {
545 		qdf_nbuf_free(msg);
546 		return QDF_STATUS_E_FAILURE;
547 	}
548 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
549 
550 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
551 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
552 		qdf_nbuf_len(msg), soc->htc_endpoint,
553 		1); /* tag - not relevant here */
554 
555 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
556 	DP_HTT_SEND_HTC_PKT(soc, pkt);
557 	return 0;
558 }
559 
560 /*
561  * htt_srng_setup() - Send SRNG setup message to target
562  * @htt_soc:	HTT SOC handle
563  * @mac_id:	MAC Id
564  * @hal_srng:	Opaque HAL SRNG pointer
565  * @hal_ring_type:	SRNG ring type
566  *
567  * Return: 0 on success; error code on failure
568  */
569 int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
570 	int hal_ring_type)
571 {
572 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
573 	struct dp_htt_htc_pkt *pkt;
574 	qdf_nbuf_t htt_msg;
575 	uint32_t *msg_word;
576 	struct hal_srng_params srng_params;
577 	qdf_dma_addr_t hp_addr, tp_addr;
578 	uint32_t ring_entry_size =
579 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
580 	int htt_ring_type, htt_ring_id;
581 
582 	/* Sizes should be set in 4-byte words */
583 	ring_entry_size = ring_entry_size >> 2;
584 
585 	htt_msg = qdf_nbuf_alloc(soc->osdev,
586 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
587 		/* reserve room for the HTC header */
588 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
589 	if (!htt_msg)
590 		goto fail0;
591 
592 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
593 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
594 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
595 
596 	switch (hal_ring_type) {
597 	case RXDMA_BUF:
598 #ifdef QCA_HOST2FW_RXBUF_RING
599 		if (srng_params.ring_id ==
600 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
601 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
602 			htt_ring_type = HTT_SW_TO_SW_RING;
603 #ifdef IPA_OFFLOAD
604 		} else if (srng_params.ring_id ==
605 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
606 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
607 			htt_ring_type = HTT_SW_TO_SW_RING;
608 #endif
609 #else
610 		if (srng_params.ring_id ==
611 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
612 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
613 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
614 			htt_ring_type = HTT_SW_TO_HW_RING;
615 #endif
616 		} else if (srng_params.ring_id ==
617 #ifdef IPA_OFFLOAD
618 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
619 #else
620 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
621 #endif
622 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
623 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
624 			htt_ring_type = HTT_SW_TO_HW_RING;
625 		} else {
626 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
627 				   "%s: Ring %d currently not supported",
628 				   __func__, srng_params.ring_id);
629 			goto fail1;
630 		}
631 
632 		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
633 			hal_ring_type, srng_params.ring_id, htt_ring_id,
634 			(uint64_t)hp_addr,
635 			(uint64_t)tp_addr);
636 		break;
637 	case RXDMA_MONITOR_BUF:
638 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
639 		htt_ring_type = HTT_SW_TO_HW_RING;
640 		break;
641 	case RXDMA_MONITOR_STATUS:
642 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
643 		htt_ring_type = HTT_SW_TO_HW_RING;
644 		break;
645 	case RXDMA_MONITOR_DST:
646 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
647 		htt_ring_type = HTT_HW_TO_SW_RING;
648 		break;
649 	case RXDMA_MONITOR_DESC:
650 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
651 		htt_ring_type = HTT_SW_TO_HW_RING;
652 		break;
653 	case RXDMA_DST:
654 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
655 		htt_ring_type = HTT_HW_TO_SW_RING;
656 		break;
657 
658 	default:
659 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
660 			"%s: Ring currently not supported", __func__);
661 			goto fail1;
662 	}
663 
664 	/*
665 	 * Set the length of the message.
666 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
667 	 * separately during the below call to qdf_nbuf_push_head.
668 	 * The contribution from the HTC header is added separately inside HTC.
669 	 */
670 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
671 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
672 			"%s: Failed to expand head for SRING_SETUP msg",
673 			__func__);
674 		return QDF_STATUS_E_FAILURE;
675 	}
676 
677 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
678 
679 	/* rewind beyond alignment pad to get to the HTC header reserved area */
680 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
681 
682 	/* word 0 */
683 	*msg_word = 0;
684 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
685 
686 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
687 			(htt_ring_type == HTT_HW_TO_SW_RING))
688 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
689 			 DP_SW2HW_MACID(mac_id));
690 	else
691 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
692 
693 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
694 		  "%s: mac_id %d", __func__, mac_id);
695 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
696 	/* TODO: Discuss with FW on changing this to unique ID and using
697 	 * htt_ring_type to send the type of ring
698 	 */
699 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
700 
701 	/* word 1 */
702 	msg_word++;
703 	*msg_word = 0;
704 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
705 		srng_params.ring_base_paddr & 0xffffffff);
706 
707 	/* word 2 */
708 	msg_word++;
709 	*msg_word = 0;
710 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
711 		(uint64_t)srng_params.ring_base_paddr >> 32);
712 
713 	/* word 3 */
714 	msg_word++;
715 	*msg_word = 0;
716 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
717 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
718 		(ring_entry_size * srng_params.num_entries));
719 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
720 		  "%s: entry_size %d", __func__,
721 			 ring_entry_size);
722 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
723 		  "%s: num_entries %d", __func__,
724 			 srng_params.num_entries);
725 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
726 		  "%s: ring_size %d", __func__,
727 			 (ring_entry_size * srng_params.num_entries));
728 	if (htt_ring_type == HTT_SW_TO_HW_RING)
729 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
730 						*msg_word, 1);
731 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
732 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
733 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
734 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
735 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
736 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
737 
738 	/* word 4 */
739 	msg_word++;
740 	*msg_word = 0;
741 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
742 		hp_addr & 0xffffffff);
743 
744 	/* word 5 */
745 	msg_word++;
746 	*msg_word = 0;
747 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
748 		(uint64_t)hp_addr >> 32);
749 
750 	/* word 6 */
751 	msg_word++;
752 	*msg_word = 0;
753 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
754 		tp_addr & 0xffffffff);
755 
756 	/* word 7 */
757 	msg_word++;
758 	*msg_word = 0;
759 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
760 		(uint64_t)tp_addr >> 32);
761 
762 	/* word 8 */
763 	msg_word++;
764 	*msg_word = 0;
765 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
766 		srng_params.msi_addr & 0xffffffff);
767 
768 	/* word 9 */
769 	msg_word++;
770 	*msg_word = 0;
771 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
772 		(uint64_t)(srng_params.msi_addr) >> 32);
773 
774 	/* word 10 */
775 	msg_word++;
776 	*msg_word = 0;
777 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
778 		srng_params.msi_data);
779 
780 	/* word 11 */
781 	msg_word++;
782 	*msg_word = 0;
783 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
784 		srng_params.intr_batch_cntr_thres_entries *
785 		ring_entry_size);
786 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
787 		srng_params.intr_timer_thres_us >> 3);
788 
789 	/* word 12 */
790 	msg_word++;
791 	*msg_word = 0;
792 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
793 		/* TODO: Setting low threshold to 1/8th of ring size - see
794 		 * if this needs to be configurable
795 		 */
796 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
797 			srng_params.low_threshold);
798 	}
799 	/* "response_required" field should be set if a HTT response message is
800 	 * required after setting up the ring.
801 	 */
802 	pkt = htt_htc_pkt_alloc(soc);
803 	if (!pkt)
804 		goto fail1;
805 
806 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
807 
808 	SET_HTC_PACKET_INFO_TX(
809 		&pkt->htc_pkt,
810 		dp_htt_h2t_send_complete_free_netbuf,
811 		qdf_nbuf_data(htt_msg),
812 		qdf_nbuf_len(htt_msg),
813 		soc->htc_endpoint,
814 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
815 
816 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
817 	DP_HTT_SEND_HTC_PKT(soc, pkt);
818 
819 	return QDF_STATUS_SUCCESS;
820 
821 fail1:
822 	qdf_nbuf_free(htt_msg);
823 fail0:
824 	return QDF_STATUS_E_FAILURE;
825 }
826 
827 /*
828  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
829  * config message to target
830  * @htt_soc:	HTT SOC handle
831  * @pdev_id:	PDEV Id
832  * @hal_srng:	Opaque HAL SRNG pointer
833  * @hal_ring_type:	SRNG ring type
834  * @ring_buf_size:	SRNG buffer size
835  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
836  * Return: 0 on success; error code on failure
837  */
838 int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
839 	int hal_ring_type, int ring_buf_size,
840 	struct htt_rx_ring_tlv_filter *htt_tlv_filter)
841 {
842 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
843 	struct dp_htt_htc_pkt *pkt;
844 	qdf_nbuf_t htt_msg;
845 	uint32_t *msg_word;
846 	struct hal_srng_params srng_params;
847 	uint32_t htt_ring_type, htt_ring_id;
848 	uint32_t tlv_filter;
849 
850 	htt_msg = qdf_nbuf_alloc(soc->osdev,
851 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
852 	/* reserve room for the HTC header */
853 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
854 	if (!htt_msg)
855 		goto fail0;
856 
857 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
858 
859 	switch (hal_ring_type) {
860 	case RXDMA_BUF:
861 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
862 		htt_ring_type = HTT_SW_TO_HW_RING;
863 		break;
864 	case RXDMA_MONITOR_BUF:
865 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
866 		htt_ring_type = HTT_SW_TO_HW_RING;
867 		break;
868 	case RXDMA_MONITOR_STATUS:
869 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
870 		htt_ring_type = HTT_SW_TO_HW_RING;
871 		break;
872 	case RXDMA_MONITOR_DST:
873 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
874 		htt_ring_type = HTT_HW_TO_SW_RING;
875 		break;
876 	case RXDMA_MONITOR_DESC:
877 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
878 		htt_ring_type = HTT_SW_TO_HW_RING;
879 		break;
880 	case RXDMA_DST:
881 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
882 		htt_ring_type = HTT_HW_TO_SW_RING;
883 		break;
884 
885 	default:
886 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
887 			"%s: Ring currently not supported", __func__);
888 		goto fail1;
889 	}
890 
891 	/*
892 	 * Set the length of the message.
893 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
894 	 * separately during the below call to qdf_nbuf_push_head.
895 	 * The contribution from the HTC header is added separately inside HTC.
896 	 */
897 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
898 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
899 			"%s: Failed to expand head for RX Ring Cfg msg",
900 			__func__);
901 		goto fail1; /* failure */
902 	}
903 
904 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
905 
906 	/* rewind beyond alignment pad to get to the HTC header reserved area */
907 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
908 
909 	/* word 0 */
910 	*msg_word = 0;
911 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
912 
913 	/*
914 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
915 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
916 	 */
917 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
918 			htt_ring_type == HTT_SW_TO_HW_RING)
919 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
920 						DP_SW2HW_MACID(pdev_id));
921 
922 	/* TODO: Discuss with FW on changing this to unique ID and using
923 	 * htt_ring_type to send the type of ring
924 	 */
925 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
926 
927 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
928 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
929 
930 	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
931 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
932 
933 	/* word 1 */
934 	msg_word++;
935 	*msg_word = 0;
936 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
937 		ring_buf_size);
938 
939 	/* word 2 */
940 	msg_word++;
941 	*msg_word = 0;
942 
943 	if (htt_tlv_filter->enable_fp) {
944 		/* TYPE: MGMT */
945 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
946 			FP, MGMT, 0000,
947 			(htt_tlv_filter->fp_mgmt_filter &
948 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
949 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
950 			FP, MGMT, 0001,
951 			(htt_tlv_filter->fp_mgmt_filter &
952 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
953 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
954 			FP, MGMT, 0010,
955 			(htt_tlv_filter->fp_mgmt_filter &
956 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
957 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
958 			FP, MGMT, 0011,
959 			(htt_tlv_filter->fp_mgmt_filter &
960 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
961 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
962 			FP, MGMT, 0100,
963 			(htt_tlv_filter->fp_mgmt_filter &
964 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
965 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
966 			FP, MGMT, 0101,
967 			(htt_tlv_filter->fp_mgmt_filter &
968 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
969 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
970 			FP, MGMT, 0110,
971 			(htt_tlv_filter->fp_mgmt_filter &
972 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
973 		/* reserved */
974 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
975 			MGMT, 0111,
976 			(htt_tlv_filter->fp_mgmt_filter &
977 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
978 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
979 			FP, MGMT, 1000,
980 			(htt_tlv_filter->fp_mgmt_filter &
981 			FILTER_MGMT_BEACON) ? 1 : 0);
982 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
983 			FP, MGMT, 1001,
984 			(htt_tlv_filter->fp_mgmt_filter &
985 			FILTER_MGMT_ATIM) ? 1 : 0);
986 	}
987 
988 	if (htt_tlv_filter->enable_md) {
989 			/* TYPE: MGMT */
990 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
991 			MD, MGMT, 0000,
992 			(htt_tlv_filter->md_mgmt_filter &
993 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
994 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
995 			MD, MGMT, 0001,
996 			(htt_tlv_filter->md_mgmt_filter &
997 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
998 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
999 			MD, MGMT, 0010,
1000 			(htt_tlv_filter->md_mgmt_filter &
1001 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1002 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1003 			MD, MGMT, 0011,
1004 			(htt_tlv_filter->md_mgmt_filter &
1005 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1006 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1007 			MD, MGMT, 0100,
1008 			(htt_tlv_filter->md_mgmt_filter &
1009 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1010 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1011 			MD, MGMT, 0101,
1012 			(htt_tlv_filter->md_mgmt_filter &
1013 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1014 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1015 			MD, MGMT, 0110,
1016 			(htt_tlv_filter->md_mgmt_filter &
1017 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1018 		/* reserved */
1019 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1020 			MGMT, 0111,
1021 			(htt_tlv_filter->md_mgmt_filter &
1022 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1023 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1024 			MD, MGMT, 1000,
1025 			(htt_tlv_filter->md_mgmt_filter &
1026 			FILTER_MGMT_BEACON) ? 1 : 0);
1027 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1028 			MD, MGMT, 1001,
1029 			(htt_tlv_filter->md_mgmt_filter &
1030 			FILTER_MGMT_ATIM) ? 1 : 0);
1031 	}
1032 
1033 	if (htt_tlv_filter->enable_mo) {
1034 		/* TYPE: MGMT */
1035 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1036 			MO, MGMT, 0000,
1037 			(htt_tlv_filter->mo_mgmt_filter &
1038 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1039 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1040 			MO, MGMT, 0001,
1041 			(htt_tlv_filter->mo_mgmt_filter &
1042 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1043 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1044 			MO, MGMT, 0010,
1045 			(htt_tlv_filter->mo_mgmt_filter &
1046 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1047 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1048 			MO, MGMT, 0011,
1049 			(htt_tlv_filter->mo_mgmt_filter &
1050 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1051 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1052 			MO, MGMT, 0100,
1053 			(htt_tlv_filter->mo_mgmt_filter &
1054 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1055 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1056 			MO, MGMT, 0101,
1057 			(htt_tlv_filter->mo_mgmt_filter &
1058 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1059 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1060 			MO, MGMT, 0110,
1061 			(htt_tlv_filter->mo_mgmt_filter &
1062 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1063 		/* reserved */
1064 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1065 			MGMT, 0111,
1066 			(htt_tlv_filter->mo_mgmt_filter &
1067 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1068 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1069 			MO, MGMT, 1000,
1070 			(htt_tlv_filter->mo_mgmt_filter &
1071 			FILTER_MGMT_BEACON) ? 1 : 0);
1072 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1073 			MO, MGMT, 1001,
1074 			(htt_tlv_filter->mo_mgmt_filter &
1075 			FILTER_MGMT_ATIM) ? 1 : 0);
1076 	}
1077 
1078 	/* word 3 */
1079 	msg_word++;
1080 	*msg_word = 0;
1081 
1082 	if (htt_tlv_filter->enable_fp) {
1083 		/* TYPE: MGMT */
1084 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1085 			FP, MGMT, 1010,
1086 			(htt_tlv_filter->fp_mgmt_filter &
1087 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1088 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1089 			FP, MGMT, 1011,
1090 			(htt_tlv_filter->fp_mgmt_filter &
1091 			FILTER_MGMT_AUTH) ? 1 : 0);
1092 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1093 			FP, MGMT, 1100,
1094 			(htt_tlv_filter->fp_mgmt_filter &
1095 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1096 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1097 			FP, MGMT, 1101,
1098 			(htt_tlv_filter->fp_mgmt_filter &
1099 			FILTER_MGMT_ACTION) ? 1 : 0);
1100 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1101 			FP, MGMT, 1110,
1102 			(htt_tlv_filter->fp_mgmt_filter &
1103 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1104 		/* reserved*/
1105 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1106 			MGMT, 1111,
1107 			(htt_tlv_filter->fp_mgmt_filter &
1108 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1109 	}
1110 
1111 	if (htt_tlv_filter->enable_md) {
1112 			/* TYPE: MGMT */
1113 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1114 			MD, MGMT, 1010,
1115 			(htt_tlv_filter->md_mgmt_filter &
1116 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1117 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1118 			MD, MGMT, 1011,
1119 			(htt_tlv_filter->md_mgmt_filter &
1120 			FILTER_MGMT_AUTH) ? 1 : 0);
1121 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1122 			MD, MGMT, 1100,
1123 			(htt_tlv_filter->md_mgmt_filter &
1124 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1125 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1126 			MD, MGMT, 1101,
1127 			(htt_tlv_filter->md_mgmt_filter &
1128 			FILTER_MGMT_ACTION) ? 1 : 0);
1129 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1130 			MD, MGMT, 1110,
1131 			(htt_tlv_filter->md_mgmt_filter &
1132 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1133 	}
1134 
1135 	if (htt_tlv_filter->enable_mo) {
1136 		/* TYPE: MGMT */
1137 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1138 			MO, MGMT, 1010,
1139 			(htt_tlv_filter->mo_mgmt_filter &
1140 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1141 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1142 			MO, MGMT, 1011,
1143 			(htt_tlv_filter->mo_mgmt_filter &
1144 			FILTER_MGMT_AUTH) ? 1 : 0);
1145 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1146 			MO, MGMT, 1100,
1147 			(htt_tlv_filter->mo_mgmt_filter &
1148 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1149 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1150 			MO, MGMT, 1101,
1151 			(htt_tlv_filter->mo_mgmt_filter &
1152 			FILTER_MGMT_ACTION) ? 1 : 0);
1153 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1154 			MO, MGMT, 1110,
1155 			(htt_tlv_filter->mo_mgmt_filter &
1156 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1157 		/* reserved*/
1158 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1159 			MGMT, 1111,
1160 			(htt_tlv_filter->mo_mgmt_filter &
1161 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1162 	}
1163 
1164 	/* word 4 */
1165 	msg_word++;
1166 	*msg_word = 0;
1167 
1168 	if (htt_tlv_filter->enable_fp) {
1169 		/* TYPE: CTRL */
1170 		/* reserved */
1171 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1172 			CTRL, 0000,
1173 			(htt_tlv_filter->fp_ctrl_filter &
1174 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1175 		/* reserved */
1176 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1177 			CTRL, 0001,
1178 			(htt_tlv_filter->fp_ctrl_filter &
1179 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1180 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1181 			CTRL, 0010,
1182 			(htt_tlv_filter->fp_ctrl_filter &
1183 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1184 		/* reserved */
1185 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1186 			CTRL, 0011,
1187 			(htt_tlv_filter->fp_ctrl_filter &
1188 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1189 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1190 			CTRL, 0100,
1191 			(htt_tlv_filter->fp_ctrl_filter &
1192 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1193 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1194 			CTRL, 0101,
1195 			(htt_tlv_filter->fp_ctrl_filter &
1196 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1197 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1198 			CTRL, 0110,
1199 			(htt_tlv_filter->fp_ctrl_filter &
1200 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1201 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1202 			CTRL, 0111,
1203 			(htt_tlv_filter->fp_ctrl_filter &
1204 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1205 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1206 			CTRL, 1000,
1207 			(htt_tlv_filter->fp_ctrl_filter &
1208 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1209 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1210 			CTRL, 1001,
1211 			(htt_tlv_filter->fp_ctrl_filter &
1212 			FILTER_CTRL_BA) ? 1 : 0);
1213 	}
1214 
1215 	if (htt_tlv_filter->enable_md) {
1216 		/* TYPE: CTRL */
1217 		/* reserved */
1218 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1219 			CTRL, 0000,
1220 			(htt_tlv_filter->md_ctrl_filter &
1221 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1222 		/* reserved */
1223 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1224 			CTRL, 0001,
1225 			(htt_tlv_filter->md_ctrl_filter &
1226 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1227 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1228 			CTRL, 0010,
1229 			(htt_tlv_filter->md_ctrl_filter &
1230 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1231 		/* reserved */
1232 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1233 			CTRL, 0011,
1234 			(htt_tlv_filter->md_ctrl_filter &
1235 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1236 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1237 			CTRL, 0100,
1238 			(htt_tlv_filter->md_ctrl_filter &
1239 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1240 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1241 			CTRL, 0101,
1242 			(htt_tlv_filter->md_ctrl_filter &
1243 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1244 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1245 			CTRL, 0110,
1246 			(htt_tlv_filter->md_ctrl_filter &
1247 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1248 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1249 			CTRL, 0111,
1250 			(htt_tlv_filter->md_ctrl_filter &
1251 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1252 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1253 			CTRL, 1000,
1254 			(htt_tlv_filter->md_ctrl_filter &
1255 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1256 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1257 			CTRL, 1001,
1258 			(htt_tlv_filter->md_ctrl_filter &
1259 			FILTER_CTRL_BA) ? 1 : 0);
1260 	}
1261 
1262 	if (htt_tlv_filter->enable_mo) {
1263 		/* TYPE: CTRL */
1264 		/* reserved */
1265 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1266 			CTRL, 0000,
1267 			(htt_tlv_filter->mo_ctrl_filter &
1268 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1269 		/* reserved */
1270 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1271 			CTRL, 0001,
1272 			(htt_tlv_filter->mo_ctrl_filter &
1273 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1274 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1275 			CTRL, 0010,
1276 			(htt_tlv_filter->mo_ctrl_filter &
1277 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1278 		/* reserved */
1279 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1280 			CTRL, 0011,
1281 			(htt_tlv_filter->mo_ctrl_filter &
1282 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1283 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1284 			CTRL, 0100,
1285 			(htt_tlv_filter->mo_ctrl_filter &
1286 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1287 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1288 			CTRL, 0101,
1289 			(htt_tlv_filter->mo_ctrl_filter &
1290 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1291 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1292 			CTRL, 0110,
1293 			(htt_tlv_filter->mo_ctrl_filter &
1294 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1295 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1296 			CTRL, 0111,
1297 			(htt_tlv_filter->mo_ctrl_filter &
1298 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1299 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1300 			CTRL, 1000,
1301 			(htt_tlv_filter->mo_ctrl_filter &
1302 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1303 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1304 			CTRL, 1001,
1305 			(htt_tlv_filter->mo_ctrl_filter &
1306 			FILTER_CTRL_BA) ? 1 : 0);
1307 	}
1308 
1309 	/* word 5 */
1310 	msg_word++;
1311 	*msg_word = 0;
1312 	if (htt_tlv_filter->enable_fp) {
1313 		/* TYPE: CTRL */
1314 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1315 			CTRL, 1010,
1316 			(htt_tlv_filter->fp_ctrl_filter &
1317 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1318 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1319 			CTRL, 1011,
1320 			(htt_tlv_filter->fp_ctrl_filter &
1321 			FILTER_CTRL_RTS) ? 1 : 0);
1322 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1323 			CTRL, 1100,
1324 			(htt_tlv_filter->fp_ctrl_filter &
1325 			FILTER_CTRL_CTS) ? 1 : 0);
1326 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1327 			CTRL, 1101,
1328 			(htt_tlv_filter->fp_ctrl_filter &
1329 			FILTER_CTRL_ACK) ? 1 : 0);
1330 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1331 			CTRL, 1110,
1332 			(htt_tlv_filter->fp_ctrl_filter &
1333 			FILTER_CTRL_CFEND) ? 1 : 0);
1334 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1335 			CTRL, 1111,
1336 			(htt_tlv_filter->fp_ctrl_filter &
1337 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1338 		/* TYPE: DATA */
1339 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1340 			DATA, MCAST,
1341 			(htt_tlv_filter->fp_data_filter &
1342 			FILTER_DATA_MCAST) ? 1 : 0);
1343 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1344 			DATA, UCAST,
1345 			(htt_tlv_filter->fp_data_filter &
1346 			FILTER_DATA_UCAST) ? 1 : 0);
1347 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1348 			DATA, NULL,
1349 			(htt_tlv_filter->fp_data_filter &
1350 			FILTER_DATA_NULL) ? 1 : 0);
1351 	}
1352 
1353 	if (htt_tlv_filter->enable_md) {
1354 		/* TYPE: CTRL */
1355 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1356 			CTRL, 1010,
1357 			(htt_tlv_filter->md_ctrl_filter &
1358 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1359 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1360 			CTRL, 1011,
1361 			(htt_tlv_filter->md_ctrl_filter &
1362 			FILTER_CTRL_RTS) ? 1 : 0);
1363 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1364 			CTRL, 1100,
1365 			(htt_tlv_filter->md_ctrl_filter &
1366 			FILTER_CTRL_CTS) ? 1 : 0);
1367 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1368 			CTRL, 1101,
1369 			(htt_tlv_filter->md_ctrl_filter &
1370 			FILTER_CTRL_ACK) ? 1 : 0);
1371 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1372 			CTRL, 1110,
1373 			(htt_tlv_filter->md_ctrl_filter &
1374 			FILTER_CTRL_CFEND) ? 1 : 0);
1375 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1376 			CTRL, 1111,
1377 			(htt_tlv_filter->md_ctrl_filter &
1378 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1379 		/* TYPE: DATA */
1380 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1381 			DATA, MCAST,
1382 			(htt_tlv_filter->md_data_filter &
1383 			FILTER_DATA_MCAST) ? 1 : 0);
1384 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1385 			DATA, UCAST,
1386 			(htt_tlv_filter->md_data_filter &
1387 			FILTER_DATA_UCAST) ? 1 : 0);
1388 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1389 			DATA, NULL,
1390 			(htt_tlv_filter->md_data_filter &
1391 			FILTER_DATA_NULL) ? 1 : 0);
1392 	}
1393 
1394 	if (htt_tlv_filter->enable_mo) {
1395 		/* TYPE: CTRL */
1396 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1397 			CTRL, 1010,
1398 			(htt_tlv_filter->mo_ctrl_filter &
1399 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1400 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1401 			CTRL, 1011,
1402 			(htt_tlv_filter->mo_ctrl_filter &
1403 			FILTER_CTRL_RTS) ? 1 : 0);
1404 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1405 			CTRL, 1100,
1406 			(htt_tlv_filter->mo_ctrl_filter &
1407 			FILTER_CTRL_CTS) ? 1 : 0);
1408 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1409 			CTRL, 1101,
1410 			(htt_tlv_filter->mo_ctrl_filter &
1411 			FILTER_CTRL_ACK) ? 1 : 0);
1412 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1413 			CTRL, 1110,
1414 			(htt_tlv_filter->mo_ctrl_filter &
1415 			FILTER_CTRL_CFEND) ? 1 : 0);
1416 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1417 			CTRL, 1111,
1418 			(htt_tlv_filter->mo_ctrl_filter &
1419 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1420 		/* TYPE: DATA */
1421 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1422 			DATA, MCAST,
1423 			(htt_tlv_filter->mo_data_filter &
1424 			FILTER_DATA_MCAST) ? 1 : 0);
1425 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1426 			DATA, UCAST,
1427 			(htt_tlv_filter->mo_data_filter &
1428 			FILTER_DATA_UCAST) ? 1 : 0);
1429 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1430 			DATA, NULL,
1431 			(htt_tlv_filter->mo_data_filter &
1432 			FILTER_DATA_NULL) ? 1 : 0);
1433 	}
1434 
1435 	/* word 6 */
1436 	msg_word++;
1437 	*msg_word = 0;
1438 	tlv_filter = 0;
1439 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1440 		htt_tlv_filter->mpdu_start);
1441 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1442 		htt_tlv_filter->msdu_start);
1443 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1444 		htt_tlv_filter->packet);
1445 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1446 		htt_tlv_filter->msdu_end);
1447 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1448 		htt_tlv_filter->mpdu_end);
1449 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1450 		htt_tlv_filter->packet_header);
1451 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1452 		htt_tlv_filter->attention);
1453 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1454 		htt_tlv_filter->ppdu_start);
1455 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1456 		htt_tlv_filter->ppdu_end);
1457 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1458 		htt_tlv_filter->ppdu_end_user_stats);
1459 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1460 		PPDU_END_USER_STATS_EXT,
1461 		htt_tlv_filter->ppdu_end_user_stats_ext);
1462 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1463 		htt_tlv_filter->ppdu_end_status_done);
1464 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1465 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1466 		 htt_tlv_filter->header_per_msdu);
1467 
1468 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1469 
1470 	/* "response_required" field should be set if a HTT response message is
1471 	 * required after setting up the ring.
1472 	 */
1473 	pkt = htt_htc_pkt_alloc(soc);
1474 	if (!pkt)
1475 		goto fail1;
1476 
1477 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1478 
1479 	SET_HTC_PACKET_INFO_TX(
1480 		&pkt->htc_pkt,
1481 		dp_htt_h2t_send_complete_free_netbuf,
1482 		qdf_nbuf_data(htt_msg),
1483 		qdf_nbuf_len(htt_msg),
1484 		soc->htc_endpoint,
1485 		1); /* tag - not relevant here */
1486 
1487 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1488 	DP_HTT_SEND_HTC_PKT(soc, pkt);
1489 	return QDF_STATUS_SUCCESS;
1490 
1491 fail1:
1492 	qdf_nbuf_free(htt_msg);
1493 fail0:
1494 	return QDF_STATUS_E_FAILURE;
1495 }
1496 
1497 #if defined(CONFIG_WIN) && WDI_EVENT_ENABLE
1498 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1499 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1500 
1501 {
1502 	uint32_t pdev_id;
1503 	uint32_t *msg_word = NULL;
1504 	uint32_t msg_remain_len = 0;
1505 
1506 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1507 
1508 	/*COOKIE MSB*/
1509 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1510 
1511 	/* stats message length + 16 size of HTT header*/
1512 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1513 				(uint32_t)DP_EXT_MSG_LENGTH);
1514 
1515 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1516 			msg_word,  msg_remain_len,
1517 			WDI_NO_VAL, pdev_id);
1518 
1519 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1520 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1521 	}
1522 	/* Need to be freed here as WDI handler will
1523 	 * make a copy of pkt to send data to application
1524 	 */
1525 	qdf_nbuf_free(htt_msg);
1526 	return QDF_STATUS_SUCCESS;
1527 }
1528 #else
1529 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1530 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1531 {
1532 	return QDF_STATUS_E_NOSUPPORT;
1533 }
1534 #endif
1535 
1536 /**
1537  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1538  * @htt_stats: htt stats info
1539  *
1540  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1541  * contains sub messages which are identified by a TLV header.
1542  * In this function we will process the stream of T2H messages and read all the
1543  * TLV contained in the message.
1544  *
 * The following cases have been taken care of
1546  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1547  *		In this case the buffer will contain multiple tlvs.
1548  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1549  *		Only one tlv will be contained in the HTT message and this tag
1550  *		will extend onto the next buffer.
1551  * Case 3: When the buffer is the continuation of the previous message
1552  * Case 4: tlv length is 0. which will indicate the end of message
1553  *
1554  * return: void
1555  */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	/* tlv_buf_head/tlv_buf_tail are non-NULL only while reassembling a
	 * TLV that spans more than one HTT message buffer; tail is the write
	 * cursor into the scratch buffer at head
	 */
	uint8_t *tlv_buf_head = NULL;
	uint8_t *tlv_buf_tail = NULL;
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val;
	int cookie_msb;
	int pdev_id;
	/* NOTE(review): copy_stats is never reset inside the loop, so once
	 * any buffer selects copy mode every later buffer in this batch is
	 * copied too -- presumably all buffers of one response share the
	 * same cookie; confirm
	 */
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		/* cookie LSB (word 1): non-zero requests WDI delivery */
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		if (cookie_val) {
			/* on success the resp sender frees htt_msg itself */
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		/* cookie MSB (word 2): low bits carry the pdev id, the
		 * remaining bits select copy-to-pdev vs print mode.
		 * NOTE(review): pdev_id is masked with HTT_PID_BIT_MASK
		 * (0x3); assumes every masked value indexes a valid entry
		 * of soc->pdev_list -- confirm against MAX_PDEV_CNT
		 */
		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		if (cookie_msb >> 2) {
			copy_stats = true;
		}

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			if (tlv_remain_len == 0) {
				/* Case 4: zero-length TLV ends the stream;
				 * drop any partial reassembly buffer
				 */
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* the TLV length field excludes the TLV header, so
			 * account for it once per TLV (not on continuations)
			 */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3 */
				if (tlv_buf_head) {
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev, tlv_type, tlv_start);
				else
					dp_htt_stats_print_tag(tlv_type, tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				/* stash the fragment; the rest of this TLV
				 * arrives in the next queued buffer
				 */
				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		/* track how much of the overall stats stream is left */
		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* free the current buffer and drain the rest of the queue */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
1697 
1698 void htt_t2h_stats_handler(void *context)
1699 {
1700 	struct dp_soc *soc = (struct dp_soc *)context;
1701 	struct htt_stats_context htt_stats;
1702 	uint32_t *msg_word;
1703 	qdf_nbuf_t htt_msg = NULL;
1704 	uint8_t done;
1705 	uint8_t rem_stats;
1706 
1707 	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
1708 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1709 			"soc: 0x%pK, init_done: %d", soc,
1710 			qdf_atomic_read(&soc->cmn_init_done));
1711 		return;
1712 	}
1713 
1714 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1715 	qdf_nbuf_queue_init(&htt_stats.msg);
1716 
1717 	/* pull one completed stats from soc->htt_stats_msg and process */
1718 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1719 	if (!soc->htt_stats.num_stats) {
1720 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1721 		return;
1722 	}
1723 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1724 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1725 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1726 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1727 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1728 		/*
1729 		 * Done bit signifies that this is the last T2H buffer in the
1730 		 * stream of HTT EXT STATS message
1731 		 */
1732 		if (done)
1733 			break;
1734 	}
1735 	rem_stats = --soc->htt_stats.num_stats;
1736 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1737 
1738 	dp_process_htt_stat_msg(&htt_stats, soc);
1739 	/* If there are more stats to process, schedule stats work again */
1740 	if (rem_stats)
1741 		qdf_sched_work(0, &soc->htt_stats.work);
1742 }
1743 
1744 /*
1745  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1746  * if a new peer id arrives in a PPDU
 * @pdev: DP pdev handle
1748  * @peer_id : peer unique identifier
1749  * @ppdu_info: per ppdu tlv structure
1750  *
1751  * return:user index to be populated
1752  */
1753 #ifdef FEATURE_PERPKT_INFO
1754 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
1755 						uint16_t peer_id,
1756 						struct ppdu_info *ppdu_info)
1757 {
1758 	uint8_t user_index = 0;
1759 	struct cdp_tx_completion_ppdu *ppdu_desc;
1760 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1761 
1762 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1763 
1764 	while ((user_index + 1) <= ppdu_info->last_user) {
1765 		ppdu_user_desc = &ppdu_desc->user[user_index];
1766 		if (ppdu_user_desc->peer_id != peer_id) {
1767 			user_index++;
1768 			continue;
1769 		} else {
1770 			/* Max users possible is 8 so user array index should
1771 			 * not exceed 7
1772 			 */
1773 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
1774 			return user_index;
1775 		}
1776 	}
1777 
1778 	ppdu_info->last_user++;
1779 	/* Max users possible is 8 so last user should not exceed 8 */
1780 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
1781 	return ppdu_info->last_user - 1;
1782 }
1783 
1784 /*
1785  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
 * @pdev: DP pdev handle
1787  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
1788  * @ppdu_info: per ppdu tlv structure
1789  *
1790  * return:void
1791  */
static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
{
	uint16_t frame_type;
	uint16_t freq;
	struct dp_soc *soc = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;

	/* the nbuf in ppdu_info carries the CDP completion descriptor */
	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* tag_buf increments below walk the word layout of
	 * htt_ppdu_stats_common_tlv; assumes the layout defined in
	 * htt_ppdu_stats.h -- TODO confirm if the firmware TLV changes
	 */
	tag_buf += 2;
	ppdu_desc->num_users =
		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
	tag_buf++;
	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);

	/* collapse the firmware frame type into the coarse CDP categories:
	 * data, BAR, or (default) control
	 */
	if ((frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) ||
			(frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU))
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
	else if ((frame_type == HTT_STATS_FTYPE_SGEN_MU_BAR) ||
		 (frame_type == HTT_STATS_FTYPE_SGEN_BAR))
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
	else
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;

	tag_buf += 2;
	ppdu_desc->tx_duration = *tag_buf;
	tag_buf += 3;
	ppdu_desc->ppdu_start_timestamp = *tag_buf;

	/* end timestamp is derived, not reported by firmware */
	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
					ppdu_desc->tx_duration;
	/* Ack time stamp is same as end time stamp*/
	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;

	tag_buf++;

	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
	if (freq != ppdu_desc->channel) {
		/* channel moved: cache the new frequency and refresh the
		 * pdev operating channel via the control-path converter
		 * when one is registered
		 */
		soc = pdev->soc;
		ppdu_desc->channel = freq;
		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
			pdev->operating_channel =
		soc->cdp_soc.ol_ops->freq_to_channel(pdev->ctrl_pdev, freq);
	}

	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
}
1840 
1841 /*
 * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
 * @pdev: DP pdev handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
1844  * @ppdu_info: per ppdu tlv structure
1845  *
1846  * return:void
1847  */
static void dp_process_ppdu_stats_user_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	/* NOTE(review): uses the USER_RATE TLV accessor to read the sw peer
	 * id out of a user_common TLV -- presumably both TLVs keep the peer
	 * id in the same bit position; confirm against htt_ppdu_stats.h
	 */
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	/* find (or allocate) this peer's per-user slot within the PPDU */
	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];

	if (peer_id == DP_SCAN_PEER_ID) {
		/* scan frames have no real peer; record the vdev instead */
		ppdu_desc->vdev_id =
			HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
	} else {
		/* discard the TLV if the peer id is no longer valid */
		if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
			return;
	}

	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;

	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
		ppdu_user_desc->delayed_ba = 1;
	}

	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
		ppdu_user_desc->is_mcast = true;
		ppdu_user_desc->mpdu_tried_mcast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
		/* for mcast the tried count is also recorded as num_mpdu */
		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
	} else {
		ppdu_user_desc->mpdu_tried_ucast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
	}

	tag_buf++;

	ppdu_user_desc->qos_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
	ppdu_user_desc->frame_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;

	/* with delayed BA the MPDU counts are cleared here -- presumably
	 * the final counts arrive in a later TLV; confirm
	 */
	if (ppdu_user_desc->delayed_ba) {
		ppdu_user_desc->mpdu_success = 0;
		ppdu_user_desc->mpdu_tried_mcast = 0;
		ppdu_user_desc->mpdu_tried_ucast = 0;
	}
}
1908 
1909 
/**
 * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
 * @pdev: DP pdev handle
 * @tag_buf: T2H message buffer carrying the user rate TLV
 * @ppdu_info: per ppdu tlv structure
 *
 * Decodes the per-user transmit rate TLV (peer/vdev MAC address, RU
 * allocation and the full rate word: NSS/MCS/BW/GI/LDPC/...) into the
 * per-user section of the cdp_tx_completion_ppdu descriptor.
 *
 * return:void
 */
static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
		uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct dp_peer *peer;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct dp_vdev *vdev;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* Word 1 of the TLV: sw_peer_id (and tid, read below) */
	tag_buf++;
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	if (peer_id == DP_SCAN_PEER_ID) {
		/* Scan PPDU: no peer entry, take the MAC from the vdev */
		vdev =
		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
							  ppdu_desc->vdev_id);
		if (!vdev)
			return;
		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
			     DP_MAC_ADDR_LEN);
	} else {
		peer = dp_peer_find_by_id(pdev->soc, peer_id);
		if (!peer)
			return;
		qdf_mem_copy(ppdu_user_desc->mac_addr,
			     peer->mac_addr.raw, DP_MAC_ADDR_LEN);
		/* Balance the reference taken by dp_peer_find_by_id() */
		dp_peer_unref_del_find_by_id(peer);
	}

	ppdu_user_desc->peer_id = peer_id;

	/* tid shares the current word with sw_peer_id */
	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);

	tag_buf += 1;

	ppdu_user_desc->user_pos =
		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
	ppdu_user_desc->mu_group_id =
		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);

	tag_buf += 1;

	ppdu_user_desc->ru_start =
		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
	/* RU tone count is the inclusive span from RU start to RU end */
	ppdu_user_desc->ru_tones =
		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;

	tag_buf += 2;

	ppdu_user_desc->ppdu_type =
		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);

	tag_buf++;
	/* Cache the whole rate word; individual fields are decoded below */
	ppdu_user_desc->tx_rate = *tag_buf;

	ppdu_user_desc->ltf_size =
		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
	ppdu_user_desc->stbc =
		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
	ppdu_user_desc->he_re =
		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
	ppdu_user_desc->txbf =
		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
	/*
	 * NOTE(review): the "- 2" presumably converts the HTT bandwidth enum
	 * to the CDP bandwidth enum; confirm against both enum definitions.
	 */
	ppdu_user_desc->bw =
		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
	ppdu_user_desc->preamble =
		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
}
2001 
2002 /*
2003  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
2004  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2005  * pdev: DP PDEV handle
2006  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2007  * @ppdu_info: per ppdu tlv structure
2008  *
2009  * return:void
2010  */
2011 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2012 		struct dp_pdev *pdev, uint32_t *tag_buf,
2013 		struct ppdu_info *ppdu_info)
2014 {
2015 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
2016 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
2017 
2018 	struct cdp_tx_completion_ppdu *ppdu_desc;
2019 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2020 	uint8_t curr_user_index = 0;
2021 	uint16_t peer_id;
2022 
2023 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2024 
2025 	tag_buf++;
2026 
2027 	peer_id =
2028 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2029 
2030 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2031 		return;
2032 
2033 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2034 
2035 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2036 	ppdu_user_desc->peer_id = peer_id;
2037 
2038 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2039 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2040 					CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2041 }
2042 
2043 /*
2044  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
2045  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2046  * soc: DP SOC handle
2047  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2048  * @ppdu_info: per ppdu tlv structure
2049  *
2050  * return:void
2051  */
2052 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2053 		struct dp_pdev *pdev, uint32_t *tag_buf,
2054 		struct ppdu_info *ppdu_info)
2055 {
2056 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
2057 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
2058 
2059 	struct cdp_tx_completion_ppdu *ppdu_desc;
2060 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2061 	uint8_t curr_user_index = 0;
2062 	uint16_t peer_id;
2063 
2064 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2065 
2066 	tag_buf++;
2067 
2068 	peer_id =
2069 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2070 
2071 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2072 		return;
2073 
2074 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2075 
2076 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2077 	ppdu_user_desc->peer_id = peer_id;
2078 
2079 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2080 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2081 					CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2082 }
2083 
2084 /*
2085  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
2086  * htt_ppdu_stats_user_cmpltn_common_tlv
2087  * soc: DP SOC handle
2088  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
2089  * @ppdu_info: per ppdu tlv structure
2090  *
2091  * return:void
2092  */
2093 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
2094 		struct dp_pdev *pdev, uint32_t *tag_buf,
2095 		struct ppdu_info *ppdu_info)
2096 {
2097 	uint16_t peer_id;
2098 	struct cdp_tx_completion_ppdu *ppdu_desc;
2099 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2100 	uint8_t curr_user_index = 0;
2101 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
2102 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
2103 
2104 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2105 
2106 	tag_buf++;
2107 	peer_id =
2108 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
2109 
2110 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2111 		return;
2112 
2113 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2114 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2115 	ppdu_user_desc->peer_id = peer_id;
2116 
2117 	ppdu_user_desc->completion_status =
2118 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
2119 				*tag_buf);
2120 
2121 	ppdu_user_desc->tid =
2122 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
2123 
2124 
2125 	tag_buf++;
2126 	if (qdf_likely(ppdu_user_desc->completion_status ==
2127 			HTT_PPDU_STATS_USER_STATUS_OK)) {
2128 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
2129 		ppdu_user_desc->ack_rssi_valid = 1;
2130 	} else {
2131 		ppdu_user_desc->ack_rssi_valid = 0;
2132 	}
2133 
2134 	tag_buf++;
2135 
2136 	ppdu_user_desc->mpdu_success =
2137 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
2138 
2139 	tag_buf++;
2140 
2141 	ppdu_user_desc->long_retries =
2142 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2143 
2144 	ppdu_user_desc->short_retries =
2145 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
2146 	ppdu_user_desc->retry_msdus =
2147 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
2148 
2149 	ppdu_user_desc->is_ampdu =
2150 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
2151 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
2152 
2153 }
2154 
2155 /*
2156  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2157  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2158  * pdev: DP PDEV handle
2159  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2160  * @ppdu_info: per ppdu tlv structure
2161  *
2162  * return:void
2163  */
2164 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2165 		struct dp_pdev *pdev, uint32_t *tag_buf,
2166 		struct ppdu_info *ppdu_info)
2167 {
2168 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2169 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2170 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2171 	struct cdp_tx_completion_ppdu *ppdu_desc;
2172 	uint8_t curr_user_index = 0;
2173 	uint16_t peer_id;
2174 
2175 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2176 
2177 	tag_buf++;
2178 
2179 	peer_id =
2180 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2181 
2182 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2183 		return;
2184 
2185 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2186 
2187 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2188 	ppdu_user_desc->peer_id = peer_id;
2189 
2190 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2191 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2192 			CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2193 }
2194 
2195 /*
2196  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2197  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2198  * pdev: DP PDEV handle
2199  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2200  * @ppdu_info: per ppdu tlv structure
2201  *
2202  * return:void
2203  */
2204 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2205 		struct dp_pdev *pdev, uint32_t *tag_buf,
2206 		struct ppdu_info *ppdu_info)
2207 {
2208 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2209 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2210 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2211 	struct cdp_tx_completion_ppdu *ppdu_desc;
2212 	uint8_t curr_user_index = 0;
2213 	uint16_t peer_id;
2214 
2215 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2216 
2217 	tag_buf++;
2218 
2219 	peer_id =
2220 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2221 
2222 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2223 		return;
2224 
2225 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2226 
2227 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2228 	ppdu_user_desc->peer_id = peer_id;
2229 
2230 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2231 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2232 			CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2233 }
2234 
/*
 * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
 * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Records the acknowledged MPDU/MSDU counts and the successfully
 * delivered byte count for the user that this BA status refers to.
 *
 * return:void
 */
static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* sw_peer_id sits two words past the TLV header in this TLV */
	tag_buf += 2;
	peer_id =
	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);

	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
		return;

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);

	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;
	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(*tag_buf);
	ppdu_user_desc->num_mpdu =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);

	ppdu_user_desc->num_msdu =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);

	/* All MSDUs reported in this TLV were acked successfully */
	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;

	tag_buf += 2;
	ppdu_user_desc->success_bytes = *tag_buf;

}
2282 
2283 /*
2284  * dp_process_ppdu_stats_user_common_array_tlv: Process
2285  * htt_ppdu_stats_user_common_array_tlv
2286  * pdev: DP PDEV handle
2287  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2288  * @ppdu_info: per ppdu tlv structure
2289  *
2290  * return:void
2291  */
2292 static void dp_process_ppdu_stats_user_common_array_tlv(
2293 		struct dp_pdev *pdev, uint32_t *tag_buf,
2294 		struct ppdu_info *ppdu_info)
2295 {
2296 	uint32_t peer_id;
2297 	struct cdp_tx_completion_ppdu *ppdu_desc;
2298 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2299 	uint8_t curr_user_index = 0;
2300 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2301 
2302 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2303 
2304 	tag_buf++;
2305 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2306 	tag_buf += 3;
2307 	peer_id =
2308 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2309 
2310 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
2311 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2312 			"Invalid peer");
2313 		return;
2314 	}
2315 
2316 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2317 
2318 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2319 
2320 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2321 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2322 
2323 	tag_buf++;
2324 
2325 	ppdu_user_desc->success_msdus =
2326 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2327 	ppdu_user_desc->retry_bytes =
2328 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2329 	tag_buf++;
2330 	ppdu_user_desc->failed_msdus =
2331 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2332 }
2333 
2334 /*
2335  * dp_process_ppdu_stats_flush_tlv: Process
2336  * htt_ppdu_stats_flush_tlv
2337  * @pdev: DP PDEV handle
2338  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2339  *
2340  * return:void
2341  */
2342 static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2343 						uint32_t *tag_buf)
2344 {
2345 	uint32_t peer_id;
2346 	uint32_t drop_reason;
2347 	uint8_t tid;
2348 	uint32_t num_msdu;
2349 	struct dp_peer *peer;
2350 
2351 	tag_buf++;
2352 	drop_reason = *tag_buf;
2353 
2354 	tag_buf++;
2355 	num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2356 
2357 	tag_buf++;
2358 	peer_id =
2359 		HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2360 
2361 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2362 	if (!peer)
2363 		return;
2364 
2365 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2366 
2367 	if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2368 		DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2369 					num_msdu);
2370 	}
2371 
2372 	dp_peer_unref_del_find_by_id(peer);
2373 }
2374 
2375 /*
2376  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2377  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2378  * @pdev: DP PDEV handle
2379  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2380  * @length: tlv_length
2381  *
2382  * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller
2383  */
2384 static QDF_STATUS
2385 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2386 					      qdf_nbuf_t tag_buf,
2387 					      uint32_t ppdu_id)
2388 {
2389 	uint32_t *nbuf_ptr;
2390 	uint8_t trim_size;
2391 
2392 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
2393 	    (!pdev->bpr_enable))
2394 		return QDF_STATUS_SUCCESS;
2395 
2396 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
2397 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2398 		      qdf_nbuf_data(tag_buf));
2399 
2400 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2401 		return QDF_STATUS_SUCCESS;
2402 
2403 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2404 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
2405 
2406 	nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(
2407 				tag_buf, sizeof(ppdu_id));
2408 	*nbuf_ptr = ppdu_id;
2409 
2410 	if (pdev->bpr_enable) {
2411 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2412 				     tag_buf, HTT_INVALID_PEER,
2413 				     WDI_NO_VAL, pdev->pdev_id);
2414 	}
2415 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2416 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2417 				     tag_buf, HTT_INVALID_PEER,
2418 				     WDI_NO_VAL, pdev->pdev_id);
2419 	}
2420 
2421 	return QDF_STATUS_E_ALREADY;
2422 }
2423 
/**
 * dp_process_ppdu_tag(): Function to process the PPDU TLVs
 * @pdev: DP pdev handle
 * @tag_buf: TLV buffer
 * @tlv_len: length of tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Dispatches each PPDU stats TLV to its dedicated handler. Every case
 * asserts that the advertised TLV length can hold the corresponding
 * structure before the handler dereferences it.
 *
 * return: void
 */
static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
		uint32_t tlv_len, struct ppdu_info *ppdu_info)
{
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	switch (tlv_type) {
	case HTT_PPDU_STATS_COMMON_TLV:
		qdf_assert_always(tlv_len >=
				sizeof(htt_ppdu_stats_common_tlv));
		dp_process_ppdu_stats_common_tlv(pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_TLV:
		qdf_assert_always(tlv_len >=
				sizeof(htt_ppdu_stats_user_common_tlv));
		dp_process_ppdu_stats_user_common_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_RATE_TLV:
		qdf_assert_always(tlv_len >=
				sizeof(htt_ppdu_stats_user_rate_tlv));
		dp_process_ppdu_stats_user_rate_tlv(pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
		qdf_assert_always(tlv_len >=
				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv));
		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
		qdf_assert_always(tlv_len >=
				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv));
		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
		qdf_assert_always(tlv_len >=
				sizeof(htt_ppdu_stats_user_cmpltn_common_tlv));
		dp_process_ppdu_stats_user_cmpltn_common_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
		qdf_assert_always(tlv_len >=
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv));
		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
		qdf_assert_always(tlv_len >=
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv));
		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
		qdf_assert_always(tlv_len >=
			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv));
		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
		qdf_assert_always(tlv_len >=
			sizeof(htt_ppdu_stats_usr_common_array_tlv_v));
		dp_process_ppdu_stats_user_common_array_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
		qdf_assert_always(tlv_len >=
			sizeof(htt_ppdu_stats_flush_tlv));
		dp_process_ppdu_stats_user_compltn_flush_tlv(
				pdev, tag_buf);
		break;
	default:
		/* Unknown/unsupported TLV types are silently skipped */
		break;
	}
}
2507 
/**
 * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
 * to upper layer
 * @pdev: DP pdev handle
 * @ppdu_info: per PPDU TLV descriptor
 *
 * Aggregates per-user stats into the peer statistics, unlinks the
 * ppdu_info from the pdev list and hands the descriptor nbuf to WDI
 * (or frees it if no consumer wants it).
 *
 * return: void
 */
static
void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
			  struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	struct dp_peer *peer = NULL;
	qdf_nbuf_t nbuf;
	uint16_t i;
	uint32_t tlv_bitmap_expected;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(ppdu_info->nbuf);

	ppdu_desc->num_users = ppdu_info->last_user;
	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;

	/* In sniffer mode an A-MPDU PPDU may legitimately carry a different
	 * set of TLVs; widen the expected bitmap accordingly.
	 */
	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
					ppdu_info->tlv_bitmap);
	}
	for (i = 0; i < ppdu_desc->num_users; i++) {

		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;

		peer = dp_peer_find_by_id(pdev->soc,
					  ppdu_desc->user[i].peer_id);
		/**
		 * This check is to make sure peer is not deleted
		 * after processing the TLVs.
		 */
		if (!peer)
			continue;

		/* Skip stats update when some expected TLVs are missing */
		if (ppdu_info->tlv_bitmap != tlv_bitmap_expected) {
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}
		if (ppdu_desc->user[i].tid < CDP_DATA_TID_MAX) {

			dp_tx_stats_update(pdev->soc, peer,
					&ppdu_desc->user[i],
					ppdu_desc->ack_rssi);
		}

		dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
		/* Drop the reference taken by dp_peer_find_by_id() */
		dp_peer_unref_del_find_by_id(peer);
	}

	/*
	 * Remove from the list
	 */
	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
	nbuf = ppdu_info->nbuf;
	pdev->list_depth--;
	qdf_mem_free(ppdu_info);

	qdf_assert_always(nbuf);

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(nbuf);

	/**
	 * Deliver PPDU stats only for valid (acked) data frames if
	 * sniffer mode is not enabled.
	 * If sniffer mode is enabled, PPDU stats for all frames
	 * including mgmt/control frames should be delivered to upper layer
	 */
	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
				nbuf, HTT_INVALID_PEER,
				WDI_NO_VAL, pdev->pdev_id);
	} else {
		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {

			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
					pdev->soc, nbuf, HTT_INVALID_PEER,
					WDI_NO_VAL, pdev->pdev_id);
		} else
			qdf_nbuf_free(nbuf);
	}
	return;
}
2603 
/**
 * dp_get_ppdu_desc(): Function to allocate new PPDU status
 * desc for new ppdu id
 * @pdev: DP pdev handle
 * @ppdu_id: PPDU unique identifier
 * @tlv_type: TLV type received
 *
 * Looks up an existing ppdu_info for @ppdu_id; if the same TLV type was
 * already seen, the older descriptor is flushed first. Allocates a new
 * ppdu_info (and its nbuf-backed descriptor) when no reusable entry
 * exists.
 *
 * return: ppdu_info per ppdu tlv structure, or NULL on allocation failure
 */
static
struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
			uint8_t tlv_type)
{
	struct ppdu_info *ppdu_info = NULL;

	/*
	 * Find ppdu_id node exists or not
	 */
	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {

		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
			break;
		}
	}

	if (ppdu_info) {
		/**
		 * if we get tlv_type that is already been processed for ppdu,
		 * that means we got a new ppdu with same ppdu id.
		 * Hence Flush the older ppdu
		 */
		if (ppdu_info->tlv_bitmap & (1 << tlv_type))
			dp_ppdu_desc_deliver(pdev, ppdu_info);
		else
			return ppdu_info;
	}

	/**
	 * Flush the head ppdu descriptor if ppdu desc list reaches max
	 * threshold
	 */
	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
		dp_ppdu_desc_deliver(pdev, ppdu_info);
	}

	/*
	 * Allocate new ppdu_info node
	 */
	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
	if (!ppdu_info)
		return NULL;

	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
			TRUE);
	if (!ppdu_info->nbuf) {
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	/* The nbuf data area doubles as the zero-initialized descriptor */
	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
			sizeof(struct cdp_tx_completion_ppdu));

	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"No tailroom for HTT PPDU");
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		ppdu_info->last_user = 0;
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	/**
	 * No lock is needed because all PPDU TLVs are processed in
	 * same context and this list is updated in same context
	 */
	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
			ppdu_info_list_elem);
	pdev->list_depth++;
	return ppdu_info;
}
2688 
/**
 * dp_htt_process_tlv(): Function to process each PPDU TLVs
 * @pdev: DP pdev handle
 * @htt_t2h_msg: HTT target to host message
 *
 * Walks the stream of TLVs carried in one PPDU stats indication,
 * dispatching each to dp_process_ppdu_tag(). Returns the ppdu_info
 * only once all expected TLVs for the PPDU have been received.
 *
 * return: ppdu_info per ppdu tlv structure, or NULL if incomplete
 */

static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
		qdf_nbuf_t htt_t2h_msg)
{
	uint32_t length;
	uint32_t ppdu_id;
	uint8_t tlv_type;
	uint32_t tlv_length, tlv_bitmap_expected;
	uint8_t *tlv_buf;
	struct ppdu_info *ppdu_info = NULL;

	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);

	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);

	msg_word = msg_word + 1;
	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);


	/* Skip the remaining indication header words to reach the TLVs */
	msg_word = msg_word + 3;
	while (length > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
			pdev->stats.ppdu_stats_counter[tlv_type]++;

		/* A zero-length TLV terminates the walk (malformed stream) */
		if (tlv_length == 0)
			break;

		tlv_length += HTT_TLV_HDR_LEN;

		/**
		 * Not allocating separate ppdu descriptor for MGMT Payload
		 * TLV as this is sent as separate WDI indication and it
		 * doesn't contain any ppdu information
		 */
		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
			pdev->mgmtctrl_frm_info.mgmt_buf_len = tlv_length;
			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
			msg_word =
				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
			length -= (tlv_length);
			continue;
		}

		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
		if (!ppdu_info)
			return NULL;
		ppdu_info->ppdu_id = ppdu_id;
		ppdu_info->tlv_bitmap |= (1 << tlv_type);

		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);

		/**
		 * Increment pdev level tlv count to monitor
		 * missing TLVs
		 */
		pdev->tlv_count++;
		ppdu_info->last_tlv_cnt = pdev->tlv_count;
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		length -= (tlv_length);
	}

	if (!ppdu_info)
		return NULL;

	pdev->last_ppdu_id = ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;

	/* Sniffer-mode A-MPDU PPDUs carry a different set of TLVs */
	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
					ppdu_info->tlv_bitmap);
	}

	/**
	 * Once all the TLVs for a given PPDU has been processed,
	 * return PPDU status to be delivered to higher layer
	 */
	if (ppdu_info->tlv_bitmap != 0 &&
	    ppdu_info->tlv_bitmap == tlv_bitmap_expected)
		return ppdu_info;

	return NULL;
}
2785 #endif /* FEATURE_PERPKT_INFO */
2786 
2787 /**
2788  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
2789  * @soc: DP SOC handle
2790  * @pdev_id: pdev id
2791  * @htt_t2h_msg: HTT message nbuf
2792  *
2793  * return:void
2794  */
2795 #if defined(WDI_EVENT_ENABLE)
2796 #ifdef FEATURE_PERPKT_INFO
2797 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
2798 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
2799 {
2800 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
2801 	struct ppdu_info *ppdu_info = NULL;
2802 	bool free_buf = true;
2803 
2804 	if (!pdev)
2805 		return true;
2806 
2807 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
2808 	    !pdev->mcopy_mode && !pdev->bpr_enable)
2809 		return free_buf;
2810 
2811 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
2812 
2813 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
2814 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
2815 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
2816 		    QDF_STATUS_SUCCESS)
2817 			free_buf = false;
2818 
2819 		pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
2820 		pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
2821 		pdev->mgmtctrl_frm_info.ppdu_id = 0;
2822 	}
2823 
2824 	if (ppdu_info)
2825 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2826 
2827 	return free_buf;
2828 }
2829 #else
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	/* PPDU stats compiled out: tell the caller to free the nbuf */
	return true;
}
2835 #endif
2836 #endif
2837 
2838 /**
2839  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2840  * @soc: DP SOC handle
2841  * @htt_t2h_msg: HTT message nbuf
2842  *
2843  * return:void
2844  */
2845 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2846 		qdf_nbuf_t htt_t2h_msg)
2847 {
2848 	uint8_t done;
2849 	qdf_nbuf_t msg_copy;
2850 	uint32_t *msg_word;
2851 
2852 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2853 	msg_word = msg_word + 3;
2854 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2855 
2856 	/*
2857 	 * HTT EXT stats response comes as stream of TLVs which span over
2858 	 * multiple T2H messages.
2859 	 * The first message will carry length of the response.
2860 	 * For rest of the messages length will be zero.
2861 	 *
2862 	 * Clone the T2H message buffer and store it in a list to process
2863 	 * it later.
2864 	 *
2865 	 * The original T2H message buffers gets freed in the T2H HTT event
2866 	 * handler
2867 	 */
2868 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2869 
2870 	if (!msg_copy) {
2871 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2872 				"T2H messge clone failed for HTT EXT STATS");
2873 		goto error;
2874 	}
2875 
2876 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2877 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2878 	/*
2879 	 * Done bit signifies that this is the last T2H buffer in the stream of
2880 	 * HTT EXT STATS message
2881 	 */
2882 	if (done) {
2883 		soc->htt_stats.num_stats++;
2884 		qdf_sched_work(0, &soc->htt_stats.work);
2885 	}
2886 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2887 
2888 	return;
2889 
2890 error:
2891 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2892 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2893 			!= NULL) {
2894 		qdf_nbuf_free(msg_copy);
2895 	}
2896 	soc->htt_stats.num_stats = 0;
2897 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2898 	return;
2899 
2900 }
2901 
2902 /*
2903  * htt_soc_attach_target() - SOC level HTT setup
2904  * @htt_soc:	HTT SOC handle
2905  *
2906  * Return: 0 on success; error code on failure
2907  */
2908 int htt_soc_attach_target(void *htt_soc)
2909 {
2910 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
2911 
2912 	return htt_h2t_ver_req_msg(soc);
2913 }
2914 
2915 
2916 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
2917 /*
2918  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
2919  * @htt_soc:	 HTT SOC handle
2920  * @msg_word:    Pointer to payload
2921  * @htt_t2h_msg: HTT msg nbuf
2922  *
2923  * Return: True if buffer should be freed by caller.
2924  */
2925 static bool
2926 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
2927 				uint32_t *msg_word,
2928 				qdf_nbuf_t htt_t2h_msg)
2929 {
2930 	u_int8_t pdev_id;
2931 	bool free_buf;
2932 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
2933 	dp_debug("received HTT_T2H_MSG_TYPE_PPDU_STATS_IND");
2934 	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
2935 	pdev_id = DP_HW2SW_MACID(pdev_id);
2936 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
2937 					      htt_t2h_msg);
2938 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
2939 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
2940 		pdev_id);
2941 	return free_buf;
2942 }
2943 #else
/* Stub used when WDI events/pktlog are compiled out: PPDU stats indications
 * are not consumed, so the caller should always free the message buffer.
 */
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
				uint32_t *msg_word,
				qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
2951 #endif
2952 
2953 #if defined(WDI_EVENT_ENABLE) && \
2954 	!defined(REMOVE_PKT_LOG)
2955 /*
2956  * dp_pktlog_msg_handler() - Pktlog msg handler
2957  * @htt_soc:	 HTT SOC handle
2958  * @msg_word:    Pointer to payload
2959  *
2960  * Return: None
2961  */
2962 static void
2963 dp_pktlog_msg_handler(struct htt_soc *soc,
2964 		      uint32_t *msg_word)
2965 {
2966 	uint8_t pdev_id;
2967 	uint32_t *pl_hdr;
2968 
2969 	dp_debug("received HTT_T2H_MSG_TYPE_PKTLOG");
2970 	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2971 	pdev_id = DP_HW2SW_MACID(pdev_id);
2972 	pl_hdr = (msg_word + 1);
2973 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2974 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2975 		pdev_id);
2976 }
2977 #else
/* Stub used when WDI events/pktlog are compiled out: pktlog messages are
 * silently ignored.
 */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
2983 #endif
2984 /*
2985  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
2986  * @context:	Opaque context (HTT SOC handle)
2987  * @pkt:	HTC packet
2988  */
2989 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
2990 {
2991 	struct htt_soc *soc = (struct htt_soc *) context;
2992 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
2993 	u_int32_t *msg_word;
2994 	enum htt_t2h_msg_type msg_type;
2995 	bool free_buf = true;
2996 
2997 	/* check for successful message reception */
2998 	if (pkt->Status != QDF_STATUS_SUCCESS) {
2999 		if (pkt->Status != QDF_STATUS_E_CANCELED)
3000 			soc->stats.htc_err_cnt++;
3001 
3002 		qdf_nbuf_free(htt_t2h_msg);
3003 		return;
3004 	}
3005 
3006 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
3007 
3008 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
3009 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3010 	switch (msg_type) {
3011 	case HTT_T2H_MSG_TYPE_PEER_MAP:
3012 		{
3013 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
3014 			u_int8_t *peer_mac_addr;
3015 			u_int16_t peer_id;
3016 			u_int16_t hw_peer_id;
3017 			u_int8_t vdev_id;
3018 			u_int8_t is_wds;
3019 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
3020 
3021 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
3022 			hw_peer_id =
3023 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
3024 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
3025 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
3026 				(u_int8_t *) (msg_word+1),
3027 				&mac_addr_deswizzle_buf[0]);
3028 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3029 				QDF_TRACE_LEVEL_INFO,
3030 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3031 				peer_id, vdev_id);
3032 
3033 			/*
3034 			 * check if peer already exists for this peer_id, if so
3035 			 * this peer map event is in response for a wds peer add
3036 			 * wmi command sent during wds source port learning.
3037 			 * in this case just add the ast entry to the existing
3038 			 * peer ast_list.
3039 			 */
3040 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
3041 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
3042 					       vdev_id, peer_mac_addr, 0,
3043 					       is_wds);
3044 			break;
3045 		}
3046 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
3047 		{
3048 			u_int16_t peer_id;
3049 			u_int8_t vdev_id;
3050 			u_int8_t mac_addr[HTT_MAC_ADDR_LEN] = {0};
3051 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
3052 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
3053 
3054 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3055 						 vdev_id, mac_addr, 0);
3056 			break;
3057 		}
3058 	case HTT_T2H_MSG_TYPE_SEC_IND:
3059 		{
3060 			u_int16_t peer_id;
3061 			enum cdp_sec_type sec_type;
3062 			int is_unicast;
3063 
3064 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
3065 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
3066 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
3067 			/* point to the first part of the Michael key */
3068 			msg_word++;
3069 			dp_rx_sec_ind_handler(
3070 				soc->dp_soc, peer_id, sec_type, is_unicast,
3071 				msg_word, msg_word + 2);
3072 			break;
3073 		}
3074 
3075 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
3076 		{
3077 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
3078 							     htt_t2h_msg);
3079 			break;
3080 		}
3081 
3082 	case HTT_T2H_MSG_TYPE_PKTLOG:
3083 		{
3084 			dp_pktlog_msg_handler(soc, msg_word);
3085 			break;
3086 		}
3087 
3088 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3089 		{
3090 			htc_pm_runtime_put(soc->htc_soc);
3091 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3092 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3093 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3094 				"target uses HTT version %d.%d; host uses %d.%d",
3095 				soc->tgt_ver.major, soc->tgt_ver.minor,
3096 				HTT_CURRENT_VERSION_MAJOR,
3097 				HTT_CURRENT_VERSION_MINOR);
3098 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3099 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3100 					QDF_TRACE_LEVEL_ERROR,
3101 					"*** Incompatible host/target HTT versions!");
3102 			}
3103 			/* abort if the target is incompatible with the host */
3104 			qdf_assert(soc->tgt_ver.major ==
3105 				HTT_CURRENT_VERSION_MAJOR);
3106 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3107 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3108 					QDF_TRACE_LEVEL_WARN,
3109 					"*** Warning: host/target HTT versions"
3110 					" are different, though compatible!");
3111 			}
3112 			break;
3113 		}
3114 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3115 		{
3116 			uint16_t peer_id;
3117 			uint8_t tid;
3118 			uint8_t win_sz;
3119 			uint16_t status;
3120 			struct dp_peer *peer;
3121 
3122 			/*
3123 			 * Update REO Queue Desc with new values
3124 			 */
3125 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3126 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3127 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3128 			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);
3129 
3130 			/*
3131 			 * Window size needs to be incremented by 1
3132 			 * since fw needs to represent a value of 256
3133 			 * using just 8 bits
3134 			 */
3135 			if (peer) {
3136 				status = dp_addba_requestprocess_wifi3(peer,
3137 						0, tid, 0, win_sz + 1, 0xffff);
3138 
3139 				/*
3140 				 * If PEER_LOCK_REF_PROTECT enbled dec ref
3141 				 * which is inc by dp_peer_find_by_id
3142 				 */
3143 				dp_peer_unref_del_find_by_id(peer);
3144 
3145 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3146 					QDF_TRACE_LEVEL_INFO,
3147 					FL("PeerID %d BAW %d TID %d stat %d"),
3148 					peer_id, win_sz, tid, status);
3149 
3150 			} else {
3151 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3152 					QDF_TRACE_LEVEL_ERROR,
3153 					FL("Peer not found peer id %d"),
3154 					peer_id);
3155 			}
3156 			break;
3157 		}
3158 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3159 		{
3160 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3161 			break;
3162 		}
3163 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
3164 		{
3165 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
3166 			u_int8_t *peer_mac_addr;
3167 			u_int16_t peer_id;
3168 			u_int16_t hw_peer_id;
3169 			u_int8_t vdev_id;
3170 			bool is_wds;
3171 			u_int16_t ast_hash;
3172 
3173 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
3174 			hw_peer_id =
3175 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
3176 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
3177 			peer_mac_addr =
3178 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3179 						   &mac_addr_deswizzle_buf[0]);
3180 			is_wds =
3181 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
3182 			ast_hash =
3183 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
3184 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3185 				  QDF_TRACE_LEVEL_INFO,
3186 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3187 				  peer_id, vdev_id);
3188 
3189 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3190 					       hw_peer_id, vdev_id,
3191 					       peer_mac_addr, ast_hash,
3192 					       is_wds);
3193 			break;
3194 		}
3195 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
3196 		{
3197 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
3198 			u_int8_t *mac_addr;
3199 			u_int16_t peer_id;
3200 			u_int8_t vdev_id;
3201 			u_int8_t is_wds;
3202 
3203 			peer_id =
3204 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
3205 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
3206 			mac_addr =
3207 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3208 						   &mac_addr_deswizzle_buf[0]);
3209 			is_wds =
3210 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
3211 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3212 				  QDF_TRACE_LEVEL_INFO,
3213 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3214 				  peer_id, vdev_id);
3215 
3216 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3217 						 vdev_id, mac_addr,
3218 						 is_wds);
3219 			break;
3220 		}
3221 	default:
3222 		break;
3223 	};
3224 
3225 	/* Free the indication buffer */
3226 	if (free_buf)
3227 		qdf_nbuf_free(htt_t2h_msg);
3228 }
3229 
3230 /*
3231  * dp_htt_h2t_full() - Send full handler (called from HTC)
3232  * @context:	Opaque context (HTT SOC handle)
3233  * @pkt:	HTC packet
3234  *
3235  * Return: enum htc_send_full_action
3236  */
3237 static enum htc_send_full_action
3238 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
3239 {
3240 	return HTC_SEND_FULL_KEEP;
3241 }
3242 
3243 /*
3244  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3245  * @context:	Opaque context (HTT SOC handle)
3246  * @nbuf:	nbuf containing T2H message
3247  * @pipe_id:	HIF pipe ID
3248  *
3249  * Return: QDF_STATUS
3250  *
3251  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3252  * will be used for packet log and other high-priority HTT messages. Proper
3253  * HTC connection to be added later once required FW changes are available
3254  */
3255 static QDF_STATUS
3256 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3257 {
3258 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3259 	HTC_PACKET htc_pkt;
3260 
3261 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3262 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3263 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3264 	htc_pkt.pPktContext = (void *)nbuf;
3265 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3266 
3267 	return rc;
3268 }
3269 
3270 /*
3271  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3272  * @htt_soc:	HTT SOC handle
3273  *
3274  * Return: QDF_STATUS
3275  */
3276 static QDF_STATUS
3277 htt_htc_soc_attach(struct htt_soc *soc)
3278 {
3279 	struct htc_service_connect_req connect;
3280 	struct htc_service_connect_resp response;
3281 	QDF_STATUS status;
3282 	struct dp_soc *dpsoc = soc->dp_soc;
3283 
3284 	qdf_mem_set(&connect, sizeof(connect), 0);
3285 	qdf_mem_set(&response, sizeof(response), 0);
3286 
3287 	connect.pMetaData = NULL;
3288 	connect.MetaDataLength = 0;
3289 	connect.EpCallbacks.pContext = soc;
3290 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3291 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3292 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3293 
3294 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3295 	connect.EpCallbacks.EpRecvRefill = NULL;
3296 
3297 	/* N/A, fill is done by HIF */
3298 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3299 
3300 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3301 	/*
3302 	 * Specify how deep to let a queue get before htc_send_pkt will
3303 	 * call the EpSendFull function due to excessive send queue depth.
3304 	 */
3305 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3306 
3307 	/* disable flow control for HTT data message service */
3308 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3309 
3310 	/* connect to control service */
3311 	connect.service_id = HTT_DATA_MSG_SVC;
3312 
3313 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3314 
3315 	if (status != QDF_STATUS_SUCCESS)
3316 		return status;
3317 
3318 	soc->htc_endpoint = response.Endpoint;
3319 
3320 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3321 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3322 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3323 
3324 	return QDF_STATUS_SUCCESS; /* success */
3325 }
3326 
3327 /*
3328  * htt_soc_initialize() - SOC level HTT initialization
3329  * @htt_soc: Opaque htt SOC handle
3330  * @ctrl_psoc: Opaque ctrl SOC handle
3331  * @htc_soc: SOC level HTC handle
3332  * @hal_soc: Opaque HAL SOC handle
3333  * @osdev: QDF device
3334  *
3335  * Return: HTT handle on success; NULL on failure
3336  */
3337 void *
3338 htt_soc_initialize(void *htt_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
3339 		   void *hal_soc, qdf_device_t osdev)
3340 {
3341 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3342 
3343 	soc->osdev = osdev;
3344 	soc->ctrl_psoc = ctrl_psoc;
3345 	soc->htc_soc = htc_soc;
3346 	soc->hal_soc = hal_soc;
3347 
3348 	if (htt_htc_soc_attach(soc))
3349 		goto fail2;
3350 
3351 	return soc;
3352 
3353 fail2:
3354 	return NULL;
3355 }
3356 
/* Release both HTC packet pools owned by the HTT SOC: the misc-pkt list
 * and the preallocated packet freelist (see htt_soc_htc_prealloc()).
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
3362 
3363 /*
3364  * htt_soc_htc_prealloc() - HTC memory prealloc
3365  * @htt_soc: SOC level HTT handle
3366  *
3367  * Return: QDF_STATUS_SUCCESS on Success or
3368  * QDF_STATUS_E_NOMEM on allocation failure
3369  */
3370 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
3371 {
3372 	int i;
3373 
3374 	HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);
3375 
3376 	soc->htt_htc_pkt_freelist = NULL;
3377 	/* pre-allocate some HTC_PACKET objects */
3378 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3379 		struct dp_htt_htc_pkt_union *pkt;
3380 		pkt = qdf_mem_malloc(sizeof(*pkt));
3381 		if (!pkt)
3382 			return QDF_STATUS_E_NOMEM;
3383 
3384 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3385 	}
3386 	return QDF_STATUS_SUCCESS;
3387 }
3388 
3389 /*
3390  * htt_soc_detach() - Free SOC level HTT handle
3391  * @htt_hdl: HTT SOC handle
3392  */
3393 void htt_soc_detach(void *htt_hdl)
3394 {
3395 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
3396 
3397 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
3398 	qdf_mem_free(htt_handle);
3399 }
3400 
3401 /**
3402  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
3403  * @pdev: DP PDEV handle
3404  * @stats_type_upload_mask: stats type requested by user
3405  * @config_param_0: extra configuration parameters
3406  * @config_param_1: extra configuration parameters
3407  * @config_param_2: extra configuration parameters
3408  * @config_param_3: extra configuration parameters
3409  * @mac_id: mac number
3410  *
3411  * return: QDF STATUS
3412  */
3413 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3414 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3415 		uint32_t config_param_1, uint32_t config_param_2,
3416 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3417 		uint8_t mac_id)
3418 {
3419 	struct htt_soc *soc = pdev->soc->htt_handle;
3420 	struct dp_htt_htc_pkt *pkt;
3421 	qdf_nbuf_t msg;
3422 	uint32_t *msg_word;
3423 	uint8_t pdev_mask = 0;
3424 
3425 	msg = qdf_nbuf_alloc(
3426 			soc->osdev,
3427 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3428 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3429 
3430 	if (!msg)
3431 		return QDF_STATUS_E_NOMEM;
3432 
3433 	/*TODO:Add support for SOC stats
3434 	 * Bit 0: SOC Stats
3435 	 * Bit 1: Pdev stats for pdev id 0
3436 	 * Bit 2: Pdev stats for pdev id 1
3437 	 * Bit 3: Pdev stats for pdev id 2
3438 	 */
3439 	mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3440 
3441 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
3442 	/*
3443 	 * Set the length of the message.
3444 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3445 	 * separately during the below call to qdf_nbuf_push_head.
3446 	 * The contribution from the HTC header is added separately inside HTC.
3447 	 */
3448 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3449 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3450 				"Failed to expand head for HTT_EXT_STATS");
3451 		qdf_nbuf_free(msg);
3452 		return QDF_STATUS_E_FAILURE;
3453 	}
3454 
3455 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3456 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
3457 		"config_param_1 %u\n config_param_2 %u\n"
3458 		"config_param_4 %u\n -------------",
3459 		__func__, __LINE__, cookie_val, config_param_0,
3460 		config_param_1, config_param_2,	config_param_3);
3461 
3462 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3463 
3464 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3465 	*msg_word = 0;
3466 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
3467 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
3468 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
3469 
3470 	/* word 1 */
3471 	msg_word++;
3472 	*msg_word = 0;
3473 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
3474 
3475 	/* word 2 */
3476 	msg_word++;
3477 	*msg_word = 0;
3478 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
3479 
3480 	/* word 3 */
3481 	msg_word++;
3482 	*msg_word = 0;
3483 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
3484 
3485 	/* word 4 */
3486 	msg_word++;
3487 	*msg_word = 0;
3488 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
3489 
3490 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
3491 
3492 	/* word 5 */
3493 	msg_word++;
3494 
3495 	/* word 6 */
3496 	msg_word++;
3497 	*msg_word = 0;
3498 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
3499 
3500 	/* word 7 */
3501 	msg_word++;
3502 	*msg_word = 0;
3503 	/*Using last 2 bits for pdev_id */
3504 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
3505 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
3506 
3507 	pkt = htt_htc_pkt_alloc(soc);
3508 	if (!pkt) {
3509 		qdf_nbuf_free(msg);
3510 		return QDF_STATUS_E_NOMEM;
3511 	}
3512 
3513 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3514 
3515 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3516 			dp_htt_h2t_send_complete_free_netbuf,
3517 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3518 			soc->htc_endpoint,
3519 			1); /* tag - not relevant here */
3520 
3521 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3522 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3523 	return 0;
3524 }
3525 
3526 /* This macro will revert once proper HTT header will define for
3527  * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in htt.h file
3528  * */
3529 #if defined(WDI_EVENT_ENABLE)
3530 /**
3531  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
3532  * @pdev: DP PDEV handle
3533  * @stats_type_upload_mask: stats type requested by user
3534  * @mac_id: Mac id number
3535  *
3536  * return: QDF STATUS
3537  */
3538 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
3539 		uint32_t stats_type_upload_mask, uint8_t mac_id)
3540 {
3541 	struct htt_soc *soc = pdev->soc->htt_handle;
3542 	struct dp_htt_htc_pkt *pkt;
3543 	qdf_nbuf_t msg;
3544 	uint32_t *msg_word;
3545 	uint8_t pdev_mask;
3546 
3547 	msg = qdf_nbuf_alloc(
3548 			soc->osdev,
3549 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
3550 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
3551 
3552 	if (!msg) {
3553 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3554 		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
3555 		qdf_assert(0);
3556 		return QDF_STATUS_E_NOMEM;
3557 	}
3558 
3559 	/*TODO:Add support for SOC stats
3560 	 * Bit 0: SOC Stats
3561 	 * Bit 1: Pdev stats for pdev id 0
3562 	 * Bit 2: Pdev stats for pdev id 1
3563 	 * Bit 3: Pdev stats for pdev id 2
3564 	 */
3565 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
3566 
3567 	/*
3568 	 * Set the length of the message.
3569 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3570 	 * separately during the below call to qdf_nbuf_push_head.
3571 	 * The contribution from the HTC header is added separately inside HTC.
3572 	 */
3573 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
3574 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3575 				"Failed to expand head for HTT_CFG_STATS");
3576 		qdf_nbuf_free(msg);
3577 		return QDF_STATUS_E_FAILURE;
3578 	}
3579 
3580 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3581 
3582 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3583 	*msg_word = 0;
3584 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
3585 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
3586 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
3587 			stats_type_upload_mask);
3588 
3589 	pkt = htt_htc_pkt_alloc(soc);
3590 	if (!pkt) {
3591 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3592 				"Fail to allocate dp_htt_htc_pkt buffer");
3593 		qdf_assert(0);
3594 		qdf_nbuf_free(msg);
3595 		return QDF_STATUS_E_NOMEM;
3596 	}
3597 
3598 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3599 
3600 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3601 			dp_htt_h2t_send_complete_free_netbuf,
3602 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3603 			soc->htc_endpoint,
3604 			1); /* tag - not relevant here */
3605 
3606 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3607 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3608 	return 0;
3609 }
3610 #endif
3611